repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
fif4evr/tm_python | basic/wordcount.py | 1 | 3239 | #!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Wordcount exercise
Google's Python class
The main() below is already defined and complete. It calls print_words()
and print_top() functions which you write.
1. For the --count flag, implement a print_words(filename) function that counts
how often each word appears in the text and prints:
word1 count1
word2 count2
...
Print the above list in order sorted by word (python will sort punctuation to
come before letters -- that's fine). Store all the words as lowercase,
so 'The' and 'the' count as the same word.
2. For the --topcount flag, implement a print_top(filename) which is similar
to print_words() but which prints just the top 20 most common words sorted
so the most common word is first, then the next most common, and so on.
Use str.split() (no arguments) to split on all whitespace.
Workflow: don't build the whole program at once. Get it to an intermediate
milestone and print your data structure and sys.exit(0).
When that's working, try for the next milestone.
Optional: define a helper function to avoid code duplication inside
print_words() and print_top().
"""
import sys
def print_words(filename):
dict_of_words = helper_function(filename)
for k, v in sorted(dict_of_words.items()): print k, v
# +++your code here+++
# Define print_words(filename) and print_top(filename) functions.
# You could write a helper utility function that reads a file
# and builds and returns a word/count dict for it.
# Then print_words() and print_top() can just call the utility function.
def helper_function(filename):
  dict = {}
  f = open(filename, 'rU')
  # First attempt (kept for reference): checking 'word.lower() in dict.keys()' is very
  # slow because it scans the whole key list for every word.
  # for line in f:
  #   for word in line.split():
  #     if word.lower() in dict.keys():
  #       dict[word.lower()] += 1
  #     else:
  #       dict[word.lower()] = 1
  # Fixed version: test membership on the dict itself, which is a constant-time lookup.
  for word in f.read().split():
    if word.lower() in dict:
      dict[word.lower()] += 1
    else:
      dict[word.lower()] = 1
  f.close()
  return dict
###
def print_top(filename):
  list_of_words = helper_function(filename).items()
  sorted_list = sorted(list_of_words, reverse=True, key=val_ret)
  # Print at most the top 20 words; the length check avoids an IndexError for short files.
  x = 0
  while x < 20 and x < len(sorted_list):
    print sorted_list[x][0], sorted_list[x][1]
    x += 1
def print_total(filename):
dict_of_words = helper_function(filename)
total = len(dict_of_words.keys())
print 'There are %d total distinct words in %s' % (total, filename)
def val_ret(tuple):
return tuple[-1]
# This basic command line argument parsing code is provided and
# calls the print_words() and print_top() functions which you must define.
def main():
if len(sys.argv) != 3:
print 'usage: ./wordcount.py {--count | --topcount | --totalwords} file'
sys.exit(1)
option = sys.argv[1]
filename = sys.argv[2]
if option == '--count':
print_words(filename)
elif option == '--topcount':
print_top(filename)
elif option == '--totalwords':
print_total(filename)
else:
print 'unknown option: ' + option
sys.exit(1)
if __name__ == '__main__':
main()
| apache-2.0 | -4,697,922,564,135,596,000 | 29.556604 | 79 | 0.686323 | false |
fahhem/plumbum | plumbum/cli/termsize.py | 1 | 2789 | """
Terminal size utility
---------------------
"""
from __future__ import division, print_function, absolute_import
import os
import platform
import warnings
from struct import Struct
def get_terminal_size(default=(80, 25)):
"""
Get width and height of console; works on linux, os x, windows and cygwin
Adapted from https://gist.github.com/jtriley/1108174
Originally from: http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python
"""
current_os = platform.system()
if current_os == 'Windows': # pragma: no branch
size = _get_terminal_size_windows()
if not size:
            # needed for Windows' Python in Cygwin's xterm!
size = _get_terminal_size_tput()
elif current_os in ('Linux', 'Darwin', 'FreeBSD', 'SunOS') or current_os.startswith('CYGWIN'):
size = _get_terminal_size_linux()
else:
warnings.warn("Plumbum does not know the type of the current OS for term size, defaulting to UNIX")
size = _get_terminal_size_linux()
if size is None: # we'll assume the standard 80x25 if for any reason we don't know the terminal size
size = default
return size
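# Example usage (illustration only): get_terminal_size() returns a (width, height)
# tuple, falling back to the given default when the size cannot be determined.
#
#   cols, rows = get_terminal_size(default=(80, 25))
#   print("terminal is {0} columns x {1} rows".format(cols, rows))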
def _get_terminal_size_windows(): # pragma: no cover
try:
from ctypes import windll, create_string_buffer
STDERR_HANDLE = -12
h = windll.kernel32.GetStdHandle(STDERR_HANDLE)
csbi_struct = Struct("hhhhHhhhhhh")
csbi = create_string_buffer(csbi_struct.size)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
_, _, _, _, _, left, top, right, bottom, _, _ = csbi_struct.unpack(csbi.raw)
return right - left + 1, bottom - top + 1
return None
except Exception:
return None
def _get_terminal_size_tput():
# get terminal width
# src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
try:
from plumbum.cmd import tput
cols = int(tput('cols'))
rows = int(tput('lines'))
return (cols, rows)
except Exception:
return None
def _ioctl_GWINSZ(fd):
yx = Struct("hh")
try:
import fcntl
import termios
return yx.unpack(fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
except Exception:
return None
def _get_terminal_size_linux():
cr = _ioctl_GWINSZ(0) or _ioctl_GWINSZ(1) or _ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = _ioctl_GWINSZ(fd)
os.close(fd)
except Exception:
pass
if not cr:
try:
cr = (int(os.environ['LINES']), int(os.environ['COLUMNS']))
except Exception:
return None
return cr[1], cr[0]
| mit | 3,767,219,890,869,019,600 | 31.811765 | 107 | 0.610255 | false |
dc3-plaso/plaso | tests/output/interface.py | 1 | 3099 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the output module interface."""
import unittest
from plaso.output import manager
from tests.cli import test_lib as cli_test_lib
from tests.output import test_lib
class TestEvent(object):
"""Test event object."""
def __init__(self, timestamp, entry):
"""Initializes an event object."""
super(TestEvent, self).__init__()
self.date = u'03/01/2012'
try:
self.timestamp = int(timestamp)
except ValueError:
self.timestamp = 0
self.entry = entry
def EqualityString(self):
"""Returns a string describing the event object in terms of object equality.
Returns:
A string representation of the event object that can be used for equality
comparison.
"""
return u';'.join(map(str, [self.timestamp, self.entry]))
class LinearOutputModuleTest(test_lib.OutputModuleTestCase):
"""Tests the linear output module."""
def testOutput(self):
"""Tests an implementation of output module."""
events = [
TestEvent(123456, u'My Event Is Now!'),
TestEvent(123458, u'There is no tomorrow.'),
TestEvent(123462, u'Tomorrow is now.'),
TestEvent(123489, u'This is just some stuff to fill the line.')]
output_mediator = self._CreateOutputMediator()
output_writer = cli_test_lib.TestOutputWriter()
output_module = test_lib.TestOutputModule(output_mediator)
output_module.SetOutputWriter(output_writer)
output_module.WriteHeader()
for event_object in events:
output_module.WriteEvent(event_object)
output_module.WriteFooter()
expected_output = (
b'<EventFile>\n'
b'<Event>\n'
b'\t<Date>03/01/2012</Date>\n'
b'\t<Time>123456</Time>\n'
b'\t<Entry>My Event Is Now!</Entry>\n'
b'</Event>\n'
b'<Event>\n'
b'\t<Date>03/01/2012</Date>\n'
b'\t<Time>123458</Time>\n'
b'\t<Entry>There is no tomorrow.</Entry>\n'
b'</Event>\n'
b'<Event>\n'
b'\t<Date>03/01/2012</Date>\n'
b'\t<Time>123462</Time>\n'
b'\t<Entry>Tomorrow is now.</Entry>\n'
b'</Event>\n'
b'<Event>\n'
b'\t<Date>03/01/2012</Date>\n'
b'\t<Time>123489</Time>\n'
b'\t<Entry>This is just some stuff to fill the line.</Entry>\n'
b'</Event>\n'
b'</EventFile>\n')
output = output_writer.ReadOutput()
self.assertEqual(output, expected_output)
def testOutputList(self):
"""Test listing up all available registered modules."""
manager.OutputManager.RegisterOutput(test_lib.TestOutputModule)
test_output_class = None
for name, output_class in manager.OutputManager.GetOutputClasses():
if name == u'test_xml':
test_output_class = output_class
expected_description = u'Test output that provides a simple mocked XML.'
self.assertIsNotNone(test_output_class)
self.assertEqual(test_output_class.DESCRIPTION, expected_description)
manager.OutputManager.DeregisterOutput(test_lib.TestOutputModule)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 6,403,706,449,500,411,000 | 29.99 | 80 | 0.64182 | false |
ZeitOnline/zeit.content.article | src/zeit/content/article/edit/tests/test_body.py | 1 | 8835 | import gocept.testing.mock
import lxml.etree
import mock
import unittest
import zeit.content.article.testing
import zope.schema
class EditableBodyTest(zeit.content.article.testing.FunctionalTestCase):
def setUp(self):
super(EditableBodyTest, self).setUp()
self.patches = gocept.testing.mock.Patches()
fake_uuid = mock.Mock()
fake_uuid.side_effect = lambda: 'id-%s' % fake_uuid.call_count
self.patches.add(
'zeit.edit.container.Base._generate_block_id', fake_uuid)
def tearDown(self):
self.patches.reset()
super(EditableBodyTest, self).tearDown()
def get_body(self, body=None):
import lxml.objectify
import zeit.content.article.article
import zeit.content.article.edit.body
if not body:
body = ("<division><p>Para1</p><p/></division>"
"<division><p>Para2</p><p/></division>")
article = zeit.content.article.article.Article()
article.xml.body = lxml.objectify.XML(
'<body>%s</body>' % body)
for division in article.xml.body.findall('division'):
division.set('type', 'page')
return zeit.content.article.edit.body.EditableBody(
article, article.xml.body)
def test_keys_contain_division_contents(self):
body = self.get_body()
# The first division is omitted, thus only 5 keys
self.assertEqual(5, len(body.keys()))
# Starts at 2 as the first <division> is skipped but still gets a key
self.assertEqual(['id-2', 'id-3', 'id-4', 'id-5', 'id-6'], body.keys())
def test_deleting_division_should_merge_contained_paragraphs(self):
body = self.get_body()
# Note: calling for the first time keys() actually makes the keys
# available.
self.assertEqual(['id-2', 'id-3', 'id-4', 'id-5', 'id-6'], body.keys())
del body['id-4']
self.assertEqual(['id-2', 'id-3', 'id-5', 'id-6'], body.keys())
def test_add_should_add_to_last_division(self):
import lxml.objectify
body = self.get_body('<division/>')
block = mock.Mock()
block.xml = lxml.objectify.E.mockblock()
block.__name__ = 'myblock'
block.__parent__ = None
block.xml.set('{http://namespaces.zeit.de/CMS/cp}__name__', 'myblock')
body.add(block)
self.assertEqual(['myblock'], body.keys())
def test_update_order_should_put_object_into_right_division(self):
body = self.get_body()
self.assertEqual(['id-2', 'id-3', 'id-4', 'id-5', 'id-6'], body.keys())
body.updateOrder(['id-2', 'id-3', 'id-5', 'id-4', 'id-6'])
self.assertEqual(['id-2', 'id-3', 'id-5', 'id-4', 'id-6'], body.keys())
body.updateOrder(['id-2', 'id-4', 'id-5', 'id-3', 'id-6'])
self.assertEqual(['id-2', 'id-4', 'id-5', 'id-3', 'id-6'], body.keys())
del body['id-2']
body.updateOrder(['id-4', 'id-3', 'id-5', 'id-6'])
self.assertEqual(['id-4', 'id-3', 'id-5', 'id-6'], body.keys())
def test_articles_without_division_should_be_migrated(self):
body = self.get_body(
'<foo>Honk</foo><p>I have no division</p><p>Only paras</p>')
self.assertEqual(['id-2', 'id-3'], body.keys())
self.assertEqual(
['foo', 'division'],
[child.tag for child in body.xml.iterchildren()])
self.assertEqual(
['p', 'p'],
[child.tag for child in body.xml.division.iterchildren()])
self.assertEqual(
[u'I have no division', u'Only paras'],
[unicode(child) for child in body.xml.division.iterchildren()])
def test_adding_to_articles_without_division_should_migrate(self):
import lxml.objectify
body = self.get_body(
'<foo>Honk</foo><p>I have no division</p><p>Only paras</p>')
ob = mock.Mock()
ob.__name__ = None
ob.__parent__ = None
ob.xml = lxml.objectify.E.ob()
body.add(ob)
# XXX assertion?!
def test_nested_elements_should_be_ignored(self):
body = self.get_body(
'<division><p>I have <p>another para</p> in me</p></division>')
self.assertEqual([u'id-2'], body.keys())
def test_adding_division_should_add_on_toplevel(self):
from zeit.content.article.edit.interfaces import IDivision
import lxml.objectify
body = self.get_body('<division/>')
block = mock.Mock()
zope.interface.alsoProvides(block, IDivision)
block.xml = lxml.objectify.E.division()
block.__name__ = 'myblock'
block.__parent__ = None
block.xml.set('{http://namespaces.zeit.de/CMS/cp}__name__', 'myblock')
body.add(block)
self.assertEqual(2, len(body.xml.getchildren()))
def test_values_does_not_set_block_ids(self):
body = self.get_body()
def find_id_attributes():
return body.xml.xpath(
'//*[@ns:__name__]',
namespaces={'ns': 'http://namespaces.zeit.de/CMS/cp'})
self.assertFalse(find_id_attributes())
body.values()
self.assertFalse(find_id_attributes())
body.keys()
self.assertTrue(find_id_attributes())
def test_values_returns_same_blocks_as_keys(self):
body = self.get_body()
self.assertEqual(
[x.xml for x in body.values()],
[body[x].xml for x in body.keys()])
class TestCleaner(unittest.TestCase):
def get_article(self):
from zeit.content.article.article import Article
return Article()
def assert_key(self, node, expected):
have = node.get('{http://namespaces.zeit.de/CMS/cp}__name__')
if expected is None:
self.assertIsNone(have)
else:
self.assertEqual(expected, have)
def set_key(self, node, key):
node.set('{http://namespaces.zeit.de/CMS/cp}__name__', key)
def clean(self, obj):
from zeit.content.article.edit.body import remove_name_attributes
remove_name_attributes(obj, mock.sentinel.event)
def test_should_remove_name_attributes(self):
art = self.get_article()
art.xml.body.division = ''
self.set_key(art.xml.body.division, 'divname')
self.clean(art)
self.assert_key(art.xml.body.division, None)
def test_should_remove_namespace(self):
art = self.get_article()
art.xml.body.division = ''
self.set_key(art.xml.body.division, 'divname')
self.clean(art)
self.assertNotIn(
'namespaces.zeit.de/CMS/cp', lxml.etree.tostring(art.xml))
class ArticleValidatorTest(zeit.content.article.testing.FunctionalTestCase):
def test_children_should_return_elements(self):
import lxml.objectify
import zeit.content.article.article
import zeit.content.article.edit.body
import zeit.edit.interfaces
body = '<division type="page"><p>Para1</p><p>Para2</p></division>'
article = zeit.content.article.article.Article()
article.xml.body = lxml.objectify.XML('<body>%s</body>' % body)
body = zeit.content.article.edit.body.EditableBody(
article, article.xml.body)
validator = zeit.edit.interfaces.IValidator(article)
self.assertEqual(
[x.__name__ for x in body.values()],
[x.__name__ for x in validator.children])
class CheckinTest(zeit.content.article.testing.FunctionalTestCase):
def test_validation_errors_should_veto_checkin(self):
from zeit.cms.checkout.interfaces import ICheckinManager
from zeit.cms.checkout.interfaces import ICheckoutManager
import zeit.content.article.article
self.repository['article'] = zeit.content.article.article.Article()
manager = ICheckoutManager(self.repository['article'])
co = manager.checkout()
manager = ICheckinManager(co)
self.assertFalse(manager.canCheckin)
errors = dict(manager.last_validation_error)
self.assertIsInstance(
errors['title'], zope.schema.ValidationError)
def test_security_proxied_fields_should_be_validated_correctly(self):
from zeit.cms.checkout.interfaces import ICheckinManager
from zeit.cms.checkout.interfaces import ICheckoutManager
import zeit.content.article.article
self.repository['article'] = zeit.content.article.article.Article()
manager = ICheckoutManager(self.repository['article'])
co = manager.checkout()
co = zope.security.proxy.ProxyFactory(co)
manager = ICheckinManager(co)
self.assertFalse(manager.canCheckin)
errors = dict(manager.last_validation_error)
# the default for keywords is an empty tuple
self.assertNotIn('keywords', errors)
| bsd-3-clause | -2,541,782,704,224,213,000 | 38.266667 | 79 | 0.609734 | false |
donalm/thrum | docs/conf.py | 1 | 8373 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# thrum documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import thrum
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Thrum'
copyright = u"2017, Dónal McMullan"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = thrum.__version__
# The full version, including alpha/beta/rc tags.
release = thrum.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'thrumdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'thrum.tex',
u'Thrum Documentation',
u'Dónal McMullan', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'thrum',
u'Thrum Documentation',
[u'Dónal McMullan'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'thrum',
u'Thrum Documentation',
u'Dónal McMullan',
'thrum',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit | -5,124,257,481,176,899,000 | 29.432727 | 76 | 0.703429 | false |
nirvn/QGIS | python/plugins/processing/modeler/ModelerParameterDefinitionDialog.py | 1 | 24066 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ModelerParameterDefinitionDialog.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import math
from qgis.gui import QgsExpressionLineEdit, QgsProjectionSelectionWidget
from qgis.core import (QgsSettings,
QgsProcessing,
QgsCoordinateReferenceSystem,
QgsProcessingParameterDefinition,
QgsProcessingParameterBoolean,
QgsProcessingParameterCrs,
QgsProcessingParameterMapLayer,
QgsProcessingParameterExtent,
QgsProcessingParameterPoint,
QgsProcessingParameterFile,
QgsProcessingParameterMatrix,
QgsProcessingParameterMultipleLayers,
QgsProcessingParameterNumber,
QgsProcessingParameterRange,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterEnum,
QgsProcessingParameterString,
QgsProcessingParameterExpression,
QgsProcessingParameterVectorLayer,
QgsProcessingParameterField,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterBand)
from qgis.PyQt.QtCore import (Qt,
QByteArray)
from qgis.PyQt.QtWidgets import (QDialog,
QVBoxLayout,
QLabel,
QLineEdit,
QComboBox,
QCheckBox,
QDialogButtonBox,
QMessageBox)
class ModelerParameterDefinitionDialog(QDialog):
PARAMETER_NUMBER = 'Number'
PARAMETER_RASTER = 'Raster Layer'
PARAMETER_TABLE = 'Vector Layer'
PARAMETER_VECTOR = 'Feature Source'
PARAMETER_STRING = 'String'
PARAMETER_EXPRESSION = 'Expression'
PARAMETER_BOOLEAN = 'Boolean'
PARAMETER_TABLE_FIELD = 'Layer Field'
PARAMETER_EXTENT = 'Extent'
PARAMETER_FILE = 'File'
PARAMETER_POINT = 'Point'
PARAMETER_CRS = 'CRS'
PARAMETER_MULTIPLE = 'Multiple Input'
PARAMETER_BAND = 'Raster Band'
PARAMETER_MAP_LAYER = 'Map Layer'
paramTypes = [
PARAMETER_BOOLEAN,
PARAMETER_EXTENT,
PARAMETER_FILE,
PARAMETER_NUMBER,
PARAMETER_RASTER,
PARAMETER_STRING,
PARAMETER_EXPRESSION,
PARAMETER_MAP_LAYER,
PARAMETER_TABLE,
PARAMETER_TABLE_FIELD,
PARAMETER_VECTOR,
PARAMETER_POINT,
PARAMETER_CRS,
PARAMETER_MULTIPLE,
PARAMETER_BAND
]
def __init__(self, alg, paramType=None, param=None):
self.alg = alg
self.paramType = paramType
self.param = param
QDialog.__init__(self)
self.setModal(True)
self.setupUi()
settings = QgsSettings()
self.restoreGeometry(settings.value("/Processing/modelParametersDefinitionDialogGeometry", QByteArray()))
def closeEvent(self, event):
settings = QgsSettings()
settings.setValue("/Processing/modelParametersDefinitionDialogGeometry", self.saveGeometry())
super(ModelerParameterDefinitionDialog, self).closeEvent(event)
def setupUi(self):
self.setWindowTitle(self.tr('Parameter Definition'))
self.setMinimumWidth(300)
self.verticalLayout = QVBoxLayout(self)
self.verticalLayout.setMargin(20)
self.label = QLabel(self.tr('Parameter name'))
self.verticalLayout.addWidget(self.label)
self.nameTextBox = QLineEdit()
self.verticalLayout.addWidget(self.nameTextBox)
if isinstance(self.param, QgsProcessingParameterDefinition):
self.nameTextBox.setText(self.param.description())
if self.paramType == ModelerParameterDefinitionDialog.PARAMETER_BOOLEAN or \
isinstance(self.param, QgsProcessingParameterBoolean):
self.state = QCheckBox()
self.state.setText(self.tr('Checked'))
self.state.setChecked(False)
if self.param is not None:
self.state.setChecked(bool(self.param.defaultValue()))
self.verticalLayout.addWidget(self.state)
elif self.paramType == ModelerParameterDefinitionDialog.PARAMETER_TABLE_FIELD or \
isinstance(self.param, QgsProcessingParameterField):
self.verticalLayout.addWidget(QLabel(self.tr('Parent layer')))
self.parentCombo = QComboBox()
idx = 0
for param in list(self.alg.parameterComponents().values()):
definition = self.alg.parameterDefinition(param.parameterName())
if isinstance(definition, (QgsProcessingParameterFeatureSource, QgsProcessingParameterVectorLayer)):
self.parentCombo.addItem(definition.description(), definition.name())
if self.param is not None:
if self.param.parentLayerParameterName() == definition.name():
self.parentCombo.setCurrentIndex(idx)
idx += 1
self.verticalLayout.addWidget(self.parentCombo)
# add the datatype selector
self.verticalLayout.addWidget(QLabel(self.tr('Allowed data type')))
self.datatypeCombo = QComboBox()
self.datatypeCombo.addItem(self.tr('Any'), -1)
self.datatypeCombo.addItem(self.tr('Number'), 0)
self.datatypeCombo.addItem(self.tr('String'), 1)
self.datatypeCombo.addItem(self.tr('Date/time'), 2)
self.verticalLayout.addWidget(self.datatypeCombo)
if self.param is not None and self.param.dataType() is not None:
                # QComboBox indexes start at 0, while self.param.dataType() starts at -1,
                # hence the +1 offset.
datatypeIndex = self.param.dataType() + 1
self.datatypeCombo.setCurrentIndex(datatypeIndex)
self.multipleCheck = QCheckBox()
self.multipleCheck.setText(self.tr('Accept multiple fields'))
self.multipleCheck.setChecked(False)
if self.param is not None:
self.multipleCheck.setChecked(self.param.allowMultiple())
self.verticalLayout.addWidget(self.multipleCheck)
self.verticalLayout.addWidget(QLabel(self.tr('Default value')))
self.defaultTextBox = QLineEdit()
self.defaultTextBox.setToolTip(
self.tr('Default field name, or ; separated list of field names for multiple field parameters'))
if self.param is not None:
default = self.param.defaultValue()
if default is not None:
self.defaultTextBox.setText(str(default))
self.verticalLayout.addWidget(self.defaultTextBox)
elif self.paramType == ModelerParameterDefinitionDialog.PARAMETER_BAND or \
isinstance(self.param, QgsProcessingParameterBand):
self.verticalLayout.addWidget(QLabel(self.tr('Parent layer')))
self.parentCombo = QComboBox()
idx = 0
for param in list(self.alg.parameterComponents().values()):
definition = self.alg.parameterDefinition(param.parameterName())
if isinstance(definition, (QgsProcessingParameterRasterLayer)):
self.parentCombo.addItem(definition.description(), definition.name())
if self.param is not None:
if self.param.parentLayerParameterName() == definition.name():
self.parentCombo.setCurrentIndex(idx)
idx += 1
self.verticalLayout.addWidget(self.parentCombo)
elif (self.paramType in (
ModelerParameterDefinitionDialog.PARAMETER_VECTOR, ModelerParameterDefinitionDialog.PARAMETER_TABLE) or
isinstance(self.param, (QgsProcessingParameterFeatureSource, QgsProcessingParameterVectorLayer))):
self.verticalLayout.addWidget(QLabel(self.tr('Geometry type')))
self.shapetypeCombo = QComboBox()
self.shapetypeCombo.addItem(self.tr('Geometry Not Required'), QgsProcessing.TypeVector)
self.shapetypeCombo.addItem(self.tr('Point'), QgsProcessing.TypeVectorPoint)
self.shapetypeCombo.addItem(self.tr('Line'), QgsProcessing.TypeVectorLine)
self.shapetypeCombo.addItem(self.tr('Polygon'), QgsProcessing.TypeVectorPolygon)
self.shapetypeCombo.addItem(self.tr('Any Geometry Type'), QgsProcessing.TypeVectorAnyGeometry)
if self.param is not None:
self.shapetypeCombo.setCurrentIndex(self.shapetypeCombo.findData(self.param.dataTypes()[0]))
self.verticalLayout.addWidget(self.shapetypeCombo)
elif (self.paramType == ModelerParameterDefinitionDialog.PARAMETER_MULTIPLE or
isinstance(self.param, QgsProcessingParameterMultipleLayers)):
self.verticalLayout.addWidget(QLabel(self.tr('Data type')))
self.datatypeCombo = QComboBox()
self.datatypeCombo.addItem(self.tr('Any Map Layer'), QgsProcessing.TypeMapLayer)
self.datatypeCombo.addItem(self.tr('Vector (No Geometry Required)'), QgsProcessing.TypeVector)
self.datatypeCombo.addItem(self.tr('Vector (Point)'), QgsProcessing.TypeVectorPoint)
self.datatypeCombo.addItem(self.tr('Vector (Line)'), QgsProcessing.TypeVectorLine)
self.datatypeCombo.addItem(self.tr('Vector (Polygon)'), QgsProcessing.TypeVectorPolygon)
self.datatypeCombo.addItem(self.tr('Vector (Any Geometry Type)'), QgsProcessing.TypeVectorAnyGeometry)
self.datatypeCombo.addItem(self.tr('Raster'), QgsProcessing.TypeRaster)
self.datatypeCombo.addItem(self.tr('File'), QgsProcessing.TypeFile)
if self.param is not None:
self.datatypeCombo.setCurrentIndex(self.datatypeCombo.findData(self.param.layerType()))
self.verticalLayout.addWidget(self.datatypeCombo)
elif (self.paramType == ModelerParameterDefinitionDialog.PARAMETER_NUMBER or
isinstance(self.param, QgsProcessingParameterNumber)):
self.verticalLayout.addWidget(QLabel(self.tr('Min value')))
self.minTextBox = QLineEdit()
self.verticalLayout.addWidget(self.minTextBox)
self.verticalLayout.addWidget(QLabel(self.tr('Max value')))
self.maxTextBox = QLineEdit()
self.verticalLayout.addWidget(self.maxTextBox)
if self.param is not None:
self.minTextBox.setText(str(self.param.minimum()))
self.maxTextBox.setText(str(self.param.maximum()))
self.verticalLayout.addWidget(QLabel(self.tr('Default value')))
self.defaultTextBox = QLineEdit()
self.defaultTextBox.setText(self.tr('0'))
if self.param is not None:
default = self.param.defaultValue()
if self.param.dataType() == QgsProcessingParameterNumber.Integer:
default = int(math.floor(default))
if default:
self.defaultTextBox.setText(str(default))
self.verticalLayout.addWidget(self.defaultTextBox)
elif (self.paramType == ModelerParameterDefinitionDialog.PARAMETER_EXPRESSION or
isinstance(self.param, QgsProcessingParameterExpression)):
self.verticalLayout.addWidget(QLabel(self.tr('Default value')))
self.defaultEdit = QgsExpressionLineEdit()
if self.param is not None:
self.defaultEdit.setExpression(self.param.defaultValue())
self.verticalLayout.addWidget(self.defaultEdit)
self.verticalLayout.addWidget(QLabel(self.tr('Parent layer')))
self.parentCombo = QComboBox()
self.parentCombo.addItem(self.tr("None"), None)
idx = 1
for param in list(self.alg.parameterComponents().values()):
definition = self.alg.parameterDefinition(param.parameterName())
if isinstance(definition, (QgsProcessingParameterFeatureSource, QgsProcessingParameterVectorLayer)):
self.parentCombo.addItem(definition.description(), definition.name())
if self.param is not None:
if self.param.parentLayerParameterName() == definition.name():
self.parentCombo.setCurrentIndex(idx)
idx += 1
self.verticalLayout.addWidget(self.parentCombo)
elif (self.paramType == ModelerParameterDefinitionDialog.PARAMETER_STRING or
isinstance(self.param, QgsProcessingParameterString)):
self.verticalLayout.addWidget(QLabel(self.tr('Default value')))
self.defaultTextBox = QLineEdit()
if self.param is not None:
self.defaultTextBox.setText(self.param.defaultValue())
self.verticalLayout.addWidget(self.defaultTextBox)
elif (self.paramType == ModelerParameterDefinitionDialog.PARAMETER_FILE or
isinstance(self.param, QgsProcessingParameterFile)):
self.verticalLayout.addWidget(QLabel(self.tr('Type')))
self.fileFolderCombo = QComboBox()
self.fileFolderCombo.addItem(self.tr('File'))
self.fileFolderCombo.addItem(self.tr('Folder'))
if self.param is not None:
self.fileFolderCombo.setCurrentIndex(
1 if self.param.behavior() == QgsProcessingParameterFile.Folder else 0)
self.verticalLayout.addWidget(self.fileFolderCombo)
elif (self.paramType == ModelerParameterDefinitionDialog.PARAMETER_POINT or
isinstance(self.param, QgsProcessingParameterPoint)):
self.verticalLayout.addWidget(QLabel(self.tr('Default value')))
self.defaultTextBox = QLineEdit()
if self.param is not None:
self.defaultTextBox.setText(self.param.defaultValue())
self.verticalLayout.addWidget(self.defaultTextBox)
elif (self.paramType == ModelerParameterDefinitionDialog.PARAMETER_CRS or
isinstance(self.param, QgsProcessingParameterCrs)):
self.verticalLayout.addWidget(QLabel(self.tr('Default value')))
self.selector = QgsProjectionSelectionWidget()
if self.param is not None:
self.selector.setCrs(QgsCoordinateReferenceSystem(self.param.defaultValue()))
else:
self.selector.setCrs(QgsCoordinateReferenceSystem('EPSG:4326'))
self.verticalLayout.addWidget(self.selector)
self.verticalLayout.addSpacing(20)
self.requiredCheck = QCheckBox()
self.requiredCheck.setText(self.tr('Mandatory'))
self.requiredCheck.setChecked(True)
if self.param is not None:
self.requiredCheck.setChecked(not self.param.flags() & QgsProcessingParameterDefinition.FlagOptional)
self.verticalLayout.addWidget(self.requiredCheck)
self.buttonBox = QDialogButtonBox(self)
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(QDialogButtonBox.Cancel |
QDialogButtonBox.Ok)
self.buttonBox.setObjectName('buttonBox')
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
self.verticalLayout.addStretch()
self.verticalLayout.addWidget(self.buttonBox)
self.setLayout(self.verticalLayout)
def accept(self):
description = str(self.nameTextBox.text())
if description.strip() == '':
QMessageBox.warning(self, self.tr('Unable to define parameter'),
self.tr('Invalid parameter name'))
return
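        # Derive a unique parameter name from the description: keep only ASCII letters
        # and digits, lowercase the result, and append 2, 3, ... until it no longer
        # clashes with an existing parameter of the model.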
if self.param is None:
validChars = \
'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
safeName = ''.join(c for c in description if c in validChars)
name = safeName.lower()
i = 2
while self.alg.parameterDefinition(name):
name = safeName.lower() + str(i)
i += 1
else:
name = self.param.name()
if (self.paramType == ModelerParameterDefinitionDialog.PARAMETER_BOOLEAN or
isinstance(self.param, QgsProcessingParameterBoolean)):
self.param = QgsProcessingParameterBoolean(name, description, self.state.isChecked())
elif (self.paramType == ModelerParameterDefinitionDialog.PARAMETER_TABLE_FIELD or
isinstance(self.param, QgsProcessingParameterField)):
if self.parentCombo.currentIndex() < 0:
QMessageBox.warning(self, self.tr('Unable to define parameter'),
self.tr('Wrong or missing parameter values'))
return
parent = self.parentCombo.currentData()
datatype = self.datatypeCombo.currentData()
default = self.defaultTextBox.text()
if not default:
default = None
self.param = QgsProcessingParameterField(name, description, defaultValue=default,
parentLayerParameterName=parent, type=datatype,
allowMultiple=self.multipleCheck.isChecked())
elif (self.paramType == ModelerParameterDefinitionDialog.PARAMETER_BAND or
isinstance(self.param, QgsProcessingParameterBand)):
if self.parentCombo.currentIndex() < 0:
QMessageBox.warning(self, self.tr('Unable to define parameter'),
self.tr('Wrong or missing parameter values'))
return
parent = self.parentCombo.currentData()
self.param = QgsProcessingParameterBand(name, description, None, parent)
elif (self.paramType == ModelerParameterDefinitionDialog.PARAMETER_MAP_LAYER or
isinstance(self.param, QgsProcessingParameterMapLayer)):
self.param = QgsProcessingParameterMapLayer(
name, description)
elif (self.paramType == ModelerParameterDefinitionDialog.PARAMETER_RASTER or
isinstance(self.param, QgsProcessingParameterRasterLayer)):
self.param = QgsProcessingParameterRasterLayer(
name, description)
elif (self.paramType == ModelerParameterDefinitionDialog.PARAMETER_TABLE or
isinstance(self.param, QgsProcessingParameterVectorLayer)):
self.param = QgsProcessingParameterVectorLayer(
name, description,
[self.shapetypeCombo.currentData()])
elif (self.paramType == ModelerParameterDefinitionDialog.PARAMETER_VECTOR or
isinstance(self.param, QgsProcessingParameterFeatureSource)):
self.param = QgsProcessingParameterFeatureSource(
name, description,
[self.shapetypeCombo.currentData()])
elif (self.paramType == ModelerParameterDefinitionDialog.PARAMETER_MULTIPLE or
isinstance(self.param, QgsProcessingParameterMultipleLayers)):
self.param = QgsProcessingParameterMultipleLayers(
name, description,
self.datatypeCombo.currentData())
elif (self.paramType == ModelerParameterDefinitionDialog.PARAMETER_NUMBER or
isinstance(self.param, QgsProcessingParameterNumber)):
try:
self.param = QgsProcessingParameterNumber(name, description, QgsProcessingParameterNumber.Double,
self.defaultTextBox.text())
vmin = self.minTextBox.text().strip()
if not vmin == '':
self.param.setMinimum(float(vmin))
vmax = self.maxTextBox.text().strip()
if not vmax == '':
self.param.setMaximum(float(vmax))
except:
QMessageBox.warning(self, self.tr('Unable to define parameter'),
self.tr('Wrong or missing parameter values'))
return
elif (self.paramType == ModelerParameterDefinitionDialog.PARAMETER_EXPRESSION or
isinstance(self.param, QgsProcessingParameterExpression)):
parent = self.parentCombo.currentData()
self.param = QgsProcessingParameterExpression(name, description,
str(self.defaultEdit.expression()),
parent)
elif (self.paramType == ModelerParameterDefinitionDialog.PARAMETER_STRING or
isinstance(self.param, QgsProcessingParameterString)):
self.param = QgsProcessingParameterString(name, description,
str(self.defaultTextBox.text()))
elif (self.paramType == ModelerParameterDefinitionDialog.PARAMETER_EXTENT or
isinstance(self.param, QgsProcessingParameterExtent)):
self.param = QgsProcessingParameterExtent(name, description)
elif (self.paramType == ModelerParameterDefinitionDialog.PARAMETER_FILE or
isinstance(self.param, QgsProcessingParameterFile)):
isFolder = self.fileFolderCombo.currentIndex() == 1
self.param = QgsProcessingParameterFile(name, description,
QgsProcessingParameterFile.Folder if isFolder else QgsProcessingParameterFile.File)
elif (self.paramType == ModelerParameterDefinitionDialog.PARAMETER_POINT or
isinstance(self.param, QgsProcessingParameterPoint)):
self.param = QgsProcessingParameterPoint(name, description,
str(self.defaultTextBox.text()))
elif (self.paramType == ModelerParameterDefinitionDialog.PARAMETER_CRS or
isinstance(self.param, QgsProcessingParameterCrs)):
self.param = QgsProcessingParameterCrs(name, description, self.selector.crs().authid())
if not self.requiredCheck.isChecked():
self.param.setFlags(self.param.flags() | QgsProcessingParameterDefinition.FlagOptional)
settings = QgsSettings()
settings.setValue("/Processing/modelParametersDefinitionDialogGeometry", self.saveGeometry())
QDialog.accept(self)
def reject(self):
self.param = None
settings = QgsSettings()
settings.setValue("/Processing/modelParametersDefinitionDialogGeometry", self.saveGeometry())
QDialog.reject(self)
| gpl-2.0 | -6,669,394,610,413,727,000 | 53.202703 | 135 | 0.617136 | false |
AutorestCI/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/models/virtual_machine_scale_set_public_ip_address_configuration.py | 1 | 1763 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualMachineScaleSetPublicIPAddressConfiguration(Model):
"""Describes a virtual machines scale set IP Configuration's PublicIPAddress
configuration.
:param name: The publicIP address configuration name.
:type name: str
:param idle_timeout_in_minutes: The idle timeout of the public IP address.
:type idle_timeout_in_minutes: int
    :param dns_settings: The dns settings to be applied on the publicIP
     addresses.
:type dns_settings:
~azure.mgmt.compute.v2017_03_30.models.VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'idle_timeout_in_minutes': {'key': 'properties.idleTimeoutInMinutes', 'type': 'int'},
'dns_settings': {'key': 'properties.dnsSettings', 'type': 'VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings'},
}
def __init__(self, name, idle_timeout_in_minutes=None, dns_settings=None):
super(VirtualMachineScaleSetPublicIPAddressConfiguration, self).__init__()
self.name = name
self.idle_timeout_in_minutes = idle_timeout_in_minutes
self.dns_settings = dns_settings
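# Example (illustration only; the values are hypothetical):
#   ip_config = VirtualMachineScaleSetPublicIPAddressConfiguration(
#       name='pip-config', idle_timeout_in_minutes=10)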
| mit | -8,317,720,466,806,835,000 | 40 | 131 | 0.648327 | false |
PaulSD/cgroupsd | lib/proc_events.py | 1 | 18741 | #!/usr/bin/env python
#
# Copyright 2015 Paul Donohue <[email protected]>
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU
# General Public License as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this program. If
# not, see <http://www.gnu.org/licenses/>.
#
#
# Linux Process Events Module
#
# This module listens to the Linux Kernel's Process Events Connector and calls configured event
# handler functions as events are generated.
#
# Users of this module must be careful to filter events generated by threads or processes spawned
# by this process to avoid infinite loops. When possible, it is best to simply avoid implementing
# any behavior in an event handler that might spawn a thread or process or otherwise generate an
# event.
#
# See the comments in libnl.py for prerequisites.
#
# Basic Usage:
# import proc_events
# def exec_handler(event, pid, tid):
# print('PID {0} started'.format(pid))
# proc_events.handlers['exec'] += exec_handler
# def any_handler(event, **data):
# print('Got {0} event'.format(event))
# for event in proc_events.handlers.iterkeys(): proc_events.handlers[event] += any_handler
#
# Supported Events:
# handlers['fork'] -> callback(event='fork', pid, tid, parent_pid, parent_tid)
# Process has been created via fork()
# handlers['exec'] -> callback(event='exec', pid, tid)
# Process has been replaced via exec()
# handlers['uid'] -> callback(event='uid', pid, tid, real_uid, effective_uid)
# Process UID changed (Arguments are the new real/effective UID)
# handlers['gid'] -> callback(event='gid', pid, tid, real_gid, effective_gid)
# Process GID changed (Arguments are the new real/effective GID)
# handlers['sid'] -> callback(event='sid', pid, tid)
# Process has become a session leader (See http://lwn.net/Articles/337708/ )
# handlers['ptrace'] -> callback(event='ptrace', pid, tid, tracer_pid, tracer_tid)
# ptrace() has attached to process
# handlers['comm'] -> callback(event='comm', pid, tid, command)
# Process command name has changed (See https://lkml.org/lkml/2011/8/2/276 )
# handlers['coredump'] -> callback(event='coredump', pid, tid)
# Process has dumped a core file
# handlers['exit'] -> callback(event='exit', pid, tid, exit_status, exit_signal)
# Process has exited
# handlers['events_failed'] -> callback(event='events_failed')
# The ability to receive events has been lost
# handlers['events_good'] -> callback(event='events_good')
# The ability to receive events has been established or restored (Events may have been lost)
# handlers['events_lost'] -> callback(event='events_lost')
# The kernel reported that one or more events have been lost
#
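#
# Example of the loop-avoidance advice above (illustration only; the handler and
# filtering rule here are hypothetical and are not registered by this module):
#   import os
#   import proc_events
#   _my_pid = os.getpid()
#   def fork_handler(event, pid, tid, parent_pid, parent_tid):
#     if _my_pid in (pid, parent_pid):
#       return  # ignore forks caused by this process itself
#     print('PID {0} forked PID {1}'.format(parent_pid, pid))
#   proc_events.handlers['fork'] += fork_handler
#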
import os
if os.geteuid() != 0:
# Non-root users can connect to NETLINK_CONNECTOR, but only root users can subscribe to the
# CN_IDX_PROC multicast group.
raise RuntimeError('The proc_events module requires this program to be run as root')
import logging # Log levels: debug info warning error/exception critical
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
__logger = logging.getLogger(__name__)
from libnl import libnl, libnl_ffi, libnl_check
import select
import errno
# If you get buffer overruns (-NLE_NOMEM errors from nl_recvmsgs()), you may want to increase this
pe_nl_rx_buffer_size = 32768 # bytes
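# For example, a consumer could raise it before the listener connects (illustration only):
#   import proc_events
#   proc_events.pe_nl_rx_buffer_size = 262144  # 256 KiB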
# This is updated whenever the ability to receive events is lost or restored, and may be used to
# handle the case where the ability to receive events was never established or was lost before an
# "events_failed" handler was registered.
events_good = False
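# Example (illustration only): a consumer that registers its failure handler after this
# module has already started listening can check the current state at registration time:
#
#   def events_failed_handler(event):
#     pass  # hypothetical recovery action, e.g. rescan /proc
#   proc_events.handlers['events_failed'] += events_failed_handler
#   if not proc_events.events_good:
#     events_failed_handler(event='events_failed')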
# This EventHandlers class is loosely based on the examples here:
# http://stackoverflow.com/questions/1092531/event-system-in-python
class EventHandlers(list):
def __init__(self, event, logger): self.event = event ; self.__logger = logger
def __iadd__(self, handler): self.append(handler) ; return self
def __isub__(self, handler): self.remove(handler) ; return self
def __call__(self, *list_args, **keyword_args):
self.__logger.debug('{0} event handler called with args {1} {2}:'.format(self.event, list_args, keyword_args))
keyword_args['event'] = self.event
for f in self:
try: f(*list_args, **keyword_args)
except: self.__logger.exception('Exception thrown in {0} event handler with args {1} {2}:'.format(self.event, list_args, keyword_args))
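# EventHandlers is just a list of callables: handlers attach with '+=', detach with '-=',
# and every registered callable is invoked when the event fires. For example
# (illustration only):
#
#   def on_exit(event, pid, tid, exit_status, exit_signal):
#     print('PID {0} exited with status {1}'.format(pid, exit_status))
#   handlers['exit'] += on_exit
#   handlers['exit'] -= on_exit  # stop receiving exit events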
# In Python 2.7, dict((a,'b') for a in c) can be shortened to {a:'b' for a in c}
handlers = dict( (event, EventHandlers(event, __logger)) for event in
['fork', 'exec', 'uid', 'gid', 'sid', 'ptrace', 'comm', 'coredump', 'exit',
'events_failed', 'events_good', 'events_lost'] )
# Simple event handler that logs events
def log_event(**args):
full_args = []
for k, v in args.iteritems():
full_args.append('{0}: {1}'.format(k, v))
__logger.info(', '.join(full_args))
if __name__ == '__main__':
for event in handlers.iterkeys(): handlers[event] += log_event
@libnl_ffi.callback('nl_recvmsg_msg_cb_t')
def __msg_cb(msg, arg):
# Extract the netlink message (Already validated by libnl)
nl_msg_hdr = libnl.nlmsg_hdr(msg)
# Validate the netlink message's payload length
if nl_msg_hdr.nlmsg_len < libnl.nlmsg_size(libnl_ffi.sizeof('struct cn_msg')):
__logger.warn('Received a short NETLINK_CONNECTOR message, will ignore and continue (Expected {0} bytes but got {1} bytes)'.format(libnl.nlmsg_size(libnl_ffi.sizeof('struct cn_msg')), nl_msg_hdr.nlmsg_len))
return libnl.NL_SKIP
# Extract and validate the NETLINK_CONNECTOR message
# cn_msg.seq should match nl_msg_hdr.nlmsg_seq, but we don't really need to validate it
# cn_msg.flags is not used by the PROC CONNECTOR
cn_msg = libnl_ffi.cast('struct cn_msg *', libnl.nlmsg_data(nl_msg_hdr))
if cn_msg.id.idx != libnl.CN_IDX_PROC or cn_msg.id.val != libnl.CN_VAL_PROC:
__logger.warn('Received a NETLINK_CONNECTOR message with an unexpected ID, will ignore and continue (Expected idx:{0} val:{1} but got idx:{2} val:{3}) (See /usr/include/linux/connector.h)'.format(cn_msg.id.idx, cn_msg.id.val, libnl.CN_IDX_PROC, libnl.CN_VAL_PROC))
return libnl.NL_SKIP
# Validate the NETLINK_CONNECTOR message's payload length
if cn_msg.len < libnl_ffi.sizeof('struct proc_event'):
__logger.warn('Received a short PROC CONNECTOR event, will ignore and continue (Expected {0} bytes but got {1} bytes)'.format(libnl_ffi.sizeof('struct proc_event'), cn_msg.len))
return libnl.NL_SKIP
if nl_msg_hdr.nlmsg_len < libnl.nlmsg_size(libnl_ffi.sizeof('struct cn_proc_reply')):
__logger.warn('Received a NETLINK message with valid payload length but invalid message length, will ignore and continue (Expected {0} bytes but got {1} bytes)'.format(libnl.nlmsg_size(libnl_ffi.sizeof('struct cn_proc_reply')), nl_msg_hdr.nlmsg_len))
return libnl.NL_SKIP
# Extract and validate the PROC CONNECTOR event
event = libnl_ffi.cast('struct cn_proc_reply *', libnl.nlmsg_data(nl_msg_hdr)).event
if (cn_msg.ack != 0 and event.what != libnl.PROC_EVENT_NONE) or \
(cn_msg.ack == 0 and event.what == libnl.PROC_EVENT_NONE):
__logger.warn("Received a PROC CONNECTOR event with an unexpected combination of 'ack' and 'what' values, will ignore and continue (ack: {0} what: {1})".format(cn_msg.ack, event.what))
return libnl.NL_SKIP
ev_type = event.what
ev_data = event.event_data
# If the ability to receive events has not been established or was lost, it looks like things are
# working now.
global events_good
if not events_good:
events_good = True
handlers['events_good']()
# Parse the PROC CONNECTOR event (See /usr/include/linux/cn_proc.h)
if ev_type == libnl.PROC_EVENT_NONE:
# ACK in response to PROC_CN_MCAST_LISTEN or PROC_CN_MCAST_IGNORE message, don't fire an event
if ev_data.ack.err != 0: __logger.warn('Received a PROC CONNECTOR ACK message with error code {0}'.format(ev_data.ack.err))
else: __logger.debug('Received a PROC CONNECTOR ACK message')
elif ev_type == libnl.PROC_EVENT_FORK:
# Process has been created via fork()
handlers['fork'](
pid = ev_data.fork.child_tgid,
tid = ev_data.fork.child_pid,
parent_pid = ev_data.fork.parent_tgid,
parent_tid = ev_data.fork.parent_pid,
)
elif ev_type == libnl.PROC_EVENT_EXEC:
# Process has been replaced via exec()
handlers['exec'](
# 'exec' is a python keyword, so we have to use getattr(ev_data,'exec') instead of
# ev_data.exec
pid = getattr(ev_data,'exec').process_tgid,
tid = getattr(ev_data,'exec').process_pid,
)
elif ev_type == libnl.PROC_EVENT_UID:
# Process UID changed
handlers['uid'](
pid = ev_data.id.process_tgid,
tid = ev_data.id.process_pid,
real_uid = ev_data.id.r.ruid,
effective_uid = ev_data.id.e.euid,
)
elif ev_type == libnl.PROC_EVENT_GID:
# Process GID changed
handlers['gid'](
pid = ev_data.id.process_tgid,
tid = ev_data.id.process_pid,
real_gid = ev_data.id.r.rgid,
effective_gid = ev_data.id.e.egid,
)
elif ev_type == libnl.PROC_EVENT_SID:
# Process has become a session leader
# See http://lwn.net/Articles/337708/
handlers['sid'](
pid = ev_data.sid.process_tgid,
tid = ev_data.sid.process_pid,
)
elif hasattr(libnl, 'PROC_EVENT_PTRACE') and ev_type == libnl.PROC_EVENT_PTRACE:
# ptrace() has attached to process
handlers['ptrace'](
pid = ev_data.ptrace.process_tgid,
tid = ev_data.ptrace.process_pid,
tracer_pid = ev_data.ptrace.tracer_tgid,
tracer_tid = ev_data.ptrace.tracer_pid,
)
elif hasattr(libnl, 'PROC_EVENT_COMM') and ev_type == libnl.PROC_EVENT_COMM:
# Process command name has changed
# See https://lkml.org/lkml/2011/8/2/276
handlers['comm'](
pid = ev_data.comm.process_tgid,
tid = ev_data.comm.process_pid,
command = libnl_ffi.string(ev_data.comm.comm),
)
elif hasattr(libnl, 'PROC_EVENT_COREDUMP') and ev_type == libnl.PROC_EVENT_COREDUMP:
# Process has dumped a core file
handlers['coredump'](
pid = ev_data.coredump.process_tgid,
tid = ev_data.coredump.process_pid,
)
elif ev_type == libnl.PROC_EVENT_EXIT:
# Process has exited
handlers['exit'](
pid = ev_data.exit.process_tgid,
tid = ev_data.exit.process_pid,
exit_status = ev_data.exit.exit_code,
exit_signal = ev_data.exit.exit_signal,
)
else:
__logger.debug("Received a PROC CONNECTOR event with an unknown 'what' value, will ignore and continue ({0}) (See /usr/include/linux/cn_proc.h)".format(event.what))
return libnl.NL_SKIP
return libnl.NL_OK
@libnl_ffi.callback('nl_recvmsg_err_cb_t')
def __err_cb(nl_addr, nl_err, arg):
  err_num = nl_err.error
  try: err_str = os.strerror(err_num)
  except: err_str = '(Unknown error)'
  __logger.warn('Received NLMSG_ERROR with error code {0}: {1} (Will ignore and continue)'.format(err_num, err_str))
  # See the notes in libnl.py about the error message callback
  return libnl.NL_SKIP
__exit = False
__thread_id = -1
def __listen():
__logger.debug('Connecting to the netlink proc connector')
nl_sock = libnl.nl_socket_alloc()
if nl_sock == libnl_ffi.NULL: raise RuntimeError('Error allocating nl_sock')
try:
# Register callbacks
libnl_check(libnl.nl_socket_modify_cb(nl_sock, libnl.NL_CB_FINISH, libnl.NL_CB_CUSTOM, __msg_cb, libnl_ffi.NULL))
libnl_check(libnl.nl_socket_modify_err_cb(nl_sock, libnl.NL_CB_CUSTOM, __err_cb, libnl_ffi.NULL))
# Multicast event sequence numbers are not sequential, so do not attempt to verify them
libnl.nl_socket_disable_seq_check(nl_sock)
# Connect
libnl_check(libnl.nl_connect(nl_sock, libnl.NETLINK_CONNECTOR))
try:
# Subscribe to the PROC CONNECTOR's multicast group
libnl_check(libnl.nl_socket_add_membership(nl_sock, libnl.CN_IDX_PROC))
# Only need to send two messages, so tx buffer can be small
libnl_check(libnl.nl_socket_set_buffer_size(nl_sock, pe_nl_rx_buffer_size, 128))
# Increment the PROC CONNECTOR's internal listener counter to ensure that it sends messages.
# This must be sent after we subscribe to the multicast group so that we can use the ACK to
# trigger the "events_good" event. (See the notes in libnl.py about the PROC CONNECTOR's
# internal counter.)
cn_proc_msg = libnl_ffi.new('struct cn_proc_msg *') # libnl_ffi.new() calls memset(0) for us
            cn_proc_msg.cn_msg.id.idx = libnl.CN_IDX_PROC
            cn_proc_msg.cn_msg.id.val = libnl.CN_VAL_PROC
cn_proc_msg.cn_msg.len = libnl_ffi.sizeof('enum proc_cn_mcast_op')
cn_proc_msg.cn_mcast = libnl.PROC_CN_MCAST_LISTEN
cn_proc_msg_size = libnl_ffi.sizeof('struct cn_proc_msg')
libnl_check(libnl.nl_send_simple(nl_sock, libnl.NLMSG_DONE, 0, cn_proc_msg, cn_proc_msg_size))
try:
# Use non-blocking mode so we can wake select() with a signal or a timeout
# In blocking mode, nl_recv() loops on signals, and we have no way to stop that loop
libnl_check(libnl.nl_socket_set_nonblocking(nl_sock))
nl_sock_fd = libnl.nl_socket_get_fd(nl_sock)
# We can only wake select() with a signal if we know the ID of this thread
# Otherwise we have to periodically wake select() with a timeout to determine when to exit
if __thread_id < 0:
__logger.info('Thread ID not available, will periodically wake select() to determine when to exit')
select_timeout = 3
else: select_timeout = None
__logger.debug('Connected to the netlink proc connector')
while not __exit:
try: r, w, x = select.select([nl_sock_fd], [], [nl_sock_fd], select_timeout)
except select.error as e:
err_num, err_str = e.args
if err_num == errno.EINTR: continue # Woken by a signal
raise RuntimeError('select() returned error: {0}'.format(e))
if len(r) == 0 and len(x) == 0: continue # Timeout
err_num = libnl.nl_recvmsgs_default(nl_sock)
if err_num == -libnl.NLE_AGAIN: continue
if err_num == -libnl.NLE_NOMEM: # See the notes in libnl.py about NLMSG_OVERRUN
handlers['events_lost']()
continue
libnl_check(err_num) # Throw an exception on other errors
finally:
__logger.debug('Disconnecting from the netlink proc connector')
global events_good
events_good = False
if not __exit: handlers['events_failed']()
# If we're here because nl_recvmsgs() or select() failed then this probably won't work, but
# we will try it anyway and ignore any errors. Since the socket is in non-blocking mode,
# you might think we need to check for NLE_AGAIN, however the 128 byte TX buffer configured
# above should be large enough to hold both of the messages we send, so NLE_AGAIN should
# never happen.
cn_proc_msg.cn_mcast = libnl.PROC_CN_MCAST_IGNORE
libnl.nl_send_simple(nl_sock, libnl.NLMSG_DONE, 0, cn_proc_msg, cn_proc_msg_size)
finally: libnl.nl_close(nl_sock)
finally: libnl.nl_socket_free(nl_sock)
import threading
from datetime import datetime
import signal
import sys
pe_throttle_interval = 3 # seconds
__listen_wake_lock = threading.Event()
# Python doesn't provide any mechanism for obtaining the OS thread ID, which we need to send a
# signal to interrupt select() on exit. Attempt to obtain it using a gettid system call via ctypes.
def __get_thread_id():
try:
# Unfortunately there is no glibc symbol for gettid(), and there is no good way to look up the
# syscall number for it, so we have to hard-code it.
gettid = -1
import platform
# Linux can probably be assumed for this particular module, but just in case
if platform.system() == 'Linux':
# This logic comes from /usr/include/asm/unistd.h in Linux 3.16.0
if platform.machine() == 'i386':
gettid = 224 # Defined in asm/unistd_32.h
elif platform.machine() == 'x86_64':
if sys.maxint == 2**31-1: # Max signed integer
x32_syscall_bit = 0x40000000 # Defined in asm/unistd.h
gettid = x32_syscall_bit + 186 # Defined in asm/unistd_x32.h
elif sys.maxint == 2**63-1: # Max signed integer
gettid = 186 # Defined in asm/unistd_64.h
if gettid > 0:
import ctypes
global __thread_id
__thread_id = ctypes.CDLL('libc.so.6').syscall(gettid)
except:
# If an error occurs, we will simply fall back to periodically waking select()
pass
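    # Note: on Python 3.8 and newer, threading.get_native_id() returns the same
    # OS thread ID portably and could replace this hard-coded syscall lookup.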
def __listen_loop():
__logger.debug('Starting listen loop')
__get_thread_id()
last_exception_time = datetime.min
while not __exit:
try:
__listen()
except:
__logger.exception('Exception thrown in listen loop, will restart:')
delta = datetime.utcnow() - last_exception_time
if delta.days == 0 and delta.seconds < pe_throttle_interval:
__logger.info('Throttling listen loop for {0} seconds'.format(pe_throttle_interval))
__listen_wake_lock.wait(pe_throttle_interval)
last_exception_time = datetime.utcnow()
__logger.debug('Stopped listen loop')
__thread = threading.Thread(target=__listen_loop)
__thread.daemon = True
def __stop():
if not __thread.is_alive(): return
__logger.debug('Stopping listen loop')
global __exit
__exit = True
if __thread_id > 0:
try: os.kill(__thread_id, signal.SIGINT) # Wake select() in __listen()
except KeyboardInterrupt: pass
# os.kill() should trigger a KeyboardInterrupt immediately, but that sometimes doesn't happen
# until the __thread.join() call, so we must catch it on every call from here on out.
try: __listen_wake_lock.set() # Wake __listen_wake_lock.wait() in __listen_loop()
except KeyboardInterrupt: __listen_wake_lock.set()
try: __thread.join()
except KeyboardInterrupt: __thread.join()
# Stop the listen loop when Python exits
import atexit
atexit.register(__stop)
# Start the listen loop when this file is imported
# (Run last to avoid starting if any exceptions are thrown above)
__thread.start()
if __name__ == '__main__':
# Run until signal (CTRL-C)
try: signal.pause()
# CTRL-C causes ^C to be printed without a trailing newline
except KeyboardInterrupt: sys.stderr.write('\n')
| gpl-3.0 | 3,548,660,567,676,308,500 | 45.735661 | 268 | 0.68134 | false |
bazz-erp/erpnext | erpnext/regional/india/setup.py | 1 | 7097 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, os, json
from frappe.custom.doctype.custom_field.custom_field import create_custom_field
from frappe.permissions import add_permission
from erpnext.regional.india import states
def setup(company=None, patch=True):
make_custom_fields()
add_permissions()
add_custom_roles_for_reports()
add_hsn_sac_codes()
add_print_formats()
if not patch:
update_address_template()
make_fixtures()
def update_address_template():
with open(os.path.join(os.path.dirname(__file__), 'address_template.html'), 'r') as f:
html = f.read()
address_template = frappe.db.get_value('Address Template', 'India')
if address_template:
frappe.db.set_value('Address Template', 'India', 'template', html)
else:
# make new html template for India
frappe.get_doc(dict(
doctype='Address Template',
country='India',
template=html
)).insert()
def add_hsn_sac_codes():
# HSN codes
with open(os.path.join(os.path.dirname(__file__), 'hsn_code_data.json'), 'r') as f:
hsn_codes = json.loads(f.read())
create_hsn_codes(hsn_codes, code_field="hsn_code")
# SAC Codes
with open(os.path.join(os.path.dirname(__file__), 'sac_code_data.json'), 'r') as f:
sac_codes = json.loads(f.read())
create_hsn_codes(sac_codes, code_field="sac_code")
def create_hsn_codes(data, code_field):
for d in data:
if not frappe.db.exists("GST HSN Code", d[code_field]):
hsn_code = frappe.new_doc('GST HSN Code')
hsn_code.description = d["description"]
hsn_code.hsn_code = d[code_field]
hsn_code.name = d[code_field]
hsn_code.db_insert()
def add_custom_roles_for_reports():
for report_name in ('GST Sales Register', 'GST Purchase Register',
'GST Itemised Sales Register', 'GST Itemised Purchase Register'):
if not frappe.db.get_value('Custom Role', dict(report=report_name)):
frappe.get_doc(dict(
doctype='Custom Role',
report=report_name,
roles= [
dict(role='Accounts User'),
dict(role='Accounts Manager')
]
)).insert()
def add_permissions():
for doctype in ('GST HSN Code', 'GST Settings'):
add_permission(doctype, 'Accounts Manager', 0)
add_permission(doctype, 'All', 0)
def add_print_formats():
frappe.reload_doc("regional", "print_format", "gst_tax_invoice")
def make_custom_fields():
hsn_sac_field = dict(fieldname='gst_hsn_code', label='HSN/SAC',
fieldtype='Data', options='item_code.gst_hsn_code', insert_after='description',
allow_on_submit=1, print_hide=1)
invoice_gst_fields = [
dict(fieldname='gst_section', label='GST Details', fieldtype='Section Break',
insert_after='select_print_heading', print_hide=1, collapsible=1),
dict(fieldname='invoice_copy', label='Invoice Copy',
fieldtype='Select', insert_after='gst_section', print_hide=1, allow_on_submit=1,
options='Original for Recipient\nDuplicate for Transporter\nDuplicate for Supplier\nTriplicate for Supplier'),
dict(fieldname='reverse_charge', label='Reverse Charge',
fieldtype='Select', insert_after='invoice_copy', print_hide=1,
options='Y\nN', default='N'),
dict(fieldname='gst_col_break', fieldtype='Column Break', insert_after='reverse_charge'),
dict(fieldname='invoice_type', label='Invoice Type',
fieldtype='Select', insert_after='reverse_charge', print_hide=1,
options='Regular\nSEZ\nExport\nDeemed Export', default='Regular'),
dict(fieldname='export_type', label='Export Type',
fieldtype='Select', insert_after='invoice_type', print_hide=1,
depends_on='eval:in_list(["SEZ", "Export", "Deemed Export"], doc.invoice_type)',
options='\nWith Payment of Tax\nWithout Payment of Tax'),
dict(fieldname='ecommerce_gstin', label='E-commerce GSTIN',
fieldtype='Data', insert_after='export_type', print_hide=1)
]
purchase_invoice_gst_fields = [
dict(fieldname='supplier_gstin', label='Supplier GSTIN',
fieldtype='Data', insert_after='supplier_address',
options='supplier_address.gstin', print_hide=1),
dict(fieldname='company_gstin', label='Company GSTIN',
fieldtype='Data', insert_after='shipping_address',
options='shipping_address.gstin', print_hide=1)
]
sales_invoice_gst_fields = [
dict(fieldname='customer_gstin', label='Customer GSTIN',
fieldtype='Data', insert_after='shipping_address',
options='shipping_address_name.gstin', print_hide=1),
dict(fieldname='place_of_supply', label='Place of Supply',
fieldtype='Data', insert_after='customer_gstin', print_hide=1,
options='shipping_address_name.gst_state_number', read_only=1),
dict(fieldname='company_gstin', label='Company GSTIN',
fieldtype='Data', insert_after='company_address',
options='company_address.gstin', print_hide=1)
]
custom_fields = {
'Address': [
dict(fieldname='gstin', label='Party GSTIN', fieldtype='Data',
insert_after='fax'),
dict(fieldname='gst_state', label='GST State', fieldtype='Select',
options='\n'.join(states), insert_after='gstin'),
dict(fieldname='gst_state_number', label='GST State Number',
fieldtype='Int', insert_after='gst_state', read_only=1),
],
'Purchase Invoice': purchase_invoice_gst_fields + invoice_gst_fields,
'Sales Invoice': sales_invoice_gst_fields + invoice_gst_fields,
"Delivery Note": sales_invoice_gst_fields,
'Item': [
dict(fieldname='gst_hsn_code', label='HSN/SAC',
fieldtype='Link', options='GST HSN Code', insert_after='item_group'),
],
'Quotation Item': [hsn_sac_field],
'Supplier Quotation Item': [hsn_sac_field],
'Sales Order Item': [hsn_sac_field],
'Delivery Note Item': [hsn_sac_field],
'Sales Invoice Item': [hsn_sac_field],
'Purchase Order Item': [hsn_sac_field],
'Purchase Receipt Item': [hsn_sac_field],
'Purchase Invoice Item': [hsn_sac_field]
}
for doctype, fields in custom_fields.items():
for df in fields:
field = frappe.db.get_value("Custom Field", {"dt": doctype, "fieldname": df["fieldname"]})
if not field:
create_custom_field(doctype, df)
else:
custom_field = frappe.get_doc("Custom Field", field)
custom_field.update(df)
custom_field.save()
def make_fixtures():
docs = [
{'doctype': 'Salary Component', 'salary_component': 'Professional Tax', 'description': 'Professional Tax', 'type': 'Deduction'},
{'doctype': 'Salary Component', 'salary_component': 'Provident Fund', 'description': 'Provident fund', 'type': 'Deduction'},
{'doctype': 'Salary Component', 'salary_component': 'House Rent Allowance', 'description': 'House Rent Allowance', 'type': 'Earning'},
{'doctype': 'Salary Component', 'salary_component': 'Basic', 'description': 'Basic', 'type': 'Earning'},
{'doctype': 'Salary Component', 'salary_component': 'Arrear', 'description': 'Arrear', 'type': 'Earning'},
{'doctype': 'Salary Component', 'salary_component': 'Leave Encashment', 'description': 'Leave Encashment', 'type': 'Earning'}
]
for d in docs:
try:
doc = frappe.get_doc(d)
doc.flags.ignore_permissions = True
doc.insert()
except frappe.NameError:
pass
| gpl-3.0 | 2,784,275,214,947,402,000 | 39.096045 | 136 | 0.693251 | false |
serverdensity/sd-agent-core-plugins | marathon/test_marathon.py | 1 | 2878 | # (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
import os
# project
from tests.checks.common import AgentCheckTest, Fixtures
DEPLOYMENT_METRICS_CONFIG = {
'init_config': {
'default_timeout': 5
},
'instances': [
{
'url': 'http://localhost:8080',
'enable_deployment_metrics': True
}
]
}
DEFAULT_CONFIG = {
'init_config': {
'default_timeout': 5
},
'instances': [
{
'url': 'http://localhost:8080'
}
]
}
APP_METRICS = [
'marathon.backoffFactor',
'marathon.backoffSeconds',
'marathon.cpus',
'marathon.disk',
'marathon.instances',
'marathon.mem',
# 'marathon.taskRateLimit', # Not present in fixture
'marathon.tasksRunning',
'marathon.tasksStaged',
'marathon.tasksHealthy',
'marathon.tasksUnhealthy'
]
Q_METRICS = [
'marathon.queue.count',
'marathon.queue.delay',
'marathon.queue.offers.processed',
'marathon.queue.offers.unused',
'marathon.queue.offers.reject.last',
'marathon.queue.offers.reject.launch',
]
class MarathonCheckTest(AgentCheckTest):
CHECK_NAME = 'marathon'
def test_default_configuration(self):
ci_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "ci")
def side_effect(url, timeout, auth, acs_url, verify):
if "v2/apps" in url:
return Fixtures.read_json_file("apps.json", sdk_dir=ci_dir)
elif "v2/deployments" in url:
return Fixtures.read_json_file("deployments.json", sdk_dir=ci_dir)
elif "v2/queue" in url:
return Fixtures.read_json_file("queue.json", sdk_dir=ci_dir)
else:
raise Exception("unknown url:" + url)
self.run_check(DEFAULT_CONFIG, mocks={"get_json": side_effect})
self.assertMetric('marathon.apps', value=2)
for metric in APP_METRICS:
self.assertMetric(metric, count=1, tags=['app_id:/my-app', 'version:2016-08-25T18:13:34.079Z'])
self.assertMetric(metric, count=1, tags=['app_id:/my-app-2', 'version:2016-08-25T18:13:34.079Z'])
self.assertMetric('marathon.deployments', value=1)
for metric in Q_METRICS:
self.assertMetric(metric, at_least=1)
def test_empty_responses(self):
def side_effect(url, timeout, auth, acs_url, verify):
if "v2/apps" in url:
return {"apps": []}
elif "v2/deployments" in url:
return {"deployments": []}
elif "v2/queue" in url:
return {"queue": []}
else:
raise Exception("unknown url:" + url)
self.run_check(DEFAULT_CONFIG, mocks={"get_json": side_effect})
self.assertMetric('marathon.apps', value=0)
| bsd-3-clause | 7,696,648,075,536,572,000 | 29.617021 | 109 | 0.588256 | false |
h8liu/cumulus | python/cumulus/__init__.py | 1 | 38157 | # Cumulus: Efficient Filesystem Backup to the Cloud
# Copyright (C) 2008-2009, 2012 The Cumulus Developers
# See the AUTHORS file for a list of contributors.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""High-level interface for working with Cumulus archives.
This module provides an easy interface for reading from and manipulating
various parts of a Cumulus archive:
- listing the snapshots and segments present
- reading segment contents
- parsing snapshot descriptors and snapshot metadata logs
- reading and maintaining the local object database
"""
from __future__ import division, print_function, unicode_literals
import codecs
import hashlib
import itertools
import os
import posixpath
import re
import sqlite3
import subprocess
import sys
import tarfile
import tempfile
try:
import _thread
except ImportError:
import thread as _thread
import cumulus.store
import cumulus.store.file
if sys.version < "3":
StringTypes = (str, unicode)
else:
StringTypes = (str,)
# The largest supported snapshot format that can be understood.
FORMAT_VERSION = (0, 11) # Cumulus Snapshot v0.11
# Maximum number of nested indirect references allowed in a snapshot.
MAX_RECURSION_DEPTH = 3
# All segments which have been accessed this session.
accessed_segments = set()
# Table of methods used to filter segments before storage, and corresponding
# filename extensions. These are listed in priority order (methods earlier in
# the list are tried first).
SEGMENT_FILTERS = [
(".gpg", "cumulus-filter-gpg --decrypt"),
(".gz", "gzip -dc"),
(".bz2", "bzip2 -dc"),
("", None),
]
def to_lines(data):
"""Decode binary data from a file into a sequence of lines.
Newline markers are retained."""
return list(codecs.iterdecode(data.splitlines(True), "utf-8"))
def uri_decode(s):
"""Decode a URI-encoded (%xx escapes) string."""
def hex_decode(m): return chr(int(m.group(1), 16))
return re.sub(r"%([0-9a-f]{2})", hex_decode, s)
def uri_encode(s):
"""Encode a string to URI-encoded (%xx escapes) form."""
def hex_encode(c):
if c > '+' and c < '\x7f' and c != '@':
return c
else:
return "%%%02x" % (ord(c),)
return ''.join(hex_encode(c) for c in s)
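# For example, uri_encode("file name") returns "file%20name" and
# uri_decode("file%20name") returns "file name".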
class Struct:
"""A class which merely acts as a data container.
Instances of this class (or its subclasses) are merely used to store data
in various attributes. No methods are provided.
"""
def __repr__(self):
return "<%s %s>" % (self.__class__, self.__dict__)
CHECKSUM_ALGORITHMS = {
'sha1': hashlib.sha1,
'sha224': hashlib.sha224,
'sha256': hashlib.sha256,
}
class ChecksumCreator:
"""Compute a Cumulus checksum for provided data.
The algorithm used is selectable, but currently defaults to sha1.
"""
def __init__(self, algorithm='sha1'):
self.algorithm = algorithm
self.hash = CHECKSUM_ALGORITHMS[algorithm]()
def update(self, data):
self.hash.update(data)
return self
def compute(self):
return "%s=%s" % (self.algorithm, self.hash.hexdigest())
class ChecksumVerifier:
"""Verify whether a checksum from a snapshot matches the supplied data."""
def __init__(self, checksumstr):
"""Create an object to check the supplied checksum."""
(algo, checksum) = checksumstr.split("=", 1)
self.checksum = checksum
self.hash = CHECKSUM_ALGORITHMS[algo]()
def update(self, data):
self.hash.update(data)
def valid(self):
"""Return a boolean indicating whether the checksum matches."""
result = self.hash.hexdigest()
return result == self.checksum
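# Illustrative round trip for the two checksum helpers (digest value omitted):
#     checksum = ChecksumCreator().update(b"chunk of data").compute()  # "sha1=<hex digest>"
#     verifier = ChecksumVerifier(checksum)
#     verifier.update(b"chunk of data")
#     assert verifier.valid()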
class SearchPathEntry(object):
"""Item representing a possible search location for Cumulus files.
Some Cumulus files might be stored in multiple possible file locations: due
to format (different compression mechanisms with different extensions),
locality (different segments might be placed in different directories to
control archiving policies), for backwards compatibility (default location
changed over time). A SearchPathEntry describes a possible location for a
file.
"""
def __init__(self, directory_prefix, suffix, context=None):
self._directory_prefix = directory_prefix
self._suffix = suffix
self._context = context
def __repr__(self):
return "%s(%r, %r, %r)" % (self.__class__.__name__,
self._directory_prefix, self._suffix,
self._context)
def build_path(self, basename):
"""Construct the search path to use for a file with name basename.
Returns a tuple (pathname, context), where pathname is the path to try
and context is any additional data associated with this search entry
(if any).
"""
return (posixpath.join(self._directory_prefix, basename + self._suffix),
self._context)
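    # For example (made-up names), SearchPathEntry("segments0", ".gz", "gzip -dc")
    # .build_path("abc.tar") returns ("segments0/abc.tar.gz", "gzip -dc").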
class SearchPath(object):
"""A collection of locations to search for files and lookup utilities.
For looking for a file in a Cumulus storage backend, a SearchPath object
contains a list of possible locations to try. A SearchPath can be used to
perform the search as well; when a file is found the search path ordering
is updated (moving the successful SearchPathEntry to the front of the list
for future searches).
"""
def __init__(self, name_regex, searchpath):
self._regex = re.compile(name_regex)
self._path = list(searchpath)
def add_search_entry(self, entry):
self._path.append(entry)
def directories(self):
"""Return the set of directories to search for a file type."""
return set(entry._directory_prefix for entry in self._path)
def get(self, backend, basename):
for (i, entry) in enumerate(self._path):
try:
(pathname, context) = entry.build_path(basename)
fp = backend.get(pathname)
# On success, move this entry to the front of the search path
# to speed future searches.
if i > 0:
self._path.pop(i)
self._path.insert(0, entry)
return (fp, pathname, context)
except cumulus.store.NotFoundError:
continue
raise cumulus.store.NotFoundError(basename)
def stat(self, backend, basename):
for (i, entry) in enumerate(self._path):
try:
(pathname, context) = entry.build_path(basename)
stat_data = backend.stat(pathname)
# On success, move this entry to the front of the search path
# to speed future searches.
if i > 0:
self._path.pop(i)
self._path.insert(0, entry)
result = {"path": pathname}
result.update(stat_data)
return result
except cumulus.store.NotFoundError:
continue
raise cumulus.store.NotFoundError(basename)
def match(self, filename):
return self._regex.match(filename)
def list(self, backend):
success = False
for d in self.directories():
try:
for f in backend.list(d):
success = True
m = self.match(f)
if m: yield (posixpath.join(d, f), m)
except cumulus.store.NotFoundError:
pass
if not success:
raise cumulus.store.NotFoundError(backend)
def _build_segments_searchpath(prefix):
for (extension, filter) in SEGMENT_FILTERS:
yield SearchPathEntry(prefix, extension, filter)
SEARCH_PATHS = {
"checksums": SearchPath(
r"^snapshot-(.*)\.(\w+)sums$",
[SearchPathEntry("meta", ".sha1sums"),
SearchPathEntry("checksums", ".sha1sums"),
SearchPathEntry("", ".sha1sums")]),
"meta": SearchPath(
r"^snapshot-(.*)\.meta(\.\S+)?$",
_build_segments_searchpath("meta")),
"segments": SearchPath(
(r"^([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})"
r"\.tar(\.\S+)?$"),
itertools.chain(
_build_segments_searchpath("segments0"),
_build_segments_searchpath("segments1"),
_build_segments_searchpath(""),
_build_segments_searchpath("segments"))),
"snapshots": SearchPath(
r"^snapshot-(.*)\.(cumulus|lbs)$",
[SearchPathEntry("snapshots", ".cumulus"),
SearchPathEntry("snapshots", ".lbs"),
SearchPathEntry("", ".cumulus"),
SearchPathEntry("", ".lbs")]),
}
class BackendWrapper(object):
"""Wrapper around a Cumulus storage backend that understands file types.
The BackendWrapper class understands different Cumulus file types, such as
snapshots and segments, and implements higher-level operations such as
"retrieve a snapshot with a specific name" (hiding operations such as
searching for the correct file name).
"""
def __init__(self, backend):
"""Initializes a wrapper around the specified storage backend.
        backend may either be a Store object or a URL string.
"""
if type(backend) in StringTypes:
self._backend = cumulus.store.open(backend)
else:
self._backend = backend
@property
def raw_backend(self):
return self._backend
def stat_generic(self, basename, filetype):
return SEARCH_PATHS[filetype].stat(self._backend, basename)
def open_generic(self, basename, filetype):
return SEARCH_PATHS[filetype].get(self._backend, basename)
def open_snapshot(self, name):
return self.open_generic("snapshot-" + name, "snapshots")
def open_segment(self, name):
return self.open_generic(name + ".tar", "segments")
def list_generic(self, filetype):
return ((x[1].group(1), x[0])
for x in SEARCH_PATHS[filetype].list(self._backend))
def prefetch_generic(self):
"""Calls scan on directories to prefetch file metadata."""
directories = set()
for typeinfo in SEARCH_PATHS.values():
directories.update(typeinfo.directories())
for d in directories:
print("Prefetch", d)
self._backend.scan(d)
class CumulusStore:
def __init__(self, backend):
if isinstance(backend, BackendWrapper):
self.backend = backend
else:
self.backend = BackendWrapper(backend)
self.cachedir = None
self.CACHE_SIZE = 16
self._lru_list = []
def get_cachedir(self):
if self.cachedir is None:
self.cachedir = tempfile.mkdtemp("-cumulus")
return self.cachedir
def cleanup(self):
if self.cachedir is not None:
# TODO: Avoid use of system, make this safer
os.system("rm -rf " + self.cachedir)
self.cachedir = None
@staticmethod
def parse_ref(refstr):
m = re.match(r"^zero\[(\d+)\]$", refstr)
if m:
return ("zero", None, None, (0, int(m.group(1)), False))
m = re.match(r"^([-0-9a-f]+)\/([0-9a-f]+)(\(\S+\))?(\[(=?(\d+)|(\d+)\+(\d+))\])?$", refstr)
if not m: return
segment = m.group(1)
object = m.group(2)
checksum = m.group(3)
slice = m.group(4)
if checksum is not None:
checksum = checksum.lstrip("(").rstrip(")")
if slice is not None:
if m.group(6) is not None:
# Size-assertion slice
slice = (0, int(m.group(6)), True)
else:
slice = (int(m.group(7)), int(m.group(8)), False)
return (segment, object, checksum, slice)
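    # Illustrative reference strings accepted by parse_ref (values are made up):
    #     "zero[4096]" -> ("zero", None, None, (0, 4096, False))
    #     "a1b2c3d4-e5f6-0000-0000-000000000000/000000a1[0+1024]"
    #         -> ("a1b2c3d4-e5f6-0000-0000-000000000000", "000000a1", None, (0, 1024, False))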
def list_snapshots(self):
return set(x[0] for x in self.backend.list_generic("snapshots"))
def list_segments(self):
return set(x[0] for x in self.backend.list_generic("segments"))
def load_snapshot(self, snapshot):
snapshot_file = self.backend.open_snapshot(snapshot)[0]
return to_lines(snapshot_file.read())
@staticmethod
def filter_data(filehandle, filter_cmd):
if filter_cmd is None:
return filehandle
p = subprocess.Popen(filter_cmd, shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, close_fds=True)
input, output = p.stdin, p.stdout
def copy_thread(src, dst):
BLOCK_SIZE = 4096
while True:
block = src.read(BLOCK_SIZE)
if len(block) == 0: break
dst.write(block)
src.close()
dst.close()
p.wait()
_thread.start_new_thread(copy_thread, (filehandle, input))
return output
def get_segment(self, segment):
accessed_segments.add(segment)
(segment_fp, path, filter_cmd) = self.backend.open_segment(segment)
return self.filter_data(segment_fp, filter_cmd)
def load_segment(self, segment):
seg = tarfile.open(segment, 'r|', self.get_segment(segment))
for item in seg:
data_obj = seg.extractfile(item)
path = item.name.split('/')
if len(path) == 2 and path[0] == segment:
yield (path[1], data_obj.read())
def extract_segment(self, segment):
segdir = os.path.join(self.get_cachedir(), segment)
os.mkdir(segdir)
for (object, data) in self.load_segment(segment):
f = open(os.path.join(segdir, object), 'wb')
f.write(data)
f.close()
def load_object(self, segment, object):
accessed_segments.add(segment)
path = os.path.join(self.get_cachedir(), segment, object)
if not os.access(path, os.R_OK):
self.extract_segment(segment)
if segment in self._lru_list: self._lru_list.remove(segment)
self._lru_list.append(segment)
while len(self._lru_list) > self.CACHE_SIZE:
os.system("rm -rf " + os.path.join(self.cachedir,
self._lru_list[0]))
self._lru_list = self._lru_list[1:]
return open(path, 'rb').read()
def get(self, refstr):
"""Fetch the given object and return it.
The input should be an object reference, in string form.
"""
(segment, object, checksum, slice) = self.parse_ref(refstr)
if segment == "zero":
return "\0" * slice[1]
data = self.load_object(segment, object)
if checksum is not None:
verifier = ChecksumVerifier(checksum)
verifier.update(data)
if not verifier.valid():
raise ValueError
if slice is not None:
(start, length, exact) = slice
# Note: The following assertion check may need to be commented out
# to restore from pre-v0.8 snapshots, as the syntax for
# size-assertion slices has changed.
if exact and len(data) != length: raise ValueError
data = data[start:start+length]
if len(data) != length: raise IndexError
return data
def prefetch(self):
self.backend.prefetch_generic()
def parse(lines, terminate=None):
"""Generic parser for RFC822-style "Key: Value" data streams.
This parser can be used to read metadata logs and snapshot root descriptor
files.
lines must be an iterable object which yields a sequence of lines of input.
    If terminate is specified, it is used as a predicate that marks the end of
    one key/value stanza: each time it matches a line, the dictionary collected
    so far is yielded and parsing continues with a fresh stanza.
"""
dict = {}
last_key = None
for l in lines:
# Strip off a trailing newline, if present
if len(l) > 0 and l[-1] == "\n":
l = l[:-1]
if terminate is not None and terminate(l):
if len(dict) > 0: yield dict
dict = {}
last_key = None
continue
m = re.match(r"^([-\w]+):\s*(.*)$", l)
if m:
dict[m.group(1)] = m.group(2)
last_key = m.group(1)
elif len(l) > 0 and l[0].isspace() and last_key is not None:
dict[last_key] += l
else:
last_key = None
if len(dict) > 0: yield dict
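# Illustrative input and output (field values are made up):
#     list(parse(["Format: Cumulus Snapshot v0.11\n", "Segments: seg1 seg2\n"]))
#         -> [{'Format': 'Cumulus Snapshot v0.11', 'Segments': 'seg1 seg2'}]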
def parse_full(lines):
try:
return next(parse(lines))
except StopIteration:
return {}
def parse_metadata_version(s):
"""Convert a string with the snapshot version format to a tuple."""
m = re.match(r"^(?:Cumulus|LBS) Snapshot v(\d+(\.\d+)*)$", s)
if m is None:
return ()
else:
return tuple([int(d) for d in m.group(1).split(".")])
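# For example, parse_metadata_version("Cumulus Snapshot v0.11") returns (0, 11);
# an unrecognized version string returns the empty tuple.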
def read_metadata(object_store, root):
"""Iterate through all lines in the metadata log, following references."""
# Stack for keeping track of recursion when following references to
# portions of the log. The last entry in the stack corresponds to the
# object currently being parsed. Each entry is a list of lines which have
# been reversed, so that popping successive lines from the end of each list
# will return lines of the metadata log in order.
stack = []
def follow_ref(refstr):
if len(stack) >= MAX_RECURSION_DEPTH: raise OverflowError
lines = to_lines(object_store.get(refstr))
lines.reverse()
stack.append(lines)
follow_ref(root)
while len(stack) > 0:
top = stack[-1]
if len(top) == 0:
stack.pop()
continue
line = top.pop()
# An indirect reference which we must follow?
if len(line) > 0 and line[0] == '@':
ref = line[1:]
            ref = ref.strip()
follow_ref(ref)
else:
yield line
class MetadataItem:
"""Metadata for a single file (or directory or...) from a snapshot."""
# Functions for parsing various datatypes that can appear in a metadata log
# item.
@staticmethod
def decode_int(s):
"""Decode an integer, expressed in decimal, octal, or hexadecimal."""
if s.startswith("0x"):
return int(s, 16)
elif s.startswith("0"):
return int(s, 8)
else:
return int(s, 10)
@staticmethod
def decode_str(s):
"""Decode a URI-encoded (%xx escapes) string."""
return uri_decode(s)
@staticmethod
def raw_str(s):
"""An unecoded string."""
return s
@staticmethod
def decode_user(s):
"""Decode a user/group to a tuple of uid/gid followed by name."""
items = s.split()
uid = MetadataItem.decode_int(items[0])
name = None
if len(items) > 1:
if items[1].startswith("(") and items[1].endswith(")"):
name = MetadataItem.decode_str(items[1][1:-1])
return (uid, name)
@staticmethod
def decode_device(s):
"""Decode a device major/minor number."""
(major, minor) = map(MetadataItem.decode_int, s.split("/"))
return (major, minor)
class Items: pass
def __init__(self, fields, object_store):
"""Initialize from a dictionary of key/value pairs from metadata log."""
self.fields = fields
self.object_store = object_store
self.keys = []
self.items = self.Items()
for (k, v) in fields.items():
if k in self.field_types:
decoder = self.field_types[k]
setattr(self.items, k, decoder(v))
self.keys.append(k)
def data(self):
"""Return an iterator for the data blocks that make up a file."""
# This traverses the list of blocks that make up a file, following
# indirect references. It is implemented in much the same way as
# read_metadata, so see that function for details of the technique.
objects = self.fields['data'].split()
objects.reverse()
stack = [objects]
def follow_ref(refstr):
if len(stack) >= MAX_RECURSION_DEPTH: raise OverflowError
objects = self.object_store.get(refstr).split()
objects.reverse()
stack.append(objects)
while len(stack) > 0:
top = stack[-1]
if len(top) == 0:
stack.pop()
continue
ref = top.pop()
# An indirect reference which we must follow?
if len(ref) > 0 and ref[0] == '@':
follow_ref(ref[1:])
else:
yield ref
# Description of fields that might appear, and how they should be parsed.
MetadataItem.field_types = {
'name': MetadataItem.decode_str,
'type': MetadataItem.raw_str,
'mode': MetadataItem.decode_int,
'device': MetadataItem.decode_device,
'user': MetadataItem.decode_user,
'group': MetadataItem.decode_user,
'ctime': MetadataItem.decode_int,
'mtime': MetadataItem.decode_int,
'links': MetadataItem.decode_int,
'inode': MetadataItem.raw_str,
'checksum': MetadataItem.decode_str,
'size': MetadataItem.decode_int,
'contents': MetadataItem.decode_str,
'target': MetadataItem.decode_str,
}
def iterate_metadata(object_store, root):
for d in parse(read_metadata(object_store, root), lambda l: len(l) == 0):
yield MetadataItem(d, object_store)
class LocalDatabase:
"""Access to the local database of snapshot contents and object checksums.
The local database is consulted when creating a snapshot to determine what
data can be re-used from old snapshots. Segment cleaning is performed by
manipulating the data in the local database; the local database also
includes enough data to guide the segment cleaning process.
"""
def __init__(self, path, dbname="localdb.sqlite"):
self.db_connection = sqlite3.connect(path + "/" + dbname)
# Low-level database access. Use these methods when there isn't a
# higher-level interface available. Exception: do, however, remember to
# use the commit() method after making changes to make sure they are
# actually saved, even when going through higher-level interfaces.
def commit(self):
"Commit any pending changes to the local database."
self.db_connection.commit()
def rollback(self):
"Roll back any pending changes to the local database."
self.db_connection.rollback()
def cursor(self):
"Return a DB-API cursor for directly accessing the local database."
return self.db_connection.cursor()
def list_schemes(self):
"""Return the list of snapshots found in the local database.
The returned value is a list of tuples (id, scheme, name, time, intent).
"""
cur = self.cursor()
cur.execute("select distinct scheme from snapshots")
schemes = [row[0] for row in cur.fetchall()]
schemes.sort()
return schemes
def list_snapshots(self, scheme):
"""Return a list of snapshots for the given scheme."""
cur = self.cursor()
cur.execute("select name from snapshots")
snapshots = [row[0] for row in cur.fetchall()]
snapshots.sort()
return snapshots
def delete_snapshot(self, scheme, name):
"""Remove the specified snapshot from the database.
Warning: This does not garbage collect all dependent data in the
database, so it must be followed by a call to garbage_collect() to make
the database consistent.
"""
cur = self.cursor()
cur.execute("delete from snapshots where scheme = ? and name = ?",
(scheme, name))
def prune_old_snapshots(self, scheme, intent=1.0):
"""Delete entries from old snapshots from the database.
Only snapshots with the specified scheme name will be deleted. If
intent is given, it gives the intended next snapshot type, to determine
how aggressively to clean (for example, intent=7 could be used if the
next snapshot will be a weekly snapshot).
"""
cur = self.cursor()
# Find the id of the last snapshot to be created. This is used for
# measuring time in a way: we record this value in each segment we
# expire on this run, and then on a future run can tell if there have
# been intervening backups made.
cur.execute("select max(snapshotid) from snapshots")
last_snapshotid = cur.fetchone()[0]
# Get the list of old snapshots for this scheme. Delete all the old
# ones. Rules for what to keep:
# - Always keep the most recent snapshot.
# - If snapshot X is younger than Y, and X has higher intent, then Y
# can be deleted.
cur.execute("""select snapshotid, name, intent,
julianday('now') - timestamp as age
from snapshots where scheme = ?
order by age""", (scheme,))
first = True
max_intent = intent
for (id, name, snap_intent, snap_age) in cur.fetchall():
can_delete = False
if snap_intent < max_intent:
# Delete small-intent snapshots if there is a more recent
# large-intent snapshot.
can_delete = True
elif snap_intent == intent:
# Delete previous snapshots with the specified intent level.
can_delete = True
if can_delete and not first:
print("Delete snapshot %d (%s)" % (id, name))
cur.execute("delete from snapshots where snapshotid = ?",
(id,))
first = False
max_intent = max(max_intent, snap_intent)
self.garbage_collect()
def garbage_collect(self):
"""Garbage-collect unreachable segment and object data.
        Remove all segments and checksums which are not reachable from the
current set of snapshots stored in the local database.
"""
cur = self.cursor()
# Delete entries in the segment_utilization table which are for
# non-existent snapshots.
cur.execute("""delete from segment_utilization
where snapshotid not in
(select snapshotid from snapshots)""")
# Delete segments not referenced by any current snapshots.
cur.execute("""delete from segments where segmentid not in
(select segmentid from segment_utilization)""")
# Delete dangling objects in the block_index table.
cur.execute("""delete from block_index
where segmentid not in
(select segmentid from segments)""")
# Remove sub-block signatures for deleted objects.
cur.execute("""delete from subblock_signatures
where blockid not in
(select blockid from block_index)""")
# Segment cleaning.
class SegmentInfo(Struct): pass
def get_segment_cleaning_list(self, age_boost=0.0):
"""Return a list of all current segments with information for cleaning.
Return all segments which are currently known in the local database
(there might be other, older segments in the archive itself), and
return usage statistics for each to help decide which segments to
clean.
The returned list will be sorted by estimated cleaning benefit, with
segments that are best to clean at the start of the list.
        If specified, the age_boost parameter (measured in days) will be added to
the age of each segment, as a way of adjusting the benefit computation
before a long-lived snapshot is taken (for example, age_boost might be
set to 7 when cleaning prior to taking a weekly snapshot).
"""
cur = self.cursor()
segments = []
cur.execute("""select segmentid, used, size, mtime,
julianday('now') - mtime as age from segment_info
where expire_time is null""")
for row in cur:
info = self.SegmentInfo()
info.id = row[0]
info.used_bytes = row[1]
info.size_bytes = row[2]
info.mtime = row[3]
info.age_days = row[4]
# If data is not available for whatever reason, treat it as 0.0.
if info.age_days is None:
info.age_days = 0.0
if info.used_bytes is None:
info.used_bytes = 0.0
# Benefit calculation: u is the estimated fraction of each segment
# which is utilized (bytes belonging to objects still in use
# divided by total size; this doesn't take compression or storage
# overhead into account, but should give a reasonable estimate).
#
# The total benefit is a heuristic that combines several factors:
# the amount of space that can be reclaimed (1 - u), an ageing
# factor (info.age_days) that favors cleaning old segments to young
# ones and also is more likely to clean segments that will be
# rewritten for long-lived snapshots (age_boost), and finally a
# penalty factor for the cost of re-uploading data (u + 0.1).
u = info.used_bytes / info.size_bytes
info.cleaning_benefit \
= (1 - u) * (info.age_days + age_boost) / (u + 0.1)
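            # For example, with u = 0.25, age_days = 10 and age_boost = 0 this
            # gives (1 - 0.25) * 10 / 0.35, a benefit of roughly 21.4.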
segments.append(info)
        segments.sort(key=lambda s: s.cleaning_benefit, reverse=True)
return segments
def mark_segment_expired(self, segment):
"""Mark a segment for cleaning in the local database.
The segment parameter should be either a SegmentInfo object or an
integer segment id. Objects in the given segment will be marked as
expired, which means that any future snapshots that would re-use those
objects will instead write out a new copy of the object, and thus no
future snapshots will depend upon the given segment.
"""
if isinstance(segment, int):
id = segment
elif isinstance(segment, self.SegmentInfo):
id = segment.id
else:
raise TypeError("Invalid segment: %s, must be of type int or SegmentInfo, not %s" % (segment, type(segment)))
cur = self.cursor()
cur.execute("select max(snapshotid) from snapshots")
last_snapshotid = cur.fetchone()[0]
cur.execute("update segments set expire_time = ? where segmentid = ?",
(last_snapshotid, id))
cur.execute("update block_index set expired = 0 where segmentid = ?",
(id,))
def balance_expired_objects(self):
"""Analyze expired objects in segments to be cleaned and group by age.
Update the block_index table of the local database to group expired
objects by age. The exact number of buckets and the cutoffs for each
are dynamically determined. Calling this function after marking
segments expired will help in the segment cleaning process, by ensuring
that when active objects from clean segments are rewritten, they will
be placed into new segments roughly grouped by age.
"""
# The expired column of the block_index table is used when generating a
# new Cumulus snapshot. A null value indicates that an object may be
# re-used. Otherwise, an object must be written into a new segment if
# needed. Objects with distinct expired values will be written into
# distinct segments, to allow for some grouping by age. The value 0 is
# somewhat special in that it indicates any rewritten objects can be
# placed in the same segment as completely new objects; this can be
# used for very young objects which have been expired, or objects not
# expected to be encountered.
#
# In the balancing process, all objects which are not used in any
# current snapshots will have expired set to 0. Objects which have
# been seen will be sorted by age and will have expired values set to
# 0, 1, 2, and so on based on age (with younger objects being assigned
# lower values). The number of buckets and the age cutoffs is
# determined by looking at the distribution of block ages.
cur = self.cursor()
# Mark all expired objects with expired = 0; these objects will later
# have values set to indicate groupings of objects when repacking.
cur.execute("""update block_index set expired = 0
where expired is not null""")
# We will want to aim for at least one full segment for each bucket
# that we eventually create, but don't know how many bytes that should
# be due to compression. So compute the average number of bytes in
# each expired segment as a rough estimate for the minimum size of each
# bucket. (This estimate could be thrown off by many not-fully-packed
# segments, but for now don't worry too much about that.) If we can't
# compute an average, it's probably because there are no expired
# segments, so we have no more work to do.
cur.execute("""select avg(size) from segments
where segmentid in
(select distinct segmentid from block_index
where expired is not null)""")
segment_size_estimate = cur.fetchone()[0]
if not segment_size_estimate:
return
# Next, extract distribution of expired objects (number and size) by
# age. Save the timestamp for "now" so that the classification of
# blocks into age buckets will not change later in the function, after
# time has passed. Set any timestamps in the future to now, so we are
# guaranteed that for the rest of this function, age is always
# non-negative.
cur.execute("select julianday('now')")
now = cur.fetchone()[0]
cur.execute("""update block_index set timestamp = ?
where timestamp > ? and expired is not null""",
(now, now))
cur.execute("""select round(? - timestamp) as age, count(*), sum(size)
from block_index where expired = 0
group by age order by age""", (now,))
distribution = cur.fetchall()
# Start to determine the buckets for expired objects. Heuristics used:
# - An upper bound on the number of buckets is given by the number of
# segments we estimate it will take to store all data. In fact,
# aim for a couple of segments per bucket.
# - Place very young objects in bucket 0 (place with new objects)
# unless there are enough of them to warrant a separate bucket.
# - Try not to create unnecessarily many buckets, since fewer buckets
# will allow repacked data to be grouped based on spatial locality
# (while more buckets will group by temporal locality). We want a
# balance.
MIN_AGE = 4
total_bytes = sum([i[2] for i in distribution])
target_buckets = 2 * (total_bytes / segment_size_estimate) ** 0.4
min_size = 1.5 * segment_size_estimate
target_size = max(2 * segment_size_estimate,
total_bytes / target_buckets)
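        # For example, roughly 10 GB of expired data in ~100 MB segments gives
        # target_buckets = 2 * 100 ** 0.4, i.e. about 12.6 buckets.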
print("segment_size:", segment_size_estimate)
print("distribution:", distribution)
print("total_bytes:", total_bytes)
print("target_buckets:", target_buckets)
print("min, target size:", min_size, target_size)
# Chosen cutoffs. Each bucket consists of objects with age greater
# than one cutoff value, but not greater than the next largest cutoff.
cutoffs = []
# Starting with the oldest objects, begin grouping together into
# buckets of size at least target_size bytes.
distribution.reverse()
bucket_size = 0
min_age_bucket = False
for (age, items, size) in distribution:
if bucket_size >= target_size \
or (age < MIN_AGE and not min_age_bucket):
if bucket_size < target_size and len(cutoffs) > 0:
cutoffs.pop()
cutoffs.append(age)
bucket_size = 0
bucket_size += size
if age < MIN_AGE:
min_age_bucket = True
# The last (youngest) bucket will be group 0, unless it has enough data
# to be of size min_size by itself, or there happen to be no objects
# less than MIN_AGE at all.
if bucket_size >= min_size or not min_age_bucket:
cutoffs.append(-1)
cutoffs.append(-1)
print("cutoffs:", cutoffs)
# Update the database to assign each object to the appropriate bucket.
cutoffs.reverse()
for i in range(len(cutoffs)):
cur.execute("""update block_index set expired = ?
where round(? - timestamp) > ?
and expired is not null""",
(i, now, cutoffs[i]))
| gpl-2.0 | 7,783,808,071,367,975,000 | 36.929423 | 121 | 0.603533 | false |
tonybaloney/st2 | st2api/tests/unit/controllers/v1/test_timers_rbac.py | 1 | 7529 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import httplib
import six
from st2common.rbac.types import PermissionType
from st2common.rbac.types import ResourceType
from st2common.persistence.auth import User
from st2common.persistence.rbac import Role
from st2common.persistence.rbac import UserRoleAssignment
from st2common.persistence.rbac import PermissionGrant
from st2common.models.db.auth import UserDB
from st2common.models.db.rbac import RoleDB
from st2common.models.db.rbac import UserRoleAssignmentDB
from st2common.models.db.rbac import PermissionGrantDB
from st2common.models.db.timer import TimerDB
from st2tests.fixturesloader import FixturesLoader
from tests.base import APIControllerWithRBACTestCase
http_client = six.moves.http_client
__all__ = [
'TimerControllerRBACTestCase'
]
FIXTURES_PACK = 'timers'
TEST_FIXTURES = {
'triggers': ['cron1.yaml', 'date1.yaml', 'interval1.yaml', 'interval2.yaml', 'interval3.yaml']
}
class TimerControllerRBACTestCase(APIControllerWithRBACTestCase):
fixtures_loader = FixturesLoader()
def setUp(self):
super(TimerControllerRBACTestCase, self).setUp()
self.models = self.fixtures_loader.save_fixtures_to_db(fixtures_pack=FIXTURES_PACK,
fixtures_dict=TEST_FIXTURES)
file_name = 'cron1.yaml'
TimerControllerRBACTestCase.TRIGGER_1 = self.fixtures_loader.load_fixtures(
fixtures_pack=FIXTURES_PACK,
fixtures_dict={'triggers': [file_name]})['triggers'][file_name]
file_name = 'date1.yaml'
TimerControllerRBACTestCase.TRIGGER_2 = self.fixtures_loader.load_fixtures(
fixtures_pack=FIXTURES_PACK,
fixtures_dict={'triggers': [file_name]})['triggers'][file_name]
file_name = 'interval1.yaml'
TimerControllerRBACTestCase.TRIGGER_3 = self.fixtures_loader.load_fixtures(
fixtures_pack=FIXTURES_PACK,
fixtures_dict={'triggers': [file_name]})['triggers'][file_name]
# Insert mock users, roles and assignments
# Users
user_1_db = UserDB(name='timer_list')
user_1_db = User.add_or_update(user_1_db)
self.users['timer_list'] = user_1_db
user_2_db = UserDB(name='timer_view')
user_2_db = User.add_or_update(user_2_db)
self.users['timer_view'] = user_2_db
# Roles
# timer_list
grant_db = PermissionGrantDB(resource_uid=None,
resource_type=ResourceType.TIMER,
permission_types=[PermissionType.TIMER_LIST])
grant_db = PermissionGrant.add_or_update(grant_db)
permission_grants = [str(grant_db.id)]
role_1_db = RoleDB(name='timer_list', permission_grants=permission_grants)
role_1_db = Role.add_or_update(role_1_db)
self.roles['timer_list'] = role_1_db
        # timer_view on timer 1
trigger_db = self.models['triggers']['cron1.yaml']
timer_uid = TimerDB(name=trigger_db.name, pack=trigger_db.pack).get_uid()
grant_db = PermissionGrantDB(resource_uid=timer_uid,
resource_type=ResourceType.TIMER,
permission_types=[PermissionType.TIMER_VIEW])
grant_db = PermissionGrant.add_or_update(grant_db)
permission_grants = [str(grant_db.id)]
role_1_db = RoleDB(name='timer_view', permission_grants=permission_grants)
role_1_db = Role.add_or_update(role_1_db)
self.roles['timer_view'] = role_1_db
# Role assignments
role_assignment_db = UserRoleAssignmentDB(
user=self.users['timer_list'].name,
role=self.roles['timer_list'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
role_assignment_db = UserRoleAssignmentDB(
user=self.users['timer_view'].name,
role=self.roles['timer_view'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
def test_get_all_no_permissions(self):
user_db = self.users['no_permissions']
self.use_user(user_db)
resp = self.app.get('/v1/timers', expect_errors=True)
expected_msg = ('User "no_permissions" doesn\'t have required permission "timer_list"')
self.assertEqual(resp.status_code, httplib.FORBIDDEN)
self.assertEqual(resp.json['faultstring'], expected_msg)
def test_get_one_no_permissions(self):
user_db = self.users['no_permissions']
self.use_user(user_db)
trigger_db = self.models['triggers']['cron1.yaml']
trigger_id = trigger_db.id
timer_uid = TimerDB(name=trigger_db.name, pack=trigger_db.pack).get_uid()
resp = self.app.get('/v1/timers/%s' % (trigger_id), expect_errors=True)
expected_msg = ('User "no_permissions" doesn\'t have required permission "timer_view"'
' on resource "%s"' % (timer_uid))
self.assertEqual(resp.status_code, httplib.FORBIDDEN)
self.assertEqual(resp.json['faultstring'], expected_msg)
def test_get_all_permission_success_get_one_no_permission_failure(self):
user_db = self.users['timer_list']
self.use_user(user_db)
# timer_list permission, but no timer_view permission
resp = self.app.get('/v1/timers')
self.assertEqual(resp.status_code, httplib.OK)
self.assertEqual(len(resp.json), 5)
trigger_db = self.models['triggers']['cron1.yaml']
trigger_id = trigger_db.id
timer_uid = TimerDB(name=trigger_db.name, pack=trigger_db.pack).get_uid()
resp = self.app.get('/v1/timers/%s' % (trigger_id), expect_errors=True)
expected_msg = ('User "timer_list" doesn\'t have required permission "timer_view"'
' on resource "%s"' % (timer_uid))
self.assertEqual(resp.status_code, httplib.FORBIDDEN)
self.assertEqual(resp.json['faultstring'], expected_msg)
def test_get_one_permission_success_get_all_no_permission_failure(self):
user_db = self.users['timer_view']
self.use_user(user_db)
# timer_view permission, but no timer_list permission
trigger_db = self.models['triggers']['cron1.yaml']
trigger_id = trigger_db.id
trigger_uid = trigger_db.get_uid()
resp = self.app.get('/v1/timers/%s' % (trigger_id))
self.assertEqual(resp.status_code, httplib.OK)
self.assertEqual(resp.json['uid'], trigger_uid)
resp = self.app.get('/v1/timers', expect_errors=True)
expected_msg = ('User "timer_view" doesn\'t have required permission "timer_list"')
self.assertEqual(resp.status_code, httplib.FORBIDDEN)
self.assertEqual(resp.json['faultstring'], expected_msg)
| apache-2.0 | -7,138,431,268,694,937,000 | 43.550296 | 98 | 0.659184 | false |
sinneb/pyo-patcher | pyoscript.py | 1 | 2097 | #!/usr/bin/env python
from pyo import *
from time import sleep
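# Patch summary: boots a jack-backed audio server, loads three AKWF wavetables,
# routes Beat-driven TrigRand/Iter modulators into the oscillators' frequency and
# amplitude inputs, and mixes the three oscillators to a stereo output; the loop
# at the bottom keeps the script alive until interrupted.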
s = Server(audio='jack')
s.setMidiInputDevice(99)
s.boot().start()
dummy = LFO(freq=0)
dummymidi = Notein()
dummytable = NewTable(length=1, chnls=1)
dummyinput = Input(chnl=0, mul=.7)
table_pyo4e06153256c49c = SndTable("webroot/AKWF_0019.wav")
pyocd2b6bb4e9a09 = SndTable("webroot/AKWF_0008.wav")
pyo53964524ddada4 = SndTable("webroot/AKWF_vgame_0026.wav")
pyo4e06153256c49c = OscTrig(mul = 1, phase = 0, trig = dummy, freq = [0.1]*1, add = 0, table=table_pyo4e06153256c49c)
pyoaa1c0095f95c18 = TrigRand(mul = 1, port = 1, min = 100, input = dummy, init = 110, add = 0, max = 110)
pyof43fd9decf584 = Mixer(outs=2, chnls=10).out()
pyo3276302463575 = Beat(time = 0.5, w1 = 80, w2 = 50, taps = 16, poly = 1, w3 = 30, onlyonce = False).play()
pyoa0f0f8b540dc9 = Allpass(input=[dummy]*10, delay=0, feedback=0, maxdelay=0, mul=0.33)
pyo9b6c493e9f96b8 = Iter(mul = 1, input = dummy, choice = [0.5,0.3,0.2], init = 0, add = 0)
pyo43374e9861e778 = Beat(time = 0.125, w1 = 80, w2 = 50, taps = 16, poly = 1, w3 = 30, onlyonce = False).play()
pyob4690d4b79f8 = Osc(mul = 1, table = dummytable, phase = 0, freq = [0.1]*1, add = 0)
pyo137b26190a5422 = Osc(mul = 1, table = dummytable, phase = 0, freq = [0.1]*1, add = 0)
pyof43fd9decf584.addInput(10,pyo4e06153256c49c)
pyof43fd9decf584.setAmp(10,0,1)
pyof43fd9decf584.setAmp(10,1,1)
pyoa0f0f8b540dc9.input = pyoaa1c0095f95c18
pyob4690d4b79f8.freq = pyoaa1c0095f95c18
pyo137b26190a5422.freq = pyoaa1c0095f95c18
pyoaa1c0095f95c18.input = pyo3276302463575
pyo4e06153256c49c.freq = pyoa0f0f8b540dc9
pyoa0f0f8b540dc9.mul = pyo9b6c493e9f96b8
pyob4690d4b79f8.mul = pyo9b6c493e9f96b8
pyo137b26190a5422.mul = pyo9b6c493e9f96b8
pyo9b6c493e9f96b8.input = pyo43374e9861e778
pyof43fd9decf584.addInput(11,pyob4690d4b79f8)
pyof43fd9decf584.setAmp(11,0,1)
pyof43fd9decf584.setAmp(11,1,1)
pyob4690d4b79f8.table = pyocd2b6bb4e9a09
pyof43fd9decf584.addInput(12,pyo137b26190a5422)
pyof43fd9decf584.setAmp(12,0,1)
pyof43fd9decf584.setAmp(12,1,1)
pyo137b26190a5422.table = pyo53964524ddada4
while True:
sleep(1)
| mit | -9,220,165,762,181,545,000 | 40.117647 | 117 | 0.749642 | false |
PIVX-Project/PIVX | test/functional/wallet_zapwallettxes.py | 1 | 3478 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the zapwallettxes functionality.
- start two pivxd nodes
- create two transactions on node 0 - one is confirmed and one is unconfirmed.
- restart node 0 and verify that both the confirmed and the unconfirmed
transactions are still available.
- restart both nodes with zapwallettxes (levels 1 and 2) and verify that the
  confirmed transactions are still available, but that the unconfirmed
  transaction has been zapped and the confirmed balances are unchanged.
"""
from test_framework.test_framework import PivxTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class ZapWalletTXesTest (PivxTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def run_test(self):
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
self.sync_all()
self.nodes[1].generate(101)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 250)
# This transaction will be confirmed
txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 10)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
balance_nodes = [self.nodes[i].getbalance() for i in range(self.num_nodes)]
# This transaction will not be confirmed
txid2 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 20)
self.sync_mempools(wait=.1)
# Confirmed and unconfirmed transactions are now in the wallet.
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)
# Exercise balance rpcs
assert_equal(self.nodes[1].getwalletinfo()["unconfirmed_balance"], 20)
assert_equal(self.nodes[1].getunconfirmedbalance(), 20)
# Stop-start node0. Both confirmed and unconfirmed transactions remain in the wallet.
self.stop_node(0)
self.start_node(0)
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)
# Stop nodes and restart with zapwallettxes and persistmempool. The unconfirmed
# transaction is zapped from the wallet, but is re-added when the mempool is reloaded.
# original balances are restored
for i in range(1, 3):
self.log.info("Restarting with --zapwallettxes=%d" % i)
self.stop_nodes()
self.start_node(0, ["-zapwallettxes=%d" % i])
self.start_node(1, ["-zapwallettxes=%d" % i])
            # tx1 is still available because it was confirmed
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
# This will raise an exception because the unconfirmed transaction has been zapped
assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', self.nodes[0].gettransaction, txid2)
# Check (confirmed) balances
assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(self.num_nodes)])
if __name__ == '__main__':
ZapWalletTXesTest().main()
| mit | 2,287,670,572,320,073,700 | 41.938272 | 116 | 0.676251 | false |
PYPIT/PYPIT | pypeit/tests/tstutils.py | 1 | 7296 | # Odds and ends in support of tests
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import pytest
import numpy as np
import copy
from astropy import time
from pypeit import arcimage
from pypeit import traceslits
from pypeit import wavecalib
from pypeit import wavetilts
from pypeit.spectrographs.util import load_spectrograph
from pypeit.metadata import PypeItMetaData
# Create a decorator for tests that require the PypeIt dev suite
dev_suite_required = pytest.mark.skipif(os.getenv('PYPEIT_DEV') is None,
reason='test requires dev suite')
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'files')
return os.path.join(data_dir, filename)
def dummy_fitstbl(nfile=10, spectro_name='shane_kast_blue', directory='', notype=False):
"""
Generate a dummy fitstbl for testing
Parameters
----------
nfile : int, optional
Number of files to mimic
spectro_name : str, optional
Name of spectrograph to mimic
notype : bool (optional)
If True, do not add image type info to the fitstbl
Returns
-------
fitstbl : PypeItMetaData
"""
fitsdict = {}
fitsdict['index'] = np.arange(nfile)
fitsdict['directory'] = [directory]*nfile
fitsdict['filename'] = ['b{:03d}.fits.gz'.format(i) for i in range(nfile)]
# TODO: The below will fail at 60
dates = ['2015-01-23T00:{:02d}:11.04'.format(i) for i in range(nfile)]
ttime = time.Time(dates, format='isot')
fitsdict['mjd'] = ttime.mjd
fitsdict['target'] = ['Dummy']*nfile
fitsdict['ra'] = ['00:00:00']*nfile
fitsdict['dec'] = ['+00:00:00']*nfile
fitsdict['exptime'] = [300.] * nfile
fitsdict['dispname'] = ['600/4310'] * nfile
fitsdict['dichroic'] = ['560'] * nfile
fitsdict["binning"] = ['1,1']*nfile
fitsdict["airmass"] = [1.0]*nfile
if spectro_name == 'shane_kast_blue':
fitsdict['numamplifiers'] = [1] * nfile
# Lamps
for i in range(1,17):
fitsdict['lampstat{:02d}'.format(i)] = ['off'] * nfile
fitsdict['exptime'][0] = 0 # Bias
fitsdict['lampstat06'][1] = 'on' # Arc
fitsdict['exptime'][1] = 30 # Arc
fitsdict['lampstat01'][2] = 'on' # Trace, pixel, slit flat
fitsdict['lampstat01'][3] = 'on' # Trace, pixel, slit flat
fitsdict['exptime'][2] = 30 # flat
fitsdict['exptime'][3] = 30 # flat
fitsdict['ra'][4] = '05:06:36.6' # Standard
fitsdict['dec'][4] = '52:52:01.0'
fitsdict['airmass'][4] = 1.2
fitsdict['ra'][5] = '07:06:23.45' # Random object
fitsdict['dec'][5] = '+30:20:50.5'
fitsdict['decker'] = ['0.5 arcsec'] * nfile
# arrays
for k in fitsdict.keys():
fitsdict[k] = np.array(fitsdict[k])
spectrograph = load_spectrograph(spectro_name)
fitstbl = PypeItMetaData(spectrograph, spectrograph.default_pypeit_par(), data=fitsdict)
fitstbl['instrume'] = spectro_name
type_bits = np.zeros(len(fitstbl), dtype=fitstbl.type_bitmask.minimum_dtype())
# Image typing
if not notype:
if spectro_name == 'shane_kast_blue':
#fitstbl['sci_ID'] = 1 # This links all the files to the science object
type_bits[0] = fitstbl.type_bitmask.turn_on(type_bits[0], flag='bias')
type_bits[1] = fitstbl.type_bitmask.turn_on(type_bits[1], flag='arc')
type_bits[2:4] = fitstbl.type_bitmask.turn_on(type_bits[2:4], flag=['pixelflat', 'trace'])
type_bits[4] = fitstbl.type_bitmask.turn_on(type_bits[4], flag='standard')
type_bits[5:] = fitstbl.type_bitmask.turn_on(type_bits[5:], flag='science')
fitstbl.set_frame_types(type_bits)
# Calibration groups
cfgs = fitstbl.unique_configurations(ignore_frames=['bias', 'dark'])
fitstbl.set_configurations(cfgs)
fitstbl.set_calibration_groups(global_frames=['bias', 'dark'])
return fitstbl
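# Usage sketch for the helper above (requires the pypeit test dependencies to
# be importable; the values below are only illustrative):
#
#   fitstbl = dummy_fitstbl(nfile=5, spectro_name='shane_kast_blue')
#   assert len(fitstbl) == 5                        # one row per mimicked file
#   assert fitstbl['instrume'][0] == 'shane_kast_blue'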
def load_kast_blue_masters(get_spectrograph=False, aimg=False, tslits=False, tilts=False,
datasec=False, wvcalib=False):
"""
Load up the set of shane_kast_blue master frames
Args:
get_spectrograph:
aimg:
tslits:
tilts:
datasec:
wvcalib:
Returns:
"""
spectrograph = load_spectrograph('shane_kast_blue')
spectrograph.naxis = (2112,350) # Image shape with overscan
root_path = data_path('MF') if os.getenv('PYPEIT_DEV') is None \
else os.path.join(os.getenv('PYPEIT_DEV'), 'Cooked', 'MF')
master_dir = root_path+'_'+spectrograph.spectrograph
reuse_masters = True
# Load up the Masters
ret = []
if get_spectrograph:
ret.append(spectrograph)
master_key = 'A_1_01'
if aimg:
AImg = arcimage.ArcImage(spectrograph, master_key=master_key, master_dir=master_dir, reuse_masters=reuse_masters)
msarc, _ = AImg.load_master(AImg.ms_name)
ret.append(msarc)
if tslits:
traceSlits = traceslits.TraceSlits(None,spectrograph,None)
# TODO: Should this be json now?
tslits_dict, mstrace = traceSlits.load_master(os.path.join(master_dir,'MasterTrace_A_1_01.fits'))
# This is a bit of a hack, but I'm adding the mstrace to the dict since we need it in the flat field test
tslits_dict['mstrace'] = mstrace
ret.append(tslits_dict)
if tilts:
wvTilts = wavetilts.WaveTilts(None, None, spectrograph, None, None, master_key=master_key,
master_dir=master_dir, reuse_masters=reuse_masters)
tilts_dict, _ = wvTilts.master()
ret.append(tilts_dict)
if datasec:
datasec_img = spectrograph.get_datasec_img(data_path('b1.fits.gz'), 1)
ret.append(datasec_img)
if wvcalib:
Wavecalib = wavecalib.WaveCalib(None, None, spectrograph,
spectrograph.default_pypeit_par()['calibrations']['wavelengths'],
master_key=master_key,
master_dir=master_dir, reuse_masters=reuse_masters)
wv_calib, _ = Wavecalib.master()
ret.append(wv_calib)
# Return
return ret
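# Usage sketch (assumes the Cooked master frames from the PypeIt dev suite are
# available); the returned list follows the order of the requested items:
#
#   spectrograph, msarc = load_kast_blue_masters(get_spectrograph=True, aimg=True)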
def instant_traceslits(mstrace_file, det=None):
"""
Instantiate a TraceSlits object from the master file
    The loaded tslits_dict is set as the attribute
Args:
mstrace_file (str):
det (int, optional):
Returns:
Spectrograph, TraceSlits:
"""
# Load
tslits_dict, mstrace = traceslits.load_tslits(mstrace_file)
# Instantiate
spectrograph = load_spectrograph(tslits_dict['spectrograph'])
par = spectrograph.default_pypeit_par()
msbpm = spectrograph.bpm(shape=mstrace.shape, det=det)
binning = tslits_dict['binspectral'], tslits_dict['binspatial']
traceSlits = traceslits.TraceSlits(mstrace, spectrograph, par['calibrations']['slits'],
msbpm=msbpm, binning=binning)
traceSlits.tslits_dict = copy.deepcopy(tslits_dict)
return spectrograph, traceSlits
| gpl-3.0 | -6,837,063,038,903,173,000 | 35.118812 | 121 | 0.614446 | false |
lhaze/dharma | pca/data/observable.py | 1 | 9197 | import typing as t
from pca.utils.collections import OrderedSet
from pca.utils.inspect import is_argspec_valid
from pca.utils.sentinel import Sentinel
# sentinel object for expressing that a value of a observable hasn't been set
# NB: None object might be a valid value
undefined_value = Sentinel(module="pca.data.observable", name="undefined_value")
Owner = t.TypeVar("Owner")
Value = t.TypeVar("Value")
Preprocessor = t.Callable[[Value], Value]
Validator = t.Callable[[Owner, Value, Value], None]  # raises errors
Observer = t.Callable[[Owner, Value, Value], None]
class Observable:
_owner: Owner
_label: str
# pattern for __dict__ key on the owner class; space char is intended to be
# sure we are not colliding with any proper attribute name
_observer_key_pattern = "%s instance observers"
def __init__(
self,
default=undefined_value,
preprocessor: Preprocessor = None,
validator: Validator = None,
class_observers: t.Iterable[Observer] = None,
):
"""
Params:
default -- default value of the Observable. It is not validated (you are supposed to
know what you are doing).
Default: sentinel object "pca.data.observable.undefined_value"
preprocessor -- a callable of signature:
(raw_value) -> value
which prepares what's going-to-be new value before the assignment and the validity
checks. It's usable when you want to cast the value, instantiate a class to be
a value or something similar.
validator -- a callable of signature:
                (instance, old_value, new_value) -> None
which checks if the value is valid, whatever that means to the observable.
class_observers -- iterable of per-class observers. With this argument you can declare
the observers during Observable declaration.
"""
self.default = default
self.preprocessor = preprocessor
self.validator = validator
self._class_observers = OrderedSet(class_observers) if class_observers else OrderedSet()
def __set_name__(self, owner, name):
self._owner = owner
self._label = name
@property
def label(self) -> str:
"""Python name of the attribute under which the observable is hung on the owner class."""
return self._label
def _get_value(self, instance: Owner) -> Value:
"""Technical detail of retrieving the value from the instance `__dict__`."""
return instance.__dict__.get(self._label, self.default)
def _set_value(self, instance: Owner, value: Value):
"""Technical detail of setting the value at the instance `__dict__`."""
instance.__dict__[self._label] = value
def __get__(self, instance: Owner, owner: t.Type) -> Value:
"""
If accessed from the instance, gets the value of the observable from
the owner's `__dict__`.
If accessed from the class, lets interact with the observable itself.
"""
if instance is None:
return self
return self._get_value(instance)
def __set__(self, instance: Owner, new_value: Value) -> None:
"""
Iff value has changed, __set__ processes the value, validates it, updates itself and
notifies all change observers.
"""
old_value = self._get_value(instance)
new_value = self._preprocess(new_value)
if new_value is not old_value:
# logic fires only in the case when the value changes
self._validate(instance, new_value)
# the new value is assumed to be valid
self._set_value(instance, new_value)
# notify observers about the change done
self._notify(instance, old_value, new_value)
def __delete__(self, instance: Owner) -> None:
"""
Sets default (undefined_value by default) as the value of the observable.
NB: if the observable hasn't been set at all, there's no value, there is only
a `self.default` attribute.
"""
self._set_value(instance, self.default)
def _preprocess(self, value: Value) -> Value:
"""
Prepares assigned value BEFORE value is checked whether it is to be changed. Useful if
your assigning process has to change the value in some way, ie. instantiates the class
of the value or casts the value.
Params:
value -- raw value to reprocess.
Returns:
Preprocessed value.
"""
if not self.preprocessor:
return value
return self.preprocessor(value)
def _validate(self, instance: Owner, new_value: Value = None) -> None:
"""
Fires validator using instance, old_value and new_value as arguments.
The `validate` method may be called with a to-be-assigned value as the `new_value`
        for the purpose of validating it pre-assignment; or without a new_value, which means that
the current value is validated.
Params:
instance -- instance of the owner
new_value -- a value which is supposed to be set on the observable; the default value
is the current value of the observable
Raises:
errors that are used by the validator
"""
if not self.validator:
return
old_value = self._get_value(instance)
new_value = new_value or old_value
self.validator(instance, old_value, new_value)
def _notify(self, instance: Owner, old_value: Value, new_value: Value) -> None:
"""
Fires notifications to per-class and per-instance observers. Old value is passed
as an argument, new value is just the current value (we are at the point right after
the assignment).
Params:
instance -- instance of the owner.
old_value -- value before assignment.
new_value -- current value of the observable.
"""
# per-instance observers
key = self._observer_key_pattern % self._label
for observer in instance.__dict__.get(key, ()):
observer(instance, old_value, new_value)
# per-class observers
for observer in self._class_observers or ():
observer(instance, old_value, new_value)
def add_class_observer(self, observer: Observer) -> None:
"""
Adds a function or method as a change observer on per-class basis.
Params:
observer -- a function or method of
                (owner_instance, old_value, new_value) -> None
            signature that is going to be called whenever the observable changes its value.
            It is supposed to serve as an observer of the observable's value.
"""
# TODO assert observer signature is valid
self._class_observers.add(observer)
def class_observer(self, observer: Observer) -> Observer:
"""
Decorator that marks a function or method as a change observer on per-class basis.
Params:
observer -- a function or method of
                (owner_instance, old_value, new_value) -> None
            signature that is going to be called whenever the observable changes its value.
            It is supposed to serve as an observer of the observable's value.
Returns:
the initial `observer` argument untouched. It just adds it to the internal collection
of observers of the Observable.
Example usage:
class MyObject:
some_observable = Observable()
@some_observable.class_observer
                def some_observable_activated(self, old_value, new_value):
do_sth_here
"""
# TODO assert observer signature is valid
self.add_class_observer(observer)
# it's intended as a decorator, so return the `observer` untouched
return observer
def add_instance_observer(self, instance: Owner, observer: Observer) -> None:
"""
Adds a function or method as a change observer on per-instance basis.
Params:
instance - the owner instance that the observer is connected to
observer - a function or method of
(owner_instance, old_value, new_value) -> None
signature that is going to be called whenever the observable changes its value.
            It is supposed to serve as an observer of the observable's value.
"""
# we're asserting valid observer signature (without relying on
# duck-typing), because observer is passed here, but the potential
# TypeError is going to be raised much further during the runtime
# TODO assert observer signature is valid
assert is_argspec_valid(observer, arg_number=3)
# per-instance change observers
# observable signs itself with its label in the instance __dict__
key = self._observer_key_pattern % self._label
instance.__dict__.setdefault(key, OrderedSet()).add(observer)
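# Sketch of registering a per-instance observer; the owner class and observer
# below are made-up illustrations, not part of this module:
#
#   class Thermometer:
#       temperature = Observable(default=0)
#
#   def on_change(instance, old_value, new_value):
#       print(old_value, "->", new_value)
#
#   t = Thermometer()
#   Thermometer.temperature.add_instance_observer(t, on_change)
#   t.temperature = 21   # fires on_change(t, 0, 21)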
| mit | 1,102,903,874,814,059,900 | 40.242152 | 98 | 0.620746 | false |
iConor/lego-lirc | power_functions/single_output.py | 1 | 2799 | """LEGO Power Functions RC v1.20 - Single Output Mode"""
import power_functions.rc_protocol as pf_rc
# Mode Bits
MODE = ["PWM", # Mode = PWM
"CSTID"] # Mode = Clear/Set/Toggle/Inc/Dec
OUTPUT = ["RED", # Output A
"BLU"] # Output B
# Data if Mode = PWM
PWM = ["FLT", # Float
"FWD_1", # PWM forward, step 1
"FWD_2", # PWM forward, step 2
"FWD_3", # PWM forward, step 3
"FWD_4", # PWM forward, step 4
"FWD_5", # PWM forward, step 5
"FWD_6", # PWM forward, step 6
"FWD_7", # PWM forward, step 7
"BRK", # Brake then float
"REV_7", # PWM backward, step 7
"REV_6", # PWM backward, step 6
"REV_5", # PWM backward, step 5
"REV_4", # PWM backward, step 4
"REV_3", # PWM backward, step 3
"REV_2", # PWM backward, step 2
"REV_1"] # PWM backward, step 1
# Data if Mode = Clear/Set/Toggle/Inc/Dec
CSTID = ["TGL_FWD", # Toggle full forward
"TGL_DIR", # Toggle direction
"INC_NUM", # Increment numerical PWM
"DEC_NUM", # Decrement numerical PWM
"INC_PWM", # Increment PWM
"DEC_PWM", # Decrement PWM
"FWD_TO", # Full forward (timeout)
"REV_TO", # Full backward (timeout)
"TGL_FR", # Toggle full forward/backward
"CLR_C1", # Clear C1
"SET_C1", # Set C1
"TGL_C1", # Toggle C1
"CLR_C2", # Clear C2
"SET_C2", # Set C2
"TGL_C2", # Toggle C2
"TGL_REV"] # Toggle full backward
def payload(channel, mode, output, data, _esc=pf_rc.ESC.MODE, _addr=pf_rc.ADDR.DEF):
"""Returns the payload for a Single Output Mode command."""
nibble1 = _esc | channel
nibble2 = _addr | pf_rc.MODE.SNGL | (mode << 1) | output
nibble3 = data
nibble4 = pf_rc.lrc(nibble1, nibble2, nibble3)
return nibble1, nibble2, nibble3, nibble4
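# Example sketch: build a single payload by hand (channel index 0, PWM mode,
# red output, "forward step 7"); only names defined or imported above are used.
#
#   nibbles = payload(channel=0, mode=0, output=0, data=PWM.index("FWD_7"))
#   print(pf_rc.payload_string(*nibbles))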
def button(channel, mode, output, data):
"""Returns the button for a Single Output Mode command."""
if mode == 0:
data = PWM[data]
else:
data = CSTID[data]
return (pf_rc.CHANNEL[channel], OUTPUT[output], data)
def button_string(channel, output, data):
"""Returns the string representation of a Single Output Mode button."""
return 'CH{:s}_{:s}_{:s}'.format(channel, output, data)
def lirc_codes():
"""Prints LIRC codes for Single Output Mode."""
for i in range(0, 4):
for j in range(0, 4):
for k in range(0, 16):
mode = (j & 0x2) >> 1
output = j & 0x1
hex_codes = pf_rc.payload_string(*payload(i, mode, output, k))
lirc_patterns = button_string(*button(i, mode, output, k))
print "\t{}\t\t{}".format(lirc_patterns, hex_codes)
| mit | -2,519,746,911,384,808,400 | 35.350649 | 84 | 0.548767 | false |
bpdavidson/s2n | tests/integration/s2n_handshake_test_s_client.py | 1 | 20654 | #
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
"""
Handshake tests using Openssl s_client against s2nd
Openssl 1.1.0 removed SSLv3, 3DES, and RC4, so we won't have coverage there.
"""
import argparse
import os
import sys
import subprocess
import itertools
import multiprocessing
import threading
from os import environ
from multiprocessing.pool import ThreadPool
from s2n_test_constants import *
from time import sleep
PROTO_VERS_TO_S_CLIENT_ARG = {
S2N_TLS10 : "-tls1",
S2N_TLS11 : "-tls1_1",
S2N_TLS12 : "-tls1_2",
}
S_CLIENT_SUCCESSFUL_OCSP="OCSP Response Status: successful"
def communicate_processes(*processes):
outs = []
for p in processes:
p.kill()
out = p.communicate()[0].decode("utf-8").split('\n')
outs.append(out)
return outs
def cleanup_processes(*processes):
for p in processes:
p.kill()
p.wait()
def try_handshake(endpoint, port, cipher, ssl_version, server_cert=None, server_key=None, ocsp=None, sig_algs=None, curves=None, resume=False, no_ticket=False,
prefer_low_latency=False, enter_fips_mode=False, client_auth=None, client_cert=DEFAULT_CLIENT_CERT_PATH, client_key=DEFAULT_CLIENT_KEY_PATH):
"""
Attempt to handshake against s2nd listening on `endpoint` and `port` using Openssl s_client
:param int endpoint: endpoint for s2nd to listen on
:param int port: port for s2nd to listen on
:param str cipher: ciphers for Openssl s_client to offer. See https://www.openssl.org/docs/man1.0.2/apps/ciphers.html
:param int ssl_version: SSL version for s_client to use
:param str server_cert: path to certificate for s2nd to use
:param str server_key: path to private key for s2nd to use
:param str ocsp: path to OCSP response file for stapling
:param str sig_algs: Signature algorithms for s_client to offer
:param str curves: Elliptic curves for s_client to offer
:param bool resume: True if s_client should try to reconnect to s2nd and reuse the same TLS session. False for normal negotiation.
:param bool no_ticket: True if s2n server should not use session ticket to resume the same TLS session.
:param bool prefer_low_latency: True if s2nd should use 1500 for max outgoing record size. False for default max.
:param bool enter_fips_mode: True if s2nd should enter libcrypto's FIPS mode. Libcrypto must be built with a FIPS module to enter FIPS mode.
:param bool client_auth: True if the test should try and use client authentication
:param str client_cert: Path to the client's cert file
:param str client_key: Path to the client's private key file
:return: 0 on successfully negotiation(s), -1 on failure
"""
# Override certificate for ECDSA if unspecified. We can remove this when we
# support multiple certificates
if server_cert is None and "ECDSA" in cipher:
server_cert = TEST_ECDSA_CERT
server_key = TEST_ECDSA_KEY
# Fire up s2nd
s2nd_cmd = ["../../bin/s2nd"]
if server_cert is not None:
s2nd_cmd.extend(["--cert", server_cert])
if server_key is not None:
s2nd_cmd.extend(["--key", server_key])
if ocsp is not None:
s2nd_cmd.extend(["--ocsp", ocsp])
if prefer_low_latency == True:
s2nd_cmd.append("--prefer-low-latency")
if client_auth is not None:
s2nd_cmd.append("-m")
s2nd_cmd.extend(["-t", client_cert])
s2nd_cmd.extend([str(endpoint), str(port)])
s2nd_ciphers = "test_all"
if enter_fips_mode == True:
s2nd_ciphers = "test_all_fips"
s2nd_cmd.append("--enter-fips-mode")
if "ECDSA" in cipher:
s2nd_ciphers = "test_all_ecdsa"
s2nd_cmd.append("-c")
s2nd_cmd.append(s2nd_ciphers)
if no_ticket:
s2nd_cmd.append("-T")
s2nd = subprocess.Popen(s2nd_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# Make sure it's running
sleep(0.1)
s_client_cmd = ["openssl", "s_client", PROTO_VERS_TO_S_CLIENT_ARG[ssl_version],
"-connect", str(endpoint) + ":" + str(port)]
if cipher is not None:
s_client_cmd.extend(["-cipher", cipher])
if sig_algs is not None:
s_client_cmd.extend(["-sigalgs", sig_algs])
if curves is not None:
s_client_cmd.extend(["-curves", curves])
if resume == True:
s_client_cmd.append("-reconnect")
if client_auth is not None:
s_client_cmd.extend(["-key", client_key])
s_client_cmd.extend(["-cert", client_cert])
if ocsp is not None:
s_client_cmd.append("-status")
# Fire up s_client
s_client = subprocess.Popen(s_client_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
# Wait for resumption
sleep(0.1)
# Write the cipher name towards s2n server
msg = (cipher + "\n").encode("utf-8")
s_client.stdin.write(msg)
s_client.stdin.flush()
# Wait for pipe ready for write
sleep(0.1)
# Write the cipher name from s2n server to client
s2nd.stdin.write(msg)
s2nd.stdin.flush()
# Wait for pipe ready for read
sleep(0.1)
outs = communicate_processes(s_client, s2nd)
s_out = outs[1]
    if not any(s_out):
        print ("No output from server PIPE, skip")
        return 0
    c_out = outs[0]
    if not any(c_out):
        print ("No output from client PIPE, skip")
return 0
s_out_len = len (s_out)
c_out_len = len (c_out)
# Validate that s_client resumes successfully against s2nd
s_line = 0
    if resume is True:
        separators = 0
        for i in range(0, s_out_len):
            s_line = i
            output = s_out[i].strip()
            if output.startswith("Resumed session"):
                separators += 1
            if separators == 5:
                break
        if separators != 5:
            print ("Failed to validate session resumption")
            return -1
# Validate that s_client accepted s2nd's stapled OCSP response
c_line = 0
if ocsp is not None:
ocsp_success = False
for i in range(0, c_out_len):
c_line = i
output = c_out[i].strip()
if S_CLIENT_SUCCESSFUL_OCSP in output:
ocsp_success = True
break
if not ocsp_success:
print ("Validate OCSP failed")
return -1
# Analyze server output
found = 0
for i in range(s_line, s_out_len):
output = s_out[i].strip()
if output == cipher:
found = 1
break
if found == 0:
print ("No cipher output from server")
return -1
found = 0
for i in range(c_line, c_out_len):
output = c_out[i].strip()
if output == cipher:
found = 1
break
if found == 0:
print ("No cipher output from client")
return -1
return 0
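# Minimal sketch of driving the helper directly (assumes s2nd is built at
# ../../bin/s2nd and openssl s_client is on the PATH, as the suites below do);
# host, port and cipher are illustrative values:
#
#   ret = try_handshake("127.0.0.1", 8888, "ECDHE-RSA-AES128-GCM-SHA256", S2N_TLS12)
#   print_result("Single handshake ... ", ret)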
def cert_path_to_str(cert_path):
# Converts a path to a cert into a string usable for printing to test output
# Example: "./test_certs/rsa_2048_sha256_client_cert.pem" => "RSA-2048-SHA256"
return '-'.join(cert_path[cert_path.rfind('/')+1:].split('_')[:3]).upper()
def print_result(result_prefix, return_code):
suffix = ""
if return_code == 0:
if sys.stdout.isatty():
suffix = "\033[32;1mPASSED\033[0m"
else:
suffix = "PASSED"
else:
if sys.stdout.isatty():
suffix = "\033[31;1mFAILED\033[0m"
else:
suffix ="FAILED"
print(result_prefix + suffix)
def create_thread_pool():
threadpool_size = multiprocessing.cpu_count() * 2 #Multiply by 2 since performance improves slightly if CPU has hyperthreading
print("\tCreating ThreadPool of size: " + str(threadpool_size))
threadpool = ThreadPool(processes=threadpool_size)
return threadpool
def run_handshake_test(host, port, ssl_version, cipher, fips_mode, no_ticket, use_client_auth, client_cert_path, client_key_path):
cipher_name = cipher.openssl_name
cipher_vers = cipher.min_tls_vers
# Skip the cipher if openssl can't test it. 3DES/RC4 are disabled by default in 1.1.0
if not cipher.openssl_1_1_0_compatible:
return 0
if ssl_version < cipher_vers:
return 0
client_cert_str=str(use_client_auth)
if (use_client_auth is not None) and (client_cert_path is not None):
client_cert_str = cert_path_to_str(client_cert_path)
ret = try_handshake(host, port, cipher_name, ssl_version, no_ticket=no_ticket, enter_fips_mode=fips_mode, client_auth=use_client_auth, client_cert=client_cert_path, client_key=client_key_path)
result_prefix = "Cipher: %-28s ClientCert: %-16s Vers: %-8s ... " % (cipher_name, client_cert_str, S2N_PROTO_VERS_TO_STR[ssl_version])
print_result(result_prefix, ret)
return ret
def handshake_test(host, port, test_ciphers, fips_mode, no_ticket=False, use_client_auth=None, use_client_cert=None, use_client_key=None):
"""
Basic handshake tests using all valid combinations of supported cipher suites and TLS versions.
"""
print("\n\tRunning handshake tests:")
failed = 0
for ssl_version in [S2N_TLS10, S2N_TLS11, S2N_TLS12]:
print("\n\tTesting ciphers using client version: " + S2N_PROTO_VERS_TO_STR[ssl_version])
threadpool = create_thread_pool()
port_offset = 0
results = []
for cipher in test_ciphers:
async_result = threadpool.apply_async(run_handshake_test, (host, port + port_offset, ssl_version, cipher, fips_mode, no_ticket, use_client_auth, use_client_cert, use_client_key))
port_offset += 1
results.append(async_result)
threadpool.close()
threadpool.join()
for async_result in results:
if async_result.get() != 0:
failed = 1
return failed
def client_auth_test(host, port, test_ciphers, fips_mode):
failed = 0
print("\n\tRunning client auth tests:")
for filename in os.listdir(TEST_CERT_DIRECTORY):
if "client_cert" in filename and "rsa" in filename:
client_cert_path = TEST_CERT_DIRECTORY + filename
client_key_path = TEST_CERT_DIRECTORY + filename.replace("client_cert", "client_key")
ret = handshake_test(host, port, test_ciphers, fips_mode, no_ticket=True, use_client_auth=True, use_client_cert=client_cert_path, use_client_key=client_key_path)
            if ret != 0:
failed += 1
return failed
def resume_test(host, port, test_ciphers, fips_mode, no_ticket=False):
"""
Tests s2n's session resumption capability using all valid combinations of cipher suite and TLS version.
"""
if no_ticket:
print("\n\tRunning resumption tests using session id:")
else:
print("\n\tRunning resumption tests using session ticket:")
failed = 0
for ssl_version in [S2N_TLS10, S2N_TLS11, S2N_TLS12]:
print("\n\tTesting ciphers using client version: " + S2N_PROTO_VERS_TO_STR[ssl_version])
for cipher in test_ciphers:
cipher_name = cipher.openssl_name
cipher_vers = cipher.min_tls_vers
# Skip the cipher if openssl can't test it. 3DES/RC4 are disabled by default in 1.1.0
if not cipher.openssl_1_1_0_compatible:
continue
if ssl_version < cipher_vers:
continue
ret = try_handshake(host, port, cipher_name, ssl_version, resume=True, no_ticket=no_ticket, enter_fips_mode=fips_mode)
result_prefix = "Cipher: %-30s Vers: %-10s ... " % (cipher_name, S2N_PROTO_VERS_TO_STR[ssl_version])
print_result(result_prefix, ret)
if ret != 0:
failed = 1
return failed
supported_sigs = ["RSA+SHA1", "RSA+SHA224", "RSA+SHA256", "RSA+SHA384", "RSA+SHA512"]
unsupported_sigs = ["ECDSA+SHA256", "ECDSA+SHA512"]
def run_sigalg_test(host, port, cipher, ssl_version, permutation, fips_mode, use_client_auth, no_ticket):
# Put some unsupported algs in front to make sure we gracefully skip them
mixed_sigs = unsupported_sigs + list(permutation)
mixed_sigs_str = ':'.join(mixed_sigs)
ret = try_handshake(host, port, cipher.openssl_name, ssl_version, sig_algs=mixed_sigs_str, no_ticket=no_ticket, enter_fips_mode=fips_mode, client_auth=use_client_auth)
    # Trim the RSA part off for brevity. User should know we only support RSA at the moment.
prefix = "Digests: %-35s ClientAuth: %-6s Vers: %-8s... " % (':'.join([x[4:] for x in permutation]), str(use_client_auth), S2N_PROTO_VERS_TO_STR[S2N_TLS12])
print_result(prefix, ret)
return ret
def sigalg_test(host, port, fips_mode, use_client_auth=None, no_ticket=False):
"""
Acceptance test for supported signature algorithms. Tests all possible supported sigalgs with unsupported ones mixed in
for noise.
"""
failed = 0
print("\n\tRunning signature algorithm tests:")
print("\tExpected supported: " + str(supported_sigs))
print("\tExpected unsupported: " + str(unsupported_sigs))
for size in range(1, len(supported_sigs) + 1):
print("\n\t\tTesting ciphers using signature preferences of size: " + str(size))
threadpool = create_thread_pool()
portOffset = 0
results = []
# Produce permutations of every accepted signature algorithm in every possible order
for permutation in itertools.permutations(supported_sigs, size):
for cipher in ALL_TEST_CIPHERS:
# Try an ECDHE cipher suite and a DHE one
if(cipher.openssl_name == "ECDHE-RSA-AES128-GCM-SHA256" or cipher.openssl_name == "DHE-RSA-AES128-GCM-SHA256"):
async_result = threadpool.apply_async(run_sigalg_test, (host, port + portOffset, cipher, S2N_TLS12, permutation, fips_mode, use_client_auth, no_ticket))
portOffset = portOffset + 1
results.append(async_result)
threadpool.close()
threadpool.join()
for async_result in results:
if async_result.get() != 0:
failed = 1
return failed
def elliptic_curve_test(host, port, fips_mode):
"""
Acceptance test for supported elliptic curves. Tests all possible supported curves with unsupported curves mixed in
for noise.
"""
supported_curves = ["P-256", "P-384"]
unsupported_curves = ["B-163", "K-409"]
print("\n\tRunning elliptic curve tests:")
print("\tExpected supported: " + str(supported_curves))
print("\tExpected unsupported: " + str(unsupported_curves))
failed = 0
for size in range(1, len(supported_curves) + 1):
print("\n\t\tTesting ciphers using curve list of size: " + str(size))
# Produce permutations of every accepted curve in every possible order
for permutation in itertools.permutations(supported_curves, size):
# Put some unsupported curves in front to make sure we gracefully skip them
mixed_curves = unsupported_curves + list(permutation)
mixed_curves_str = ':'.join(mixed_curves)
for cipher in filter(lambda x: x.openssl_name == "ECDHE-RSA-AES128-GCM-SHA256" or x.openssl_name == "ECDHE-RSA-AES128-SHA", ALL_TEST_CIPHERS):
if fips_mode and cipher.openssl_fips_compatible == False:
continue
ret = try_handshake(host, port, cipher.openssl_name, S2N_TLS12, curves=mixed_curves_str, enter_fips_mode=fips_mode)
prefix = "Curves: %-40s Vers: %10s ... " % (':'.join(list(permutation)), S2N_PROTO_VERS_TO_STR[S2N_TLS12])
print_result(prefix, ret)
if ret != 0:
failed = 1
return failed
def elliptic_curve_fallback_test(host, port, fips_mode):
"""
Tests graceful fallback when s2n doesn't support any curves offered by the client. A non-ecc suite should be
negotiated.
"""
failed = 0
# Make sure s2n can still negotiate a non-EC kx(AES256-GCM-SHA384) suite if we don't match anything on the client
unsupported_curves = ["B-163", "K-409"]
ret = try_handshake(host, port, "ECDHE-RSA-AES128-SHA256:AES256-GCM-SHA384", S2N_TLS12, curves=":".join(unsupported_curves), enter_fips_mode=fips_mode)
print_result("%-65s ... " % "Testing curve mismatch fallback", ret)
if ret != 0:
failed = 1
return failed
def handshake_fragmentation_test(host, port, fips_mode):
"""
    Tests successful negotiation with s_client despite message fragmentation. Max record size is clamped to force s2n
    to fragment the ServerCertificate message.
"""
print("\n\tRunning handshake fragmentation tests:")
failed = 0
for ssl_version in [S2N_TLS10, S2N_TLS11, S2N_TLS12]:
print("\n\tTesting ciphers using client version: " + S2N_PROTO_VERS_TO_STR[ssl_version])
# Cipher isn't relevant for this test, pick one available in all OpenSSL versions and all TLS versions
cipher_name = "AES256-SHA"
# Low latency option indirectly forces fragmentation.
ret = try_handshake(host, port, cipher_name, ssl_version, prefer_low_latency=True, enter_fips_mode=fips_mode)
result_prefix = "Cipher: %-30s Vers: %-10s ... " % (cipher_name, S2N_PROTO_VERS_TO_STR[ssl_version])
print_result(result_prefix, ret)
if ret != 0:
failed = 1
    return failed
def ocsp_stapling_test(host, port, fips_mode):
"""
Test s2n's server OCSP stapling capability
"""
print("\n\tRunning OCSP stapling tests:")
failed = 0
for ssl_version in [S2N_TLS10, S2N_TLS11, S2N_TLS12]:
print("\n\tTesting ciphers using client version: " + S2N_PROTO_VERS_TO_STR[ssl_version])
# Cipher isn't relevant for this test, pick one available in all TLS versions
cipher_name = "AES256-SHA"
ret = try_handshake(host, port, cipher_name, ssl_version, enter_fips_mode=fips_mode, server_cert=TEST_OCSP_CERT, server_key=TEST_OCSP_KEY,
ocsp=TEST_OCSP_RESPONSE_FILE)
result_prefix = "Cipher: %-30s Vers: %-10s ... " % (cipher_name, S2N_PROTO_VERS_TO_STR[ssl_version])
print_result(result_prefix, ret)
if ret != 0:
failed = 1
return failed
def main():
parser = argparse.ArgumentParser(description='Runs TLS server integration tests against s2nd using Openssl s_client')
parser.add_argument('host', help='The host for s2nd to bind to')
parser.add_argument('port', type=int, help='The port for s2nd to bind to')
parser.add_argument('--libcrypto', default='openssl-1.1.0', choices=['openssl-1.0.2', 'openssl-1.0.2-fips', 'openssl-1.1.0', 'openssl-1.1.x-master', 'libressl'],
help="""The Libcrypto that s2n was built with. s2n supports different cipher suites depending on
libcrypto version. Defaults to openssl-1.1.0.""")
args = parser.parse_args()
# Retrieve the test ciphers to use based on the libcrypto version s2n was built with
test_ciphers = S2N_LIBCRYPTO_TO_TEST_CIPHERS[args.libcrypto]
host = args.host
port = args.port
fips_mode = False
if environ.get("S2N_TEST_IN_FIPS_MODE") is not None:
fips_mode = True
print("\nRunning s2nd in FIPS mode.")
print("\nRunning tests with: " + os.popen('openssl version').read())
failed = 0
failed += resume_test(host, port, test_ciphers, fips_mode, no_ticket=True)
failed += resume_test(host, port, test_ciphers, fips_mode)
failed += handshake_test(host, port, test_ciphers, fips_mode)
failed += client_auth_test(host, port, test_ciphers, fips_mode)
failed += sigalg_test(host, port, fips_mode)
failed += sigalg_test(host, port, fips_mode, use_client_auth=True, no_ticket=True)
failed += elliptic_curve_test(host, port, fips_mode)
failed += elliptic_curve_fallback_test(host, port, fips_mode)
failed += handshake_fragmentation_test(host, port, fips_mode)
failed += ocsp_stapling_test(host, port, fips_mode)
return failed
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 | -6,687,215,126,450,706,000 | 39.182879 | 196 | 0.641571 | false |
corerd/PyDomo | pydimage/benchmark.py | 1 | 6974 | #!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2015 Corrado Ubezio
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''Compare performance between PIL and OpenCV'''
from __future__ import print_function
import os
import sys
import cv2
from PIL import Image
from time import time
from pilib import ExtendedImage as pilImage
from cvlib import ExtendedImage as cvImage
class Benchmark(object):
def __init__(self, bmClass):
self.imageClass = bmClass
self.runs = 0
self.deltat_min = sys.maxint
self.deltat_max = 0
self.elapsedt = 0
def run(self, src_image_file):
        '''Compute the time spent
        converting a color image to greyscale
        and computing the pixel counts (histogram).
        '''
self.runs = self.runs + 1
#img = self.imageClass(src_image_file).greyscale()
img = self.imageClass(src_image_file)
deltat = time()
img.greyscale()
pixel_counts = img.histogram()
deltat = time() - deltat
if self.deltat_min > deltat:
self.deltat_min = deltat
if self.deltat_max < deltat:
self.deltat_max = deltat
self.elapsedt = self.elapsedt + deltat
def report(self):
print('Read %d pictures in %f seconds' % (self.runs, self.elapsedt))
print('deltat min: %fs' % self.deltat_min)
print('deltat max: %fs' % self.deltat_max)
        print('deltat average: %fs' % (self.elapsedt / self.runs))
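# Sketch of running one wrapper benchmark by hand (the __main__ block below does
# the same over a whole directory); the file name is a placeholder:
#
#   bm = Benchmark(pilImage)
#   bm.run('some_picture.jpg')
#   bm.report()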
class Benchmark_PIL(object):
def __init__(self):
self.runs = 0
self.deltat_min = sys.maxint
self.deltat_max = 0
self.elapsedt = 0
self.width_ave = 0
self.height_ave = 0
def run(self, src_image_file):
        '''Compute the time spent
        converting a color image to greyscale
        and computing the pixel counts (histogram).
        '''
self.runs = self.runs + 1
img = Image.open(src_image_file)
width, height = img.size
self.width_ave = self.width_ave + width
self.height_ave = self.height_ave + height
deltat = time()
img = img.convert(mode='L')
pixel_counts = img.histogram()
deltat = time() - deltat
if self.deltat_min > deltat:
self.deltat_min = deltat
if self.deltat_max < deltat:
self.deltat_max = deltat
self.elapsedt = self.elapsedt + deltat
def report(self):
print('Read %d %dx%d pictures in %f seconds' %
(self.runs, (self.width_ave/self.runs), (self.height_ave/self.runs), self.elapsedt))
print('deltat min: %fs' % self.deltat_min)
print('deltat max: %fs' % self.deltat_max)
        print('deltat average: %fs' % (self.elapsedt / self.runs))
class Benchmark_OpenCV(object):
def __init__(self):
self.runs = 0
self.deltat_min = sys.maxint
self.deltat_max = 0
self.elapsedt = 0
self.width_ave = 0
self.height_ave = 0
def run(self, src_image_file):
        '''Compute the time spent
        converting a color image to greyscale
        and computing the pixel counts (histogram).
        '''
self.runs = self.runs + 1
#img = cv2.imread(src_image_file, cv2.IMREAD_GRAYSCALE)
img = cv2.imread(src_image_file)
height, width = img.shape[:2]
self.width_ave = self.width_ave + width
self.height_ave = self.height_ave + height
deltat = time()
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
pixel_counts = cv2.calcHist([img],[0],None,[256],[0,256])
deltat = time() - deltat
if self.deltat_min > deltat:
self.deltat_min = deltat
if self.deltat_max < deltat:
self.deltat_max = deltat
self.elapsedt = self.elapsedt + deltat
def report(self):
print('Read %d %dx%d pictures in %f seconds' %
(self.runs, (self.width_ave/self.runs), (self.height_ave/self.runs), self.elapsedt))
print('deltat min: %fs' % self.deltat_min)
print('deltat max: %fs' % self.deltat_max)
        print('deltat average: %fs' % (self.elapsedt / self.runs))
if __name__ == "__main__":
if len(sys.argv[1:]) > 0:
imagePathName = sys.argv[1]
else:
imagePathName = '.'
loop_cnt = 0
bmPIL = Benchmark(pilImage)
bmPIL_native = Benchmark_PIL()
bmCV = Benchmark(cvImage)
bmCV_native = Benchmark_OpenCV()
if os.path.isdir(imagePathName):
# iterate top directory listing
for dirname, dirnames, filenames in os.walk(imagePathName):
for imageFileName in filenames:
if imageFileName.lower().endswith('.jpg'):
if (loop_cnt % 20) == 0:
print('Running loop %d...' % (loop_cnt+1))
loop_cnt = loop_cnt + 1
bmPIL.run(os.path.join(dirname, imageFileName))
bmPIL_native.run(os.path.join(dirname, imageFileName))
bmCV.run(os.path.join(dirname, imageFileName))
bmCV_native.run(os.path.join(dirname, imageFileName))
break # only top directory listing
else:
if imagePathName.lower().endswith('.jpg'):
# loop on single file
for loop_cnt in range(500):
if (loop_cnt % 10) == 0:
print('Running loop %d...' % (loop_cnt+1))
bmPIL.run(imagePathName)
bmPIL_native.run(imagePathName)
bmCV.run(imagePathName)
bmCV_native.run(imagePathName)
loop_cnt = loop_cnt + 1
else:
print('JPG file required')
if loop_cnt > 0:
print('\nPIL stats:')
bmPIL.report()
print('\nOpenCV stats:')
bmCV.report()
print('\nPIL native stats:')
bmPIL_native.report()
print('\nOpenCV native stats:')
bmCV_native.report()
| mit | -7,023,757,394,312,344,000 | 35.134715 | 96 | 0.60152 | false |
DarkPhoenix6/My_Libraries | Python/GameOfLife.py | 1 | 3819 | # a simple parser for python. use get_number() and get_word() to read
def parser():
while 1:
data = list(input().split(' '))
for number in data:
if len(number) > 0:
yield (number)
input_parser = parser()
def get_word():
global input_parser
return next(input_parser)
def get_number():
data = get_word()
try:
return int(data)
except ValueError:
return float(data)
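# Sketch of the stdin layout the helpers above and TorusBoard below consume,
# inferred from generate_board (rows, columns, turns, then `rows` board lines):
#
#   3 3 1
#   -*-
#   -*-
#   -*-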
# numpy and scipy are available for use
import numpy
import scipy
class Cell(object):
def __init__(self, is_alive=False):
self.is_alive = is_alive
self.alive_count = 0
self.next_alive = True
def inc_counter(self):
"""
:return: True if keep seeking for ajacent cells
"""
self.alive_count += 1
if self.alive_count <= 3:
# self.alive_count += 1
return True
else:
# self.alive_count += 1
return False
def reset_count(self):
self.alive_count = 0
def set_is_alive_next(self):
if self.is_alive:
if 1 < self.alive_count < 4:
self.next_alive = True
else:
self.next_alive = False
else:
if self.alive_count == 3:
self.next_alive = True
else:
self.next_alive = False
def set_is_alive(self):
self.is_alive = self.next_alive
self.reset_count()
class TorusBoard(object):
def __init__(self):
self.rows = get_number()
self.columns = get_number()
self.turns = get_number()
self.cells = self.generate_board() # self.state[row][column]
# self.print_board()
def generate_board(self):
columns, rows = self.columns, self.rows
m = [[Cell() for x in range(rows)] for y in range(columns)]
# board_list = []
for i in range(rows):
board_cell = get_word()
for j in range(columns):
# board_list.append(board_cell)
if board_cell[j] == "*":
m[j][i].is_alive = True
else:
m[j][i].is_alive = False
return m
def do_game(self):
while self.turns > 0:
self.do_turn()
def do_turn(self):
columns, rows = self.columns, self.rows
self.turns -= 1
for i in range(columns):
for j in range(rows):
self.check_adjacent_cells(j, i)
for i in range(columns):
for j in range(rows):
self.cells[i][j].set_is_alive()
# self.print_board()
# print(" ")
def check_adjacent_cells(self, row, column):
i_left = column - 1
i_right = column - self.columns + 1
i_below = row - self.rows + 1
i_top = row - 1
keep_loop = True
for i in [i_top, row, i_below]:
for j in [i_left, column, i_right]:
if i == row and j == column:
continue
else:
if self.cells[j][i].is_alive:
keep_loop = self.cells[column][row].inc_counter()
if not keep_loop:
break
if not keep_loop:
break
self.cells[column][row].set_is_alive_next()
def print_board(self):
columns, rows = self.columns, self.rows
m = self.cells
for i in range(rows):
print_row = ""
for j in range(columns):
if m[j][i].is_alive:
print_row = print_row + "*"
else:
print_row = print_row + "-"
print(print_row)
b = TorusBoard()
b.do_game()
b.print_board() | gpl-3.0 | 3,992,101,306,870,500,400 | 25.164384 | 73 | 0.485729 | false |
Inspq/ansible | lib/ansible/modules/network/nxos/nxos_bgp_neighbor.py | 1 | 22142 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_bgp_neighbor
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages BGP neighbors configurations.
description:
- Manages BGP neighbors configurations on NX-OS switches.
author: Gabriele Gerbino (@GGabriele)
notes:
- C(state=absent) removes the whole BGP neighbor configuration.
- Default, where supported, restores params default value.
options:
asn:
description:
- BGP autonomous system number. Valid values are string,
Integer in ASPLAIN or ASDOT notation.
required: true
vrf:
description:
- Name of the VRF. The name 'default' is a valid VRF representing
the global bgp.
required: false
default: default
neighbor:
description:
- Neighbor Identifier. Valid values are string. Neighbors may use
IPv4 or IPv6 notation, with or without prefix length.
required: true
description:
description:
- Description of the neighbor.
required: false
default: null
connected_check:
description:
- Configure whether or not to check for directly connected peer.
required: false
choices: ['true', 'false']
default: null
capability_negotiation:
description:
- Configure whether or not to negotiate capability with
this neighbor.
required: false
choices: ['true', 'false']
default: null
dynamic_capability:
description:
- Configure whether or not to enable dynamic capability.
required: false
choices: ['true', 'false']
default: null
ebgp_multihop:
description:
- Specify multihop TTL for a remote peer. Valid values are
integers between 2 and 255, or keyword 'default' to disable
this property.
required: false
default: null
local_as:
description:
- Specify the local-as number for the eBGP neighbor.
Valid values are String or Integer in ASPLAIN or ASDOT notation,
or 'default', which means not to configure it.
required: false
default: null
log_neighbor_changes:
description:
- Specify whether or not to enable log messages for neighbor
up/down event.
required: false
choices: ['enable', 'disable', 'inherit']
default: null
low_memory_exempt:
description:
- Specify whether or not to shut down this neighbor under
memory pressure.
required: false
choices: ['true', 'false']
default: null
maximum_peers:
description:
- Specify Maximum number of peers for this neighbor prefix
Valid values are between 1 and 1000, or 'default', which does
not impose the limit.
required: false
default: null
pwd:
description:
- Specify the password for neighbor. Valid value is string.
required: false
default: null
pwd_type:
description:
- Specify the encryption type the password will use. Valid values
are '3des' or 'cisco_type_7' encryption.
required: false
choices: ['3des', 'cisco_type_7']
default: null
remote_as:
description:
- Specify Autonomous System Number of the neighbor.
Valid values are String or Integer in ASPLAIN or ASDOT notation,
or 'default', which means not to configure it.
required: false
default: null
remove_private_as:
description:
- Specify the config to remove private AS number from outbound
updates. Valid values are 'enable' to enable this config,
'disable' to disable this config, 'all' to remove all
private AS number, or 'replace-as', to replace the private
AS number.
required: false
choices: ['enable', 'disable', 'all', 'replace-as']
default: null
shutdown:
description:
- Configure to administratively shutdown this neighbor.
required: false
choices: ['true','false']
default: null
suppress_4_byte_as:
description:
- Configure to suppress 4-byte AS Capability.
required: false
choices: ['true','false']
default: null
timers_keepalive:
description:
- Specify keepalive timer value. Valid values are integers
between 0 and 3600 in terms of seconds, or 'default',
which is 60.
required: false
default: null
timers_holdtime:
description:
- Specify holdtime timer value. Valid values are integers between
0 and 3600 in terms of seconds, or 'default', which is 180.
required: false
default: null
transport_passive_only:
description:
- Specify whether or not to only allow passive connection setup.
Valid values are 'true', 'false', and 'default', which defaults
to 'false'. This property can only be configured when the
neighbor is in 'ip' address format without prefix length.
This property and the transport_passive_mode property are
mutually exclusive.
required: false
choices: ['true','false']
default: null
update_source:
description:
- Specify source interface of BGP session and updates.
required: false
default: null
state:
description:
- Determines whether the config should be present or not
on the device.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# create a new neighbor
- nxos_bgp_neighbor:
asn: 65535
neighbor: 3.3.3.3
local_as: 20
remote_as: 30
description: "just a description"
update_source: Ethernet1/3
shutdown: default
state: present
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"asn": "65535", "description": "just a description",
"local_as": "20", "neighbor": "3.3.3.3",
"remote_as": "30", "shutdown": "default",
"update_source": "Ethernet1/3", "vrf": "default"}
existing:
description: k/v pairs of existing BGP neighbor configuration
returned: verbose mode
type: dict
sample: {}
end_state:
description: k/v pairs of BGP neighbor configuration after module execution
returned: verbose mode
type: dict
sample: {"asn": "65535", "capability_negotiation": false,
"connected_check": false, "description": "just a description",
"dynamic_capability": true, "ebgp_multihop": "",
"local_as": "20", "log_neighbor_changes": "",
"low_memory_exempt": false, "maximum_peers": "",
"neighbor": "3.3.3.3", "pwd": "",
"pwd_type": "", "remote_as": "30",
"remove_private_as": "disable", "shutdown": false,
"suppress_4_byte_as": false, "timers_holdtime": "180",
"timers_keepalive": "60", "transport_passive_only": false,
"update_source": "Ethernet1/3", "vrf": "default"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["router bgp 65535", "neighbor 3.3.3.3",
"remote-as 30", "update-source Ethernet1/3",
"description just a description", "local-as 20"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
WARNINGS = []
BOOL_PARAMS = [
'capability_negotiation',
'shutdown',
'connected_check',
'dynamic_capability',
'low_memory_exempt',
'suppress_4_byte_as',
'transport_passive_only'
]
PARAM_TO_COMMAND_KEYMAP = {
'asn': 'router bgp',
'capability_negotiation': 'dont-capability-negotiate',
'connected_check': 'disable-connected-check',
'description': 'description',
'dynamic_capability': 'dynamic-capability',
'ebgp_multihop': 'ebgp-multihop',
'local_as': 'local-as',
'log_neighbor_changes': 'log-neighbor-changes',
'low_memory_exempt': 'low-memory exempt',
'maximum_peers': 'maximum-peers',
'neighbor': 'neighbor',
'pwd': 'password',
'pwd_type': 'password-type',
'remote_as': 'remote-as',
'remove_private_as': 'remove-private-as',
'shutdown': 'shutdown',
'suppress_4_byte_as': 'capability suppress 4-byte-as',
'timers_keepalive': 'timers-keepalive',
'timers_holdtime': 'timers-holdtime',
'transport_passive_only': 'transport connection-mode passive',
'update_source': 'update-source',
'vrf': 'vrf'
}
PARAM_TO_DEFAULT_KEYMAP = {
'shutdown': False,
'dynamic_capability': True,
'timers_keepalive': 60,
'timers_holdtime': 180
}
def invoke(name, *args, **kwargs):
func = globals().get(name)
if func:
return func(*args, **kwargs)
def get_value(arg, config, module):
if arg in BOOL_PARAMS:
REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
value = False
try:
if REGEX.search(config):
value = True
except TypeError:
value = False
else:
REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
value = ''
if PARAM_TO_COMMAND_KEYMAP[arg] in config:
value = REGEX.search(config).group('value')
return value
def get_custom_value(arg, config, module):
value = ''
splitted_config = config.splitlines()
if arg == 'log_neighbor_changes':
for line in splitted_config:
if 'log-neighbor-changes' in line:
if 'disable' in line:
value = 'disable'
else:
value = 'enable'
elif arg == 'pwd':
for line in splitted_config:
if 'password' in line:
splitted_line = line.split()
value = splitted_line[2]
elif arg == 'pwd_type':
for line in splitted_config:
if 'password' in line:
splitted_line = line.split()
value = splitted_line[1]
elif arg == 'remove_private_as':
value = 'disable'
for line in splitted_config:
if 'remove-private-as' in line:
splitted_line = line.split()
if len(splitted_line) == 1:
value = 'enable'
elif len(splitted_line) == 2:
value = splitted_line[1]
elif arg == 'timers_keepalive':
REGEX = re.compile(r'(?:timers\s)(?P<value>.*)$', re.M)
value = ''
if 'timers' in config:
parsed = REGEX.search(config).group('value').split()
value = parsed[0]
elif arg == 'timers_holdtime':
REGEX = re.compile(r'(?:timers\s)(?P<value>.*)$', re.M)
value = ''
if 'timers' in config:
parsed = REGEX.search(config).group('value').split()
if len(parsed) == 2:
value = parsed[1]
return value
def get_existing(module, args):
existing = {}
netcfg = CustomNetworkConfig(indent=2, contents=get_config(module))
custom = [
'log_neighbor_changes',
'pwd',
'pwd_type',
'remove_private_as',
'timers_holdtime',
'timers_keepalive'
]
try:
asn_regex = '.*router\sbgp\s(?P<existing_asn>\d+).*'
match_asn = re.match(asn_regex, str(netcfg), re.DOTALL)
existing_asn_group = match_asn.groupdict()
existing_asn = existing_asn_group['existing_asn']
except AttributeError:
existing_asn = ''
if existing_asn:
parents = ["router bgp {0}".format(existing_asn)]
if module.params['vrf'] != 'default':
parents.append('vrf {0}'.format(module.params['vrf']))
parents.append('neighbor {0}'.format(module.params['neighbor']))
config = netcfg.get_section(parents)
if config:
for arg in args:
if arg not in ['asn', 'vrf', 'neighbor']:
if arg in custom:
existing[arg] = get_custom_value(arg, config, module)
else:
existing[arg] = get_value(arg, config, module)
existing['asn'] = existing_asn
existing['neighbor'] = module.params['neighbor']
existing['vrf'] = module.params['vrf']
else:
WARNINGS.append("The BGP process didn't exist but the task"
" just created it.")
return existing
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = value
else:
new_dict[new_key] = value
return new_dict
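# Illustration of the keymap helper above (the proposed values are placeholders):
#
#   apply_key_map(PARAM_TO_COMMAND_KEYMAP, {'remote_as': '30', 'shutdown': False})
#   # -> {'remote-as': '30', 'shutdown': False}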
def state_present(module, existing, proposed, candidate):
commands = list()
proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
for key, value in proposed_commands.items():
if value is True:
commands.append(key)
elif value is False:
commands.append('no {0}'.format(key))
elif value == 'default':
if existing_commands.get(key):
existing_value = existing_commands.get(key)
commands.append('no {0} {1}'.format(key, existing_value))
else:
if key == 'log-neighbor-changes':
if value == 'enable':
commands.append('{0}'.format(key))
elif value == 'disable':
commands.append('{0} {1}'.format(key, value))
elif value == 'inherit':
if existing_commands.get(key):
commands.append('no {0}'.format(key))
elif key == 'password':
pwd_type = module.params['pwd_type']
if pwd_type == '3des':
pwd_type = 3
else:
pwd_type = 7
command = '{0} {1} {2}'.format(key, pwd_type, value)
if command not in commands:
commands.append(command)
elif key == 'remove-private-as':
if value == 'enable':
command = '{0}'.format(key)
commands.append(command)
elif value == 'disable':
if existing_commands.get(key) != 'disable':
command = 'no {0}'.format(key)
commands.append(command)
else:
command = '{0} {1}'.format(key, value)
commands.append(command)
elif key.startswith('timers'):
command = 'timers {0} {1}'.format(
proposed_commands['timers-keepalive'],
proposed_commands['timers-holdtime'])
if command not in commands:
commands.append(command)
else:
command = '{0} {1}'.format(key, value)
commands.append(command)
if commands:
parents = ["router bgp {0}".format(module.params['asn'])]
if module.params['vrf'] != 'default':
parents.append('vrf {0}'.format(module.params['vrf']))
parents.append('neighbor {0}'.format(module.params['neighbor']))
# make sure that local-as is the last command in the list.
local_as_command = 'local-as {0}'.format(module.params['local_as'])
if local_as_command in commands:
commands.remove(local_as_command)
commands.append(local_as_command)
candidate.add(commands, parents=parents)
def state_absent(module, existing, proposed, candidate):
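    """Build the config commands that remove the neighbor from the BGP process."""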
commands = []
parents = ["router bgp {0}".format(module.params['asn'])]
if module.params['vrf'] != 'default':
parents.append('vrf {0}'.format(module.params['vrf']))
commands.append('no neighbor {0}'.format(module.params['neighbor']))
candidate.add(commands, parents=parents)
def main():
argument_spec = dict(
asn=dict(required=True, type='str'),
vrf=dict(required=False, type='str', default='default'),
neighbor=dict(required=True, type='str'),
description=dict(required=False, type='str'),
capability_negotiation=dict(required=False, type='bool'),
connected_check=dict(required=False, type='bool'),
dynamic_capability=dict(required=False, type='bool'),
ebgp_multihop=dict(required=False, type='str'),
local_as=dict(required=False, type='str'),
log_neighbor_changes=dict(required=False, type='str', choices=['enable', 'disable', 'inherit']),
low_memory_exempt=dict(required=False, type='bool'),
maximum_peers=dict(required=False, type='str'),
pwd=dict(required=False, type='str'),
pwd_type=dict(required=False, type='str', choices=['cleartext', '3des', 'cisco_type_7', 'default']),
remote_as=dict(required=False, type='str'),
remove_private_as=dict(required=False, type='str', choices=['enable', 'disable', 'all', 'replace-as']),
shutdown=dict(required=False, type='str'),
suppress_4_byte_as=dict(required=False, type='bool'),
timers_keepalive=dict(required=False, type='str'),
timers_holdtime=dict(required=False, type='str'),
transport_passive_only=dict(required=False, type='bool'),
update_source=dict(required=False, type='str'),
m_facts=dict(required=False, default=False, type='bool'),
state=dict(choices=['present', 'absent'], default='present',
required=False),
include_defaults=dict(default=True),
config=dict(),
save=dict(type='bool', default=False)
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
                           required_together=[['timers_holdtime',
                                               'timers_keepalive']],
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
state = module.params['state']
if module.params['pwd_type'] == 'default':
module.params['pwd_type'] = '0'
args = [
'asn',
'capability_negotiation',
'connected_check',
'description',
'dynamic_capability',
'ebgp_multihop',
'local_as',
'log_neighbor_changes',
'low_memory_exempt',
'maximum_peers',
'neighbor',
'pwd',
'pwd_type',
'remote_as',
'remove_private_as',
'shutdown',
'suppress_4_byte_as',
'timers_keepalive',
'timers_holdtime',
'transport_passive_only',
'update_source',
'vrf'
]
existing = invoke('get_existing', module, args)
if existing.get('asn'):
if (existing.get('asn') != module.params['asn'] and
state == 'present'):
module.fail_json(msg='Another BGP ASN already exists.',
proposed_asn=module.params['asn'],
existing_asn=existing.get('asn'))
end_state = existing
proposed_args = dict((k, v) for k, v in module.params.items()
if v is not None and k in args)
proposed = {}
for key, value in proposed_args.items():
if key not in ['asn', 'vrf', 'neighbor', 'pwd_type']:
if str(value).lower() == 'default':
value = PARAM_TO_DEFAULT_KEYMAP.get(key)
if value is None:
value = 'default'
if existing.get(key) or (not existing.get(key) and value):
proposed[key] = value
result = {}
if state == 'present' or (state == 'absent' and existing):
candidate = CustomNetworkConfig(indent=3)
invoke('state_%s' % state, module, existing, proposed, candidate)
response = load_config(module, candidate)
result.update(response)
else:
result['updates'] = []
if module._verbosity > 0:
end_state = invoke('get_existing', module, args)
result['end_state'] = end_state
result['existing'] = existing
result['proposed'] = proposed_args
if WARNINGS:
result['warnings'] = WARNINGS
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | -7,801,025,472,138,964,000 | 34.257962 | 111 | 0.572712 | false |
ResearchSoftwareInstitute/MyHPOM | hs_core/management/commands/fix_missing_logical_files.py | 1 | 2354 | """Fix missing logical file for composite resources. If there are resource files in django
for any composite resource that are not part of any logical file, each of those files are made
part of a generic logical file.
* By default, prints errors on stdout.
* Optional argument --log: logs output to system log.
"""
from django.core.management.base import BaseCommand
from hs_composite_resource.models import CompositeResource
class Command(BaseCommand):
help = "Set generic logical file for any resource file that is not part of any logical file."
def add_arguments(self, parser):
# a list of resource id's, or none to check all resources
parser.add_argument('resource_ids', nargs='*', type=str)
# Named (optional) arguments
parser.add_argument(
'--log',
action='store_true', # True for presence, False for absence
dest='log', # value is options['log']
help='log errors to system log',
)
def handle(self, *args, **options):
if len(options['resource_ids']) > 0: # an array of resource short_id to check.
for rid in options['resource_ids']:
try:
resource = CompositeResource.objects.get(short_id=rid)
except CompositeResource.DoesNotExist:
msg = "Resource with id {} not found in Django Resources".format(rid)
print(msg)
continue
print("SETTING GENERIC LOGICAL FILE FOR FILES IN RESOURCE {}".format(rid))
for res_file in resource.files.all():
if not res_file.has_logical_file:
print("Logical file missing for file {}".format(res_file.short_path))
resource.set_default_logical_file()
else: # check all composite resources
print("SETTING GENERIC LOGICAL FILE FOR FILES IN ALL COMPOSITE RESOURCES")
for r in CompositeResource.objects.all():
print("SETTING GENERIC LOGICAL FILE FOR FILES IN RESOURCE {}".format(r.short_id))
for res_file in r.files.all():
if not res_file.has_logical_file:
print("Logical file missing for file {}".format(res_file.short_path))
r.set_default_logical_file()
| bsd-3-clause | -5,936,658,179,182,475,000 | 43.415094 | 97 | 0.608326 | false |
ponyorm/pony | pony/orm/tests/test_declarative_func_monad.py | 1 | 7340 | from __future__ import absolute_import, print_function, division
from pony.py23compat import PY2, PYPY, PYPY2
import sys, unittest
from datetime import date, datetime
from decimal import Decimal
from pony.orm.core import *
from pony.orm.sqltranslation import IncomparableTypesError
from pony.orm.tests.testutils import *
from pony.orm.tests import setup_database, teardown_database
db = Database()
class Student(db.Entity):
id = PrimaryKey(int)
name = Required(unicode)
dob = Required(date)
last_visit = Required(datetime)
scholarship = Required(Decimal, 6, 2)
phd = Required(bool)
group = Required('Group')
class Group(db.Entity):
number = PrimaryKey(int)
students = Set(Student)
class TestFuncMonad(unittest.TestCase):
@classmethod
def setUpClass(cls):
setup_database(db)
with db_session:
g1 = Group(number=1)
g2 = Group(number=2)
Student(id=1, name="AA", dob=date(1981, 1, 1), last_visit=datetime(2011, 1, 1, 11, 11, 11),
scholarship=Decimal("0"), phd=True, group=g1)
Student(id=2, name="BB", dob=date(1982, 2, 2), last_visit=datetime(2011, 2, 2, 12, 12, 12),
scholarship=Decimal("202.2"), phd=True, group=g1)
Student(id=3, name="CC", dob=date(1983, 3, 3), last_visit=datetime(2011, 3, 3, 13, 13, 13),
scholarship=Decimal("303.3"), phd=False, group=g1)
Student(id=4, name="DD", dob=date(1984, 4, 4), last_visit=datetime(2011, 4, 4, 14, 14, 14),
scholarship=Decimal("404.4"), phd=False, group=g2)
Student(id=5, name="EE", dob=date(1985, 5, 5), last_visit=datetime(2011, 5, 5, 15, 15, 15),
scholarship=Decimal("505.5"), phd=False, group=g2)
@classmethod
def tearDownClass(cls):
teardown_database(db)
def setUp(self):
rollback()
db_session.__enter__()
def tearDown(self):
rollback()
db_session.__exit__()
def test_minmax1(self):
result = set(select(s for s in Student if max(s.id, 3) == 3 ))
self.assertEqual(result, {Student[1], Student[2], Student[3]})
def test_minmax2(self):
result = set(select(s for s in Student if min(s.id, 3) == 3 ))
self.assertEqual(result, {Student[4], Student[5], Student[3]})
def test_minmax3(self):
result = set(select(s for s in Student if max(s.name, "CC") == "CC" ))
self.assertEqual(result, {Student[1], Student[2], Student[3]})
def test_minmax4(self):
result = set(select(s for s in Student if min(s.name, "CC") == "CC" ))
self.assertEqual(result, {Student[4], Student[5], Student[3]})
def test_minmax5(self):
x = chr(128)
try: result = set(select(s for s in Student if min(s.name, x) == "CC" ))
except TypeError as e:
self.assertTrue(PY2 and e.args[0] == "The bytestring '\\x80' contains non-ascii symbols. Try to pass unicode string instead")
else: self.assertFalse(PY2)
def test_minmax6(self):
x = chr(128)
try: result = set(select(s for s in Student if min(s.name, x, "CC") == "CC" ))
except TypeError as e:
self.assertTrue(PY2 and e.args[0] == "The bytestring '\\x80' contains non-ascii symbols. Try to pass unicode string instead")
else: self.assertFalse(PY2)
def test_minmax7(self):
result = set(select(s for s in Student if min(s.phd, 2) == 2 ))
def test_date_func1(self):
result = set(select(s for s in Student if s.dob >= date(1983, 3, 3)))
self.assertEqual(result, {Student[3], Student[4], Student[5]})
# @raises_exception(ExprEvalError, "date(1983, 'three', 3) raises TypeError: an integer is required")
@raises_exception(TypeError, "'month' argument of date(year, month, day) function must be of 'int' type. "
"Got: '%s'" % unicode.__name__)
def test_date_func2(self):
result = set(select(s for s in Student if s.dob >= date(1983, 'three', 3)))
# @raises_exception(NotImplementedError)
# def test_date_func3(self):
# d = 3
# result = set(select(s for s in Student if s.dob >= date(1983, d, 3)))
def test_datetime_func1(self):
result = set(select(s for s in Student if s.last_visit >= date(2011, 3, 3)))
self.assertEqual(result, {Student[3], Student[4], Student[5]})
def test_datetime_func2(self):
result = set(select(s for s in Student if s.last_visit >= datetime(2011, 3, 3)))
self.assertEqual(result, {Student[3], Student[4], Student[5]})
def test_datetime_func3(self):
result = set(select(s for s in Student if s.last_visit >= datetime(2011, 3, 3, 13, 13, 13)))
self.assertEqual(result, {Student[3], Student[4], Student[5]})
# @raises_exception(ExprEvalError, "datetime(1983, 'three', 3) raises TypeError: an integer is required")
@raises_exception(TypeError, "'month' argument of datetime(...) function must be of 'int' type. "
"Got: '%s'" % unicode.__name__)
def test_datetime_func4(self):
result = set(select(s for s in Student if s.last_visit >= datetime(1983, 'three', 3)))
# @raises_exception(NotImplementedError)
# def test_datetime_func5(self):
# d = 3
# result = set(select(s for s in Student if s.last_visit >= date(1983, d, 3)))
def test_datetime_now1(self):
result = set(select(s for s in Student if s.dob < date.today()))
self.assertEqual(result, {Student[1], Student[2], Student[3], Student[4], Student[5]})
@raises_exception(ExprEvalError, "`1 < datetime.now()` raises TypeError: " + (
"can't compare 'datetime' to 'int'" if PYPY2 else
"'<' not supported between instances of 'int' and 'datetime'" if PYPY and sys.version_info >= (3, 6) else
"unorderable types: int < datetime" if PYPY else
"can't compare datetime.datetime to int" if PY2 else
"unorderable types: int() < datetime.datetime()" if sys.version_info < (3, 6) else
"'<' not supported between instances of 'int' and 'datetime.datetime'"))
def test_datetime_now2(self):
select(s for s in Student if 1 < datetime.now())
def test_datetime_now3(self):
result = set(select(s for s in Student if s.dob < datetime.today()))
self.assertEqual(result, {Student[1], Student[2], Student[3], Student[4], Student[5]})
def test_decimal_func(self):
result = set(select(s for s in Student if s.scholarship >= Decimal("303.3")))
self.assertEqual(result, {Student[3], Student[4], Student[5]})
def test_concat_1(self):
result = set(select(concat(s.name, ':', s.dob.year, ':', s.scholarship) for s in Student))
if db.provider.dialect == 'PostgreSQL':
self.assertEqual(result, {'AA:1981:0.00', 'BB:1982:202.20', 'CC:1983:303.30', 'DD:1984:404.40', 'EE:1985:505.50'})
else:
self.assertEqual(result, {'AA:1981:0', 'BB:1982:202.2', 'CC:1983:303.3', 'DD:1984:404.4', 'EE:1985:505.5'})
@raises_exception(TranslationError, 'Invalid argument of concat() function: g.students')
def test_concat_2(self):
result = set(select(concat(g.number, g.students) for g in Group))
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -5,263,714,984,508,391,000 | 49.62069 | 137 | 0.613624 | false |
proofit404/dependencies | tests/helpers/helpers.py | 1 | 4650 | import functools
import inspect
import random
import re
import string
import pytest
class CodeCollector:
"""Dedicated decorator to use functions as Py.Test function parameters."""
seen = set()
def __init__(self, *names):
self.names = names or ("code",)
self.collected = []
def parametrize(self, test_func):
"""Parametrize decorated test function with collected functions."""
iterable = _Iterable(test_func.__name__, self.collected)
return pytest.mark.parametrize(self.names, iterable)(test_func)
def __call__(self, *args):
"""Mark decorated function as a test parameter."""
if not self._is_complete(args):
return functools.partial(self.__call__, *args)
f = args[-1]
self._validate(f)
self._remember(f)
self._add(args)
return f
def _is_complete(self, args):
return len(self.names) == len(args)
def _validate(self, f):
_validate_function(f)
_validate_uniqueness(f, self.seen)
_validate_length(f)
_validate_prefix(f)
_validate_tail(f)
_validate_assert_statement(f)
def _remember(self, f):
self.seen.add(f.__name__)
def _add(self, arg):
self.collected.append(arg)
class _Iterable:
def __init__(self, test_name, data):
self.test_name = test_name
self.data = data
def __iter__(self):
return _Iterator(self.test_name, self.data)
class _Iterator:
def __init__(self, test_name, data):
self.test_name = test_name
self.data = data
self.state = iter(data)
def __next__(self):
_validate_collected(self.test_name, self.data)
return next(self.state)
def _validate_collected(test_name, collected):
if not collected: # pragma: no cover
raise Exception("No functions was collected")
elif len(collected) == 1: # pragma: no cover
message = single_item_collected.format(test_func=test_name)
raise Exception(message)
def _validate_function(function):
if not callable(function): # pragma: no cover
raise Exception("Last argument should be a function")
def _validate_uniqueness(function, seen):
if function.__name__ in seen: # pragma: no cover
suggested = _suggest()
message = repeated_template.format(
function_name=function.__name__, suggested=suggested
)
raise Exception(message)
def _validate_length(function):
if len(function.__name__) != 13: # pragma: no cover
suggested = _suggest()
message = wrong_length_template.format(
function_name=function.__name__, suggested=suggested
)
raise Exception(message)
def _validate_prefix(function):
if not function.__name__.startswith("_"): # pragma: no cover
suggested = _suggest()
message = wrong_prefix_template.format(
function_name=function.__name__, suggested=suggested
)
raise Exception(message)
def _validate_tail(function):
for char in function.__name__[1:]:
if char not in string.ascii_letters + string.digits: # pragma: no cover
suggested = _suggest()
message = wrong_name_template.format(
function_name=function.__name__, suggested=suggested
)
raise Exception(message)
def _suggest(): # pragma: no cover
head = random.choice(string.ascii_lowercase)
tail = (random.choice(string.ascii_letters + string.digits) for _ in range(11))
return "_" + head + "".join(tail)
def _validate_assert_statement(function):
source = inspect.getsource(function)
if re.search(r"\bassert\b", source): # pragma: no cover
message = assert_found_template.format(function_name=function.__name__)
raise Exception(message)
# Messages.
single_item_collected = """
Only one function was collected as test parameter.
Collect more functions or inline parameter inside test.
Inspect {test_func!r} definition.
""".strip()
repeated_template = """
{function_name} was already collected.
How about {suggested!r}
""".strip()
wrong_length_template = """
{function_name} should be 13 characters long.
How about {suggested!r}
""".strip()
wrong_prefix_template = """
{function_name} should be a private function.
How about {suggested!r}
""".strip()
wrong_name_template = """
{function_name} should a have random name.
How about {suggested!r}
""".strip()
assert_found_template = """
{function_name} contains assert statement.
All assert statements should be placed in the parametrized test.
""".strip()
| lgpl-3.0 | 4,739,108,330,601,702,000 | 24.977654 | 83 | 0.633118 | false |
ssssam/nightbus | tests/test_main.py | 1 | 4652 | # Copyright 2017 Codethink Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Primary test cases for Night Bus automation tool.'''
import pytest
import io
import os
import sys
import nightbus
# Include parallel-ssh submodule in search path.
# As well as getting us the version of parallel-ssh with our local
# modifications, this allows us to access the embedded_server module that
# Parallel-SSH uses for its automated tests.
package_dir = os.path.dirname(__file__)
embedded_pssh_lib_dir = os.path.join(package_dir, '..', 'parallel-ssh')
sys.path = [package_dir, embedded_pssh_lib_dir] + sys.path
import pssh
from embedded_server import embedded_server
@pytest.fixture
def example_hosts():
'''Fixture providing two temporary SSH servers
Returns a nightbus.ssh_config.SSHConfig instance.
'''
server_host_1 = '127.0.0.1'
server_socket_1 = embedded_server.make_socket(server_host_1)
server_listen_port_1 = server_socket_1.getsockname()[1]
server_1 = embedded_server.start_server(server_socket_1)
server_host_2 = '127.0.0.2'
server_socket_2 = embedded_server.make_socket(server_host_2)
server_listen_port_2 = server_socket_2.getsockname()[1]
server_2 = embedded_server.start_server(server_socket_2)
hosts = '''
%s: { port: %s }
%s: { port: %s }
''' % (server_host_1, server_listen_port_1, server_host_2, server_listen_port_2)
return nightbus.ssh_config.SSHConfig(hosts)
def test_success_simple(example_hosts, tmpdir):
'''Basic test of a task that should succeed.'''
TASKS = '''
tasks:
- name: print-hello
commands: echo "hello"
'''
tasks = nightbus.tasks.TaskList(TASKS)
client = pssh.ParallelSSHClient(example_hosts, host_config=example_hosts)
results = nightbus.tasks.run_all_tasks(
client, example_hosts, tasks, log_directory=str(tmpdir))
report_buffer = io.StringIO()
nightbus.tasks.write_report(report_buffer, results)
report = report_buffer.getvalue()
assert sorted(os.listdir(str(tmpdir))) == [
'1.print-hello.127.0.0.1.log', '1.print-hello.127.0.0.2.log'
]
assert '127.0.0.1: succeeded' in report
assert '127.0.0.2: succeeded' in report
def test_failure_simple(example_hosts, tmpdir):
'''Basic test of a task that should fail.'''
TASKS = '''
tasks:
- name: print-hello
commands: exit 1
'''
tasks = nightbus.tasks.TaskList(TASKS)
client = pssh.ParallelSSHClient(example_hosts, host_config=example_hosts)
results = nightbus.tasks.run_all_tasks(
client, example_hosts, tasks, log_directory=str(tmpdir))
report_buffer = io.StringIO()
nightbus.tasks.write_report(report_buffer, results)
report = report_buffer.getvalue()
assert sorted(os.listdir(str(tmpdir))) == [
'1.print-hello.127.0.0.1.log', '1.print-hello.127.0.0.2.log'
]
assert '127.0.0.1: failed' in report
assert '127.0.0.2: failed' in report
def test_messages(example_hosts, tmpdir):
'''A task can log messages that end up in the report file.'''
TASKS = '''
tasks:
- name: messages
commands: |
echo "This message isn't shown."
echo "##nightbus This message is the same for all hosts"
echo "##nightbus This message is different per host: $(date +%N)"
'''
tasks = nightbus.tasks.TaskList(TASKS)
client = pssh.ParallelSSHClient(example_hosts, host_config=example_hosts)
results = nightbus.tasks.run_all_tasks(
client, example_hosts, tasks, log_directory=str(tmpdir))
report_buffer = io.StringIO()
nightbus.tasks.write_report(report_buffer, results)
report = report_buffer.getvalue()
report_lines = report.splitlines(0)
assert report_lines[0] == '1.messages:'
assert report_lines[1] == ' This message is the same for all hosts'
assert report_lines[2].startswith(' - 127.0.0.1: succeeded in')
assert report_lines[3].startswith(' This message is different per host:')
assert report_lines[4].startswith(' - 127.0.0.2: succeeded in')
assert report_lines[5].startswith(' This message is different per host:')
| apache-2.0 | 7,404,250,999,173,043,000 | 31.760563 | 84 | 0.682502 | false |
sravel/scripts | GUI/DAPC/syntax.py | 1 | 6372 | # syntax.py
import sys
from PyQt5.QtCore import QRegExp
from PyQt5.QtGui import QColor, QTextCharFormat, QFont, QSyntaxHighlighter
def format(color, style=''):
"""Return a QTextCharFormat with the given attributes.
"""
_color = QColor()
_color.setNamedColor(color)
_format = QTextCharFormat()
_format.setForeground(_color)
if 'bold' in style:
_format.setFontWeight(QFont.Bold)
if 'italic' in style:
_format.setFontItalic(True)
return _format
# Syntax styles that can be shared by all languages
STYLES = {
'keyword': format('blue'),
'operator': format('red'),
'brace': format('darkGray'),
'defclass': format('black', 'bold'),
'string': format('magenta'),
'string2': format('darkMagenta'),
'comment': format('darkGreen', 'italic'),
'self': format('black', 'italic'),
'numbers': format('brown'),
'perso': format('darkRed'),
'replace': format('red'),
}
class PythonHighlighter (QSyntaxHighlighter):
"""Syntax highlighter for the Python language.
"""
#Perso
perso = ['\*{2}\w+_?\w++\*{0}\*{2}']
# Python keywords
keywords = [
'and', 'assert', 'break', 'class', 'continue', 'def',
'del', 'elif', 'else', 'except', 'exec', 'finally',
'for', 'from', 'global', 'if', 'import', 'in',
'is', 'lambda', 'not', 'or', 'pass', 'print',
'raise', 'return', 'try', 'while', 'yield',
'NONE', 'TRUE', 'FALSE','NULL',
'None', 'True', 'False','Null',
'library', 'function'
]
# Python operators
operators = [
'=',
# Comparison
'==', '!=', '<', '<=', '>', '>=',
# Arithmetic
'\+', '-', '\*', '/', '//', '\%', '\*\*',
# In-place
'\+=', '-=', '\*=', '/=', '\%=',
# Bitwise
'\^', '\|', '\&', '\~', '>>', '<<',
]
# Python braces
braces = [
'\{', '\}', '\(', '\)', '\[', '\]',
]
def __init__(self, document):
QSyntaxHighlighter.__init__(self, document)
# Multi-line strings (expression, flag, style)
# FIXME: The triple-quotes in these two lines will mess up the
# syntax highlighting from this point onward
self.tri_single = (QRegExp("'''"), 1, STYLES['string2'])
self.tri_double = (QRegExp('"""'), 2, STYLES['string2'])
rules = []
# Keyword, operator, and brace rules
rules += [(r'\b%s\b' % w, 0, STYLES['keyword'])
for w in PythonHighlighter.keywords]
rules += [(r'%s' % o, 0, STYLES['operator'])
for o in PythonHighlighter.operators]
rules += [(r'%s' % b, 0, STYLES['brace'])
for b in PythonHighlighter.braces]
# All other rules
rules += [
# 'self'
(r'\bself\b', 0, STYLES['self']),
# Double-quoted string, possibly containing escape sequences
(r'"[^"\\]*(\\.[^"\\]*)*"', 0, STYLES['string']),
# Single-quoted string, possibly containing escape sequences
(r"'[^'\\]*(\\.[^'\\]*)*'", 0, STYLES['string']),
# 'def' followed by an identifier
(r'\bdef\b\s*(\w+)', 1, STYLES['defclass']),
# 'class' followed by an identifier
(r'\bclass\b\s*(\w+)', 1, STYLES['defclass']),
# From '#' until a newline
(r'#[^\n]*', 0, STYLES['comment']),
# From perso
(r'\*\*\w+\*\*', 0, STYLES['replace']),
# Numeric literals
(r'\b[+-]?[0-9]+[lL]?\b', 0, STYLES['numbers']),
(r'\b[+-]?0[xX][0-9A-Fa-f]+[lL]?\b', 0, STYLES['numbers']),
(r'\b[+-]?[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?\b', 0, STYLES['numbers']),
]
# Build a QRegExp for each pattern
self.rules = [(QRegExp(pat), index, fmt)
for (pat, index, fmt) in rules]
def highlightBlock(self, text):
"""Apply syntax highlighting to the given block of text.
"""
# Do other syntax formatting
for expression, nth, format in self.rules:
index = expression.indexIn(text, 0)
while index >= 0:
# We actually want the index of the nth match
index = expression.pos(nth)
length = len(expression.cap(nth))
self.setFormat(index, length, format)
index = expression.indexIn(text, index + length)
self.setCurrentBlockState(0)
# Do multi-line strings
in_multiline = self.match_multiline(text, *self.tri_single)
if not in_multiline:
in_multiline = self.match_multiline(text, *self.tri_double)
def match_multiline(self, text, delimiter, in_state, style):
"""Do highlighting of multi-line strings. ``delimiter`` should be a
``QRegExp`` for triple-single-quotes or triple-double-quotes, and
``in_state`` should be a unique integer to represent the corresponding
state changes when inside those strings. Returns True if we're still
inside a multi-line string when this function is finished.
"""
# If inside triple-single quotes, start at 0
if self.previousBlockState() == in_state:
start = 0
add = 0
# Otherwise, look for the delimiter on this line
else:
start = delimiter.indexIn(text)
# Move past this match
add = delimiter.matchedLength()
# As long as there's a delimiter match on this line...
while start >= 0:
# Look for the ending delimiter
end = delimiter.indexIn(text, start + add)
# Ending delimiter on this line?
if end >= add:
length = end - start + add + delimiter.matchedLength()
self.setCurrentBlockState(0)
# No; multi-line string
else:
self.setCurrentBlockState(in_state)
                # Python strings have no .length() method (that was QString); use len() instead.
                length = len(text) - start + add
# Apply formatting
self.setFormat(start, length, style)
# Look for the next match
start = delimiter.indexIn(text, start + length)
# Return True if still inside a multi-line string, False otherwise
if self.currentBlockState() == in_state:
return True
else:
return False
| gpl-3.0 | -4,186,816,283,252,872,700 | 33.258065 | 88 | 0.524011 | false |
rafaelromcar/genetic-ant-knapsack-problem | python-test/pruebaTupla.py | 1 | 12897 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import string
import time
def doitSimple(listWords, textProcessed):
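    """Count case-insensitive occurrences of a single-token search term in the tokenized text."""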
result = 0
for w in textProcessed:
if (w[1]==listWords[0][1]):
            if (w[0].lower()==listWords[0][0]):  # two nested ifs because calling lower() is really expensive
result+=1
return result
def doitHard(listWords, textProcessed):
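    """Count case-insensitive occurrences of a multi-token search term by matching consecutive tokens."""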
result = 0
lenListWords = len(listWords)
nListWords = lenListWords-1
lenTextProcessed = len(textProcessed)
for i,w in enumerate(textProcessed):
controler = True
        # If we match here, make sure textProcessed has enough tokens left after i
        # to compare against every token in listWords; otherwise we would get an index error.
if (w[1]==listWords[0][1] and lenTextProcessed-i>=lenListWords):
for x in range(0, nListWords):
if (textProcessed[i+x][1]==listWords[x][1]):
if (textProcessed[i+x][0].lower()==listWords[x][0]):
controler = controler and True
else:
controler = controler and False
else:
controler = controler and False
if controler:
result+=1
return result
def wordSeparator(text):
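    """Split text into (token, length) tuples; whitespace and punctuation are emitted as single-character tokens."""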
textProcessed = []
firstC = 0
lettersNumbers = string.ascii_letters + string.digits + "'"
lettersSpace = string.ascii_letters + string.whitespace
spaces = string.whitespace
lenText = len(text)
for i,c in enumerate(text):
if c in spaces:
textProcessed.append((text[firstC:i],i-firstC))
textProcessed.append((text[i:i+1],'1'))
firstC = i + 1
elif c=="'" and i!=lenText-1 and i!=0 and (text[i-1] not in lettersSpace or text[i+1] not in lettersSpace):
textProcessed.append((text[firstC:i], i-firstC))
textProcessed.append((text[i:i+1],'1'))
firstC = i + 1
elif c not in lettersNumbers:
textProcessed.append((text[firstC:i], i-firstC))
textProcessed.append((text[i:i+1],'1'))
firstC = i + 1
elif i==lenText-1:
textProcessed.append((text[firstC:i+1],i+1-firstC))
return textProcessed
def to_unicode_or_bust(obj, encoding='utf-8'):
if isinstance(obj, basestring):
if not isinstance(obj, unicode):
obj = unicode(obj, encoding)
return obj
def CountOccurencesInText(word,text):
"""Number of occurences of word (case insensitive) in text"""
#This does not pass the unittests:
to_unicode_or_bust(word)
word = word.lower()
result = -1
listWords = wordSeparator(word)
if (len(listWords)==0 or len(word)>len(text)):
print "There is no word or word is bigger than text!"
else:
        text = to_unicode_or_bust(text)
textProcessed = wordSeparator(text)
if (len(listWords)==1):
doitSimpleTime = time.time()
result = doitSimple(listWords, textProcessed)
else:
result = doitHard(listWords, textProcessed)
return result
def testCountOccurencesInText():
""" Test the CountOccurencesInText function"""
text="""Georges is my name and I like python. Oh ! your name is georges? And you like Python!
Yes is is true, I like PYTHON
and my name is GEORGES"""
# test with a little text.
assert( 3 == CountOccurencesInText("Georges",text) )
assert( 3 == CountOccurencesInText("GEORGES",text) )
assert( 3 == CountOccurencesInText("georges",text) )
assert( 0 == CountOccurencesInText("george",text) )
assert( 3 == CountOccurencesInText("python",text) )
assert( 3 == CountOccurencesInText("PYTHON",text) )
assert( 2 == CountOccurencesInText("I",text) )
assert( 0 == CountOccurencesInText("n",text) )
assert( 1 == CountOccurencesInText("true",text) )
# regard ' as text:
assert ( 0 == CountOccurencesInText ( "maley", "John O'maley is my friend" ) )
# Test it but with a BIG length file. (we once had a memory error with this...)
text = """The quick brown fox jump over the lazy dog.The quick brown fox jump over the lazy dog.""" * 500
text += """The quick brown fox jump over the lazy dog.The quick brown Georges jump over the lazy dog."""
text += """esrf sqfdg sfdglkj sdflgh sdflgjdsqrgl """ * 4000
text += """The quick brown fox jump over the lazy dog.The quick brown fox jump over the lazy python."""
text += """The quick brown fox jump over the lazy dog.The quick brown fox jump over the lazy dog.""" * 500
text += """The quick brown fox jump over the lazy dog.The quick brown Georges jump over the lazy dog."""
text += """esrf sqfdg sfdglkj sdflgh sdflgjdsqrgl """ * 4000
text += """The quick brown fox jump over the lazy dog.The quick brown fox jump over the lazy python."""
text += """The quick brown fox jump over the lazy dog.The quick brown fox jump over the lazy dog.""" * 500
text += """The quick brown fox jump over the lazy dog.The quick brown Georges jump over the lazy dog."""
text += """esrf sqfdg sfdglkj sdflgh sdflgjdsqrgl """ * 4000
text += """The quick brown fox jump over the lazy dog.The quick brown fox jump over the lazy python."""
text += """The quick brown fox jump over the true lazy dog.The quick brown fox jump over the lazy dog."""
text += """The quick brown fox jump over the lazy dog.The quick brown fox jump over the lazy dog.""" * 500
text += """ I vsfgsdfg sfdg sdfg sdgh sgh I sfdgsdf"""
text += """The quick brown fox jump over the lazy dog.The quick brown fox jump over the lazy dog.""" * 500
assert( 3 == CountOccurencesInText("Georges",text) )
assert( 3 == CountOccurencesInText("GEORGES",text) )
assert( 3 == CountOccurencesInText("georges",text) )
assert( 0 == CountOccurencesInText("george",text) )
assert( 3 == CountOccurencesInText("python",text) )
assert( 3 == CountOccurencesInText("PYTHON",text) )
assert( 2 == CountOccurencesInText("I",text) )
assert( 0 == CountOccurencesInText("n",text) )
assert( 1 == CountOccurencesInText("true",text) )
assert( 0 == CountOccurencesInText("reflexion mirror",
"I am a senior citizen and I live in the Fun-Plex 'Reflexion Mirror' in Sopchoppy, Florida") )
assert( 1 == CountOccurencesInText("'reflexion mirror'",
"I am a senior citizen and I live in the Fun-Plex 'Reflexion Mirror' in Sopchoppy, Florida") )
assert( 1 == CountOccurencesInText("reflexion mirror",
"I am a senior citizen and I live in the Fun-Plex (Reflexion Mirror) in Sopchoppy, Florida") )
assert( 1 == CountOccurencesInText("reflexion mirror",
"Reflexion Mirror\" in Sopchoppy, Florida") )
assert( 1 == CountOccurencesInText("reflexion mirror",
u"I am a senior citizen and I live in the Fun-Plex «Reflexion Mirror» in Sopchoppy, Florida") )
assert( 1 == CountOccurencesInText("reflexion mirror",
u"I am a senior citizen and I live in the Fun-Plex \u201cReflexion Mirror\u201d in Sopchoppy, Florida") )
assert( 1 == CountOccurencesInText("legitimate",
u"who is approved by OILS is completely legitimate: their employees are of legal working age") )
assert( 0 == CountOccurencesInText("legitimate their",
u"who is approved by OILS is completely legitimate: their employees are of legal working age") )
assert( 1 == CountOccurencesInText("get back to me",
u"I hope you will consider this proposal, and get back to me as soon as possible") )
assert( 1 == CountOccurencesInText("skin-care",
u"enable Delavigne and its subsidiaries to create a skin-care monopoly") )
assert( 1 == CountOccurencesInText("skin-care monopoly",
u"enable Delavigne and its subsidiaries to create a skin-care monopoly") )
assert( 0 == CountOccurencesInText("skin-care monopoly in the US",
u"enable Delavigne and its subsidiaries to create a skin-care monopoly") )
assert( 1 == CountOccurencesInText("get back to me",
u"When you know:get back to me") )
assert( 1 == CountOccurencesInText("don't be left" , """emergency alarm warning.
Don't be left unprotected. Order your SSSS3000 today!""" ) )
assert( 1 == CountOccurencesInText("don" , """emergency alarm warning.
Don't be left unprotected. Order your don SSSS3000 today!""" ) )
assert( 1 == CountOccurencesInText("take that as a 'yes'",
"Do I have to take that as a 'yes'?") )
assert( 1 == CountOccurencesInText("don't take that as a 'yes'",
"I don't take that as a 'yes'?") )
assert( 1 == CountOccurencesInText("take that as a 'yes'",
"I don't take that as a 'yes'?") )
assert( 1 == CountOccurencesInText("don't",
"I don't take that as a 'yes'?") )
assert( 1 == CountOccurencesInText("attaching my c.v. to this e-mail",
"I am attaching my c.v. to this e-mail." ))
assert ( 1 == CountOccurencesInText ( "Linguist", "'''Linguist Specialist Found Dead on Laboratory Floor'''" ))
assert ( 1 == CountOccurencesInText ( "Linguist Specialist", "'''Linguist Specialist Found Dead on Laboratory Floor'''" ))
assert ( 1 == CountOccurencesInText ( "Laboratory Floor", "'''Linguist Specialist Found Dead on Laboratory Floor'''" ))
assert ( 1 == CountOccurencesInText ( "Floor", "'''Linguist Specialist Found Dead on Laboratory Floor'''" ))
assert ( 1 == CountOccurencesInText ( "Floor", "''Linguist Specialist Found Dead on Laboratory Floor''" ))
assert ( 1 == CountOccurencesInText ( "Floor", "__Linguist Specialist Found Dead on Laboratory Floor__" ))
assert ( 1 == CountOccurencesInText ( "Floor", "'''''Linguist Specialist Found Dead on Laboratory Floor'''''" ))
assert ( 1 == CountOccurencesInText ( "Linguist", "'''Linguist Specialist Found Dead on Laboratory Floor'''" ))
assert ( 1 == CountOccurencesInText ( "Linguist", "''Linguist Specialist Found Dead on Laboratory Floor''" ))
assert ( 1 == CountOccurencesInText ( "Linguist", "__Linguist Specialist Found Dead on Laboratory Floor__" ))
assert ( 1 == CountOccurencesInText ( "Linguist", "'''''Linguist Specialist Found Dead on Laboratory Floor'''''" ))
assert ( 1 == CountOccurencesInText ( "Floor", """Look: ''Linguist Specialist Found Dead on Laboratory Floor'' is the headline today."""))
SampleTextForBench = """
A Suggestion Box Entry from Bob Carter
Dear Anonymous,
I'm not quite sure I understand the concept of this 'Anonymous' Suggestion Box. If no one reads what we write, then how will anything ever
change?
But in the spirit of good will, I've decided to offer my two cents, and hopefully Kevin won't steal it! (ha, ha). I would really like to
see more varieties of coffee in the coffee machine in the break room. 'Milk and sugar', 'black with sugar', 'extra sugar' and 'cream and su
gar' don't offer much diversity. Also, the selection of drinks seems heavily weighted in favor of 'sugar'. What if we don't want any suga
r?
But all this is beside the point because I quite like sugar, to be honest. In fact, that's my second suggestion: more sugar in the office.
Cakes, candy, insulin, aspartame... I'm not picky. I'll take it by mouth or inject it intravenously, if I have to.
Also, if someone could please fix the lock on the men's room stall, that would be helpful. Yesterday I was doing my business when Icarus ne
arly climbed into my lap.
So, have a great day!
Anonymously,
Bob Carter
"""
def doit():
"""Run CountOccurencesInText on a few examples"""
i = 0
for x in xrange(400):
i+= CountOccurencesInText("word" , SampleTextForBench)
i+= CountOccurencesInText("sugar" , SampleTextForBench)
i+= CountOccurencesInText("help" , SampleTextForBench)
i+= CountOccurencesInText("heavily" , SampleTextForBench)
i+= CountOccurencesInText("witfull" , SampleTextForBench)
i+= CountOccurencesInText("dog" , SampleTextForBench)
i+= CountOccurencesInText("almost" , SampleTextForBench)
i+= CountOccurencesInText("insulin" , SampleTextForBench)
i+= CountOccurencesInText("attaching" , SampleTextForBench)
i+= CountOccurencesInText("asma" , SampleTextForBench)
i+= CountOccurencesInText("neither" , SampleTextForBench)
i+= CountOccurencesInText("won't" , SampleTextForBench)
i+= CountOccurencesInText("green" , SampleTextForBench)
i+= CountOccurencesInText("parabole" , SampleTextForBench)
print i
#Start the tests
if __name__ == '__main__':
start_time = time.time()
#I need to pass the test:
try:
testCountOccurencesInText()
except:
print "Error !"
raise
print "Tests passed"
print time.time() - start_time
#I need to be fast as well:
import profile
doit_time = time.time()
profile.run('doit()')
print "Doit time"
print time.time() - doit_time
| gpl-2.0 | 3,881,558,764,113,422,300 | 51.632653 | 142 | 0.663978 | false |
Himon-SYNCRAFT/taskplus | taskplus/apps/rest/repositories/task_statuses_repository.py | 1 | 3113 | from taskplus.apps.rest import models
from taskplus.apps.rest.database import db_session
from taskplus.core.domain import TaskStatus
from taskplus.core.shared.repository import Repository
from taskplus.core.shared.exceptions import (
NoResultFound, NotUnique, CannotBeDeleted, DbError)
from sqlalchemy import exc
class TaskStatusesRepository(Repository):
def __init__(self):
self.status_model = models.TaskStatus
self.session = db_session
def one(self, id):
status = self.status_model.query.get(id)
if not status:
raise NoResultFound(id, TaskStatus.__name__)
return TaskStatus(id=status.id, name=status.name)
def list(self, filters=None):
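        """Return all task statuses as domain objects, optionally restricted by the given filters."""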
if not filters:
result = self.status_model.query.all()
else:
filters = self._parse_filters(filters)
filters_expression = []
for filter in filters:
key = getattr(self.status_model, filter.key)
filters_expression.append(
getattr(key, filter.operator)(filter.value))
result = self.status_model.query.filter(*filters_expression).all()
return [TaskStatus(id=status.id, name=status.name) for status in result]
def update(self, status):
status_to_update = self.status_model.query.get(status.id)
if not status_to_update:
raise NoResultFound(status.id, TaskStatus.__name__)
try:
status_to_update.name = status.name
self.session.add(status_to_update)
self.session.commit()
except exc.IntegrityError as e:
self.session.rollback()
if 'unique' in str(e).lower():
raise NotUnique('Status already exist')
raise
except exc.SQLAlchemyError:
self.session.rollback()
raise DbError()
return TaskStatus(id=status_to_update.id, name=status_to_update.name)
def save(self, status):
try:
new_status = self.status_model(name=status.name)
self.session.add(new_status)
self.session.commit()
except exc.IntegrityError as e:
self.session.rollback()
if 'unique' in str(e).lower():
raise NotUnique('Status already exist')
raise
except exc.SQLAlchemyError:
self.session.rollback()
raise DbError()
return TaskStatus(id=new_status.id, name=new_status.name)
def delete(self, id):
status = self.status_model.query.get(id)
if not status:
raise NoResultFound(id, TaskStatus.__name__)
try:
self.session.delete(status)
self.session.commit()
except exc.IntegrityError as e:
self.session.rollback()
if 'foreign' in str(e).lower():
raise CannotBeDeleted('Cannot delete status')
raise
except exc.SQLAlchemyError:
self.session.rollback()
raise DbError()
return TaskStatus(id=id, name=status.name)
| bsd-3-clause | -9,028,159,670,768,161,000 | 28.367925 | 80 | 0.600064 | false |
robinkraft/cloudless | src/cloudless/train/prepare_data.py | 1 | 9092 | import shutil
import os
import time
import csv
import json
from PIL import Image
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
import plyvel
from caffe_pb2 import Datum
import constants
import utils
def prepare_data():
"""
Prepares our training and validation data sets for use by Caffe.
"""
print "Preparing data..."
print "\tParsing Planet Labs data into independent cropped bounding boxes..."
#details = _get_landsat_details()
details = _crop_planetlab_images(_get_planetlab_details())
print "\t\tClass details before balancing (balancing not implemented yet):"
_print_input_details(details)
# TODO(brad): Balance classes.
#_balance_classes(details)
(train_paths, validation_paths, train_targets, validation_targets) = _split_data_sets(details)
print "\tSaving prepared data..."
_generate_leveldb(constants.TRAINING_FILE, train_paths, train_targets)
_generate_leveldb(constants.VALIDATION_FILE, validation_paths, validation_targets)
def _get_planetlab_details():
"""
Loads available image paths and image filenames for landsat, along with any bounding boxes
that might be present for clouds in them.
"""
print "location: %s" % constants.PLANETLAB_METADATA
with open(constants.PLANETLAB_METADATA) as data_file:
details = json.load(data_file)
for entry in details:
entry["image_path"] = os.path.join(constants.PLANETLAB_UNBOUNDED_IMAGES,
entry["image_name"])
entry["target"] = 0
if len(entry["image_annotation"]):
entry["target"] = 1
bboxes = []
for bbox in entry["image_annotation"]:
bbox = bbox.split(",")
x = int(bbox[0])
y = int(bbox[1])
width = int(bbox[2])
height = int(bbox[3])
bboxes.append({
"left": x,
"upper": y,
"right": x + width,
"lower": y + height
})
entry["image_annotation"] = bboxes
return details
def _get_landsat_details():
"""
Loads available image paths and image filenames for landsat, along with their target values if
they contain clouds or not (1 if there is a cloud, 0 otherwise).
"""
image_paths = []
targets = []
with open(constants.LANDSAT_METADATA, 'r') as csvfile:
entryreader = csv.reader(csvfile, delimiter=',', quotechar='"')
firstline = True
for row in entryreader:
if firstline:
firstline = False
continue
filename = row[0]
has_cloud = 0
if row[1] == "1":
has_cloud = 1
image_paths.append(os.path.join(constants.LANDSAT_IMAGES, filename))
targets.append(has_cloud)
return {
"image_paths": image_paths,
"targets": targets,
}
def _crop_planetlab_images(details):
"""
Generates cropped cloud and non-cloud images from our annotated bounding boxes, dumping
them into the file system and returning their full image paths with whether they are targets
or not.
"""
image_paths = []
targets = []
# Remove the directory to ensure we don't get old data runs included.
shutil.rmtree(constants.PLANETLAB_BOUNDED_IMAGES, ignore_errors=True)
os.makedirs(constants.PLANETLAB_BOUNDED_IMAGES)
for entry in details:
if entry["target"] == 0:
# Nothing to crop, but remove the alpha channel.
new_path = os.path.join(constants.PLANETLAB_BOUNDED_IMAGES, entry["image_name"])
im = Image.open(entry["image_path"])
im = _rgba_to_rgb(im)
im.save(new_path)
image_paths.append(new_path)
targets.append(entry["target"])
print "\t\tProcessed non-cloud image %s" % new_path
elif entry["target"] == 1:
(root, ext) = os.path.splitext(entry["image_name"])
cloud_num = 1
for bbox in entry["image_annotation"]:
try:
new_path = os.path.join(constants.PLANETLAB_BOUNDED_IMAGES,
"%s_cloud_%03d%s" % (root, cloud_num, ext))
im = Image.open(entry["image_path"])
im = im.crop((bbox["left"], bbox["upper"], bbox["right"], bbox["lower"]))
im = _rgba_to_rgb(im)
im.save(new_path)
image_paths.append(new_path)
targets.append(1)
print "\t\tProcessed cloud cropped image %s" % new_path
cloud_num += 1
except:
# TODO(brad): Modify the annotation UI to not be able to produce invalid
# crop values.
print "\t\tInvalid crop value"
return {
"image_paths": image_paths,
"targets": targets,
}
def _print_input_details(details):
"""
Prints out statistics about our input data.
"""
positive_cloud_class = 0
negative_cloud_class = 0
for entry in details["targets"]:
if entry == 1:
positive_cloud_class = positive_cloud_class + 1
else:
negative_cloud_class = negative_cloud_class + 1
print "\t\tInput data details:"
print "\t\t\tTotal number of input images: %d" % len(details["image_paths"])
print "\t\t\tPositive cloud count (# of images with clouds): %d" % positive_cloud_class
print "\t\t\tNegative cloud count (# of images without clouds): %d" % negative_cloud_class
# TODO(brad): Print out ratio of positive to negative.
# def _balance_classes(details):
# """
# Ensures we have the same number of positive and negative cloud/not cloud classes.
# """
def _split_data_sets(details):
"""
Shuffles and splits our datasets into training and validation sets.
"""
image_paths = details["image_paths"]
targets = details["targets"]
print "\tShuffling data..."
(image_paths, targets) = shuffle(image_paths, targets, random_state=0)
print "\tSplitting data 80% training, 20% validation..."
return train_test_split(image_paths, targets, train_size=0.8, test_size=0.2, \
random_state=0)
def _generate_leveldb(file_path, image_paths, targets):
"""
Caffe uses the LevelDB format to efficiently load its training and validation data; this method
    writes out the images and their target labels in an efficient way into this format.
"""
print "\t\tGenerating LevelDB file at %s..." % file_path
shutil.rmtree(file_path, ignore_errors=True)
db = plyvel.DB(file_path, create_if_missing=True)
wb = db.write_batch()
commit_every = 250000
start_time = int(round(time.time() * 1000))
for idx in range(len(image_paths)):
# Each image is a top level key with a keyname like 00000000011, in increasing
# order starting from 00000000000.
key = utils.get_key(idx)
# Do common normalization that might happen across both testing and validation.
image = _preprocess_data(_load_numpy_image(image_paths[idx]))
# Each entry in the leveldb is a Caffe protobuffer "Datum" object containing details.
datum = Datum()
datum.channels = 3 # RGB
datum.height = constants.HEIGHT
datum.width = constants.WIDTH
# TODO: Should I swap the color channels to BGR?
datum.data = image.tostring()
datum.label = targets[idx]
value = datum.SerializeToString()
wb.put(key, value)
if (idx + 1) % commit_every == 0:
wb.write()
del wb
wb = db.write_batch()
end_time = int(round(time.time() * 1000))
total_time = end_time - start_time
print "\t\t\tWrote batch, key: %s, time for batch: %d ms" % (key, total_time)
start_time = int(round(time.time() * 1000))
end_time = int(round(time.time() * 1000))
total_time = end_time - start_time
print "\t\t\tWriting final batch, time for batch: %d ms" % total_time
wb.write()
db.close()
def _preprocess_data(data):
"""
Applies any standard preprocessing we might do on data, whether it is during
training or testing time. 'data' is a numpy array of unrolled pixel vectors with
a remote sensing image.
"""
# Do nothing for now.
# We don't scale it's values to be between 0 and 1 as our Caffe model will do that.
# TODO(neuberg): Confirm that the AlexNet proto file has correct scaling values
# for the kinds of pixels we will use.
return data
def _load_numpy_image(image_path):
"""
Turns one of our testing image paths into an actual image, converted into a numpy array.
"""
im = Image.open(image_path)
# Scale the image to the size required by our neural network.
im = im.resize((constants.WIDTH, constants.HEIGHT))
data = np.asarray(im)
data = np.reshape(data, (3, constants.HEIGHT, constants.WIDTH))
return data
def _rgba_to_rgb(im):
"""
Drops the alpha channel in an RGB image.
"""
return im.convert('RGB')
| apache-2.0 | -3,873,212,744,560,818,000 | 32.549815 | 99 | 0.615706 | false |
danhuss/faker | faker/providers/currency/__init__.py | 1 | 9734 | from .. import BaseProvider
localized = True
class Provider(BaseProvider):
# Format: (code, name)
currencies = (
("AED", "United Arab Emirates dirham"),
("AFN", "Afghan afghani"),
("ALL", "Albanian lek"),
("AMD", "Armenian dram"),
("ANG", "Netherlands Antillean guilder"),
("AOA", "Angolan kwanza"),
("ARS", "Argentine peso"),
("AUD", "Australian dollar"),
("AWG", "Aruban florin"),
("AZN", "Azerbaijani manat"),
("BAM", "Bosnia and Herzegovina convertible mark"),
("BBD", "Barbadian dollar"),
("BDT", "Bangladeshi taka"),
("BGN", "Bulgarian lev"),
("BHD", "Bahraini dinar"),
("BIF", "Burundian franc"),
("BMD", "Bermudian dollar"),
("BND", "Brunei dollar"),
("BOB", "Bolivian boliviano"),
("BRL", "Brazilian real"),
("BSD", "Bahamian dollar"),
("BTN", "Bhutanese ngultrum"),
("BWP", "Botswana pula"),
("BYR", "Belarusian ruble"),
("BZD", "Belize dollar"),
("CAD", "Canadian dollar"),
("CDF", "Congolese franc"),
("CHF", "Swiss franc"),
("CLP", "Chilean peso"),
("CNY", "Renminbi"),
("COP", "Colombian peso"),
("CRC", "Costa Rican colón"),
("CUC", "Cuban convertible peso"),
("CUP", "Cuban peso"),
("CVE", "Cape Verdean escudo"),
("CZK", "Czech koruna"),
("DJF", "Djiboutian franc"),
("DKK", "Danish krone"),
("DOP", "Dominican peso"),
("DZD", "Algerian dinar"),
("EGP", "Egyptian pound"),
("ERN", "Eritrean nakfa"),
("ETB", "Ethiopian birr"),
("EUR", "Euro"),
("FJD", "Fijian dollar"),
("FKP", "Falkland Islands pound"),
("GBP", "Pound sterling"),
("GEL", "Georgian lari"),
("GGP", "Guernsey pound"),
("GHS", "Ghanaian cedi"),
("GIP", "Gibraltar pound"),
("GMD", "Gambian dalasi"),
("GNF", "Guinean franc"),
("GTQ", "Guatemalan quetzal"),
("GYD", "Guyanese dollar"),
("HKD", "Hong Kong dollar"),
("HNL", "Honduran lempira"),
("HRK", "Croatian kuna"),
("HTG", "Haitian gourde"),
("HUF", "Hungarian forint"),
("IDR", "Indonesian rupiah"),
("ILS", "Israeli new shekel"),
("NIS", "Israeli new shekel"),
("IMP", "Manx pound"),
("INR", "Indian rupee"),
("IQD", "Iraqi dinar"),
("IRR", "Iranian rial"),
("ISK", "Icelandic króna"),
("JEP", "Jersey pound"),
("JMD", "Jamaican dollar"),
("JOD", "Jordanian dinar"),
("JPY", "Japanese yen"),
("KES", "Kenyan shilling"),
("KGS", "Kyrgyzstani som"),
("KHR", "Cambodian riel"),
("KMF", "Comorian franc"),
("KPW", "North Korean won"),
("KRW", "Western Krahn language"),
("KWD", "Kuwaiti dinar"),
("KYD", "Cayman Islands dollar"),
("KZT", "Kazakhstani tenge"),
("LAK", "Lao kip"),
("LBP", "Lebanese pound"),
("LKR", "Sri Lankan rupee"),
("LRD", "Liberian dollar"),
("LSL", "Lesotho loti"),
("LTL", "Lithuanian litas"),
("LYD", "Libyan dinar"),
("MAD", "Moroccan dirham"),
("MDL", "Moldovan leu"),
("MGA", "Malagasy ariar"),
("MKD", "Macedonian denar"),
("MMK", "Burmese kyat"),
("MNT", "Mongolian tugrik"),
("MOP", "Macanese pataca"),
("MRO", "Mauritanian ouguiya"),
("MUR", "Mauritian rupee"),
("MVR", "Maldivian rufiyaa"),
("MWK", "Malawian kwacha"),
("MXN", "Mexican peso"),
("MYR", "Malaysian ringgit"),
("MZN", "Mozambican metical"),
("NAD", "Namibian dollar"),
("NGN", "Nigerian naira"),
("NIO", "Nicaraguan córdoba"),
("NOK", "Norwegian krone"),
("NPR", "Nepalese rupee"),
("NZD", "New Zealand dollar"),
("OMR", "Omani rial"),
("PAB", "Panamanian balboa"),
("PEN", "Peruvian sol"),
("PGK", "Papua New Guinean kina"),
("PHP", "Philippine peso"),
("PKR", "Pakistani rupee"),
("PLN", "Polish zloty"),
("PYG", "Paraguayan guarani"),
("QAR", "Qatari riyal"),
("RON", "Romanian leu"),
("RSD", "Serbian dinar"),
("RUB", "Russian ruble"),
("RWF", "Rwandan franc"),
("SAR", "Saudi riyal"),
("SBD", "Solomon Islands dollar"),
("SCR", "Seychellois rupee"),
("SDG", "Sudanese pound"),
("SEK", "Swedish krona"),
("SGD", "Singapore dollar"),
("SHP", "Saint Helena pound"),
("SLL", "Sierra Leonean leone"),
("SOS", "Somali shilling"),
("SPL", "Seborga luigino"),
("SRD", "Surinamese dollar"),
("STD", "São Tomé and Príncipe dobra"),
("SVC", "Salvadoran colón"),
("SYP", "Syrian pound"),
("SZL", "Swazi lilangeni"),
("THB", "Thai baht"),
("TJS", "Tajikistani somoni"),
("TMT", "Turkmenistan manat"),
("TND", "Tunisian dinar"),
("TOP", "Tongan paʻanga"),
("TRY", "Turkish lira"),
("TTD", "Trinidad and Tobago dollar"),
("TVD", "Tuvaluan dollar"),
("TWD", "New Taiwan dollar"),
("TZS", "Tanzanian shilling"),
("UAH", "Ukrainian hryvnia"),
("UGX", "Ugandan shilling"),
("USD", "United States dollar"),
("UYU", "Uruguayan peso"),
("UZS", "Uzbekistani soʻm"),
("VEF", "Venezuelan bolívar"),
("VND", "Vietnamese đồng"),
("VUV", "Vanuatu vatu"),
("WST", "Samoan tālā"),
("XAF", "Central African CFA franc"),
("XCD", "Eastern Caribbean dollar"),
("XDR", "Special drawing rights"),
("XOF", "West African CFA franc"),
("XPF", "CFP franc"),
("YER", "Yemeni rial"),
("ZAR", "South African rand"),
("ZMW", "Zambian kwacha"),
("ZWD", "Zimbabwean dollar"),
)
# Source: https://en.wikipedia.org/wiki/List_of_cryptocurrencies
cryptocurrencies = (
('AMP', "AMP"),
('AUR', "Auroracoin"),
('BC', "BlackCoin"),
('BTC', "Bitcoin"),
('BURST', "Burstcoin"),
('DASH', "Dash"),
('DOGE', "Dogecoin"),
('EMC', "Emercoin"),
('ETH', "Ethereum"),
('ETC', "Ethereum Classic"),
('GRC', "Gridcoin"),
('KOI', "Coinye"),
('LTC', "Litecoin"),
('MSC', "Omni"),
('MZC', "MazaCoin"),
('NMC', "Namecoin"),
('NXT', "Nxt"),
('POT', "PotCoin"),
('PPC', "Peercoin"),
('TIT', "Titcoin"),
('VTC', "Vertcoin"),
('XDN', "DigitalNote"),
('XMR', "Monero"),
('XPM', "Primecoin"),
('XRP', "Ripple"),
('ZEC', "Zcash"),
('STC', "SwiftCoin"),
('BCN', "Bytecoin"),
('FTH', "Feathercoin"),
('NEO', "NEO"),
('NEM', "XEM"),
('USDT', "Tether"),
('IOTA', "IOTA"),
('DRC', "Decred"),
('WAVES', "Waves Platform"),
('LSK', "Lisk"),
('ZCL', "Zclassic"),
('BCH', "Bitcoin Cash"),
('UBQ', "Ubiq"),
('EOS', "EOS.IO"),
('SRN', "Sirin Labs"),
('TRX', "TRON"),
('ADA', "Cardano"),
)
# List of currency symbols in Unicode, source: https://www.unicode.org/charts/beta/nameslist/n_20A0.html
currency_symbols = {
'AFN': '\u060B', 'ANG': '\u0192', 'ARS': '\u0024', 'AUD': '\u0024', 'AWG': '\u0192', 'BBD': '\u0024',
'BDT': '\u09F3', 'BMD': '\u0024', 'BND': '\u0024', 'BOB': '\u0024', 'BRL': '\u0024', 'BSD': '\u0024',
'BZD': '\u0024', 'CAD': '\u0024', 'CLP': '\u0024', 'CNY': '\u00A5', 'COP': '\u0024', 'CRC': '\u20A1',
'CUP': '\u0024', 'CVE': '\u0024', 'DOP': '\u0024', 'EGP': '\u00A3', 'EUR': '\u20AC', 'FJD': '\u0024',
'FKP': '\u00A3', 'GBP': '\u00A3', 'GHS': '\u20B5', 'GIP': '\u00A3', 'GYD': '\u0024', 'HKD': '\u0024',
'HUF': '\u0192', 'IDR': '\u20A8', 'ILS': '\u20AA', 'INR': '\u20B9', 'IRR': '\uFDFC', 'JMD': '\u0024',
'JPY': '\u00A5', 'KHR': '\u17DB', 'KPW': '\u20A9', 'KRW': '\u20A9', 'KYD': '\u0024', 'KZT': '\u20B8',
'LAK': '\u20AD', 'LBP': '\u00A3', 'LKR': '\u20A8', 'LRD': '\u0024', 'MNT': '\u20AE', 'MOP': '\u0024',
'MUR': '\u20A8', 'MXN': '\u0024', 'NAD': '\u0024', 'NGN': '\u20A6', 'NIO': '\u0024', 'NPR': '\u20A8',
'NZD': '\u0024', 'OMR': '\uFDFC', 'PHP': '\u20B1', 'PKR': '\u20A8', 'PYG': '\u20B2', 'QAR': '\uFDFC',
'RUB': '\u20BD', 'SAR': '\uFDFC', 'SBD': '\u0024', 'SDG': '\u00A3', 'SGD': '\u0024', 'SHP': '\u00A3',
'SRD': '\u0024', 'SYP': '\u00A3', 'THB': '\u0E3F', 'TOP': '\u0024', 'TRY': '\u20BA', 'TTD': '\u0024',
        'TWD': '\u0024', 'UAH': '\u20B4', 'USD': '\u0024', 'UYU': '\u0024', 'VND': '\u20AB', 'WST': '\u0024',
'XCD': '\u0024', 'YER': '\uFDFC', 'ZWD': '\u0024',
}
def currency(self):
return self.random_element(self.currencies)
def currency_code(self):
return self.currency()[0]
def currency_name(self):
return self.currency()[1]
def currency_symbol(self, code=None):
"""
:example: $
"""
if code is None:
code = self.random_element(self.currency_symbols.keys())
return self.currency_symbols[code]
def cryptocurrency(self):
return self.random_element(self.cryptocurrencies)
def cryptocurrency_code(self):
return self.cryptocurrency()[0]
def cryptocurrency_name(self):
return self.cryptocurrency()[1]
| mit | 1,007,488,436,341,185,300 | 35.814394 | 109 | 0.469904 | false |
drewcsillag/skunkweb | pylibs/vfs/tarfs.py | 1 | 3586 | # Time-stamp: <03/08/11 13:12:06 smulloni>
########################################################################
# Copyright (C) 2002 Andrew Csillag <[email protected]>
#
# You may distribute under the terms of either the GNU General
# Public License or the SkunkWeb License, as specified in the
# README file.
########################################################################
import tarlib
from vfs import FS, VFSException
from rosio import RO_StringIO
import pathutil
import os
def bslog(msg):
# try:
# open('/tmp/bullshit','a').write('%s\n' % msg)
# except:
pass
class TarFS(FS):
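    """Read-only virtual filesystem backed by a tar archive (gzip-compressed
    if the path ends in 'gz'); member paths are resolved through a
    pathutil.Archive built from the tar's file list."""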
def __init__(self, path, root='', prefix='/'):
self.path=os.path.abspath(path)
self._contents={} #filled by _readTar
self._filelist=[] #filled by _readTar
self._readTar()
self.__archive=pathutil.Archive(root, prefix)
self.root=root
self.prefix=prefix
self.__archive.savePaths(self._filelist)
bslog(str(self._filelist))
def ministat(self, path):
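        # Returns a 4-tuple: (size, -1, mtime, mtime) for archive members, or
        # (0,) plus the tar file's own stat times for the archive root.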
bslog('ministat %s' % path)
adjusted=pathutil._adjust_user_path(path)
if not self.__archive.paths.has_key(adjusted):
raise VFSException, "no such file or directory: %s" % path
realname=self.__archive.paths[adjusted]
if realname==None:
            arcstat=os.stat(self.path)[7:]
return (0,) + arcstat
item = self._contents[realname]
return item[1], -1, item[2], item[2]
def _readTar(self):
if self.path[-2:] == 'gz':
import gzip
tarlib.readTar(gzip.GzipFile(self.path), self._readTarEater)
else:
tarlib.readTar(open(self.path), self._readTarEater)
def _readTarEater(self, name, contents, size, mode, uid, gid, mtime,
typeflag, linkname, uname, gname, devmaj, devmin):
self._contents[name] = (contents, size, mtime)
self._filelist.append(name)
def open(self, path, mode='r'):
bslog('getting %s' % path)
adjusted=pathutil._adjust_user_path(path)
if mode!='r':
raise VFSException, "unsupported file open mode"
if not self.__archive.paths.has_key(adjusted):
raise VFSException, "no such file or directory: %s" % path
realname=self.__archive.paths[adjusted]
if realname!=None:
return RO_StringIO(adjusted,
self._contents[realname][0])
else:
raise VFSException, "cannot open directory as file: %s" % path
def listdir(self, path):
bslog('listdir %s' % path)
return self.__archive.listdir(path)
def isdir(self, path):
bslog('isdir %s' % path)
adjusted=pathutil._adjust_user_path(path)
if not self.__archive.paths.has_key(adjusted):
#raise VFSException, "no such file or directory: %s" % path
return 0
realname=self.__archive.paths[adjusted]
if realname==None:
return 1
else:
return realname.endswith('/') and \
self._contents[realname][1]==0
def isfile(self, path):
bslog('isfile %s' % path)
adjusted=pathutil._adjust_user_path(path)
if not self.__archive.paths.has_key(adjusted):
#raise VFSException, "no such file or directory: %s" % path
return 0
realname=self.__archive.paths[adjusted]
if realname==None:
return 0
else:
return not adjusted.endswith('/')
| gpl-2.0 | -9,174,864,212,907,179,000 | 34.50495 | 74 | 0.55884 | false |
google-research/google-research | kws_streaming/models/xception.py | 1 | 8284 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Xception - reduced version of keras/applications/xception.py."""
from kws_streaming.layers import modes
from kws_streaming.layers import speech_features
from kws_streaming.layers.compat import tf
import kws_streaming.models.model_utils as utils
def model_parameters(parser_nn):
"""Xception model parameters.
Args:
parser_nn: global command line args parser
Returns: parser with updated arguments
"""
parser_nn.add_argument(
'--cnn1_kernel_sizes',
type=str,
default='5',
help='Kernel_size of the conv block 1',
)
parser_nn.add_argument(
'--cnn1_filters',
type=str,
default='32',
help='Number of filters in the conv block 1',
)
parser_nn.add_argument(
'--stride1',
type=int,
default=2,
help='Stride of pooling layer after conv block 1',
)
parser_nn.add_argument(
'--stride2',
type=int,
default=2,
help='Stride of pooling layer after conv block 2 xception',
)
parser_nn.add_argument(
'--stride3',
type=int,
default=2,
help='Stride of pooling layer after conv block 3 xception',
)
parser_nn.add_argument(
'--stride4',
type=int,
default=2,
help='Stride of pooling layer after conv block 4 xception',
)
parser_nn.add_argument(
'--cnn2_kernel_sizes',
type=str,
default='5',
help='Kernel_size of the conv block 2 xception',
)
parser_nn.add_argument(
'--cnn2_filters',
type=str,
default='32',
help='Number of filters in the conv block 2 xception',
)
parser_nn.add_argument(
'--cnn3_kernel_sizes',
type=str,
default='5',
help='Kernel size of the conv block 3 xception',
)
parser_nn.add_argument(
'--cnn3_filters',
type=str,
default='32',
      help='Number of filters in the conv block 3 xception',
)
parser_nn.add_argument(
'--cnn4_kernel_sizes',
type=str,
default='5',
help='Kernel sizes of the conv block 4 xception',
)
parser_nn.add_argument(
'--cnn4_filters',
type=str,
default='32',
      help='Number of filters in the conv block 4 xception',
)
parser_nn.add_argument(
'--dropout',
type=float,
default=0.0,
help='Percentage of data dropped',
)
parser_nn.add_argument(
'--bn_scale',
type=int,
default=1,
help='If True, multiply by gamma. If False, gamma is not used. '
'When the next layer is linear (also e.g. nn.relu), this can be disabled'
'since the scaling will be done by the next layer.',
)
parser_nn.add_argument(
'--units2',
type=str,
default='64',
help='Number of units in the last set of hidden layers',
)
def block(net, kernel_sizes, filters, dropout, bn_scale=False):
"""Utility function to apply conv + BN.
Arguments:
net: input tensor.
kernel_sizes: size of convolution kernel.
filters: filters in `Conv2D`.
dropout: percentage of dropped data
bn_scale: scale batch normalization.
Returns:
Output tensor after applying `Conv2D` and `BatchNormalization`.
"""
if not filters:
return net
net_residual = net
# project
net_residual = tf.keras.layers.Conv2D(
filters[-1],
kernel_size=1,
padding='same',
use_bias=False,
activation=None)(
net_residual)
net_residual = tf.keras.layers.BatchNormalization(scale=bn_scale)(
net_residual)
for i, (kernel_size, filters) in enumerate(zip(kernel_sizes, filters)):
net = tf.keras.layers.DepthwiseConv2D(
kernel_size=(kernel_size, 1),
activation=None,
use_bias=False,
padding='same')(net)
net = tf.keras.layers.Conv2D(
filters,
kernel_size=1,
padding='same',
use_bias=False,
activation=None)(net)
net = tf.keras.layers.BatchNormalization(scale=bn_scale)(net)
# in the bottom of this function we add residual connection
# and then apply activation with dropout
# so no need to do another activation and dropout in the end of this loop
if i != len(kernel_sizes)-1:
net = tf.keras.layers.Activation('relu')(net)
net = tf.keras.layers.Dropout(dropout)(net)
net = tf.keras.layers.Add()([net_residual, net])
net = tf.keras.layers.Activation('relu')(net)
net = tf.keras.layers.Dropout(dropout)(net)
return net
def model(flags):
"""Xception model.
It is based on papers:
Xception: Deep Learning with Depthwise Separable Convolutions
https://arxiv.org/abs/1610.02357
MatchboxNet: 1D Time-Channel Separable Convolutional
Neural Network Architecture for Speech Commands Recognition
https://arxiv.org/pdf/2004.08531
Args:
flags: data/model parameters
Returns:
Keras model for training
"""
input_audio = tf.keras.layers.Input(
shape=modes.get_input_data_shape(flags, modes.Modes.TRAINING),
batch_size=flags.batch_size)
net = input_audio
if flags.preprocess == 'raw':
# it is a self contained model, user need to feed raw audio only
net = speech_features.SpeechFeatures(
speech_features.SpeechFeatures.get_params(flags))(
net)
# [batch, time, feature]
net = tf.keras.backend.expand_dims(net, axis=2)
# [batch, time, 1, feature]
# conv block
for kernel_size, filters in zip(
utils.parse(flags.cnn1_kernel_sizes), utils.parse(flags.cnn1_filters)):
net = tf.keras.layers.Conv2D(
filters, (kernel_size, 1),
use_bias=False)(net)
net = tf.keras.layers.BatchNormalization(scale=flags.bn_scale)(net)
net = tf.keras.layers.Activation('relu')(net)
# [batch, time, 1, feature]
if flags.stride1 > 1:
net = tf.keras.layers.MaxPooling2D((3, 1),
strides=(flags.stride1, 1),
padding='valid')(
net)
net = block(net, utils.parse(flags.cnn2_kernel_sizes),
utils.parse(flags.cnn2_filters), flags.dropout, flags.bn_scale)
if flags.stride2 > 1:
net = tf.keras.layers.MaxPooling2D((3, 1),
strides=(flags.stride2, 1),
padding='valid')(
net)
net = block(net, utils.parse(flags.cnn3_kernel_sizes),
utils.parse(flags.cnn3_filters), flags.dropout, flags.bn_scale)
if flags.stride3 > 1:
net = tf.keras.layers.MaxPooling2D((3, 1),
strides=(flags.stride3, 1),
padding='valid')(
net)
net = block(net, utils.parse(flags.cnn4_kernel_sizes),
utils.parse(flags.cnn4_filters), flags.dropout, flags.bn_scale)
if flags.stride4 > 1:
net = tf.keras.layers.MaxPooling2D((3, 1),
strides=(flags.stride4, 1),
padding='valid')(
net)
net = tf.keras.layers.GlobalAveragePooling2D()(net)
# [batch, filters]
net = tf.keras.layers.Dropout(flags.dropout)(net)
for units in utils.parse(flags.units2):
net = tf.keras.layers.Dense(
units=units, activation=None, use_bias=False)(
net)
net = tf.keras.layers.BatchNormalization(scale=flags.bn_scale)(net)
net = tf.keras.layers.Activation('relu')(net)
net = tf.keras.layers.Dense(flags.label_count)(net)
if flags.return_softmax:
net = tf.keras.layers.Activation('softmax')(net)
# [batch, label_count]
return tf.keras.Model(input_audio, net)
| apache-2.0 | 8,488,814,700,639,950,000 | 30.142857 | 79 | 0.617335 | false |
nischalsheth/contrail-controller | src/config/svc-monitor/svc_monitor/services/loadbalancer/drivers/ha_proxy/haproxy_config.py | 1 | 16976 | from svc_monitor.config_db import *
from os.path import dirname, exists, join
import logging
import yaml
try:
from custom_attributes.haproxy_validator \
import validate_custom_attributes as get_valid_attrs
except ImportError:
custom_attr_dict = {}
def get_valid_attrs(custom_attr_dict, section, custom_attrs):
return {}
PROTO_HTTP = 'HTTP'
PROTO_HTTPS = 'HTTPS'
PROTO_TERMINATED_HTTPS = 'TERMINATED_HTTPS'
PROTO_MAP_V1 = {
'TCP': 'tcp',
'HTTP': 'http',
'HTTPS': 'http',
'TERMINATED_HTTPS': 'http'
}
PROTO_MAP_V2 = {
'TCP': 'tcp',
'HTTP': 'http',
'HTTPS': 'tcp',
'TERMINATED_HTTPS': 'http'
}
LB_METHOD_MAP = {
'ROUND_ROBIN': 'roundrobin',
'LEAST_CONNECTIONS': 'leastconn',
'SOURCE_IP': 'source'
}
HEALTH_MONITOR_PING = 'PING'
HEALTH_MONITOR_TCP = 'TCP'
HEALTH_MONITOR_HTTP = 'HTTP'
HEALTH_MONITOR_HTTPS = 'HTTPS'
PERSISTENCE_SOURCE_IP = 'SOURCE_IP'
PERSISTENCE_HTTP_COOKIE = 'HTTP_COOKIE'
PERSISTENCE_APP_COOKIE = 'APP_COOKIE'
def get_config_v2(lb):
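    # Build the complete haproxy configuration text for a v2 loadbalancer:
    # global and defaults sections followed by per-listener frontends/backends.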
custom_attr_dict = get_custom_attributes_dict()
custom_attrs = get_custom_attributes_v2(lb)
conf = set_globals(lb.uuid, custom_attr_dict, custom_attrs) + '\n\n'
conf += set_defaults(custom_attr_dict, custom_attrs) + '\n\n'
conf += set_v2_frontend_backend(lb, custom_attr_dict, custom_attrs)
return conf
def get_config_v1(pool):
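    # Build the complete haproxy configuration text for a v1 pool: global and
    # defaults sections followed by the VIP frontend and the pool backend.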
custom_attr_dict = get_custom_attributes_dict()
custom_attrs = get_custom_attributes_v1(pool)
conf = set_globals(pool.uuid, custom_attr_dict, custom_attrs) + '\n\n'
conf += set_defaults(custom_attr_dict, custom_attrs) + '\n\n'
conf += set_v1_frontend_backend(pool, custom_attr_dict, custom_attrs)
return conf
def get_custom_attributes_dict():
custom_attr_dict = {}
script_dir = dirname(__file__)
rel_path = "custom_attributes/custom_attributes.yml"
abs_file_path = join(script_dir, rel_path)
if exists(abs_file_path):
with open(abs_file_path, 'r') as f:
custom_attr_dict = yaml.safe_load(f)
return custom_attr_dict
def get_custom_attributes_v1(pool):
custom_attrs = {}
custom_attrs[pool.uuid] = {}
for kvp in pool.custom_attributes or []:
custom_attrs[pool.uuid][kvp['key']] = kvp['value']
return custom_attrs
def get_custom_attributes_v2(lb):
custom_attrs = {}
for ll_id in lb.loadbalancer_listeners:
ll = LoadbalancerListenerSM.get(ll_id)
if not ll:
continue
pool = LoadbalancerPoolSM.get(ll.loadbalancer_pool)
if pool:
custom_attrs[pool.uuid] = {}
for kvp in pool.custom_attributes or []:
custom_attrs[pool.uuid][kvp['key']] = kvp['value']
return custom_attrs
def set_globals(uuid, custom_attr_dict, custom_attrs):
agg_custom_attrs = {}
for key, value in custom_attrs.iteritems():
agg_custom_attrs.update(custom_attrs[key])
global_custom_attrs = get_valid_attrs(custom_attr_dict, 'global',
agg_custom_attrs)
if 'max_conn' in global_custom_attrs:
maxconn = global_custom_attrs.pop('max_conn', None)
else:
maxconn = 65000
if 'ssl_ciphers' in global_custom_attrs:
ssl_ciphers = global_custom_attrs.pop('ssl_ciphers', None)
else:
ssl_ciphers = \
'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:' \
'ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:' \
'RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS'
conf = [
'global',
'daemon',
'user haproxy',
'group haproxy',
'log /dev/log local0',
'log /dev/log local1 notice',
'tune.ssl.default-dh-param 2048',
'ssl-default-bind-ciphers %s' % ssl_ciphers,
'ulimit-n 200000',
'maxconn %d' % maxconn
]
sock_path = '/var/lib/contrail/loadbalancer/haproxy/'
sock_path += uuid + '/haproxy.sock'
conf.append('stats socket %s mode 0666 level user' % sock_path)
# Adding custom_attributes config
for key, value in global_custom_attrs.iteritems():
cmd = custom_attr_dict['global'][key]['cmd']
conf.append(cmd % value)
res = "\n\t".join(conf)
return res
def set_defaults(custom_attr_dict, custom_attrs):
agg_custom_attrs = {}
for key, value in custom_attrs.iteritems():
agg_custom_attrs.update(custom_attrs[key])
default_custom_attrs = get_valid_attrs(custom_attr_dict, 'default',
agg_custom_attrs)
if 'client_timeout' in default_custom_attrs:
client_timeout = default_custom_attrs.pop('client_timeout', None)
else:
client_timeout = 300000
if 'server_timeout' in default_custom_attrs:
server_timeout = default_custom_attrs.pop('server_timeout', None)
else:
server_timeout = 300000
if 'connect_timeout' in default_custom_attrs:
connect_timeout = default_custom_attrs.pop('connect_timeout', None)
else:
connect_timeout = 5000
conf = [
'defaults',
'log global',
'retries 3',
'option redispatch',
'timeout connect %d' % connect_timeout,
'timeout client %d' % client_timeout,
'timeout server %d' % server_timeout,
]
# Adding custom_attributes config
for key, value in default_custom_attrs.iteritems():
cmd = custom_attr_dict['default'][key]['cmd']
conf.append(cmd % value)
res = "\n\t".join(conf)
return res
def set_v1_frontend_backend(pool, custom_attr_dict, custom_attrs):
conf = []
vip = VirtualIpSM.get(pool.virtual_ip)
if not vip or not vip.params['admin_state']:
return "\n"
ssl = ''
if vip.params['protocol'] == PROTO_HTTPS:
ssl = 'ssl crt haproxy_ssl_cert_path no-sslv3'
lconf = [
'frontend %s' % vip.uuid,
'option tcplog',
'bind %s:%s %s' % (vip.params['address'],
vip.params['protocol_port'], ssl),
'mode %s' % PROTO_MAP_V1[vip.params['protocol']],
]
if 'connection_limit' in vip.params and vip.params['connection_limit'] > 0:
lconf.append('maxconn %d' % vip.params['connection_limit'])
if vip.params['protocol'] == PROTO_HTTP or \
vip.params['protocol'] == PROTO_HTTPS:
lconf.append('option forwardfor')
if pool and pool.params['admin_state']:
frontend_custom_attrs = get_valid_attrs(custom_attr_dict, 'frontend',
custom_attrs[pool.uuid])
lconf.append('default_backend %s' % pool.uuid)
# Adding custom_attributes config
for key, value in frontend_custom_attrs.iteritems():
cmd = custom_attr_dict['frontend'][key]['cmd']
lconf.append(cmd % value)
res = "\n\t".join(lconf) + '\n\n'
res += set_backend_v1(pool, custom_attr_dict, custom_attrs)
conf.append(res)
return "\n".join(conf)
def set_backend_v1(pool, custom_attr_dict, custom_attrs):
backend_custom_attrs = get_valid_attrs(custom_attr_dict, 'backend',
custom_attrs[pool.uuid])
conf = [
'backend %s' % pool.uuid,
'mode %s' % PROTO_MAP_V1[pool.params['protocol']],
'balance %s' % LB_METHOD_MAP[pool.params['loadbalancer_method']]
]
if pool.params['protocol'] == PROTO_HTTP:
conf.append('option forwardfor')
server_suffix = ''
for hm_id in pool.loadbalancer_healthmonitors:
hm = HealthMonitorSM.get(hm_id)
if not hm:
continue
server_suffix, monitor_conf = set_health_monitor(hm)
conf.extend(monitor_conf)
session_conf = set_session_persistence(pool)
conf.extend(session_conf)
for member_id in pool.members:
member = LoadbalancerMemberSM.get(member_id)
if not member or not member.params['admin_state']:
continue
server = (('server %s %s:%s weight %s') % (member.uuid,
member.params['address'], member.params['protocol_port'],
member.params['weight'])) + server_suffix
conf.append(server)
# Adding custom_attributes config
for key, value in backend_custom_attrs.iteritems():
cmd = custom_attr_dict['backend'][key]['cmd']
conf.append(cmd % value)
return "\n\t".join(conf) + '\n'
def get_listeners(lb):
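    # Collect the admin-enabled listeners of the loadbalancer. For K8S
    # loadbalancers, listeners that share a protocol port are merged into one
    # entry so their SNI containers and pools are grouped per port.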
listeners = []
if lb.device_owner == 'K8S:LOADBALANCER':
for ll_id in lb.loadbalancer_listeners:
entry_found = False
ll = LoadbalancerListenerSM.get(ll_id)
if not ll:
continue
if not ll.params['admin_state']:
continue
port = ll.params['protocol_port']
for listener_entry in listeners or []:
if listener_entry['port'] == port:
entry_found = True
break
if entry_found == True:
sni_containers = listener_entry['sni_containers']
sni_containers.extend(ll.params['sni_containers'])
pools = listener_entry['pools']
pools.append(ll.loadbalancer_pool)
else:
listener = {}
listener['port'] = port
listener['obj'] = ll
listener['sni_containers'] = []
if 'sni_containers' in ll.params:
listener['sni_containers'].extend(ll.params['sni_containers'])
pools = []
pools.append(ll.loadbalancer_pool)
listener['pools'] = pools
listeners.append(listener)
else:
for ll_id in lb.loadbalancer_listeners:
ll = LoadbalancerListenerSM.get(ll_id)
if not ll:
continue
if not ll.params['admin_state']:
continue
listener = {}
listener['obj'] = ll
listener['sni_containers'] = []
if 'sni_containers' in ll.params:
listener['sni_containers'].extend(ll.params['sni_containers'])
pools = []
pools.append(ll.loadbalancer_pool)
listener['pools'] = pools
listeners.append(listener)
return listeners
def set_v2_frontend_backend(lb, custom_attr_dict, custom_attrs):
conf = []
lconf = ""
pconf = ""
listeners = get_listeners(lb)
for listener in listeners:
ll = listener['obj']
sni_containers = listener['sni_containers']
ssl = 'ssl'
tls_sni_presence = False
if ll.params['protocol'] == PROTO_TERMINATED_HTTPS:
if ll.params['default_tls_container']:
ssl += ' crt__%s' % ll.params['default_tls_container']
tls_sni_presence = True
for sni_container in sni_containers:
ssl += ' crt__%s' % sni_container
tls_sni_presence = True
if (tls_sni_presence == False):
ssl = ''
else:
ssl += ' no-sslv3'
conf = [
'frontend %s' % ll.uuid,
'option tcplog',
'bind %s:%s %s' % (lb.params['vip_address'],
ll.params['protocol_port'], ssl),
'mode %s' % PROTO_MAP_V2[ll.params['protocol']],
]
if 'connection_limit' in ll.params and ll.params['connection_limit'] > 0:
conf.append('maxconn %d' % ll.params['connection_limit'])
if ll.params['protocol'] == PROTO_HTTP:
conf.append('option forwardfor')
pools = listener['pools']
for pool_id in pools or []:
pool = LoadbalancerPoolSM.get(pool_id)
if pool and pool.params['admin_state']:
frontend_custom_attrs = get_valid_attrs(custom_attr_dict,
'frontend',
custom_attrs[pool.uuid])
annotations = pool.annotations
acl = {}
if annotations and 'key_value_pair' in annotations:
for kv in annotations['key_value_pair'] or []:
acl[kv['key']] = kv['value']
if 'type' not in acl or acl['type'] == 'default':
conf.append('default_backend %s' % pool.uuid)
else:
acl_cdn = ""
host_cdn = ""
path_cdn = ""
if 'host' in acl:
host_cdn = "%s_host" % pool.uuid
conf.append('acl %s hdr(host) -i %s' %(host_cdn, acl['host']))
if 'path' in acl:
path_cdn = "%s_path" % pool.uuid
conf.append('acl %s_path path %s' %(pool.uuid, acl['path']))
acl_cdn = host_cdn + " " + path_cdn
conf.append('use_backend %s if %s' %(pool.uuid, acl_cdn))
else:
conf.append('default_backend %s' % pool.uuid)
# Adding custom_attributes config
for key, value in frontend_custom_attrs.iteritems():
cmd = custom_attr_dict['frontend'][key]['cmd']
conf.append(cmd % value)
conf.append("\n")
pconf += set_backend_v2(pool, custom_attr_dict, custom_attrs)
lconf += "\n\t".join(conf)
conf = []
lconf = lconf[:-1]
conf.append(lconf)
pconf = pconf[:-2]
conf.append(pconf)
return "\n".join(conf)
def set_backend_v2(pool, custom_attr_dict, custom_attrs):
backend_custom_attrs = get_valid_attrs(custom_attr_dict, 'backend',
custom_attrs[pool.uuid])
conf = [
'backend %s' % pool.uuid,
'mode %s' % PROTO_MAP_V2[pool.params['protocol']],
'balance %s' % LB_METHOD_MAP[pool.params['loadbalancer_method']]
]
if pool.params['protocol'] == PROTO_HTTP:
conf.append('option forwardfor')
server_suffix = ''
for hm_id in pool.loadbalancer_healthmonitors:
hm = HealthMonitorSM.get(hm_id)
if not hm:
continue
server_suffix, monitor_conf = set_health_monitor(hm)
conf.extend(monitor_conf)
session_conf = set_session_persistence(pool)
conf.extend(session_conf)
for member_id in pool.members:
member = LoadbalancerMemberSM.get(member_id)
if not member or not member.params['admin_state']:
continue
server = (('server %s %s:%s weight %s') % (member.uuid,
member.params['address'], member.params['protocol_port'],
member.params['weight'])) + server_suffix
conf.append(server)
# Adding custom_attributes config
for key, value in backend_custom_attrs.iteritems():
cmd = custom_attr_dict['backend'][key]['cmd']
conf.append(cmd % value)
return "\n\t".join(conf) + "\n\n"
def set_health_monitor(hm):
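    # Translate a health monitor into a per-server 'check ...' suffix plus
    # backend-level timeout/httpchk directives; returns (server_suffix, conf).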
if not hm.params['admin_state']:
return '', []
server_suffix = ' check inter %ss fall %s' % \
(hm.params['delay'], hm.params['max_retries'])
conf = [
'timeout check %ss' % hm.params['timeout']
]
if hm.params['monitor_type'] in (HEALTH_MONITOR_HTTP, HEALTH_MONITOR_HTTPS):
conf.append('option httpchk %s %s' %
(hm.params['http_method'], hm.params['url_path']))
conf.append(
'http-check expect rstatus %s' %
'|'.join(_get_codes(hm.params['expected_codes']))
)
if hm.params['monitor_type'] == HEALTH_MONITOR_HTTPS:
conf.append('option ssl-hello-chk')
return server_suffix, conf
def set_session_persistence(pool):
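    # Map the configured persistence type onto haproxy directives:
    # stick-table on source IP, inserted cookie, or appsession cookie.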
conf = []
if pool.virtual_ip:
vip = VirtualIpSM.get(pool.virtual_ip)
if not vip:
            return conf
persistence = vip.params.get('persistence_type', None)
cookie = vip.params.get('persistence_cookie_name', None)
else:
persistence = pool.params.get('session_persistence', None)
cookie = pool.params.get('persistence_cookie_name', None)
if persistence == PERSISTENCE_SOURCE_IP:
conf.append('stick-table type ip size 10k')
conf.append('stick on src')
elif persistence == PERSISTENCE_HTTP_COOKIE:
conf.append('cookie SRV insert indirect nocache')
elif (persistence == PERSISTENCE_APP_COOKIE and cookie):
conf.append('appsession %s len 56 timeout 3h' % cookie)
return conf
def _get_codes(codes):
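    # Expand an expected_codes string such as '200,202' or '200-204' into a
    # set of status-code strings for the http-check expect directive.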
response = set()
for code in codes.replace(',', ' ').split(' '):
code = code.strip()
if not code:
continue
elif '-' in code:
low, hi = code.split('-')[:2]
response.update(str(i) for i in xrange(int(low), int(hi) + 1))
else:
response.add(code)
return response
| apache-2.0 | 5,685,654,613,463,615,000 | 34.366667 | 90 | 0.560792 | false |
ivotkv/neolixir | tests/test_relationship.py | 1 | 2873 | # -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2013 Ivo Tzvetkov
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from common import *
def test_expunge(m):
n1 = TNode()
n2 = TNode()
rel = n1.rel_out.append(n2)
assert n1 in m.session.phantomnodes
assert n2 in m.session.phantomnodes
assert rel in m.session.relmap
n1.expunge()
assert n1 not in m.session.phantomnodes
assert n2 in m.session.phantomnodes
assert rel not in m.session.relmap
def test_save_load_delete(m):
n1 = TNode()
n2 = TNode()
r = TRel.get((n1, 'test', n2))
m.session.commit()
assert r.id is not None
r_id = r.id
m.session.clear()
r = Relationship.get(r_id)
assert r.id == r_id
assert isinstance(r, TRel)
assert r.type == 'test'
assert isinstance(r.start, TNode)
assert isinstance(r.end, TNode)
n1 = r.start
n2 = r.end
n1.delete()
assert r.is_deleted()
assert not n2.is_deleted()
m.session.commit()
with raises(EntityNotFoundException):
Relationship.get(r_id)
def test_load_with_deleted_end_node(m):
n1 = TNode()
n2 = TNode()
rel = n1.trel_out.append(n2)
m.session.commit()
n1_id = n1.id
n2_id = n2.id
rel_id = rel.id
m.session.clear()
# through relview
n1 = TNode.get(n1_id)
n2 = TNode.get(n2_id)
n2.delete()
assert m.session.count == 2
assert n2 not in n1.trel_out
assert m.session.count == 3
rel = TRel.get(rel_id)
assert rel in m.session
assert rel.is_deleted()
assert m.session.count == 3
m.session.clear()
# direct load
n1 = TNode.get(n1_id)
n1.delete()
assert m.session.count == 1
rel = TRel.get(rel_id)
assert rel in m.session
assert rel.is_deleted()
n2 = TNode.get(n2_id)
assert rel not in n2.trel_in
assert m.session.count == 3
| mit | 4,453,792,110,003,353,600 | 32.8 | 462 | 0.676993 | false |
SyrakuShaikh/python | learning/a_byte_of_python/str_format.py | 1 | 1163 | age = 20
name = 'Shaikh'
print('{0} was {1} years old when he wrote this book'.format(name, age))
print('Why is {0} playing with that python?'.format(name))
# Omit the index in '{}'
print('{} was {} years old when he wrote this book'.format(name, age))
print('Why is {} playing with that python?'.format(name))
# More detailed specifications
# decimal (.) precision of 3 for float '0.333'
print('{0:.3f}.format(1.0/3) '+'{0:.3f}'.format(1.0/3))
print('{0:.3f}.format(1/3) '+'{0:.3f}'.format(1/3))
# fill with underscores (_) with the text centered
# (^) to 11 width '___hello___'
print('11 {0:_^11}'.format('hello'))
print('15 {0:_^15}'.format('hello'))
print('16 {0:_^16}'.format('hello'))
# keyword-based 'Swaroop wrote A Byte of Python'
print('{name} wrote {book}'.format(name='Swaroop', book='A Byte of Python'))
# print without the default '\n'
print('a', end='')
print('b', end='')
print()
print('a', end=' ')
print('b', end=' ')
print('c')
# Escape sequence '\'
print('\ ')
print('\'')
print('This is the first line\nThis is the second line')
print("This is the first sentence. \
This is the second sentence.")
print(r"Newlines are indicated by \n")
| gpl-3.0 | 5,028,002,855,142,084,000 | 29.605263 | 76 | 0.638865 | false |
jiawen/Halide | python_bindings/tutorial/lesson_12_using_the_gpu.py | 1 | 11807 | #!/usr/bin/python3
# Halide tutorial lesson 12.
# This lesson demonstrates how to use Halide to run code on a GPU.
# This lesson can be built by invoking the command:
# make tutorial_lesson_12_using_the_gpu
# in a shell with the current directory at the top of the halide source tree.
# Otherwise, see the platform-specific compiler invocations below.
# On linux, you can compile and run it like so:
# g++ lesson_12*.cpp -g -std=c++11 -I ../include -L ../bin -lHalide `libpng-config --cflags --ldflags` -lpthread -ldl -o lesson_12
# LD_LIBRARY_PATH=../bin ./lesson_12
# On os x:
# g++ lesson_12*.cpp -g -std=c++11 -I ../include -L ../bin -lHalide `libpng-config --cflags --ldflags` -o lesson_12
# DYLD_LIBRARY_PATH=../bin ./lesson_12
#include "Halide.h"
#include <stdio.h>
#using namespace Halide
from halide import *
# Include some support code for loading pngs.
#include "image_io.h"
from scipy.misc import imread
import os.path
# Include a clock to do performance testing.
#include "clock.h"
from datetime import datetime
# Define some Vars to use.
x, y, c, i = Var("x"), Var("y"), Var("c"), Var("i")
# We're going to want to schedule a pipeline in several ways, so we
# define the pipeline in a class so that we can recreate it several
# times with different schedules.
class MyPipeline:
def __init__(self, input):
assert type(input) == Image_uint8
self.lut = Func("lut")
self.padded = Func("padded")
self.padded16 = Func("padded16")
self.sharpen = Func("sharpen")
self.curved = Func("curved")
self.input = input
# For this lesson, we'll use a two-stage pipeline that sharpens
# and then applies a look-up-table (LUT).
# First we'll define the LUT. It will be a gamma curve.
self.lut[i] = cast(UInt(8), clamp(pow(i / 255.0, 1.2) * 255.0, 0, 255))
# Augment the input with a boundary condition.
self.padded[x, y, c] = input[clamp(x, 0, input.width()-1),
clamp(y, 0, input.height()-1), c]
# Cast it to 16-bit to do the math.
self.padded16[x, y, c] = cast(UInt(16), self.padded[x, y, c])
# Next we sharpen it with a five-tap filter.
        self.sharpen[x, y, c] = (self.padded16[x, y, c] * 2 -
(self.padded16[x - 1, y, c] +
self.padded16[x, y - 1, c] +
self.padded16[x + 1, y, c] +
self.padded16[x, y + 1, c]) / 4)
# Then apply the LUT.
self.curved[x, y, c] = self.lut[self.sharpen[x, y, c]]
# Now we define methods that give our pipeline several different
# schedules.
def schedule_for_cpu(self):
# Compute the look-up-table ahead of time.
self.lut.compute_root()
# Compute color channels innermost. Promise that there will
# be three of them and unroll across them.
self.curved.reorder(c, x, y) \
.bound(c, 0, 3) \
.unroll(c)
# Look-up-tables don't vectorize well, so just parallelize
# curved in slices of 16 scanlines.
yo, yi = Var("yo"), Var("yi")
self.curved.split(y, yo, yi, 16) \
.parallel(yo)
# Compute sharpen as needed per scanline of curved, reusing
# previous values computed within the same strip of 16
# scanlines.
self.sharpen.store_at(self.curved, yo) \
.compute_at(self.curved, yi)
# Vectorize the sharpen. It's 16-bit so we'll vectorize it 8-wide.
self.sharpen.vectorize(x, 8)
# Compute the padded input at the same granularity as the
# sharpen. We'll leave the cast to 16-bit inlined into
# sharpen.
self.padded.store_at(self.curved, yo) \
.compute_at(self.curved, yi)
# Also vectorize the padding. It's 8-bit, so we'll vectorize
# 16-wide.
self.padded.vectorize(x, 16)
# JIT-compile the pipeline for the CPU.
self.curved.compile_jit()
return
# Now a schedule that uses CUDA or OpenCL.
def schedule_for_gpu(self):
# We make the decision about whether to use the GPU for each
# Func independently. If you have one Func computed on the
# CPU, and the next computed on the GPU, Halide will do the
# copy-to-gpu under the hood. For this pipeline, there's no
# reason to use the CPU for any of the stages. Halide will
# copy the input image to the GPU the first time we run the
# pipeline, and leave it there to reuse on subsequent runs.
# As before, we'll compute the LUT once at the start of the
# pipeline.
self.lut.compute_root()
# Let's compute the look-up-table using the GPU in 16-wide
# one-dimensional thread blocks. First we split the index
# into blocks of size 16:
block, thread = Var("block"), Var("thread")
self.lut.split(i, block, thread, 16)
# Then we tell cuda that our Vars 'block' and 'thread'
# correspond to CUDA's notions of blocks and threads, or
# OpenCL's notions of thread groups and threads.
self.lut.gpu_blocks(block) \
.gpu_threads(thread)
# This is a very common scheduling pattern on the GPU, so
# there's a shorthand for it:
# lut.gpu_tile(i, 16)
# Func::gpu_tile method is similar to Func::tile, except that
# it also specifies that the tile coordinates correspond to
# GPU blocks, and the coordinates within each tile correspond
# to GPU threads.
# Compute color channels innermost. Promise that there will
# be three of them and unroll across them.
self.curved.reorder(c, x, y) \
.bound(c, 0, 3) \
.unroll(c)
# Compute curved in 2D 8x8 tiles using the GPU.
self.curved.gpu_tile(x, y, 8, 8)
# This is equivalent to:
# curved.tile(x, y, xo, yo, xi, yi, 8, 8)
# .gpu_blocks(xo, yo)
# .gpu_threads(xi, yi)
# We'll leave sharpen as inlined into curved.
# Compute the padded input as needed per GPU block, storing the
# intermediate result in shared memory. Var::gpu_blocks, and
# Var::gpu_threads exist to help you schedule producers within
# GPU threads and blocks.
self.padded.compute_at(self.curved, Var.gpu_blocks())
# Use the GPU threads for the x and y coordinates of the
# padded input.
self.padded.gpu_threads(x, y)
# JIT-compile the pipeline for the GPU. CUDA or OpenCL are
# not enabled by default. We have to construct a Target
# object, enable one of them, and then pass that target
# object to compile_jit. Otherwise your CPU will very slowly
# pretend it's a GPU, and use one thread per output pixel.
# Start with a target suitable for the machine you're running
# this on.
target = get_host_target()
# Then enable OpenCL or CUDA.
#use_opencl = False
use_opencl = True
if use_opencl:
# We'll enable OpenCL here, because it tends to give better
# performance than CUDA, even with NVidia's drivers, because
# NVidia's open source LLVM backend doesn't seem to do all
# the same optimizations their proprietary compiler does.
target.set_feature(TargetFeature.OpenCL)
print("(Using OpenCL)")
else:
# Uncomment the next line and comment out the line above to
# try CUDA instead.
target.set_feature(TargetFeature.CUDA)
print("(Using CUDA)")
# If you want to see all of the OpenCL or CUDA API calls done
# by the pipeline, you can also enable the Debug
# flag. This is helpful for figuring out which stages are
# slow, or when CPU -> GPU copies happen. It hurts
# performance though, so we'll leave it commented out.
# target.set_feature(TargetFeature.Debug)
self.curved.compile_jit(target)
def test_performance(self):
# Test the performance of the scheduled MyPipeline.
output = Image(UInt(8),
self.input.width(),
self.input.height(),
self.input.channels())
# Run the filter once to initialize any GPU runtime state.
self.curved.realize(output)
# Now take the best of 3 runs for timing.
best_time = float("inf")
for i in range(3):
t1 = datetime.now()
# Run the filter 100 times.
for j in range(100):
self.curved.realize(output)
# Force any GPU code to finish by copying the buffer back to the CPU.
output.copy_to_host()
t2 = datetime.now()
elapsed = (t2 - t1).total_seconds()
if elapsed < best_time:
best_time = elapsed
# end of "best of three times"
print("%1.4f milliseconds" % (best_time * 1000))
def test_correctness(self, reference_output):
assert type(reference_output) == Image_uint8
output = self.curved.realize(self.input.width(),
self.input.height(),
self.input.channels())
assert type(output) == Image_uint8
# Check against the reference output.
for c in range(self.input.channels()):
for y in range(self.input.height()):
for x in range(self.input.width()):
if output(x, y, c) != reference_output(x, y, c):
print(
"Mismatch between output (%d) and "
"reference output (%d) at %d, %d, %d" % (
output(x, y, c),
reference_output(x, y, c),
x, y, c))
return
print("CPU and GPU outputs are consistent.")
def main():
# Load an input image.
image_path = os.path.join(os.path.dirname(__file__), "../../tutorial/images/rgb.png")
input_data = imread(image_path)
input = Image(input_data)
# Allocated an image that will store the correct output
reference_output = Image(UInt(8), input.width(), input.height(), input.channels())
print("Testing performance on CPU:")
p1 = MyPipeline(input)
p1.schedule_for_cpu()
p1.test_performance()
p1.curved.realize(reference_output)
if have_opencl():
print("Testing performance on GPU:")
p2 = MyPipeline(input)
p2.schedule_for_gpu()
p2.test_performance()
p2.test_correctness(reference_output)
else:
print("Not testing performance on GPU, "
"because I can't find the opencl library")
return 0
def have_opencl():
"""
A helper function to check if OpenCL seems to exist on this machine.
:return: bool
"""
import ctypes
import platform
try:
if platform.system() == "Windows":
            ret = ctypes.windll.LoadLibrary("OpenCL.dll") is not None
elif platform.system() == "Darwin": # apple
ret = ctypes.cdll.LoadLibrary("/System/Library/Frameworks/OpenCL.framework/Versions/Current/OpenCL") != None
elif platform.system() == "Linux":
ret = ctypes.cdll.LoadLibrary("libOpenCL.so") != None
else:
raise Exception("Cannot check for opencl presence "
"on unknown system '%s'" % platform.system())
except OSError:
ret = False
return ret
if __name__ == "__main__":
main()
| mit | -2,134,254,626,117,046,000 | 34.778788 | 130 | 0.583806 | false |
amorgun/blackbox-2016 | regression_bot/bot.py | 1 | 2361 | import time
import interface as bbox
import numpy as np
# initial: Level score= 2308.362061 14s
# Optimized: Level score= 2308.362061 6.542621850967407s
# test level
# baseline: 2221.729980
# best: 2246.279541
# best_coefs_score=2560.100830078125_sigma=0.004999999888241291.txt: 2158.130615
# best_coefs_score=2964.60009765625_sigma=0.0010000000474974513.txt: 2259.347900
# star3 - subfit_best_coefs_score=2621.0400390625_sigma=0.009999999776482582.txt: 2621.040039
# star 4-subfit_best_coefs_score=2738.301513671875_sigma=0.009999999776482582.txt: 2738.301514
# star 5-best_coefs_score=2966.489501953125_sigma=0.009999999776482582_level=train_level: 2422.259033
# star 6-best_coefs_score=2964.60009765625_sigma=0.10000000149011612_level=train_level: 2259.347900
# star 7-best_coefs_score=2994.271240234375_sigma=0.009999999776482582_level=train_level:
# star 8-best_coefs_score=2992.164794921875_sigma=0.0010000000474974513_level=train_level:
# star 9-best_coefs_score=3017.848388671875_sigma=0.0010000000474974513_level=train_level: 2389.348633
# star 10-best_coefs_score=2972.124267578125_sigma=9.999999747378752e-05_level=train_level.txt: 2257.179688
# star 13-best_coefs_score=2980.401123046875_sigma=0.0010000000474974513_level=train_level.txt:
def get_action_by_state(state, coefs):
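    # linear policy: score each action as the dot product of its coefficient
    # row with the bias-augmented state vector and pick the best one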
return np.argmax(np.dot(coefs, state))
n_features = 36
n_actions = 4
max_time = -1
def prepare_bbox():
global n_features, n_actions, max_time
if bbox.is_level_loaded():
bbox.reset_level()
else:
bbox.load_level("../levels/test_level.data", verbose=1)
n_features = bbox.get_num_of_features()
n_actions = bbox.get_num_of_actions()
max_time = bbox.get_max_time()
def load_regression_coefs(filename):
coefs = np.loadtxt(filename)
return coefs
def run_bbox():
start_time = time.time()
has_next = 1
prepare_bbox()
coefs = load_regression_coefs("star 13-best_coefs_score=2980.401123046875_sigma=0.0010000000474974513_level=train_level.txt")
state = np.ones(n_features + 1)
while has_next:
state[:-1] = bbox.get_state()
action = get_action_by_state(state, coefs)
has_next = bbox.do_action(action)
bbox.finish(verbose=1)
end_time = time.time()
print(end_time - start_time)
if __name__ == "__main__":
run_bbox() | mit | 2,967,137,232,418,529,300 | 32.267606 | 129 | 0.717069 | false |
lichong012245/django-lfs-0.7.8 | lfs/checkout/tests/test_checkout.py | 1 | 13155 | # django imports
from django.contrib.auth.models import User
from django.contrib.auth.models import AnonymousUser
from django.contrib.sessions.backends.file import SessionStore
from django.shortcuts import get_object_or_404
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from django.conf import settings
from django.core import mail
# test imports
from lfs.catalog.models import Product
from lfs.cart.models import Cart
from lfs.cart.models import CartItem
from lfs.cart.views import add_to_cart
from lfs.cart import utils as cart_utils
from lfs.core.models import Shop, Country
from lfs.core.utils import get_default_shop
from lfs.customer.models import Customer, Address
from lfs.order.models import Order
from lfs.order.settings import SUBMITTED
from lfs.order.utils import add_order
from lfs.payment.models import PaymentMethod
from lfs.payment.settings import BY_INVOICE
from lfs.shipping.models import ShippingMethod
from lfs.tax.models import Tax
# 3rd party imports
from postal.library import form_factory
class CheckoutTestCase(TestCase):
"""
"""
fixtures = ['lfs_shop.xml']
def setUp(self):
"""
"""
ie = Country.objects.get(code="ie")
gb = Country.objects.get(code="gb")
de = Country.objects.get(code="de")
us = Country.objects.get(code="us")
fr = Country.objects.get(code="fr")
nl = Country.objects.get(code="nl")
shop = get_default_shop()
for ic in Country.objects.all():
shop.invoice_countries.add(ic)
shop.shipping_countries.add(nl)
shop.save()
tax = Tax.objects.create(rate=19)
shipping_method = ShippingMethod.objects.create(
name="Standard",
active=True,
price=1.0,
tax=tax
)
self.by_invoice = PaymentMethod.objects.get(pk=BY_INVOICE)
address1 = Address.objects.create(
firstname="John",
lastname="Doe",
company_name="Doe Ltd.",
line1="Street 42",
city="2342",
state="Gotham City",
country=gb,
)
address2 = Address.objects.create(
firstname="Jane",
lastname="Doe",
company_name="Doe Ltd.",
line1="Street 43",
city="2443",
state="Smallville",
country=fr,
)
self.username = 'joe'
self.password = 'bloggs'
new_user = User(username=self.username)
new_user.set_password(self.password)
new_user.save()
self.user = new_user
self.customer = Customer.objects.create(
user=new_user,
selected_shipping_method=shipping_method,
selected_payment_method=self.by_invoice,
selected_shipping_address=address1,
selected_invoice_address=address2,
)
self.PRODUCT1_NAME = "Surfboard"
p1 = Product.objects.create(
name=self.PRODUCT1_NAME,
slug="product-1",
sku="sku-1",
price=1.1,
tax=tax,
stock_amount=100,
active=True,
)
p2 = Product.objects.create(
name="Product 2",
slug="product-2",
sku="sku-2",
price=2.2,
tax=tax,
stock_amount=50,
active=True,
)
cart = Cart.objects.create(
user=new_user
)
self.item1 = CartItem.objects.create(
cart=cart,
product=p1,
amount=2,
)
self.item2 = CartItem.objects.create(
cart=cart,
product=p2,
amount=3,
)
self.c = Client()
def test_login(self):
"""Tests the login view.
"""
from lfs.checkout.views import login
from lfs.checkout.settings import CHECKOUT_TYPE_ANON
from lfs.tests.utils import create_request
request = create_request()
# Anonymous
from django.contrib.auth.models import AnonymousUser
request.user = AnonymousUser()
result = login(request)
self.assertEqual(result.status_code, 200)
# Set checkout_type
shop = get_default_shop()
shop.checkout_type = CHECKOUT_TYPE_ANON
shop.save()
# Fake a new reuqest
request.shop = shop
result = login(request)
self.assertEqual(result.status_code, 302)
# Authenticated
request.user = self.user
result = login(request)
self.assertEqual(result.status_code, 302)
def dump_response(self, http_response):
fo = open('tests_checkout.html', 'w')
fo.write(str(http_response))
fo.close()
def test_checkout_page(self):
"""Tests that checkout page gets populated with correct details
"""
# login as our customer
logged_in = self.c.login(username=self.username, password=self.password)
self.assertEqual(logged_in, True)
cart_response = self.c.get(reverse('lfs_cart'))
self.assertContains(cart_response, self.PRODUCT1_NAME, status_code=200)
checkout_response = self.c.get(reverse('lfs_checkout'))
self.assertContains(checkout_response, 'Smallville', status_code=200)
def test_checkout_country_after_cart_country_change(self):
"""Tests that checkout page gets populated with correct details
"""
# login as our customer
logged_in = self.c.login(username=self.username, password=self.password)
self.assertEqual(logged_in, True)
cart_response = self.c.get(reverse('lfs_cart'))
self.assertContains(cart_response, self.PRODUCT1_NAME, status_code=200)
user = User.objects.get(username=self.username)
customer = Customer.objects.get(user=user)
fr = Country.objects.get(code="fr")
self.assertEquals(customer.selected_invoice_address.country.code, "fr")
# change the country in the cart
de = Country.objects.get(code="de")
cart_response = self.c.post('/refresh-cart', {'country': de.code.lower(), "amount-cart-item_%s" % self.item1.id: 1, "amount-cart-item_%s" % self.item2.id: 1})
customer = Customer.objects.get(user=user)
self.assertEquals(customer.selected_shipping_address.country.code.lower(), "de")
self.assertEquals(customer.selected_invoice_address.country.code.lower(), "de")
cart_response = self.c.get(reverse('lfs_cart'))
self.assertContains(cart_response, self.PRODUCT1_NAME, status_code=200)
checkout_response = self.c.get(reverse('lfs_checkout'))
self.assertContains(checkout_response, '<option value="DE" selected="selected">Deutschland</option>', status_code=200)
def test_order_phone_email_set_after_checkout(self):
# login as our customer
logged_in = self.c.login(username=self.username, password=self.password)
self.assertEqual(logged_in, True)
# check initial database quantities
self.assertEquals(Address.objects.count(), 2)
self.assertEquals(Customer.objects.count(), 1)
self.assertEquals(Order.objects.count(), 0)
# check we have no invoice or shipping phone or email prior to checkout
our_customer = Customer.objects.all()[0]
self.assertEqual(our_customer.selected_invoice_address.phone, '')
self.assertEqual(our_customer.selected_invoice_address.email, None)
self.assertEqual(our_customer.selected_shipping_address.phone, '')
self.assertEqual(our_customer.selected_shipping_address.email, None)
checkout_data = {'invoice_firstname': 'bob',
'invoice_lastname': 'builder',
'invoice-line1': 'de company',
'invoice-line2': 'de street',
'invoice-city': 'de area',
'invoice-state': 'de town',
'invoice-code': 'cork',
'invoice-country': "IE",
'invoice_email': '[email protected]',
'invoice_phone': '1234567',
'shipping_firstname': 'hans',
'shipping_lastname': 'schmidt',
'shipping-line1': 'orianenberger strasse',
'shipping-line2': 'de town',
'shipping-city': 'stuff',
'shipping-state': 'BE',
'shipping-code': '12345',
'shipping-country': "DE",
'payment_method': self.by_invoice.id,
'shipping_email': '[email protected]',
'shipping_phone': '7654321',
}
checkout_post_response = self.c.post(reverse('lfs_checkout'), checkout_data)
self.assertRedirects(checkout_post_response, reverse('lfs_thank_you'), status_code=302, target_status_code=200,)
# check that an order email got sent
self.assertEqual(getattr(settings, 'LFS_SEND_ORDER_MAIL_ON_CHECKOUT', True), True)
self.assertEqual(getattr(settings, 'LFS_SEND_ORDER_MAIL_ON_PAYMENT', False), False)
self.assertEqual(len(mail.outbox), 1)
# check database quantities post-checkout
self.assertEquals(Address.objects.count(), 2)
self.assertEquals(Customer.objects.count(), 1)
self.assertEquals(Order.objects.count(), 1)
# check our customer details post checkout
our_customer = Customer.objects.all()[0]
self.assertEqual(our_customer.selected_invoice_address.phone, "1234567")
self.assertEqual(our_customer.selected_invoice_address.email, "[email protected]")
self.assertEqual(our_customer.selected_shipping_address.phone, '7654321')
self.assertEqual(our_customer.selected_shipping_address.email, "[email protected]")
def test_checkout_with_4_line_shipping_address(self):
# login as our customer
logged_in = self.c.login(username=self.username, password=self.password)
self.assertEqual(logged_in, True)
# test that our Netherlands form has only 4 address line fields
nl_form_class = form_factory("NL")
nl_form = nl_form_class()
self.assertEqual('state' in nl_form.fields, False)
self.assertEqual('code' in nl_form.fields, True)
# check initial database quantities
self.assertEquals(Address.objects.count(), 2)
self.assertEquals(Customer.objects.count(), 1)
self.assertEquals(Order.objects.count(), 0)
# check we have no invoice or shipping phone or email prior to checkout
our_customer = Customer.objects.all()[0]
self.assertEqual(our_customer.selected_invoice_address.phone, '')
self.assertEqual(our_customer.selected_invoice_address.email, None)
self.assertEqual(our_customer.selected_shipping_address.phone, '')
self.assertEqual(our_customer.selected_shipping_address.email, None)
checkout_data = {'invoice_firstname': 'bob',
'invoice_lastname': 'builder',
'invoice-line1': 'de company',
'invoice-line2': 'de street',
'invoice-city': 'de area',
'invoice-state': 'de town',
'invoice-code': '1234AB',
'invoice-country': "NL",
'invoice_email': '[email protected]',
'invoice_phone': '1234567',
'shipping_firstname': 'hans',
'shipping_lastname': 'schmidt',
'shipping-line1': 'orianenberger strasse',
'shipping-line2': 'de town',
'shipping-city': 'stuff',
'shipping-state': 'BE',
'shipping-code': '1234AB',
'shipping-country': "NL",
'payment_method': self.by_invoice.id,
'shipping_email': '[email protected]',
'shipping_phone': '7654321',
}
checkout_post_response = self.c.post(reverse('lfs_checkout'), checkout_data)
self.assertRedirects(checkout_post_response, reverse('lfs_thank_you'), status_code=302, target_status_code=200,)
# check database quantities post-checkout
self.assertEquals(Address.objects.count(), 2)
self.assertEquals(Customer.objects.count(), 1)
self.assertEquals(Order.objects.count(), 1)
# check our customer details post checkout
our_customer = Customer.objects.all()[0]
self.assertEqual(our_customer.selected_invoice_address.phone, "1234567")
self.assertEqual(our_customer.selected_invoice_address.email, "[email protected]")
self.assertEqual(our_customer.selected_shipping_address.phone, '7654321')
self.assertEqual(our_customer.selected_shipping_address.email, "[email protected]")
| bsd-3-clause | 2,927,025,104,021,376,500 | 37.80531 | 166 | 0.593615 | false |
Cal-CS-61A-Staff/ok | tests/test_files_cloud.py | 1 | 3602 | """
This module contains integration tests which verify the behavior of the storage
module against supported cloud storage providers. The tests for the local
(directory-based) storage backend are re-used and run for each supported cloud
storage provider. To add tests for a new provider, create a new subclass of
CloudTestFile.
To run tests for Google Cloud Platform, set the following environment variables:
- GCP_STORAGE_KEY
- GCP_STORAGE_SECRET
- GCP_STORAGE_CONTAINER
To run tests for Azure, set the following environment variables:
- AZURE_STORAGE_KEY (this is the storage account name in the Azure Portal)
- AZURE_STORAGE_SECRET (this is the storage account key in the Azure Portal)
- AZURE_STORAGE_CONTAINER
"""
import os
import unittest
import requests
from server.settings.test import Config as TestConfig
from tests.test_files import TestFile
class TestConfigMutationMixin(object):
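    """Mixin that temporarily overrides attributes on the test Config and can
    restore the saved originals afterwards."""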
__config_backup = {}
@classmethod
def set_config(cls, key, value):
cls.__config_backup.setdefault(key, getattr(TestConfig, key, None))
setattr(TestConfig, key, value)
@classmethod
def restore_config(cls, key):
original_value = cls.__config_backup.get(key)
setattr(TestConfig, key, original_value)
class CloudTestFile(TestFile, TestConfigMutationMixin):
storage_provider = ""
key_env_name = ""
secret_env_name = ""
container_env_name = ""
@classmethod
def setUpClass(cls):
super().setUpClass()
storage_key = os.getenv(cls.key_env_name)
storage_secret = os.getenv(cls.secret_env_name)
storage_container = os.getenv(cls.container_env_name)
if not storage_key or not storage_secret or not storage_container:
raise unittest.SkipTest("Cloud storage credentials for {} not configured".format(cls.storage_provider))
cls.set_config("STORAGE_PROVIDER", cls.storage_provider)
cls.set_config("STORAGE_KEY", storage_key)
cls.set_config("STORAGE_SECRET", storage_secret)
cls.set_config("STORAGE_CONTAINER", storage_container)
@classmethod
def tearDownClass(cls):
super().tearDownClass()
cls.restore_config("STORAGE_PROVIDER")
cls.restore_config("STORAGE_KEY")
cls.restore_config("STORAGE_SECRET")
cls.restore_config("STORAGE_CONTAINER")
def fetch_file(self, url):
client_response = self.client.get(url)
self.assertStatus(client_response, 302)
redirect_response = requests.get(client_response.location)
redirect_response.raise_for_status()
return redirect_response.headers, redirect_response.content
def verify_download_headers(self, headers, filename, content_type):
pass
test_prefix_expected_obj_name = 'test/fizz.txt'
test_malicious_directory_traversal_expected_obj_name = 'test/_/_/fizz.txt'
class GoogleCloudTestFile(CloudTestFile):
storage_provider = "GOOGLE_STORAGE"
key_env_name = "GCP_STORAGE_KEY"
secret_env_name = "GCP_STORAGE_SECRET"
container_env_name = "GCP_STORAGE_CONTAINER"
class AzureBlobTestFile(CloudTestFile):
storage_provider = "AZURE_BLOBS"
key_env_name = "AZURE_STORAGE_KEY"
secret_env_name = "AZURE_STORAGE_SECRET"
container_env_name = "AZURE_STORAGE_CONTAINER"
def test_simple(self):
reason = """
An issue in libcloud causes this to fail for Azure storage,
but the code being tested is not used for *any* cloud storage,
so it's safe to skip this test
"""
raise unittest.SkipTest(reason)
del CloudTestFile, TestFile
| apache-2.0 | 3,176,303,676,044,229,000 | 31.745455 | 115 | 0.701277 | false |
MeteorAdminz/pep8 | testsuite/test_api.py | 1 | 15704 | # -*- coding: utf-8 -*-
import os.path
import shlex
import sys
import unittest
import pycodestyle
from testsuite.support import ROOT_DIR, PseudoFile
E11 = os.path.join(ROOT_DIR, 'testsuite', 'E11.py')
class DummyChecker(object):
def __init__(self, tree, filename):
pass
def run(self):
if False:
yield
class APITestCase(unittest.TestCase):
"""Test the public methods."""
def setUp(self):
self._saved_stdout = sys.stdout
self._saved_stderr = sys.stderr
self._saved_checks = pycodestyle._checks
sys.stdout = PseudoFile()
sys.stderr = PseudoFile()
pycodestyle._checks = dict(
(k, dict((f, (vals[0][:], vals[1])) for (f, vals) in v.items()))
for (k, v) in self._saved_checks.items()
)
def tearDown(self):
sys.stdout = self._saved_stdout
sys.stderr = self._saved_stderr
pycodestyle._checks = self._saved_checks
def reset(self):
del sys.stdout[:], sys.stderr[:]
def test_register_physical_check(self):
def check_dummy(physical_line, line_number):
if False:
yield
pycodestyle.register_check(check_dummy, ['Z001'])
self.assertTrue(check_dummy in pycodestyle._checks['physical_line'])
codes, args = pycodestyle._checks['physical_line'][check_dummy]
self.assertTrue('Z001' in codes)
self.assertEqual(args, ['physical_line', 'line_number'])
options = pycodestyle.StyleGuide().options
self.assertTrue(any(func == check_dummy
for name, func, args in options.physical_checks))
def test_register_logical_check(self):
def check_dummy(logical_line, tokens):
if False:
yield
pycodestyle.register_check(check_dummy, ['Z401'])
self.assertTrue(check_dummy in pycodestyle._checks['logical_line'])
codes, args = pycodestyle._checks['logical_line'][check_dummy]
self.assertTrue('Z401' in codes)
self.assertEqual(args, ['logical_line', 'tokens'])
pycodestyle.register_check(check_dummy, [])
pycodestyle.register_check(check_dummy, ['Z402', 'Z403'])
codes, args = pycodestyle._checks['logical_line'][check_dummy]
self.assertEqual(codes, ['Z401', 'Z402', 'Z403'])
self.assertEqual(args, ['logical_line', 'tokens'])
options = pycodestyle.StyleGuide().options
self.assertTrue(any(func == check_dummy
for name, func, args in options.logical_checks))
def test_register_ast_check(self):
pycodestyle.register_check(DummyChecker, ['Z701'])
self.assertTrue(DummyChecker in pycodestyle._checks['tree'])
codes, args = pycodestyle._checks['tree'][DummyChecker]
self.assertTrue('Z701' in codes)
self.assertTrue(args is None)
options = pycodestyle.StyleGuide().options
self.assertTrue(any(cls == DummyChecker
for name, cls, args in options.ast_checks))
def test_register_invalid_check(self):
class InvalidChecker(DummyChecker):
def __init__(self, filename):
pass
def check_dummy(logical, tokens):
if False:
yield
pycodestyle.register_check(InvalidChecker, ['Z741'])
pycodestyle.register_check(check_dummy, ['Z441'])
for checkers in pycodestyle._checks.values():
self.assertTrue(DummyChecker not in checkers)
self.assertTrue(check_dummy not in checkers)
self.assertRaises(TypeError, pycodestyle.register_check)
def test_styleguide(self):
report = pycodestyle.StyleGuide().check_files()
self.assertEqual(report.total_errors, 0)
self.assertFalse(sys.stdout)
self.assertFalse(sys.stderr)
self.reset()
report = pycodestyle.StyleGuide().check_files(['missing-file'])
stdout = sys.stdout.getvalue().splitlines()
self.assertEqual(len(stdout), report.total_errors)
self.assertEqual(report.total_errors, 1)
# < 3.3 returns IOError; >= 3.3 returns FileNotFoundError
self.assertTrue(stdout[0].startswith("missing-file:1:1: E902 "))
self.assertFalse(sys.stderr)
self.reset()
report = pycodestyle.StyleGuide().check_files([E11])
stdout = sys.stdout.getvalue().splitlines()
self.assertEqual(len(stdout), report.total_errors)
self.assertEqual(report.total_errors, 17)
self.assertFalse(sys.stderr)
self.reset()
# Passing the paths in the constructor gives same result
report = pycodestyle.StyleGuide(paths=[E11]).check_files()
stdout = sys.stdout.getvalue().splitlines()
self.assertEqual(len(stdout), report.total_errors)
self.assertEqual(report.total_errors, 17)
self.assertFalse(sys.stderr)
self.reset()
def test_styleguide_options(self):
# Instantiate a simple checker
pep8style = pycodestyle.StyleGuide(paths=[E11])
# Check style's attributes
self.assertEqual(pep8style.checker_class, pycodestyle.Checker)
self.assertEqual(pep8style.paths, [E11])
self.assertEqual(pep8style.runner, pep8style.input_file)
self.assertEqual(pep8style.options.ignore_code, pep8style.ignore_code)
self.assertEqual(pep8style.options.paths, pep8style.paths)
# Check unset options
for o in ('benchmark', 'config', 'count', 'diff',
'doctest', 'quiet', 'show_pep8', 'show_source',
'statistics', 'testsuite', 'verbose'):
oval = getattr(pep8style.options, o)
self.assertTrue(oval in (None, False), msg='%s = %r' % (o, oval))
# Check default options
self.assertTrue(pep8style.options.repeat)
self.assertEqual(pep8style.options.benchmark_keys,
['directories', 'files',
'logical lines', 'physical lines'])
self.assertEqual(pep8style.options.exclude,
['.svn', 'CVS', '.bzr', '.hg',
'.git', '__pycache__', '.tox'])
self.assertEqual(pep8style.options.filename, ['*.py'])
self.assertEqual(pep8style.options.format, 'default')
self.assertEqual(pep8style.options.select, ())
self.assertEqual(pep8style.options.ignore, ('E226', 'E24'))
self.assertEqual(pep8style.options.max_line_length, 79)
def test_styleguide_ignore_code(self):
def parse_argv(argstring):
_saved_argv = sys.argv
sys.argv = shlex.split('pycodestyle %s /dev/null' % argstring)
try:
return pycodestyle.StyleGuide(parse_argv=True)
finally:
sys.argv = _saved_argv
options = parse_argv('').options
self.assertEqual(options.select, ())
self.assertEqual(
options.ignore,
('E121', 'E123', 'E126', 'E226', 'E24', 'E704', 'W503')
)
options = parse_argv('--doctest').options
self.assertEqual(options.select, ())
self.assertEqual(options.ignore, ())
options = parse_argv('--ignore E,W').options
self.assertEqual(options.select, ())
self.assertEqual(options.ignore, ('E', 'W'))
options = parse_argv('--select E,W').options
self.assertEqual(options.select, ('E', 'W'))
self.assertEqual(options.ignore, ('',))
options = parse_argv('--select E --ignore E24').options
self.assertEqual(options.select, ('E',))
self.assertEqual(options.ignore, ('',))
options = parse_argv('--ignore E --select E24').options
self.assertEqual(options.select, ('E24',))
self.assertEqual(options.ignore, ('',))
options = parse_argv('--ignore W --select E24').options
self.assertEqual(options.select, ('E24',))
self.assertEqual(options.ignore, ('',))
pep8style = pycodestyle.StyleGuide(paths=[E11])
self.assertFalse(pep8style.ignore_code('E112'))
self.assertFalse(pep8style.ignore_code('W191'))
self.assertTrue(pep8style.ignore_code('E241'))
pep8style = pycodestyle.StyleGuide(select='E', paths=[E11])
self.assertFalse(pep8style.ignore_code('E112'))
self.assertTrue(pep8style.ignore_code('W191'))
self.assertFalse(pep8style.ignore_code('E241'))
pep8style = pycodestyle.StyleGuide(select='W', paths=[E11])
self.assertTrue(pep8style.ignore_code('E112'))
self.assertFalse(pep8style.ignore_code('W191'))
self.assertTrue(pep8style.ignore_code('E241'))
pep8style = pycodestyle.StyleGuide(select=('F401',), paths=[E11])
self.assertEqual(pep8style.options.select, ('F401',))
self.assertEqual(pep8style.options.ignore, ('',))
self.assertFalse(pep8style.ignore_code('F'))
self.assertFalse(pep8style.ignore_code('F401'))
self.assertTrue(pep8style.ignore_code('F402'))
def test_styleguide_excluded(self):
pep8style = pycodestyle.StyleGuide(paths=[E11])
self.assertFalse(pep8style.excluded('./foo/bar'))
self.assertFalse(pep8style.excluded('./foo/bar/main.py'))
self.assertTrue(pep8style.excluded('./CVS'))
self.assertTrue(pep8style.excluded('./.tox'))
self.assertTrue(pep8style.excluded('./subdir/CVS'))
self.assertTrue(pep8style.excluded('__pycache__'))
self.assertTrue(pep8style.excluded('./__pycache__'))
self.assertTrue(pep8style.excluded('subdir/__pycache__'))
self.assertFalse(pep8style.excluded('draftCVS'))
self.assertFalse(pep8style.excluded('./CVSoup'))
self.assertFalse(pep8style.excluded('./CVS/subdir'))
def test_styleguide_checks(self):
pep8style = pycodestyle.StyleGuide(paths=[E11])
# Default lists of checkers
self.assertTrue(len(pep8style.options.physical_checks) > 4)
self.assertTrue(len(pep8style.options.logical_checks) > 10)
self.assertEqual(len(pep8style.options.ast_checks), 0)
# Sanity check
for name, check, args in pep8style.options.physical_checks:
self.assertEqual(check.__name__, name)
self.assertEqual(args[0], 'physical_line')
for name, check, args in pep8style.options.logical_checks:
self.assertEqual(check.__name__, name)
self.assertEqual(args[0], 'logical_line')
# Do run E11 checks
options = pycodestyle.StyleGuide().options
self.assertTrue(any(func == pycodestyle.indentation
for name, func, args in options.logical_checks))
options = pycodestyle.StyleGuide(select=['E']).options
self.assertTrue(any(func == pycodestyle.indentation
for name, func, args in options.logical_checks))
options = pycodestyle.StyleGuide(ignore=['W']).options
self.assertTrue(any(func == pycodestyle.indentation
for name, func, args in options.logical_checks))
options = pycodestyle.StyleGuide(ignore=['E12']).options
self.assertTrue(any(func == pycodestyle.indentation
for name, func, args in options.logical_checks))
# Do not run E11 checks
options = pycodestyle.StyleGuide(select=['W']).options
self.assertFalse(any(func == pycodestyle.indentation
for name, func, args in options.logical_checks))
options = pycodestyle.StyleGuide(ignore=['E']).options
self.assertFalse(any(func == pycodestyle.indentation
for name, func, args in options.logical_checks))
options = pycodestyle.StyleGuide(ignore=['E11']).options
self.assertFalse(any(func == pycodestyle.indentation
for name, func, args in options.logical_checks))
def test_styleguide_init_report(self):
style = pycodestyle.StyleGuide(paths=[E11])
standard_report = pycodestyle.StandardReport
self.assertEqual(style.options.reporter, standard_report)
self.assertEqual(type(style.options.report), standard_report)
class MinorityReport(pycodestyle.BaseReport):
pass
report = style.init_report(MinorityReport)
self.assertEqual(style.options.report, report)
self.assertEqual(type(report), MinorityReport)
style = pycodestyle.StyleGuide(paths=[E11], reporter=MinorityReport)
self.assertEqual(type(style.options.report), MinorityReport)
self.assertEqual(style.options.reporter, MinorityReport)
def test_styleguide_check_files(self):
pep8style = pycodestyle.StyleGuide(paths=[E11])
report = pep8style.check_files()
self.assertTrue(report.total_errors)
self.assertRaises(TypeError, pep8style.check_files, 42)
# < 3.3 raises TypeError; >= 3.3 raises AttributeError
self.assertRaises(Exception, pep8style.check_files, [42])
def test_check_unicode(self):
# Do not crash if lines are Unicode (Python 2.x)
pycodestyle.register_check(DummyChecker, ['Z701'])
source = '#\n'
if hasattr(source, 'decode'):
source = source.decode('ascii')
pep8style = pycodestyle.StyleGuide()
count_errors = pep8style.input_file('stdin', lines=[source])
self.assertFalse(sys.stdout)
self.assertFalse(sys.stderr)
self.assertEqual(count_errors, 0)
def test_check_nullbytes(self):
pycodestyle.register_check(DummyChecker, ['Z701'])
pep8style = pycodestyle.StyleGuide()
count_errors = pep8style.input_file('stdin', lines=['\x00\n'])
stdout = sys.stdout.getvalue()
if 'SyntaxError' in stdout:
# PyPy 2.2 returns a SyntaxError
expected = "stdin:1:2: E901 SyntaxError"
elif 'ValueError' in stdout:
# Python 3.5.
expected = "stdin:1:1: E901 ValueError"
else:
expected = "stdin:1:1: E901 TypeError"
self.assertTrue(stdout.startswith(expected),
msg='Output %r does not start with %r' %
(stdout, expected))
self.assertFalse(sys.stderr)
self.assertEqual(count_errors, 1)
def test_styleguide_unmatched_triple_quotes(self):
pycodestyle.register_check(DummyChecker, ['Z701'])
lines = [
'def foo():\n',
' """test docstring""\'\n',
]
pep8style = pycodestyle.StyleGuide()
pep8style.input_file('stdin', lines=lines)
stdout = sys.stdout.getvalue()
expected = 'stdin:2:5: E901 TokenError: EOF in multi-line string'
self.assertTrue(expected in stdout)
def test_styleguide_continuation_line_outdented(self):
pycodestyle.register_check(DummyChecker, ['Z701'])
lines = [
'def foo():\n',
' pass\n',
'\n',
'\\\n',
'\n',
'def bar():\n',
' pass\n',
]
pep8style = pycodestyle.StyleGuide()
count_errors = pep8style.input_file('stdin', lines=lines)
self.assertEqual(count_errors, 2)
stdout = sys.stdout.getvalue()
expected = (
'stdin:6:1: '
'E122 continuation line missing indentation or outdented'
)
self.assertTrue(expected in stdout)
expected = 'stdin:6:1: E302 expected 2 blank lines, found 1'
self.assertTrue(expected in stdout)
# TODO: runner
# TODO: input_file
| mit | 9,184,479,701,089,937,000 | 38.756962 | 78 | 0.61322 | false |
gencer/sentry | src/sentry/utils/auth.py | 1 | 8652 | """
sentry.utils.auth
~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import six
import logging
from django.conf import settings
from django.contrib.auth import login as _login
from django.contrib.auth.backends import ModelBackend
from django.core.urlresolvers import reverse, resolve
from sudo.utils import is_safe_url
from time import time
from sentry.models import User, Authenticator
logger = logging.getLogger('sentry.auth')
_LOGIN_URL = None
SSO_SESSION_KEY = 'sso'
MFA_SESSION_KEY = 'mfa'
class AuthUserPasswordExpired(Exception):
def __init__(self, user):
self.user = user
def _make_key_value(val):
return val.strip().split('=', 1)
def parse_auth_header(header):
try:
return dict(map(_make_key_value, header.split(' ', 1)[1].split(',')))
except Exception:
return {}
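# Illustrative sketch (added for clarity; not part of the original module).
# parse_auth_header() turns a header such as 'Sentry sentry_key=abc, sentry_version=7'
# into a dict; the header value below is a made-up example.
def _example_parse_auth_header():
    header = 'Sentry sentry_key=abc, sentry_version=7'
    parsed = parse_auth_header(header)
    # parsed == {'sentry_key': 'abc', 'sentry_version': '7'}
    return parsed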
def get_auth_providers():
return [
key for key, cfg_names in six.iteritems(settings.AUTH_PROVIDERS)
if all(getattr(settings, c, None) for c in cfg_names)
]
def get_pending_2fa_user(request):
rv = request.session.get('_pending_2fa')
if rv is None:
return
user_id, created_at = rv[:2]
if created_at < time() - 60 * 5:
return None
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
pass
def has_pending_2fa(request):
return request.session.get('_pending_2fa') is not None
def get_login_url(reset=False):
global _LOGIN_URL
if _LOGIN_URL is None or reset:
# if LOGIN_URL resolves force login_required to it instead of our own
# XXX: this must be done as late as possible to avoid idempotent requirements
try:
resolve(settings.LOGIN_URL)
except Exception:
_LOGIN_URL = settings.SENTRY_LOGIN_URL
else:
_LOGIN_URL = settings.LOGIN_URL
if _LOGIN_URL is None:
_LOGIN_URL = reverse('sentry-login')
# ensure type is coerced to string (to avoid lazy proxies)
_LOGIN_URL = six.text_type(_LOGIN_URL)
return _LOGIN_URL
def initiate_login(request, next_url=None):
for key in ('_next', '_after_2fa', '_pending_2fa'):
try:
del request.session[key]
except KeyError:
pass
if next_url:
request.session['_next'] = next_url
def get_login_redirect(request, default=None):
if default is None:
default = get_login_url()
# If there is a pending 2fa authentication bound to the session then
# we need to go to the 2fa dialog.
if has_pending_2fa(request):
return reverse('sentry-2fa-dialog')
# If we have a different URL to go after the 2fa flow we want to go to
# that now here.
after_2fa = request.session.pop('_after_2fa', None)
if after_2fa is not None:
return after_2fa
login_url = request.session.pop('_next', None)
if not login_url:
return default
if not is_valid_redirect(login_url, host=request.get_host()):
login_url = default
return login_url
def is_valid_redirect(url, host=None):
if not url:
return False
if url.startswith(get_login_url()):
return False
return is_safe_url(url, host=host)
def mark_sso_complete(request, organization_id):
# TODO(dcramer): this needs to be bound based on SSO options (e.g. changing
# or enabling SSO invalidates this)
sso = request.session.get(SSO_SESSION_KEY, '')
if sso:
sso = sso.split(',')
else:
sso = []
sso.append(six.text_type(organization_id))
request.session[SSO_SESSION_KEY] = ','.join(sso)
request.session.modified = True
def has_completed_sso(request, organization_id):
sso = request.session.get(SSO_SESSION_KEY, '').split(',')
return six.text_type(organization_id) in sso
def find_users(username, with_valid_password=True, is_active=None):
"""
    Return a list of users that match a username,
    falling back to email.
"""
qs = User.objects
if is_active is not None:
qs = qs.filter(is_active=is_active)
if with_valid_password:
qs = qs.exclude(password='!')
try:
# First, assume username is an iexact match for username
user = qs.get(username__iexact=username)
return [user]
except User.DoesNotExist:
# If not, we can take a stab at guessing it's an email address
if '@' in username:
# email isn't guaranteed unique
return list(qs.filter(email__iexact=username))
return []
def login(request, user, passed_2fa=None, after_2fa=None, organization_id=None):
"""
    This logs a user in for the session and current request.
If 2FA is enabled this method will start the MFA flow and return False as
required. If `passed_2fa` is set to `True` then the 2FA flow is set to be
finalized (user passed the flow).
If the session has already resolved MFA in the past, it will automatically
detect it from the session.
Optionally `after_2fa` can be set to a URL which will be used to override
the regular session redirect target directly after the 2fa flow.
Returns boolean indicating if the user was logged in.
"""
has_2fa = Authenticator.objects.user_has_2fa(user)
if passed_2fa is None:
passed_2fa = (request.session.get(MFA_SESSION_KEY, '')
== six.text_type(user.id))
if has_2fa and not passed_2fa:
request.session['_pending_2fa'] = [user.id, time(), organization_id]
if after_2fa is not None:
request.session['_after_2fa'] = after_2fa
request.session.modified = True
return False
# TODO(dcramer): this needs to be bound based on MFA options
if passed_2fa:
request.session[MFA_SESSION_KEY] = six.text_type(user.id)
request.session.modified = True
mfa_state = request.session.pop('_pending_2fa', ())
if organization_id is None and len(mfa_state) == 3:
organization_id = mfa_state[2]
# Check for expired passwords here after we cleared the 2fa flow.
# While this means that users will have to pass 2fa before they can
# figure out that their passwords are expired this is still the more
# reasonable behavior.
#
    # We also remember _after_2fa here so that we can continue the flow if
# someone does it in the same browser.
if user.is_password_expired:
raise AuthUserPasswordExpired(user)
# If this User has a nonce value, we need to bind into the session.
if user.session_nonce is not None:
request.session['_nonce'] = user.session_nonce
# If there is no authentication backend, just attach the first
# one and hope it goes through. This apparently is a thing we
# have been doing for a long time, just moved it to a more
# reasonable place.
if not hasattr(user, 'backend'):
user.backend = settings.AUTHENTICATION_BACKENDS[0]
_login(request, user)
if organization_id:
mark_sso_complete(request, organization_id)
log_auth_success(request, user.username, organization_id)
return True
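# Illustrative sketch (added; not part of the original module): how a view might
# call login() and fall back to the 2FA flow when it returns False. The request,
# user and redirect handling are assumptions for demonstration only.
def _example_login_view(request, user):
    from django.http import HttpResponseRedirect
    if login(request, user):
        # fully authenticated (2FA either passed or not enabled)
        return HttpResponseRedirect(get_login_redirect(request))
    # 2FA still pending: get_login_redirect() now points at the 2FA dialog
    return HttpResponseRedirect(get_login_redirect(request))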
def log_auth_success(request, username, organization_id=None):
logger.info(
'user.auth.success',
extra={
'ip_address': request.META['REMOTE_ADDR'],
'username': username,
'organization_id': organization_id,
}
)
def log_auth_failure(request, username=None):
logger.info(
'user.auth.fail', extra={
'ip_address': request.META['REMOTE_ADDR'],
'username': username,
}
)
def has_user_registration():
from sentry import features, options
return features.has('auth:register') and options.get('auth.allow-registration')
def is_user_signed_request(request):
"""
This function returns True if the request is a signed valid link
"""
try:
return request.user_from_signed_request
except AttributeError:
return False
class EmailAuthBackend(ModelBackend):
"""
Authenticate against django.contrib.auth.models.User.
Supports authenticating via an email address or a username.
"""
def authenticate(self, username=None, password=None):
users = find_users(username)
if users:
for user in users:
try:
if user.password and user.check_password(password):
return user
except ValueError:
continue
return None
| bsd-3-clause | -7,803,873,880,384,791,000 | 28.22973 | 85 | 0.642395 | false |
finron/finepy | fine/models/user.py | 1 | 9780 | #!/usr/bin/env python
# coding:utf-8
"""
user.py
~~~~~~~
"""
from datetime import datetime
import hashlib
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from markdown import markdown
# import bleach
from flask import current_app, request, url_for
from flask.ext.login import UserMixin, AnonymousUserMixin
from sqlalchemy.exc import IntegrityError
from sqlalchemy import or_
from random import seed
import forgery_py
from fine.exceptions import ValidationError
from permission import Permission, Role
from fine import db, login_manager
# class Follow(db.Model):
# __tablename__ = 'follows'
# follower_id = db.Column(db.Integer, db.ForeignKey('users.id'),
# primary_key=True)
# followed_id = db.Column(db.Integer, db.ForeignKey('users.id'),
# primary_key=True)
# timestamp = db.Column(db.DateTime, default=datetime.utcnow)
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(42), unique=True, index=True)
username = db.Column(db.String(64), unique=True, index=True)
social_id = db.Column(db.String(64), unique=True, index=True)
role_id = db.Column(db.Integer)
role = db.relationship('Role', foreign_keys=[role_id],
primaryjoin='User.role_id == Role.id',
backref='users')
password_hash = db.Column(db.String(128))
confirmed = db.Column(db.Boolean, default=False)
name = db.Column(db.String(64))
location = db.Column(db.String(64))
about_me = db.Column(db.String(128))
member_since = db.Column(db.DateTime, default=datetime.utcnow)
last_seen = db.Column(db.DateTime, default=datetime.utcnow)
avatar_hash = db.Column(db.String(32))
# followed = db.relationship('Follow',
# foreign_keys=[Follow.follower_id],
# backref=db.backref('follower', lazy='joined'),
# lazy='dynamic',
# cascade='all, delete-orphan')
# followers = db.relationship('Follow',
# foreign_keys=[Follow.followed_id],
# backref=db.backref('followed', lazy='joined'),
# lazy='dynamic',
# cascade='all, delete-orphan')
@staticmethod
def generate_fake(count=12):
seed()
u_query = User.query
for i in xrange(count):
email = forgery_py.internet.email_address()
username=forgery_py.internet.user_name(True)
user = u_query.filter(or_(User.email==email,
User.username==username)).first()
if user:
continue
u = User(email=email,
username=username,
password=forgery_py.lorem_ipsum.word(),
confirmed=True,
name=forgery_py.name.full_name(),
location=forgery_py.address.city(),
about_me=forgery_py.lorem_ipsum.sentence(),
member_since=forgery_py.date.date(True))
u.avatar_hash = u.gravatar()
db.session.add(u)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
# @staticmethod
# def add_self_follows():
# for user in User.query.all():
# if not user.is_following(user):
# user.follow(user)
# db.session.add(user)
# db.session.commit()
def __init__(self, **kwargs):
super(User, self).__init__(**kwargs)
if not self.role:
if self.email == current_app.config['FINEPY_ADMIN_EMAIL']:
self.role = Role.query.filter_by(
permissions=Permission.ADMIN).first()
if not self.role:
self.role = Role.query.filter_by(default=True).first()
if self.email and not self.avatar_hash:
self.avatar_hash = hashlib.md5(
self.email.encode('utf-8')).hexdigest()
# self.followed.append(Follow(followed=self))
@property
def password(self):
raise AttributeError('password is not readable')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def generate_confirmation_token(self, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'confirm': self.id})
def confirm(self, token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return False
if data.get('confirm') != self.id:
return False
self.confirmed = True
db.session.add(self)
return True
def generate_reset_token(self, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'],expiration)
return s.dumps({'reset': self.id})
def reset_password(self, token, new_password):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return False
if data.get('reset') != self.id:
return False
self.password = new_password
db.session.add(self)
return True
def generate_email_change_token(self, new_email, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'change_email': self.id, 'new_email': new_email})
def change_email(self, token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return False
if data.get('change_email') != self.id:
return False
new_email = data.get('new_email')
if not new_email:
return False
if self.query.filter_by(email=new_email).first():
return False
self.email = new_email
self.avatar_hash = hashlib.md5(
self.email.encode('utf-8')).hexdigest()
db.session.add(self)
return True
def can(self, permissions):
return (self.role and
(self.role.permissions & permissions) == permissions)
def is_admin(self):
return self.can(Permission.ADMIN)
def ping(self):
self.last_seen = datetime.utcnow()
db.session.add(self)
def gravatar(self, size=24, default='identicon', rating='g'):
"""Generate avatar
        #TODO: maybe change to https://github.com/maethor/avatar-generator
"""
if request.is_secure:
url = 'https://secure.gravatar.com/avatar'
else:
url = 'http://cn.gravatar.com/avatar'
# GitHub login cannot get email when user has no public email
# use username as avatar
hash = self.avatar_hash
if not hash:
if self.email:
hash = hashlib.md5(
self.email.encode('utf-8')).hexdigest()
else:
hash = hashlib.md5(
self.username.encode('utf-8')).hexdigest()
return '{url}/{hash}?s={size}&d={default}&r={rating}'.format(
url=url, hash=hash, size=size, default=default, rating=rating)
# def follow(self, user):
# if not self.is_following(user):
# f = Follow(follower=self, followed=user)
# db.session.add(f)
def unfollow(self, user):
f = self.followed.filter_by(followed_id=user.id).first()
if f:
db.session.delete(f)
def is_following(self, user):
return self.followed.filter_by(
followed_id=user.id).first()
def is_followed_by(self, user):
return self.followers.filter_by(
follower_id=user.id).first()
# @property
# def followed_posts(self):
# return Post.query.join(Follow,
# Follow.followed_id == Post.author_id).filter(
# Follow.follower_id == self.id)
def to_json(self):
json_user = {
'url': url_for('api.get_post', id=self.id, _external=True),
'username': self.username,
'member_since': self.member_since,
'last_seen': self.last_seen,
'posts': url_for('api.get_user_posts', id=self.id, _external=True),
'followed_posts': url_for('api.get_user_followed_posts',
id=self.id, _external=True),
'post_count': self.posts.count()
}
return json_user
def generate_auth_token(self, expiration):
s = Serializer(current_app.config['SECRET_KEY'],
expiration=expiration)
return s.dumps({'id': self.id}).decode('ascii')
@staticmethod
def verify_auth_token(token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return None
return User.query.get(data['id'])
def __repr__(self):
return '<User %d>' % self.id
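# Illustrative sketch (added; not part of the original module): the write-only
# password property and the auth-token round trip defined above. The user data
# is a made-up assumption and a configured app/database is required.
def _example_token_roundtrip():
    u = User(email='[email protected]', username='alice', password='s3cret')
    assert u.verify_password('s3cret')
    token = u.generate_auth_token(expiration=3600)
    return User.verify_auth_token(token)  # returns the same user if the token is valid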
class AnonymUser(AnonymousUserMixin):
def can(self, permissions):
return False
def is_admin(self):
return False
login_manager.anonymous_user = AnonymUser
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
| bsd-3-clause | -4,960,597,024,237,958,000 | 34.053763 | 79 | 0.566667 | false |
kajojify/heralding | heralding/reporting/syslog_logger.py | 1 | 1277 | # Copyright (C) 2017 Johnny Vestergaard <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import syslog
import logging
from heralding.reporting.base_logger import BaseLogger
logger = logging.getLogger(__name__)
class SyslogLogger(BaseLogger):
def __init__(self):
super().__init__()
logger.debug('Syslog logger started')
def handle_log_data(self, data):
message = "Authentication from {0}:{1}, with username: {2} " \
"and password: {3}.".format(data['source_ip'], data['source_port'],
data['username'], data['password'])
syslog.syslog(syslog.LOG_ALERT, message)
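# Illustrative sketch (added; not part of the original module): the shape of the
# `data` dict consumed by handle_log_data() above. The values are assumptions
# inferred from the format string, not captured honeypot data.
def _example_report(syslog_logger):
    data = {'source_ip': '203.0.113.5', 'source_port': 52022,
            'username': 'root', 'password': 'hunter2'}
    syslog_logger.handle_log_data(data)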
| gpl-3.0 | -5,035,940,351,151,663,000 | 37.69697 | 85 | 0.684417 | false |
uclouvain/osis | assessments/views/scores_responsible.py | 1 | 4581 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import PermissionRequiredMixin, LoginRequiredMixin
from django.http import HttpResponseRedirect, JsonResponse
from django.shortcuts import render
from django.urls import reverse
from django_filters.views import FilterView
from assessments.api.serializers.scores_responsible import ScoresResponsibleListSerializer
from assessments.forms.scores_responsible import ScoresResponsibleFilter
from attribution import models as mdl_attr
from attribution.business.score_responsible import get_attributions_data
from attribution.models.attribution import Attribution
from base.models import session_exam_calendar
from base.models.learning_unit_year import LearningUnitYear
from base.utils.cache import CacheFilterMixin
from osis_role.contrib.views import permission_required
class ScoresResponsibleSearch(LoginRequiredMixin, PermissionRequiredMixin, CacheFilterMixin, FilterView):
model = LearningUnitYear
paginate_by = 20
template_name = "scores_responsible/list.html"
filterset_class = ScoresResponsibleFilter
permission_required = 'assessments.view_scoresresponsible'
def get_filterset_kwargs(self, filterset_class):
return {
**super().get_filterset_kwargs(filterset_class),
'academic_year': session_exam_calendar.current_sessions_academic_year()
}
def render_to_response(self, context, **response_kwargs):
if self.request.is_ajax():
serializer = ScoresResponsibleListSerializer(context['object_list'], many=True)
return JsonResponse({'object_list': serializer.data})
return super().render_to_response(context, **response_kwargs)
@login_required
@permission_required('assessments.change_scoresresponsible', raise_exception=True)
def scores_responsible_management(request):
context = {
'course_code': request.GET.get('course_code'),
'learning_unit_title': request.GET.get('learning_unit_title'),
'tutor': request.GET.get('tutor'),
'scores_responsible': request.GET.get('scores_responsible')
}
learning_unit_year_id = request.GET.get('learning_unit_year').strip('learning_unit_year_')
attributions_data = get_attributions_data(request.user, learning_unit_year_id, '-score_responsible')
context.update(attributions_data)
return render(request, 'scores_responsible_edit.html', context)
@login_required
@permission_required('assessments.change_scoresresponsible', raise_exception=True)
def scores_responsible_add(request, pk):
if request.POST.get('action') == "add":
mdl_attr.attribution.clear_scores_responsible_by_learning_unit_year(pk)
if request.POST.get('attribution'):
attribution_id = request.POST.get('attribution').strip('attribution_')
attribution = Attribution.objects.get(pk=attribution_id)
attributions = mdl_attr.attribution.Attribution.objects \
.filter(learning_unit_year=attribution.learning_unit_year) \
.filter(tutor=attribution.tutor)
for a_attribution in attributions:
a_attribution.score_responsible = True
a_attribution.save()
return HttpResponseRedirect(reverse('scores_responsible_list'))
| agpl-3.0 | 3,867,256,165,718,595,600 | 47.723404 | 105 | 0.713974 | false |
deepmind/open_spiel | open_spiel/python/pytorch/nfsp_pytorch_test.py | 1 | 3036 | # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.nfsp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.common_utils import TestCase
from open_spiel.python import rl_environment
from open_spiel.python.pytorch import nfsp
class NFSPTest(TestCase):
def test_run_kuhn(self):
env = rl_environment.Environment("kuhn_poker")
state_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
agents = [
nfsp.NFSP( # pylint: disable=g-complex-comprehension
player_id,
state_representation_size=state_size,
num_actions=num_actions,
hidden_layers_sizes=[16],
reservoir_buffer_capacity=10,
anticipatory_param=0.1) for player_id in [0, 1]
]
for unused_ep in range(10):
time_step = env.reset()
while not time_step.last():
current_player = time_step.observations["current_player"]
current_agent = agents[current_player]
agent_output = current_agent.step(time_step)
time_step = env.step([agent_output.action])
for agent in agents:
agent.step(time_step)
class ReservoirBufferTest(TestCase):
def test_reservoir_buffer_add(self):
reservoir_buffer = nfsp.ReservoirBuffer(reservoir_buffer_capacity=10)
self.assertEqual(len(reservoir_buffer), 0)
reservoir_buffer.add("entry1")
self.assertEqual(len(reservoir_buffer), 1)
reservoir_buffer.add("entry2")
self.assertEqual(len(reservoir_buffer), 2)
self.assertIn("entry1", reservoir_buffer)
self.assertIn("entry2", reservoir_buffer)
def test_reservoir_buffer_max_capacity(self):
reservoir_buffer = nfsp.ReservoirBuffer(reservoir_buffer_capacity=2)
reservoir_buffer.add("entry1")
reservoir_buffer.add("entry2")
reservoir_buffer.add("entry3")
self.assertEqual(len(reservoir_buffer), 2)
def test_reservoir_buffer_sample(self):
replay_buffer = nfsp.ReservoirBuffer(reservoir_buffer_capacity=3)
replay_buffer.add("entry1")
replay_buffer.add("entry2")
replay_buffer.add("entry3")
samples = replay_buffer.sample(3)
self.assertIn("entry1", samples)
self.assertIn("entry2", samples)
self.assertIn("entry3", samples)
if __name__ == "__main__":
run_tests()
| apache-2.0 | 5,683,369,881,241,719,000 | 32.362637 | 74 | 0.70191 | false |
kudrom/lupulo | lupulo/exceptions.py | 1 | 1384 | # -*- encoding: utf-8 -*-
# Copyright (C) 2015 Alejandro López Espinosa (kudrom)
class NotFoundDescriptor(Exception):
def __init__(self, name):
self.name = name
def __str__(self):
return "Descriptor %s couldn't have been found" % self.name
class NotListenerFound(Exception):
def __init__(self, name):
self.name = name
def __str__(self):
return "Listener %s couldn't have been found" % self.name
class InvalidListener(Exception):
def __init__(self, name):
self.name = name
def __str__(self):
return "Listener %s is not a subclass of twisted Service" % self.name
class RequirementViolated(Exception):
def __init__(self, msg):
self.message = msg
def __str__(self):
return self.message
class RequiredAttributes(Exception):
def __init__(self, name):
self.name = name
def __str__(self):
return "The layout %s lacks of required attributes."
class UrlInvalid(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class InvalidResource(Exception):
def __init__(self, url, name):
self.url = url
self.name = name
def __str__(self):
return "%s class is not a subclass of LupuloResource " \
"so %s is discarded as a valid url" % (self.name, self.url)
| gpl-2.0 | -4,826,174,345,010,988,000 | 22.440678 | 77 | 0.597975 | false |
edward-sihler/ais100wGPS | ais100GPS-time.py | 1 | 2082 | #! /usr/bin/python
# by edward silher for collecting gps data in conjuction with AIS data
# [email protected]
import serial
import subprocess
import os
from gps import *
from time import *
import time
import threading
gpsd = None #seting the global variable
os.system('clear') #clear the terminal (optional)
class GpsPoller(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
global gpsd #bring it in scope
gpsd = gps(mode=WATCH_ENABLE) #starting the stream of info
self.current_value = None
self.running = True #setting the thread running to true
def run(self):
global gpsd
while gpsp.running:
gpsd.next() #this will continue to loop and grab EACH set of gpsd info to clear the buffer
# def utc(self):
# return gpsd.utc
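# Illustrative sketch (added; not part of the original script): minimal use of
# GpsPoller outside main(). The 2-second warm-up delay is an assumption to let
# gpsd deliver a first fix.
def _example_poll_once():
    poller = GpsPoller()
    poller.start()
    time.sleep(2)
    fix = (gpsd.fix.latitude, gpsd.fix.longitude, gpsd.utc)
    poller.running = False
    poller.join()
    return fix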
def main (argv):
#find the port with the AIS reciver on it
usbPort = subprocess.check_output("dmesg | grep \"FTDI.*now attached to ttyUSB\"", shell=True)
i = usbPort.rfind("ttyUSB")
aisPort = '/dev/' + usbPort[i:].strip()
#aisPort = '/dev/ttyUSB0'
ais = serial.Serial(aisPort, 38400, stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE)
global gpsp
gpsp = GpsPoller() # create the thread
try:
gpsp.start() # start it up
while True:
#It may take a second or two to get good data
#print gpsd.fix.latitude,', ',gpsd.fix.longitude,' Time: ',gpsd.utc
#os.system('clear')
msg = ''
msg = str(gpsd.utc)
msg += ", " + str(gpsd.fix.latitude)
msg += ", " + str(gpsd.fix.longitude )
print (msg)
#try:
# msg += ", " + ais.readline().strip()
# print(msg)
#except serial.SerialException:
# print(msg)
# time.sleep(5)
#print (msg)
#time.sleep(5) #set to whatever
except (KeyboardInterrupt, SystemExit): #when you press ctrl+c
print "\nKilling Thread..."
gpsp.running = False
gpsp.join() # wait for the thread to finish what it's doing
print "Done.\nExiting."
if __name__ == '__main__':
main(sys.argv[1:])
| apache-2.0 | 9,214,501,873,434,423,000 | 27.135135 | 121 | 0.646974 | false |
intel-analytics/analytics-zoo | pyzoo/zoo/models/recommendation/recommender.py | 1 | 4590 | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from pyspark import RDD
from zoo.models.common import *
from zoo.common.utils import callZooFunc
if sys.version >= '3':
long = int
unicode = str
class UserItemFeature(object):
"""
    Represent user-item records with features.
Each record should contain the following fields:
user_id: Positive int.
item_id: Positive int.
sample: Sample which consists of feature(s) and label(s).
"""
def __init__(self, user_id, item_id, sample, bigdl_type="float"):
self.user_id = int(user_id)
self.item_id = int(item_id)
self.sample = sample
self.bigdl_type = bigdl_type
def __reduce__(self):
return UserItemFeature, (self.user_id, self.item_id, self.sample)
def __str__(self):
return "UserItemFeature [user_id: %s, item_id: %s, %s]" % (
self.user_id, self.item_id, self.sample)
class UserItemPrediction(object):
"""
Represent the prediction results of user-item pairs.
Each prediction record will contain the following information:
user_id: Positive int.
item_id: Positive int.
prediction: The prediction (rating) for the user on the item.
probability: The probability for the prediction.
"""
def __init__(self, user_id, item_id, prediction, probability, bigdl_type="float"):
self.user_id = user_id
self.item_id = item_id
self.prediction = prediction
self.probability = probability
self.bigdl_type = bigdl_type
def __reduce__(self):
return UserItemPrediction, (self.user_id, self.item_id, self.prediction, self.probability)
def __str__(self):
return "UserItemPrediction [user_id: %s, item_id: %s, prediction: %s, probability: %s]" % (
self.user_id, self.item_id, self.prediction, self.probability)
class Recommender(KerasZooModel):
"""
The base class for recommendation models in Analytics Zoo.
"""
def predict_user_item_pair(self, feature_rdd):
"""
Predict for user-item pairs.
# Arguments
feature_rdd: RDD of UserItemFeature.
:return RDD of UserItemPrediction.
"""
result_rdd = callZooFunc(self.bigdl_type, "predictUserItemPair",
self.value,
self._to_tuple_rdd(feature_rdd))
return self._to_prediction_rdd(result_rdd)
def recommend_for_user(self, feature_rdd, max_items):
"""
Recommend a number of items for each user.
# Arguments
feature_rdd: RDD of UserItemFeature.
max_items: The number of items to be recommended to each user. Positive int.
:return RDD of UserItemPrediction.
"""
result_rdd = callZooFunc(self.bigdl_type, "recommendForUser",
self.value,
self._to_tuple_rdd(feature_rdd),
int(max_items))
return self._to_prediction_rdd(result_rdd)
def recommend_for_item(self, feature_rdd, max_users):
"""
Recommend a number of users for each item.
# Arguments
feature_rdd: RDD of UserItemFeature.
max_users: The number of users to be recommended to each item. Positive int.
:return RDD of UserItemPrediction.
"""
result_rdd = callZooFunc(self.bigdl_type, "recommendForItem",
self.value,
self._to_tuple_rdd(feature_rdd),
int(max_users))
return self._to_prediction_rdd(result_rdd)
@staticmethod
def _to_tuple_rdd(feature_rdd):
assert isinstance(feature_rdd, RDD), "feature_rdd should be RDD of UserItemFeature"
return feature_rdd.map(lambda x: (x.user_id, x.item_id, x.sample))
@staticmethod
def _to_prediction_rdd(result_rdd):
return result_rdd.map(lambda y: UserItemPrediction(int(y[0]), int(y[1]), int(y[2]), y[3]))
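# Illustrative usage sketch (added; not part of the original module). `model` is
# assumed to be a concrete Recommender subclass (e.g. NeuralCF) and `feature_rdd`
# an RDD of UserItemFeature built elsewhere; both names are placeholders.
def _example_recommend(model, feature_rdd):
    pair_predictions = model.predict_user_item_pair(feature_rdd)
    top_items_per_user = model.recommend_for_user(feature_rdd, max_items=3)
    return pair_predictions.take(5), top_items_per_user.take(5)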
| apache-2.0 | 1,862,403,754,328,875,000 | 33.253731 | 99 | 0.622876 | false |
botswana-harvard/edc-calendar | edc_calendar/models/event.py | 1 | 1658 | from django.db import models
from django.urls.base import reverse
from edc_base.model_mixins import BaseUuidModel
from edc_base.sites import SiteModelMixin
from edc_search.model_mixins import SearchSlugManager
from edc_search.model_mixins import SearchSlugModelMixin as Base
from django.conf import settings
class SearchSlugModelMixin(Base):
def get_search_slug_fields(self):
fields = super().get_search_slug_fields()
fields.append('title')
return fields
class Meta:
abstract = True
class ContactManager(SearchSlugManager, models.Manager):
    def get_by_natural_key(self, subject_identifier):
        return self.get(subject_identifier=subject_identifier)
class Event(
SiteModelMixin, SearchSlugModelMixin, BaseUuidModel):
subject_identifier = models.CharField(
verbose_name="Subject Identifier",
max_length=50)
title = models.CharField(
verbose_name='Event Title',
max_length=200)
description = models.TextField(
verbose_name='Event description')
start_time = models.DateTimeField(
verbose_name='Start date and time')
end_time = models.DateTimeField(
verbose_name='End date and time')
def __str__(self):
return f'{self.subject_identifier} {self.title}'
class Meta:
app_label = "edc_calendar"
@property
def get_html_url(self):
dashboard_url = settings.DASHBOARD_URL_NAMES.get('subject_dashboard_url')
url = reverse(f'{dashboard_url}', kwargs={'subject_identifier': self.subject_identifier})
return f'<a href="{url}">{self.subject_identifier}: {self.title}</a>'
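# Illustrative sketch (added; not part of the original app): creating an Event
# for a subject. The identifier and times are made-up example values.
def _example_create_event():
    from datetime import timedelta
    from django.utils import timezone
    start = timezone.now()
    return Event.objects.create(
        subject_identifier='066-12345678-9',
        title='Follow-up visit',
        description='Scheduled follow-up visit.',
        start_time=start,
        end_time=start + timedelta(hours=1),
    )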
| gpl-2.0 | 6,785,288,702,807,448,000 | 27.586207 | 97 | 0.687575 | false |
lnhubbell/tweetTrack | tweetTrack/app/views.py | 1 | 2937 | from os import environ
from random import random
import json
import tweepy
import requests
from requests.exceptions import ConnectionError
from flask import render_template, request, jsonify
from flask.ext.mail import Message
from tweetTrack.app import app, mail, db
from tweetTrack.app.forms import TwitterForm, ContactForm
from tweetTrack.app.forms import UserResponseForm, APIRequestForm
from tweetTrack.app.models import UserResponse
@app.route('/')
@app.route('/index')
def index():
contact_form = ContactForm()
twitter_form = TwitterForm()
api_request_form = APIRequestForm()
user_response_form = UserResponseForm()
return render_template(
'index.html',
contact_form=contact_form,
twitter_form=twitter_form,
api_request_form=api_request_form,
user_response_form=user_response_form
)
@app.route('/twitter/<user_name>')
def user_tweets(user_name):
try:
url = app.config['TRACKING_API_URL']
data = json.dumps({'screen_name': user_name})
headers = {
'Content-Type': 'application/json',
'Content-Length': len(data)
}
response = requests.post(url, data=data, headers=headers)
response.raise_for_status()
return jsonify(response.json())
except ConnectionError:
pass
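# Illustrative sketch (added; not part of the original app): the JSON payload and
# headers that user_tweets() above sends to the tracking API. The screen name is
# a placeholder.
def _example_tracking_request(screen_name='example_user'):
    url = app.config['TRACKING_API_URL']
    data = json.dumps({'screen_name': screen_name})
    headers = {'Content-Type': 'application/json', 'Content-Length': len(data)}
    return requests.post(url, data=data, headers=headers)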
# I know these methods look a lot alike, which means I should have made
# a request builder function, but there are only so many hours in the day
@app.route('/api-request/<email>', methods=['GET', 'POST'])
def api_request(email):
try:
url = app.config['REQUEST_API_URL']
data = json.dumps({'email': email})
headers = {
'Content-Type': 'application/json',
'Content-Length': len(data)
}
response = requests.get(url, data=data, headers=headers)
response.raise_for_status()
        print(response.json().get('success'))
return jsonify(response=response.json())
except ConnectionError:
return '<p>Something went wrong with you request</p>'
@app.route('/response')
def collect_response():
name = request.args.get('name', False)
response = request.args.get('response', False)
prediction = request.args.get('prediction', False)
user_response = UserResponse(name, response, prediction)
db.session.add(user_response)
db.session.commit()
return 'Done'
@app.route('/contact/', methods=['GET', 'POST'])
def contact():
name = request.args.get('name', 'Name error')
subject = request.args.get('subject', 'Subject Error')
email = request.args.get('email', 'Email Error')
full_subject = '{} - From: {} @ {}'.format(subject, name, email)
msg = Message(
full_subject,
sender=email,
recipients=['[email protected]']
)
msg.body = request.args.get('message', 'Message error')
mail.send(msg)
return render_template('message_sent.html', name=name)
| mit | 8,270,377,790,039,223,000 | 32 | 71 | 0.657814 | false |
imortkz/XmlParse | XmlParse.py | 1 | 19190 | #! /usr/bin/python3.4
#! /usr/bin/python
#
# test XML parse class
# Copyright (c) 2015 Valentin Kim
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import sys
import os
import subprocess
import platform
import urllib.request as ur
import xml.etree.ElementTree as et
import re
import unittest
# class that parses an XML document according to the specification, see README
class XmlParse:
    # class initialization: build the dictionary of error codes and descriptions, and the constants
    def __init__(self):
        self.errors = {100: 'the parameter passed is not a string or is empty.',
                       101: 'unable to open the HTTP link.',
                       102: 'unable to open the file.',
                       103: 'an invalid value was passed to the max_size parameter',
                       104: 'an invalid value was passed to the parent_tag parameter',
                       105: 'an invalid value was passed to the parent_attr parameter',
                       106: 'an invalid value was passed to the child_tag parameter',
                       107: 'an invalid value was passed to the child_attr parameter',
                       200: 'the document is not a valid XML document.',
                       201: 'the document size exceeds the allowed maximum.',
                       202: 'the element is missing the required attribute',
                       203: 'there are elements with duplicate values of the required attribute',
                       }
        self.__max_size = 16384
        self.__parent_tag = ''
        self.__parent_attr = ''
        self.__child_tag = ''
        self.__child_attr = ''
        self.__xml_text = ''
    # max_size property - the maximum size of an XML document to process
    @property
    def max_size(self):
        return self.__max_size
    @max_size.setter
    def max_size(self, size):
        if not isinstance(size, int):
            raise XmlParseException(self.__error(103, size), 103)
        if size == 0 or size < 0 or size >= sys.maxsize:
            raise XmlParseException(self.__error(103, size), 103)
        self.__max_size = int(size)
    # parent_tag property - the parent tag to parse
    @property
    def parent_tag(self):
        return self.__parent_tag
    @parent_tag.setter
    def parent_tag(self, tag_name):
        if not self.__check_str_input (tag_name):
            raise XmlParseException(self.__error(104, tag_name), 104)
        else:
            self.__parent_tag = tag_name
    # parent_attr property - the parent attribute to parse
    @property
    def parent_attr(self):
        return self.__parent_attr
    @parent_attr.setter
    def parent_attr(self, tag_name):
        if not self.__check_str_input (tag_name):
            raise XmlParseException(self.__error(105, tag_name), 105)
        else:
            self.__parent_attr = tag_name
    # child_tag property - the child tag to parse
    @property
    def child_tag(self):
        return self.__child_tag
    @child_tag.setter
    def child_tag(self, tag_name):
        if not self.__check_str_input (tag_name):
            raise XmlParseException(self.__error(106, tag_name), 106)
        else:
            self.__child_tag = tag_name
    # child_attr property - the child attribute to parse
    @property
    def child_attr(self):
        return self.__child_attr
    @child_attr.setter
    def child_attr(self, tag_name):
        if not self.__check_str_input (tag_name):
            raise XmlParseException(self.__error(107, tag_name), 107)
        else:
            self.__child_attr = tag_name
    # validate a string parameter
    def __check_str_input(self, tag_name):
        if not isinstance(tag_name, str):
            return False
        if (len(tag_name) == 0) or (len(tag_name) >= sys.maxsize):
            return False
        return True
    # build an explanatory error message for the exception
    def __error(self, error_code, description):
        return str(error_code)+' - '+self.errors[error_code] + ' (' + str(description) + ')'
    # basic validation of the input data in the case of a local file
    def __check_file(self, filename):
        # does the input parameter contain any data
        if len(filename) != 0:
            # does the file exist at the given path, and do we have permission to open it for reading
            try:
                xmlfile = open(filename, 'rt')
            except:
                raise XmlParseException(self.__error(102, filename), 102)
            # is the file size within the allowed range
            if os.path.getsize(filename) > self.__max_size:
                raise XmlParseException(self.__error(201, str(os.path.getsize(filename))+' > ' +
                                                     str(self.__max_size)), 201)
            # basic check that the XML document is valid
            try:
                self.__xml_text = xmlfile.read()
                et.fromstring(self.__xml_text)
            except:
                raise XmlParseException(self.__error(200, filename), 200)
        else:
            raise XmlParseException(self.__error(100, filename), 100)
        xmlfile.close()
        return 0
    # basic validation of the input data in the case of an HTTP link
    def __check_address(self, address):
        if len(address) != 0:
            # check that the URL is valid
            try:
                conn = ur.urlopen(address)
            except:
                raise XmlParseException(self.__error(101, address), 101)
            # check the HTTP response code; if it differs from 200 OK, raise an exception
            if conn.getcode() != 200:
                raise XmlParseException(self.__error(101, address), 101)
            # check the size of the HTTP document
            headers = conn.info()
            if int(headers.get('Content-Length')) > self.__max_size:
                raise XmlParseException(self.__error(201, headers.get('Content-Length')), 201)
        else:
            raise XmlParseException(self.__error(100, address), 100)
        conn = ur.urlopen(address)
        self.__xml_text = conn.read()
        return 0
    # check the XML file for validity and conformance to the specification requirements
    def __check_xml(self, xml_string):
        # if the XML document is larger than the allowed maximum
        if len(xml_string) > self.__max_size:
            raise XmlParseException(self.__error(201, len(xml_string)), 201)
        try:
            root = et.fromstring(xml_string)
        except:
            raise XmlParseException(self.__error(200, xml_string), 200)
        parent_attr_list = []
        if not self.__check_str_input (self.parent_tag):
            raise XmlParseException(self.__error(104, self.parent_tag), 104)
        if not self.__check_str_input (self.parent_attr):
            raise XmlParseException(self.__error(105, self.parent_tag), 105)
        if not self.__check_str_input (self.child_tag):
            raise XmlParseException(self.__error(106, self.parent_tag), 106)
        if not self.__check_str_input (self.child_attr):
            raise XmlParseException(self.__error(107, self.parent_tag), 107)
        # find all parent_tag elements
        for parent_tag in root.iter(self.parent_tag):
            # check that the parent_attr attribute is present on the parent_tag element
            if self.parent_attr not in parent_tag.attrib:
                raise XmlParseException(self.__error(202, str(self.parent_tag)+str(parent_tag.attrib)), 202)
            # check that the parent_attr attribute is unique across parent_tag nodes
            if parent_tag.attrib[self.parent_attr] in parent_attr_list:
                raise XmlParseException(self.__error(203, str(parent_tag.attrib[self.parent_attr])), 203)
            parent_attr_list.append(parent_tag.attrib[self.parent_attr])
            for child in parent_tag:
                if child.tag == self.child_tag:
                    # check that the child_attr attribute is present on the child_tag node
                    if self.child_attr not in child.attrib:
                        raise XmlParseException(self.__error(202, str(child.attrib)), 202)
        return 0
    # parse the XML document from a string
    def __parse_xml(self, xml_string):
        result = {}
        temp = []
        root = et.fromstring(xml_string)
        for parent_tag in root.iter(self.parent_tag):
            for child_tag in parent_tag:
                if child_tag.tag == self.child_tag:
                    temp.append(child_tag.attrib[self.child_attr])
            if len(temp) != 0:
                result[parent_tag.attrib[self.parent_attr]] = temp.copy()
                temp.clear()
        return result
    # public method that parses an XML document according to the specification. Input: a URL or a local file path.
    def parse(self, user_input):
        # is it a string?
        if not isinstance(user_input, str):
            raise XmlParseException(self.__error(100, user_input), 100)
        # is it a URL?
        parsed_input = re.findall('^http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',
                                  user_input)
        if len(parsed_input) == 1:
            # a URL was passed as the parameter
            # basic validation of the input data
            self.__check_address(parsed_input[0])
            # check the XML for semantic errors
            self.__check_xml(self.__xml_text)
            # extract the data
            return self.__parse_xml(self.__xml_text)
        else:
            # a local file path was passed as the parameter
            # basic validation of the input data
            self.__check_file(user_input)
            # check the XML for semantic errors
            self.__check_xml(self.__xml_text)
            # extract the data
return self.__parse_xml(self.__xml_text)
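# Illustrative usage sketch (added for clarity; not part of the original class).
# The tag/attribute names and the file path mirror the unit tests below and are
# assumptions, not requirements of the class itself.
def _example_xmlparse_usage():
    parser = XmlParse()
    parser.parent_tag = 'input'
    parser.parent_attr = 'id'
    parser.child_tag = 'tuningSetup'
    parser.child_attr = 'id'
    # Returns a dict mapping each <input id=...> to the ids of its <tuningSetup> children,
    # e.g. {'1': ['1', '2', '3'], '2': ['1'], '100': ['0']}
    return parser.parse('./XmlParseTest/source.xml')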
# exception class for XmlParse()
class XmlParseException(Exception):
def __init__(self, message, error):
self.message = message
self.error_code = error
# unit-test class for XmlParse()
class XmlParseTest (unittest.TestCase):
def setUp(self):
        # a fresh XmlParse instance is created for every test (set up before each test, discarded afterwards)
self.xmlParse = XmlParse()
self.localPrefix = './XmlParseTest/'
self.xmlParse.parent_tag = 'input'
self.xmlParse.parent_attr = 'id'
self.xmlParse.child_tag = 'tuningSetup'
self.xmlParse.child_attr = 'id'
def tearDown(self):
pass
def __test_input(self, xml_string):
with self.assertRaises(XmlParseException) as cm:
self.xmlParse.parse(xml_string)
return cm.exception.error_code
    # 1. Parsing of a correct XML document
    # 1.1. The originally supplied source text
    # 1.2. A document with several <input> nodes and no nested <tuningSetup> nodes
    # 1.3. A document with several <input> nodes, one nested inside another and containing a <tuningSetup> node
    # 1.4. A document with a <tuningSetup> node nested inside another <tuningSetup> node
    # 1.5. A document with an <input> node and a nested <tuningSetup> node, nested arbitrarily deep
def test_1_1(self):
test_xml = 'source.xml'
self.assertEqual(self.xmlParse.parse(self.localPrefix + test_xml),
{'100': ['0'], '1': ['1', '2', '3'], '2': ['1']})
def test_1_2(self):
test_xml = 'test_1_2.xml'
self.assertEqual(self.xmlParse.parse(self.localPrefix + test_xml), {})
def test_1_3(self):
test_xml = 'test_1_3.xml'
self.assertEqual(self.xmlParse.parse(self.localPrefix + test_xml), {'1': ['1'], '2': ['2']})
def test_1_4(self):
test_xml = 'test_1_4.xml'
self.assertEqual(self.xmlParse.parse(self.localPrefix + test_xml), {'2': ['2'], '1': ['1']})
def test_1_5(self):
test_xml = 'test_1_5.xml'
self.assertEqual(self.xmlParse.parse(self.localPrefix + test_xml), {'1': ['1', '4'], '2': ['2'], '3': ['1']})
    # 2. Testing the input parameters
    # 2.1. user_input parameter of the wrong type
    # 2.1.1. A number
    # 2.1.2. A list
    # 2.1.3. Null
    # 2.1.4. A negative number
    # 2.1.5. An empty dictionary
    # 2.1.6. A dictionary with elements
    # 2.2. A non-existent file, an invalid URL
    # 2.3. A non-existent URL
    # 2.4. An XML document that is too large
    # 2.5. A document that is too large (with an increased XmlParse.max_size)
    # 2.6. An invalid XmlParse.max_size value
    # 2.7. An invalid XmlParse.parent_tag value
    # 2.8. An invalid XmlParse.parent_attr value
    # 2.9. An invalid XmlParse.child_tag value
    # 2.10. An invalid XmlParse.child_attr value
def test_2_1(self):
test_data = [42, ['1', '2'], '', -1, {}, {'1': '2'}]
for test_input in test_data:
self.assertEqual(self.__test_input(test_input), 100)
def test_2_2(self):
self.assertEqual(self.__test_input('./XmlParseTest/nonexisted.file'), 102)
self.assertEqual(self.__test_input('www.google.com'), 102)
def test_2_3(self):
self.assertEqual(self.__test_input('http://nonexist.url'), 101)
def test_2_4(self):
test_xml = 'test_2_4.xml'
self.assertEqual(self.__test_input(self.localPrefix + test_xml), 201)
def test_2_5(self):
self.xmlParse.max_size = 32768
test_files = ['test_2_4.xml', 'test_2_5.xml']
self.assertEqual(self.xmlParse.parse(self.localPrefix + test_files[0]), {})
self.assertEqual(self.__test_input(self.localPrefix + test_files[1]), 201)
def test_2_6(self):
test_data = [0, -42, 'test', []]
for test_size in test_data:
with self.assertRaises(XmlParseException) as cm:
self.xmlParse.max_size = test_size
self.assertEqual(cm.exception.error_code, 103)
def test_2_7(self):
test_data = [0, -42, '', []]
for test_tag in test_data:
with self.assertRaises(XmlParseException) as cm:
self.xmlParse.parent_tag = test_tag
self.assertEqual(cm.exception.error_code, 104)
def test_2_8(self):
test_data = [0, -42, '', []]
for test_attr in test_data:
with self.assertRaises(XmlParseException) as cm:
self.xmlParse.parent_attr = test_attr
self.assertEqual(cm.exception.error_code, 105)
def test_2_9(self):
test_data = [0, -42, '', []]
for test_tag in test_data:
with self.assertRaises(XmlParseException) as cm:
self.xmlParse.child_tag = test_tag
self.assertEqual(cm.exception.error_code, 106)
def test_2_10(self):
test_data = [0, -42, '', []]
for test_attr in test_data:
with self.assertRaises(XmlParseException) as cm:
self.xmlParse.child_attr = test_attr
self.assertEqual(cm.exception.error_code, 107)
    # 3. Parsing a malformed XML document
    # 3.1. An invalid XML document
    # 3.2. A parent_tag node without the parent_attr attribute
    # 3.3. A child_tag node without the child_attr attribute
    # 3.4. A parent_tag node with a duplicated parent_attr attribute
def test_3_1(self):
test_files = ['test_3_1_1.xml', 'test_3_1_2.xml', 'test_3_1_3.xml']
for test_xml in test_files:
self.assertEqual(self.__test_input(self.localPrefix + test_xml), 200)
def test_3_2(self):
test_xml = 'test_3_2.xml'
self.assertEqual(self.__test_input(self.localPrefix + test_xml), 202)
def test_3_3(self):
test_xml = 'test_3_4.xml'
self.assertEqual(self.__test_input(self.localPrefix + test_xml), 202)
def test_3_4(self):
test_xml = 'test_3_6.xml'
self.assertEqual(self.__test_input(self.localPrefix + test_xml), 203)
if __name__ == '__main__':
unittest.main() | gpl-3.0 | 2,490,514,187,270,711,000 | 40.273632 | 120 | 0.603882 | false |
mach327/chirp_fork | chirp/drivers/vgc.py | 1 | 49672 | # Copyright 2016:
# * Jim Unroe KC9HI, <[email protected]>
# * Pavel Milanes CO7WT <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
import struct
import logging
import re
LOG = logging.getLogger(__name__)
from chirp import chirp_common, directory, memmap
from chirp import bitwise, errors, util
from chirp.settings import RadioSettingGroup, RadioSetting, \
RadioSettingValueBoolean, RadioSettingValueList, \
RadioSettingValueString, RadioSettingValueInteger, \
RadioSettingValueFloat, RadioSettings
from textwrap import dedent
MEM_FORMAT = """
struct mem {
lbcd rxfreq[4];
lbcd txfreq[4];
lbcd rxtone[2];
lbcd txtone[2];
u8 unknown0:2,
txp:2,
wn:2,
unknown1:1,
bcl:1;
u8 unknown2:2,
revert:1,
dname:1,
unknown3:4;
u8 unknown4[2];
};
struct nam {
char name[6];
u8 unknown1[2];
};
#seekto 0x0000;
struct mem left_memory[500];
#seekto 0x2000;
struct mem right_memory[500];
#seekto 0x4000;
struct nam left_names[500];
#seekto 0x5000;
struct nam right_names[500];
#seekto 0x6000;
u8 left_usedflags[64];
#seekto 0x6040;
u8 left_scanflags[64];
#seekto 0x6080;
u8 right_usedflags[64];
#seekto 0x60C0;
u8 right_scanflags[64];
#seekto 0x6160;
struct {
char line32[32];
} embedded_msg;
#seekto 0x6180;
struct {
u8 sbmute:2, // sub band mute
unknown1:1,
workmodb:1, // work mode (right side)
dw:1, // dual watch
audio:1, // audio output mode (stereo/mono)
unknown2:1,
workmoda:1; // work mode (left side)
u8 scansb:1, // scan stop beep
aftone:3, // af tone control
     scand:1,          // scan direction
scanr:3; // scan resume
u8 rxexp:1, // rx expansion
ptt:1, // ptt mode
display:1, // display select (frequency/clock)
     omode:1,          // operation mode
beep:2, // beep volume
spkr:2; // speaker
u8 cpuclk:1, // operating mode(cpu clock)
fkey:3, // fkey function
mrscan:1, // memory scan type
color:3; // lcd backlight color
u8 vox:2, // vox
voxs:3, // vox sensitivity
mgain:3; // mic gain
u8 wbandb:4, // work band (right side)
wbanda:4; // work band (left side)
u8 sqlb:4, // squelch level (right side)
sqla:4; // squelch level (left side)
u8 apo:4, // auto power off
ars:1, // automatic repeater shift
tot:3; // time out timer
u8 stepb:4, // auto step (right side)
stepa:4; // auto step (left side)
u8 rxcoverm:1, // rx coverage-memory
lcdc:3, // lcd contrast
rxcoverv:1, // rx coverage-vfo
lcdb:3; // lcd brightness
u8 smode:1, // smart function mode
timefmt:1, // time format
datefmt:2, // date format
timesig:1, // time signal
keyb:3; // key/led brightness
u8 dwstop:1, // dual watch stop
unknown3:1,
sqlexp:1, // sql expansion
decbandsel:1, // decoding band select
dtmfmodenc:1, // dtmf mode encode
bell:3; // bell ringer
u8 unknown4:2,
btime:6; // lcd backlight time
u8 unknown5:2,
tz:6; // time zone
u8 unknown618E;
u8 unknown618F;
ul16 offseta; // work offset (left side)
ul16 offsetb; // work offset (right side)
ul16 mrcha; // selected memory channel (left)
ul16 mrchb; // selected memory channel (right)
ul16 wpricha; // work priority channel (left)
ul16 wprichb; // work priority channel (right)
u8 unknown6:3,
datasql:2, // data squelch
dataspd:1, // data speed
databnd:2; // data band select
u8 unknown7:1,
pfkey2:3, // mic p2 key
unknown8:1,
pfkey1:3; // mic p1 key
u8 unknown9:1,
pfkey4:3, // mic p4 key
unknowna:1,
pfkey3:3; // mic p3 key
u8 unknownb:7,
dtmfmoddec:1; // dtmf mode decode
} settings;
#seekto 0x61B0;
struct {
char line16[16];
} poweron_msg;
#seekto 0x6300;
struct {
u8 unknown1:3,
ttdgt:5; // dtmf digit time
u8 unknown2:3,
ttint:5; // dtmf interval time
u8 unknown3:3,
tt1stdgt:5; // dtmf 1st digit time
u8 unknown4:3,
tt1stdly:5; // dtmf 1st digit delay
u8 unknown5:3,
ttdlyqt:5; // dtmf delay when use qt
u8 unknown6:3,
ttdkey:5; // dtmf d key function
u8 unknown7;
u8 unknown8:4,
ttautod:4; // dtmf auto dial group
} dtmf;
#seekto 0x6330;
struct {
u8 unknown1:7,
ttsig:1; // dtmf signal
u8 unknown2:4,
ttintcode:4; // dtmf interval code
u8 unknown3:5,
ttgrpcode:3; // dtmf group code
u8 unknown4:4,
ttautorst:4; // dtmf auto reset time
u8 unknown5:5,
ttalert:3; // dtmf alert tone/transpond
} dtmf2;
#seekto 0x6360;
struct {
u8 code1[8]; // dtmf code
u8 code1_len; // dtmf code length
u8 unknown1[7];
u8 code2[8]; // dtmf code
u8 code2_len; // dtmf code length
u8 unknown2[7];
u8 code3[8]; // dtmf code
u8 code3_len; // dtmf code length
u8 unknown3[7];
u8 code4[8]; // dtmf code
u8 code4_len; // dtmf code length
u8 unknown4[7];
u8 code5[8]; // dtmf code
u8 code5_len; // dtmf code length
u8 unknown5[7];
u8 code6[8]; // dtmf code
u8 code6_len; // dtmf code length
u8 unknown6[7];
u8 code7[8]; // dtmf code
u8 code7_len; // dtmf code length
u8 unknown7[7];
u8 code8[8]; // dtmf code
u8 code8_len; // dtmf code length
u8 unknown8[7];
u8 code9[8]; // dtmf code
u8 code9_len; // dtmf code length
u8 unknown9[7];
} dtmfcode;
"""
MEM_SIZE = 0x8000
BLOCK_SIZE = 0x40
MODES = ["FM", "Auto", "NFM", "AM"]
SKIP_VALUES = ["", "S"]
TONES = chirp_common.TONES
DTCS_CODES = chirp_common.DTCS_CODES
NAME_LENGTH = 6
DTMF_CHARS = list("0123456789ABCD*#")
STIMEOUT = 1
# Basic settings lists
LIST_AFTONE = ["Low-3", "Low-2", "Low-1", "Normal", "High-1", "High-2"]
LIST_SPKR = ["Off", "Front", "Rear", "Front + Rear"]
LIST_AUDIO = ["Monaural", "Stereo"]
LIST_SBMUTE = ["Off", "TX", "RX", "Both"]
LIST_MLNHM = ["Min", "Low", "Normal", "High", "Max"]
LIST_PTT = ["Momentary", "Toggle"]
LIST_RXEXP = ["General", "Wide coverage"]
LIST_VOX = ["Off", "Internal mic", "Front hand-mic", "Rear hand-mic"]
LIST_DISPLAY = ["Frequency", "Timer/Clock"]
LIST_MINMAX = ["Min"] + ["%s" % x for x in range(2, 8)] + ["Max"]
LIST_COLOR = ["White-Blue", "Sky-Blue", "Marine-Blue", "Green",
"Yellow-Green", "Orange", "Amber", "White"]
LIST_BTIME = ["Continuous"] + ["%s" % x for x in range(1, 61)]
LIST_MRSCAN = ["All", "Selected"]
LIST_DWSTOP = ["Auto", "Hold"]
LIST_SCAND = ["Down", "Up"]
LIST_SCANR = ["Busy", "Hold", "1 sec", "3 sec", "5 sec"]
LIST_APO = ["Off", ".5", "1", "1.5"] + ["%s" % x for x in range(2, 13)]
LIST_BEEP = ["Off", "Low", "High"]
LIST_FKEY = ["MHz/AD-F", "AF Dual 1(line-in)", "AF Dual 2(AM)", "AF Dual 3(FM)",
"PA", "SQL off", "T-call", "WX"]
LIST_PFKEY = ["Off", "SQL off", "TX power", "Scan", "RPT shift", "Reverse",
"T-Call"]
LIST_AB = ["A", "B"]
LIST_COVERAGE = ["In band", "All"]
LIST_TOT = ["Off"] + ["%s" % x for x in range(5, 25, 5)] + ["30"]
LIST_DATEFMT = ["yyyy/mm/dd", "yyyy/dd/mm", "mm/dd/yyyy", "dd/mm/yyyy"]
LIST_TIMEFMT = ["24H", "12H"]
LIST_TZ = ["-12 INT DL W",
"-11 MIDWAY",
"-10 HAST",
"-9 AKST",
"-8 PST",
"-7 MST",
"-6 CST",
"-5 EST",
"-4:30 CARACAS",
"-4 AST",
"-3:30 NST",
"-3 BRASILIA",
"-2 MATLANTIC",
"-1 AZORES",
"-0 LONDON",
"+0 LONDON",
"+1 ROME",
"+2 ATHENS",
"+3 MOSCOW",
"+3:30 REHRW",
"+4 ABUDNABI",
"+4:30 KABUL",
"+5 ISLMABAD",
"+5:30 NEWDELHI",
"+6 DHAKA",
"+6:30 YANGON",
"+7 BANKOK",
"+8 BEIJING",
"+9 TOKYO",
"+10 ADELAIDE",
"+10 SYDNET",
"+11 NWCLDNIA",
"+12 FIJI",
"+13 NUKALOFA"
]
LIST_BELL = ["Off", "1 time", "3 times", "5 times", "8 times", "Continuous"]
LIST_DATABND = ["Main band", "Sub band", "Left band-fixed", "Right band-fixed"]
LIST_DATASPD = ["1200 bps", "9600 bps"]
LIST_DATASQL = ["Busy/TX", "Busy", "TX"]
# Other settings lists
LIST_CPUCLK = ["Clock frequency 1", "Clock frequency 2"]
# Work mode settings lists
LIST_WORK = ["VFO", "Memory System"]
LIST_WBANDB = ["Air", "H-V", "GR1-V", "GR1-U", "H-U", "GR2"]
LIST_WBANDA = ["Line-in", "AM", "FM"] + LIST_WBANDB
LIST_SQL = ["Open"] + ["%s" % x for x in range(1, 10)]
LIST_STEP = ["Auto", "2.50 KHz", "5.00 KHz", "6.25 KHz", "8.33 KHz",
"9.00 KHz", "10.00 KHz", "12.50 KHz", "15.00 KHz", "20.00 KHz",
"25.00 KHz", "50.00 KHz", "100.00 KHz", "200.00 KHz"]
LIST_SMODE = ["F-1", "F-2"]
# DTMF settings lists
LIST_TTDKEY = ["D code"] + ["Send delay %s s" % x for x in range(1, 17)]
LIST_TT200 = ["%s ms" % x for x in range(50, 210, 10)]
LIST_TT1000 = ["%s ms" % x for x in range(100, 1050, 50)]
LIST_TTSIG = ["Code squelch", "Select call"]
LIST_TTAUTORST = ["Off"] + ["%s s" % x for x in range(1, 16)]
LIST_TTGRPCODE = ["Off"] + list("ABCD*#")
LIST_TTINTCODE = DTMF_CHARS
LIST_TTALERT = ["Off", "Alert tone", "Transpond", "Transpond-ID code",
"Transpond-transpond code"]
LIST_TTAUTOD = ["%s" % x for x in range(1, 10)]
# valid chars on the LCD
VALID_CHARS = chirp_common.CHARSET_ALPHANUMERIC + \
"`{|}!\"#$%&'()*+,-./:;<=>?@[]^_"
# Power Levels
POWER_LEVELS = [chirp_common.PowerLevel("Low", watts=5),
chirp_common.PowerLevel("Mid", watts=20),
chirp_common.PowerLevel("High", watts=50)]
# B-TECH UV-50X3 id string
UV50X3_id = "VGC6600MD"
def _clean_buffer(radio):
radio.pipe.timeout = 0.005
junk = radio.pipe.read(256)
radio.pipe.timeout = STIMEOUT
if junk:
        LOG.debug("Got %i bytes of junk before starting" % len(junk))
def _check_for_double_ack(radio):
radio.pipe.timeout = 0.005
c = radio.pipe.read(1)
radio.pipe.timeout = STIMEOUT
if c and c != '\x06':
_exit_program_mode(radio)
raise errors.RadioError('Expected nothing or ACK, got %r' % c)
def _rawrecv(radio, amount):
"""Raw read from the radio device"""
data = ""
try:
data = radio.pipe.read(amount)
except:
_exit_program_mode(radio)
msg = "Generic error reading data from radio; check your cable."
raise errors.RadioError(msg)
if len(data) != amount:
_exit_program_mode(radio)
msg = "Error reading data from radio: not the amount of data we want."
raise errors.RadioError(msg)
return data
def _rawsend(radio, data):
"""Raw send to the radio device"""
try:
radio.pipe.write(data)
except:
raise errors.RadioError("Error sending data to radio")
def _make_frame(cmd, addr, length, data=""):
"""Pack the info in the headder format"""
frame = struct.pack(">BHB", ord(cmd), addr, length)
# add the data if set
if len(data) != 0:
frame += data
# return the data
return frame
def _recv(radio, addr, length=BLOCK_SIZE):
"""Get data from the radio """
# read 4 bytes of header
hdr = _rawrecv(radio, 4)
# check for unexpected extra command byte
c, a, l = struct.unpack(">BHB", hdr)
if hdr[0:2] == "WW" and a != addr:
# extra command byte detected
# throw away the 1st byte and add the next byte in the buffer
hdr = hdr[1:] + _rawrecv(radio, 1)
# read 64 bytes (0x40) of data
data = _rawrecv(radio, (BLOCK_SIZE))
# DEBUG
LOG.info("Response:")
LOG.debug(util.hexprint(hdr + data))
c, a, l = struct.unpack(">BHB", hdr)
if a != addr or l != length or c != ord("W"):
_exit_program_mode(radio)
LOG.error("Invalid answer for block 0x%04x:" % addr)
LOG.debug("CMD: %s ADDR: %04x SIZE: %02x" % (c, a, l))
raise errors.RadioError("Unknown response from the radio")
return data
def _do_ident(radio):
"""Put the radio in PROGRAM mode & identify it"""
# set the serial discipline
radio.pipe.baudrate = 115200
radio.pipe.parity = "N"
radio.pipe.timeout = STIMEOUT
# flush input buffer
_clean_buffer(radio)
magic = "V66LINK"
_rawsend(radio, magic)
# Ok, get the ident string
ident = _rawrecv(radio, 9)
# check if ident is OK
if ident != radio.IDENT:
# bad ident
msg = "Incorrect model ID, got this:"
msg += util.hexprint(ident)
LOG.debug(msg)
raise errors.RadioError("Radio identification failed.")
# DEBUG
LOG.info("Positive ident, got this:")
LOG.debug(util.hexprint(ident))
return True
def _exit_program_mode(radio):
endframe = "\x45"
_rawsend(radio, endframe)
def _download(radio):
"""Get the memory map"""
# put radio in program mode and identify it
_do_ident(radio)
# UI progress
status = chirp_common.Status()
status.cur = 0
status.max = MEM_SIZE / BLOCK_SIZE
status.msg = "Cloning from radio..."
radio.status_fn(status)
data = ""
for addr in range(0, MEM_SIZE, BLOCK_SIZE):
frame = _make_frame("R", addr, BLOCK_SIZE)
# DEBUG
LOG.info("Request sent:")
LOG.debug(util.hexprint(frame))
# sending the read request
_rawsend(radio, frame)
# now we read
d = _recv(radio, addr)
# aggregate the data
data += d
# UI Update
status.cur = addr / BLOCK_SIZE
status.msg = "Cloning from radio..."
radio.status_fn(status)
_exit_program_mode(radio)
return data
def _upload(radio):
"""Upload procedure"""
MEM_SIZE = 0x7000
# put radio in program mode and identify it
_do_ident(radio)
# UI progress
status = chirp_common.Status()
status.cur = 0
status.max = MEM_SIZE / BLOCK_SIZE
status.msg = "Cloning to radio..."
radio.status_fn(status)
# the fun start here
for addr in range(0, MEM_SIZE, BLOCK_SIZE):
# sending the data
data = radio.get_mmap()[addr:addr + BLOCK_SIZE]
frame = _make_frame("W", addr, BLOCK_SIZE, data)
_rawsend(radio, frame)
# receiving the response
ack = _rawrecv(radio, 1)
if ack != "\x06":
_exit_program_mode(radio)
msg = "Bad ack writing block 0x%04x" % addr
raise errors.RadioError(msg)
_check_for_double_ack(radio)
# UI Update
status.cur = addr / BLOCK_SIZE
status.msg = "Cloning to radio..."
radio.status_fn(status)
_exit_program_mode(radio)
def model_match(cls, data):
"""Match the opened/downloaded image to the correct version"""
rid = data[0x6140:0x6148]
#if rid in cls._fileid:
if rid in cls.IDENT:
return True
return False
class VGCStyleRadio(chirp_common.CloneModeRadio,
chirp_common.ExperimentalRadio):
"""BTECH's UV-50X3"""
VENDOR = "BTECH"
_air_range = (108000000, 136000000)
_vhf_range = (136000000, 174000000)
_vhf2_range = (174000000, 250000000)
_220_range = (222000000, 225000000)
_gen1_range = (300000000, 400000000)
_uhf_range = (400000000, 480000000)
_gen2_range = (480000000, 520000000)
_upper = 499
MODEL = ""
IDENT = ""
@classmethod
def get_prompts(cls):
rp = chirp_common.RadioPrompts()
rp.experimental = \
('The UV-50X3 driver is a beta version.\n'
'\n'
'Please save an unedited copy of your first successful\n'
'download to a CHIRP Radio Images(*.img) file.'
)
rp.pre_download = _(dedent("""\
            Follow these instructions to download your info:
1 - Turn off your radio
2 - Connect your interface cable
3 - Turn on your radio
4 - Do the download of your radio data
"""))
rp.pre_upload = _(dedent("""\
            Follow these instructions to upload your info:
1 - Turn off your radio
2 - Connect your interface cable
3 - Turn on your radio
4 - Do the upload of your radio data
"""))
return rp
def get_features(self):
rf = chirp_common.RadioFeatures()
rf.has_settings = True
rf.has_bank = False
rf.has_tuning_step = False
rf.can_odd_split = True
rf.has_name = True
rf.has_offset = True
rf.has_mode = True
rf.has_dtcs = True
rf.has_rx_dtcs = True
rf.has_dtcs_polarity = True
rf.has_ctone = True
rf.has_cross = True
rf.has_sub_devices = self.VARIANT == ""
rf.valid_modes = MODES
rf.valid_characters = VALID_CHARS
rf.valid_duplexes = ["", "-", "+", "split", "off"]
rf.valid_tmodes = ['', 'Tone', 'TSQL', 'DTCS', 'Cross']
rf.valid_cross_modes = [
"Tone->Tone",
"DTCS->",
"->DTCS",
"Tone->DTCS",
"DTCS->Tone",
"->Tone",
"DTCS->DTCS"]
rf.valid_power_levels = POWER_LEVELS
rf.valid_skips = SKIP_VALUES
rf.valid_name_length = NAME_LENGTH
rf.valid_dtcs_codes = DTCS_CODES
rf.valid_bands = [self._air_range,
self._vhf_range,
self._vhf2_range,
self._220_range,
self._gen1_range,
self._uhf_range,
self._gen2_range]
rf.memory_bounds = (0, self._upper)
return rf
def get_sub_devices(self):
return [UV50X3Left(self._mmap), UV50X3Right(self._mmap)]
def sync_in(self):
"""Download from radio"""
try:
data = _download(self)
except errors.RadioError:
# Pass through any real errors we raise
raise
except:
# If anything unexpected happens, make sure we raise
# a RadioError and log the problem
LOG.exception('Unexpected error during download')
raise errors.RadioError('Unexpected error communicating '
'with the radio')
self._mmap = memmap.MemoryMap(data)
self.process_mmap()
def sync_out(self):
"""Upload to radio"""
try:
_upload(self)
except:
# If anything unexpected happens, make sure we raise
# a RadioError and log the problem
LOG.exception('Unexpected error during upload')
raise errors.RadioError('Unexpected error communicating '
'with the radio')
def process_mmap(self):
"""Process the mem map into the mem object"""
self._memobj = bitwise.parse(MEM_FORMAT, self._mmap)
def get_raw_memory(self, number):
return repr(self._memobj.memory[number])
def decode_tone(self, val):
"""Parse the tone data to decode from mem, it returns:
Mode (''|DTCS|Tone), Value (None|###), Polarity (None,N,R)"""
if val.get_raw() == "\xFF\xFF":
return '', None, None
val = int(val)
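        # decoded value >= 12000 -> DTCS code, reversed polarity;
        # >= 8000 -> DTCS code, normal polarity; otherwise a CTCSS tone in tenths of Hz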
if val >= 12000:
a = val - 12000
return 'DTCS', a, 'R'
elif val >= 8000:
a = val - 8000
return 'DTCS', a, 'N'
else:
a = val / 10.0
return 'Tone', a, None
def encode_tone(self, memval, mode, value, pol):
"""Parse the tone data to encode from UI to mem"""
if mode == '':
memval[0].set_raw(0xFF)
memval[1].set_raw(0xFF)
elif mode == 'Tone':
memval.set_value(int(value * 10))
elif mode == 'DTCS':
flag = 0x80 if pol == 'N' else 0xC0
memval.set_value(value)
memval[1].set_bits(flag)
else:
raise Exception("Internal error: invalid mode `%s'" % mode)
def _memory_obj(self, suffix=""):
return getattr(self._memobj, "%s_memory%s" % (self._vfo, suffix))
def _name_obj(self, suffix=""):
return getattr(self._memobj, "%s_names%s" % (self._vfo, suffix))
def _scan_obj(self, suffix=""):
return getattr(self._memobj, "%s_scanflags%s" % (self._vfo, suffix))
def _used_obj(self, suffix=""):
return getattr(self._memobj, "%s_usedflags%s" % (self._vfo, suffix))
def get_memory(self, number):
"""Get the mem representation from the radio image"""
bitpos = (1 << (number % 8))
bytepos = (number / 8)
_mem = self._memory_obj()[number]
_names = self._name_obj()[number]
_scn = self._scan_obj()[bytepos]
_usd = self._used_obj()[bytepos]
isused = bitpos & int(_usd)
isscan = bitpos & int(_scn)
# Create a high-level memory object to return to the UI
mem = chirp_common.Memory()
# Memory number
mem.number = number
if not isused:
mem.empty = True
return mem
# Freq and offset
mem.freq = int(_mem.rxfreq) * 10
# tx freq can be blank
if _mem.get_raw()[4] == "\xFF":
# TX freq not set
mem.offset = 0
mem.duplex = "off"
else:
            # TX freq set
offset = (int(_mem.txfreq) * 10) - mem.freq
if offset < 0:
mem.offset = abs(offset)
mem.duplex = "-"
elif offset > 0:
mem.offset = offset
mem.duplex = "+"
else:
mem.offset = 0
# skip
if not isscan:
mem.skip = "S"
# name TAG of the channel
mem.name = str(_names.name).strip("\xFF")
# power
mem.power = POWER_LEVELS[int(_mem.txp)]
# wide/narrow
mem.mode = MODES[int(_mem.wn)]
# tone data
rxtone = txtone = None
txtone = self.decode_tone(_mem.txtone)
rxtone = self.decode_tone(_mem.rxtone)
chirp_common.split_tone_decode(mem, txtone, rxtone)
# Extra
mem.extra = RadioSettingGroup("extra", "Extra")
bcl = RadioSetting("bcl", "Busy channel lockout",
RadioSettingValueBoolean(bool(_mem.bcl)))
mem.extra.append(bcl)
revert = RadioSetting("revert", "Revert",
RadioSettingValueBoolean(bool(_mem.revert)))
mem.extra.append(revert)
dname = RadioSetting("dname", "Display name",
RadioSettingValueBoolean(bool(_mem.dname)))
mem.extra.append(dname)
return mem
def set_memory(self, mem):
"""Set the memory data in the eeprom img from the UI"""
bitpos = (1 << (mem.number % 8))
bytepos = (mem.number / 8)
_mem = self._memory_obj()[mem.number]
_names = self._name_obj()[mem.number]
_scn = self._scan_obj()[bytepos]
_usd = self._used_obj()[bytepos]
if mem.empty:
_usd &= ~bitpos
_scn &= ~bitpos
_mem.set_raw("\xFF" * 16)
_names.name = ("\xFF" * 6)
return
else:
_usd |= bitpos
# frequency
_mem.rxfreq = mem.freq / 10
# duplex
if mem.duplex == "+":
_mem.txfreq = (mem.freq + mem.offset) / 10
elif mem.duplex == "-":
_mem.txfreq = (mem.freq - mem.offset) / 10
elif mem.duplex == "off":
for i in _mem.txfreq:
i.set_raw("\xFF")
elif mem.duplex == "split":
_mem.txfreq = mem.offset / 10
else:
_mem.txfreq = mem.freq / 10
# tone data
((txmode, txtone, txpol), (rxmode, rxtone, rxpol)) = \
chirp_common.split_tone_encode(mem)
self.encode_tone(_mem.txtone, txmode, txtone, txpol)
self.encode_tone(_mem.rxtone, rxmode, rxtone, rxpol)
# name TAG of the channel
_names.name = mem.name.ljust(6, "\xFF")
        # power level (default power level is low)
_mem.txp = 0 if mem.power is None else POWER_LEVELS.index(mem.power)
# wide/narrow
_mem.wn = MODES.index(mem.mode)
if mem.skip == "S":
_scn &= ~bitpos
else:
_scn |= bitpos
# autoset display to display name if filled
if mem.extra:
# mem.extra only seems to be populated when called from edit panel
dname = mem.extra["dname"]
else:
dname = None
if mem.name:
_mem.dname = True
if dname and not dname.changed():
dname.value = True
else:
_mem.dname = False
if dname and not dname.changed():
dname.value = False
        # resetting unknowns; these have to be set by hand
_mem.unknown0 = 0
_mem.unknown1 = 0
_mem.unknown2 = 0
_mem.unknown3 = 0
# extra settings
if len(mem.extra) > 0:
            # there are settings, parse them
for setting in mem.extra:
setattr(_mem, setting.get_name(), setting.value)
else:
# there are no extra settings, load defaults
_mem.bcl = 0
_mem.revert = 0
_mem.dname = 1
def _bbcd2dtmf(self, bcdarr, strlen=16):
# doing bbcd, but with support for ABCD*#
LOG.debug(bcdarr.get_value())
string = ''.join("%02X" % b for b in bcdarr)
LOG.debug("@_bbcd2dtmf, received: %s" % string)
string = string.replace('E', '*').replace('F', '#')
if strlen <= 16:
string = string[:strlen]
return string
def _dtmf2bbcd(self, value):
dtmfstr = value.get_value()
dtmfstr = dtmfstr.replace('*', 'E').replace('#', 'F')
dtmfstr = str.ljust(dtmfstr.strip(), 16, "F")
bcdarr = list(bytearray.fromhex(dtmfstr))
LOG.debug("@_dtmf2bbcd, sending: %s" % bcdarr)
return bcdarr
def get_settings(self):
"""Translate the bit in the mem_struct into settings in the UI"""
_mem = self._memobj
basic = RadioSettingGroup("basic", "Basic Settings")
other = RadioSettingGroup("other", "Other Settings")
work = RadioSettingGroup("work", "Work Mode Settings")
dtmf = RadioSettingGroup("dtmf", "DTMF Settings")
top = RadioSettings(basic, other, work, dtmf)
# Basic
# Audio: A01-A04
aftone = RadioSetting("settings.aftone", "AF tone control",
RadioSettingValueList(LIST_AFTONE, LIST_AFTONE[
_mem.settings.aftone]))
basic.append(aftone)
spkr = RadioSetting("settings.spkr", "Speaker",
RadioSettingValueList(LIST_SPKR,LIST_SPKR[
_mem.settings.spkr]))
basic.append(spkr)
audio = RadioSetting("settings.audio", "Stereo/Mono",
RadioSettingValueList(LIST_AUDIO, LIST_AUDIO[
_mem.settings.audio]))
basic.append(audio)
sbmute = RadioSetting("settings.sbmute", "Sub band mute",
RadioSettingValueList(LIST_SBMUTE, LIST_SBMUTE[
_mem.settings.sbmute]))
basic.append(sbmute)
# TX/RX: B01-B08
mgain = RadioSetting("settings.mgain", "Mic gain",
RadioSettingValueList(LIST_MLNHM, LIST_MLNHM[
_mem.settings.mgain]))
basic.append(mgain)
ptt = RadioSetting("settings.ptt", "PTT mode",
RadioSettingValueList(LIST_PTT,LIST_PTT[
_mem.settings.ptt]))
basic.append(ptt)
# B03 (per channel)
# B04 (per channel)
rxexp = RadioSetting("settings.rxexp", "RX expansion",
RadioSettingValueList(LIST_RXEXP,LIST_RXEXP[
_mem.settings.rxexp]))
basic.append(rxexp)
vox = RadioSetting("settings.vox", "Vox",
RadioSettingValueList(LIST_VOX, LIST_VOX[
_mem.settings.vox]))
basic.append(vox)
voxs = RadioSetting("settings.voxs", "Vox sensitivity",
RadioSettingValueList(LIST_MLNHM, LIST_MLNHM[
_mem.settings.voxs]))
basic.append(voxs)
# B08 (per channel)
# Display: C01-C06
display = RadioSetting("settings.display", "Display select",
RadioSettingValueList(LIST_DISPLAY,
LIST_DISPLAY[_mem.settings.display]))
basic.append(display)
lcdb = RadioSetting("settings.lcdb", "LCD brightness",
RadioSettingValueList(LIST_MINMAX, LIST_MINMAX[
_mem.settings.lcdb]))
basic.append(lcdb)
color = RadioSetting("settings.color", "LCD color",
RadioSettingValueList(LIST_COLOR, LIST_COLOR[
_mem.settings.color]))
basic.append(color)
lcdc = RadioSetting("settings.lcdc", "LCD contrast",
RadioSettingValueList(LIST_MINMAX, LIST_MINMAX[
_mem.settings.lcdc]))
basic.append(lcdc)
btime = RadioSetting("settings.btime", "LCD backlight time",
RadioSettingValueList(LIST_BTIME, LIST_BTIME[
_mem.settings.btime]))
basic.append(btime)
keyb = RadioSetting("settings.keyb", "Key brightness",
RadioSettingValueList(LIST_MINMAX, LIST_MINMAX[
_mem.settings.keyb]))
basic.append(keyb)
# Memory: D01-D04
# D01 (per channel)
# D02 (per channel)
mrscan = RadioSetting("settings.mrscan", "Memory scan type",
RadioSettingValueList(LIST_MRSCAN, LIST_MRSCAN[
_mem.settings.mrscan]))
basic.append(mrscan)
# D04 (per channel)
# Scan: E01-E04
dwstop = RadioSetting("settings.dwstop", "Dual watch stop",
RadioSettingValueList(LIST_DWSTOP, LIST_DWSTOP[
_mem.settings.dwstop]))
basic.append(dwstop)
scand = RadioSetting("settings.scand", "Scan direction",
RadioSettingValueList(LIST_SCAND,LIST_SCAND[
_mem.settings.scand]))
basic.append(scand)
scanr = RadioSetting("settings.scanr", "Scan resume",
RadioSettingValueList(LIST_SCANR,LIST_SCANR[
_mem.settings.scanr]))
basic.append(scanr)
scansb = RadioSetting("settings.scansb", "Scan stop beep",
RadioSettingValueBoolean(_mem.settings.scansb))
basic.append(scansb)
# System: F01-F09
apo = RadioSetting("settings.apo", "Automatic power off [hours]",
RadioSettingValueList(LIST_APO, LIST_APO[
_mem.settings.apo]))
basic.append(apo)
ars = RadioSetting("settings.ars", "Automatic repeater shift",
RadioSettingValueBoolean(_mem.settings.ars))
basic.append(ars)
beep = RadioSetting("settings.beep", "Beep volume",
RadioSettingValueList(LIST_BEEP,LIST_BEEP[
_mem.settings.beep]))
basic.append(beep)
fkey = RadioSetting("settings.fkey", "F key",
RadioSettingValueList(LIST_FKEY,LIST_FKEY[
_mem.settings.fkey]))
basic.append(fkey)
pfkey1 = RadioSetting("settings.pfkey1", "Mic P1 key",
RadioSettingValueList(LIST_PFKEY, LIST_PFKEY[
_mem.settings.pfkey1]))
basic.append(pfkey1)
pfkey2 = RadioSetting("settings.pfkey2", "Mic P2 key",
RadioSettingValueList(LIST_PFKEY, LIST_PFKEY[
_mem.settings.pfkey2]))
basic.append(pfkey2)
pfkey3 = RadioSetting("settings.pfkey3", "Mic P3 key",
RadioSettingValueList(LIST_PFKEY, LIST_PFKEY[
_mem.settings.pfkey3]))
basic.append(pfkey3)
pfkey4 = RadioSetting("settings.pfkey4", "Mic P4 key",
RadioSettingValueList(LIST_PFKEY, LIST_PFKEY[
_mem.settings.pfkey4]))
basic.append(pfkey4)
omode = RadioSetting("settings.omode", "Operation mode",
RadioSettingValueList(LIST_AB,LIST_AB[
_mem.settings.omode]))
basic.append(omode)
rxcoverm = RadioSetting("settings.rxcoverm", "RX coverage - memory",
RadioSettingValueList(LIST_COVERAGE,
LIST_COVERAGE[_mem.settings.rxcoverm]))
basic.append(rxcoverm)
rxcoverv = RadioSetting("settings.rxcoverv", "RX coverage - VFO",
RadioSettingValueList(LIST_COVERAGE,
LIST_COVERAGE[_mem.settings.rxcoverv]))
basic.append(rxcoverv)
tot = RadioSetting("settings.tot", "Time out timer [min]",
RadioSettingValueList(LIST_TOT, LIST_TOT[
_mem.settings.tot]))
basic.append(tot)
# Timer/Clock: G01-G04
# G01
datefmt = RadioSetting("settings.datefmt", "Date format",
RadioSettingValueList(LIST_DATEFMT,
LIST_DATEFMT[_mem.settings.datefmt]))
basic.append(datefmt)
timefmt = RadioSetting("settings.timefmt", "Time format",
RadioSettingValueList(LIST_TIMEFMT,
LIST_TIMEFMT[_mem.settings.timefmt]))
basic.append(timefmt)
timesig = RadioSetting("settings.timesig", "Time signal",
RadioSettingValueBoolean(_mem.settings.timesig))
basic.append(timesig)
tz = RadioSetting("settings.tz", "Time zone",
RadioSettingValueList(LIST_TZ, LIST_TZ[
_mem.settings.tz]))
basic.append(tz)
# Signaling: H01-H06
bell = RadioSetting("settings.bell", "Bell ringer",
RadioSettingValueList(LIST_BELL, LIST_BELL[
_mem.settings.bell]))
basic.append(bell)
# H02 (per channel)
dtmfmodenc = RadioSetting("settings.dtmfmodenc", "DTMF mode encode",
RadioSettingValueBoolean(
_mem.settings.dtmfmodenc))
basic.append(dtmfmodenc)
dtmfmoddec = RadioSetting("settings.dtmfmoddec", "DTMF mode decode",
RadioSettingValueBoolean(
_mem.settings.dtmfmoddec))
basic.append(dtmfmoddec)
# H04 (per channel)
decbandsel = RadioSetting("settings.decbandsel", "DTMF band select",
RadioSettingValueList(LIST_AB,LIST_AB[
_mem.settings.decbandsel]))
basic.append(decbandsel)
sqlexp = RadioSetting("settings.sqlexp", "SQL expansion",
RadioSettingValueBoolean(_mem.settings.sqlexp))
basic.append(sqlexp)
# Pkt: I01-I03
databnd = RadioSetting("settings.databnd", "Packet data band",
RadioSettingValueList(LIST_DATABND,LIST_DATABND[
_mem.settings.databnd]))
basic.append(databnd)
dataspd = RadioSetting("settings.dataspd", "Packet data speed",
RadioSettingValueList(LIST_DATASPD,LIST_DATASPD[
_mem.settings.dataspd]))
basic.append(dataspd)
datasql = RadioSetting("settings.datasql", "Packet data squelch",
RadioSettingValueList(LIST_DATASQL,LIST_DATASQL[
_mem.settings.datasql]))
basic.append(datasql)
# Other
dw = RadioSetting("settings.dw", "Dual watch",
RadioSettingValueBoolean(_mem.settings.dw))
other.append(dw)
cpuclk = RadioSetting("settings.cpuclk", "CPU clock frequency",
RadioSettingValueList(LIST_CPUCLK,LIST_CPUCLK[
_mem.settings.cpuclk]))
other.append(cpuclk)
def _filter(name):
filtered = ""
for char in str(name):
if char in VALID_CHARS:
filtered += char
else:
filtered += " "
return filtered
line16 = RadioSetting("poweron_msg.line16", "Power-on message",
RadioSettingValueString(0, 16, _filter(
_mem.poweron_msg.line16)))
other.append(line16)
line32 = RadioSetting("embedded_msg.line32", "Embedded message",
RadioSettingValueString(0, 32, _filter(
_mem.embedded_msg.line32)))
other.append(line32)
# Work
workmoda = RadioSetting("settings.workmoda", "Work mode A",
RadioSettingValueList(LIST_WORK,LIST_WORK[
_mem.settings.workmoda]))
work.append(workmoda)
workmodb = RadioSetting("settings.workmodb", "Work mode B",
RadioSettingValueList(LIST_WORK,LIST_WORK[
_mem.settings.workmodb]))
work.append(workmodb)
wbanda = RadioSetting("settings.wbanda", "Work band A",
RadioSettingValueList(LIST_WBANDA, LIST_WBANDA[
(_mem.settings.wbanda) - 1]))
work.append(wbanda)
wbandb = RadioSetting("settings.wbandb", "Work band B",
RadioSettingValueList(LIST_WBANDB, LIST_WBANDB[
(_mem.settings.wbandb) - 4]))
work.append(wbandb)
sqla = RadioSetting("settings.sqla", "Squelch A",
RadioSettingValueList(LIST_SQL, LIST_SQL[
_mem.settings.sqla]))
work.append(sqla)
sqlb = RadioSetting("settings.sqlb", "Squelch B",
RadioSettingValueList(LIST_SQL, LIST_SQL[
_mem.settings.sqlb]))
work.append(sqlb)
stepa = RadioSetting("settings.stepa", "Auto step A",
RadioSettingValueList(LIST_STEP,LIST_STEP[
_mem.settings.stepa]))
work.append(stepa)
stepb = RadioSetting("settings.stepb", "Auto step B",
RadioSettingValueList(LIST_STEP,LIST_STEP[
_mem.settings.stepb]))
work.append(stepb)
mrcha = RadioSetting("settings.mrcha", "Current channel A",
RadioSettingValueInteger(0, 499,
_mem.settings.mrcha))
work.append(mrcha)
mrchb = RadioSetting("settings.mrchb", "Current channel B",
RadioSettingValueInteger(0, 499,
_mem.settings.mrchb))
work.append(mrchb)
val = _mem.settings.offseta / 100.00
offseta = RadioSetting("settings.offseta", "Offset A (0-37.95)",
RadioSettingValueFloat(0, 38.00, val, 0.05, 2))
work.append(offseta)
val = _mem.settings.offsetb / 100.00
offsetb = RadioSetting("settings.offsetb", "Offset B (0-79.95)",
RadioSettingValueFloat(0, 80.00, val, 0.05, 2))
work.append(offsetb)
wpricha = RadioSetting("settings.wpricha", "Priority channel A",
RadioSettingValueInteger(0, 499,
_mem.settings.wpricha))
work.append(wpricha)
wprichb = RadioSetting("settings.wprichb", "Priority channel B",
RadioSettingValueInteger(0, 499,
_mem.settings.wprichb))
work.append(wprichb)
smode = RadioSetting("settings.smode", "Smart function mode",
RadioSettingValueList(LIST_SMODE,LIST_SMODE[
_mem.settings.smode]))
work.append(smode)
# dtmf
ttdkey = RadioSetting("dtmf.ttdkey", "D key function",
RadioSettingValueList(LIST_TTDKEY, LIST_TTDKEY[
_mem.dtmf.ttdkey]))
dtmf.append(ttdkey)
ttdgt = RadioSetting("dtmf.ttdgt", "Digit time",
RadioSettingValueList(LIST_TT200, LIST_TT200[
(_mem.dtmf.ttdgt) - 5]))
dtmf.append(ttdgt)
ttint = RadioSetting("dtmf.ttint", "Interval time",
RadioSettingValueList(LIST_TT200, LIST_TT200[
(_mem.dtmf.ttint) - 5]))
dtmf.append(ttint)
tt1stdgt = RadioSetting("dtmf.tt1stdgt", "1st digit time",
RadioSettingValueList(LIST_TT200, LIST_TT200[
(_mem.dtmf.tt1stdgt) - 5]))
dtmf.append(tt1stdgt)
tt1stdly = RadioSetting("dtmf.tt1stdly", "1st digit delay time",
RadioSettingValueList(LIST_TT1000, LIST_TT1000[
(_mem.dtmf.tt1stdly) - 2]))
dtmf.append(tt1stdly)
ttdlyqt = RadioSetting("dtmf.ttdlyqt", "Digit delay when use qt",
RadioSettingValueList(LIST_TT1000, LIST_TT1000[
(_mem.dtmf.ttdlyqt) - 2]))
dtmf.append(ttdlyqt)
ttsig = RadioSetting("dtmf2.ttsig", "Signal",
RadioSettingValueList(LIST_TTSIG, LIST_TTSIG[
_mem.dtmf2.ttsig]))
dtmf.append(ttsig)
ttautorst = RadioSetting("dtmf2.ttautorst", "Auto reset time",
RadioSettingValueList(LIST_TTAUTORST,
LIST_TTAUTORST[_mem.dtmf2.ttautorst]))
dtmf.append(ttautorst)
if _mem.dtmf2.ttgrpcode > 0x06:
val = 0x00
else:
val = _mem.dtmf2.ttgrpcode
ttgrpcode = RadioSetting("dtmf2.ttgrpcode", "Group code",
RadioSettingValueList(LIST_TTGRPCODE,
LIST_TTGRPCODE[val]))
dtmf.append(ttgrpcode)
ttintcode = RadioSetting("dtmf2.ttintcode", "Interval code",
RadioSettingValueList(LIST_TTINTCODE,
LIST_TTINTCODE[_mem.dtmf2.ttintcode]))
dtmf.append(ttintcode)
if _mem.dtmf2.ttalert > 0x04:
val = 0x00
else:
val = _mem.dtmf2.ttalert
ttalert = RadioSetting("dtmf2.ttalert", "Alert tone/transpond",
RadioSettingValueList(LIST_TTALERT,
LIST_TTALERT[val]))
dtmf.append(ttalert)
ttautod = RadioSetting("dtmf.ttautod", "Auto dial group",
RadioSettingValueList(LIST_TTAUTOD,
LIST_TTAUTOD[_mem.dtmf.ttautod]))
dtmf.append(ttautod)
# setup 9 dtmf autodial entries
for i in map(str, range(1, 10)):
objname = "code" + i
strname = "Code " + str(i)
dtmfsetting = getattr(_mem.dtmfcode, objname)
dtmflen = getattr(_mem.dtmfcode, objname + "_len")
dtmfstr = self._bbcd2dtmf(dtmfsetting, dtmflen)
code = RadioSettingValueString(0, 16, dtmfstr)
code.set_charset(DTMF_CHARS + list(" "))
rs = RadioSetting("dtmfcode." + objname, strname, code)
dtmf.append(rs)
return top
def set_settings(self, settings):
_settings = self._memobj.settings
_mem = self._memobj
for element in settings:
if not isinstance(element, RadioSetting):
self.set_settings(element)
continue
else:
try:
name = element.get_name()
if "." in name:
bits = name.split(".")
obj = self._memobj
for bit in bits[:-1]:
if "/" in bit:
bit, index = bit.split("/", 1)
index = int(index)
obj = getattr(obj, bit)[index]
else:
obj = getattr(obj, bit)
setting = bits[-1]
else:
obj = _settings
setting = element.get_name()
if element.has_apply_callback():
LOG.debug("Using apply callback")
element.run_apply_callback()
elif setting == "line16":
setattr(obj, setting, str(element.value).rstrip(
" ").ljust(16, "\xFF"))
elif setting == "line32":
setattr(obj, setting, str(element.value).rstrip(
" ").ljust(32, "\xFF"))
elif setting == "wbanda":
setattr(obj, setting, int(element.value) + 1)
elif setting == "wbandb":
setattr(obj, setting, int(element.value) + 4)
elif setting in ["offseta", "offsetb"]:
val = element.value
value = int(val.get_value() * 100)
setattr(obj, setting, value)
elif setting in ["ttdgt", "ttint", "tt1stdgt"]:
setattr(obj, setting, int(element.value) + 5)
elif setting in ["tt1stdly", "ttdlyqt"]:
setattr(obj, setting, int(element.value) + 2)
elif re.match('code\d', setting):
# set dtmf length field and then get bcd dtmf
dtmfstrlen = len(str(element.value).strip())
setattr(_mem.dtmfcode, setting + "_len", dtmfstrlen)
dtmfstr = self._dtmf2bbcd(element.value)
setattr(_mem.dtmfcode, setting, dtmfstr)
elif element.value.get_mutable():
LOG.debug("Setting %s = %s" % (setting, element.value))
setattr(obj, setting, element.value)
except Exception, e:
LOG.debug(element.get_name())
raise
@classmethod
def match_model(cls, filedata, filename):
match_size = False
match_model = False
# testing the file data size
if len(filedata) == MEM_SIZE:
match_size = True
# testing the firmware model fingerprint
match_model = model_match(cls, filedata)
if match_size and match_model:
return True
else:
return False
@directory.register
class UV50X3(VGCStyleRadio):
"""BTech UV-50X3"""
MODEL = "UV-50X3"
IDENT = UV50X3_id
class UV50X3Left(UV50X3):
VARIANT = "Left"
_vfo = "left"
class UV50X3Right(UV50X3):
VARIANT = "Right"
_vfo = "right"
| gpl-3.0 | -5,601,819,873,156,552,000 | 33.280193 | 80 | 0.520877 | false |
SCUEvals/scuevals-api | db/alembic/versions/20170928104413_update_departments_names_if_they_exist.py | 1 | 1423 | """Update departments names if they exist
Revision ID: 15bc0e6fa7a0
Revises: 7004250e3ef5
Create Date: 2017-09-28 10:44:13.788317
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '15bc0e6fa7a0'
down_revision = '7004250e3ef5'
branch_labels = None
depends_on = None
def upgrade():
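    # judging by the SQL below, _json is expected to look like
    # {"departments": [{"value": "<abbr>", "label": "<Name> (<abbr>)", "school": "<school abbr>"}, ...]}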
conn = op.get_bind()
conn.execute(sa.text("""create or replace function update_departments(_university_id numeric, _json jsonb)
returns numeric as $func$
declare
_s_id numeric;
_count numeric := 0;
_abbr varchar;
_name varchar;
_school varchar;
begin
for _abbr, _name, _school in
select
department ->> 'value' as _abbr,
(regexp_matches(department ->> 'label', '.+(?=\s\()')) [1] as _name,
department ->> 'school' as _school
from jsonb_array_elements(_json -> 'departments') department
loop
-- get the school id
select id
into _s_id
from schools
where abbreviation = _school and university_id = _university_id;
-- upsert the department
insert into departments (abbreviation, name, school_id) values (_abbr, _name, _s_id)
on conflict on constraint departments_abbreviation_school_id_key do
update set name=_name where departments.abbreviation=_abbr and departments.school_id=_s_id;
_count = _count + 1;
end loop;
return _count;
end;
$func$ language plpgsql;"""))
def downgrade():
pass
| agpl-3.0 | 7,266,190,210,782,691,000 | 23.534483 | 110 | 0.683767 | false |
egan/weinbot | src/Hardware/Alarm.py | 1 | 3813 | ##
# Hardware.py: Class implementing control of WEINBot alarm tones via GPIO.
#
# Copyright 2015, Egan McComb
#
##
import Adafruit_BBIO.GPIO as GPIO
import threading
import time
import logging
class Alarm():
"""
Alarm: A class that provides an interface to control the WEINBot STI
minicontroller alarm and tones (5 bit selector) via GPIO.
http://www.sti-usa.com/pdf/specs/SA5000.pdf
"""
# Alarm tone aliases. See page 5 of the documentation for a list of tones.
aliases = {
"water": 1,
"waste": 3,
"rev": 5,
"gen": 7
}
def __init__(self, pin_enable="P8_26", pins_selector=("P8_21", "P8_22", "P8_23", "P8_24", "P8_25")):
"""
pin_enable: BBB GPIO pin controlling alarm on/off.
pins_selector: BBB GPIO pins for tone selector.
"""
# Set up GPIO.
self.go = False
self.pin_enable = pin_enable
GPIO.setup(pin_enable, GPIO.OUT)
self.pins_selector = pins_selector
for pin in pins_selector:
GPIO.setup(pin, GPIO.OUT)
return None
def __del__(self):
self.stop()
return
def start(self):
"""
start: Start the alarm.
"""
logging.debug("alarm: started")
self.go = True
GPIO.output(self.pin_enable, GPIO.HIGH)
def strobe(self, tone, time_spec):
"""
strobe: Strobe alarm tone according to time.
tone: Tone or alias to produce.
time_spec: List of times for toggle (s).
"""
self.setTone(tone)
t = threading.Thread(target=self.__handler, args=(time_spec,))
t.start()
def __handler(self, time_spec):
logging.debug("alarm: strobe started")
for t in time_spec:
self.toggle()
time.sleep(t)
self.toggle()
logging.debug("alarm: strobe stopped")
def setTone(self, tone):
"""
setTone: Set the alarm tone.
tone: Tone number or alias to produce.
"""
# Determine tone number (decimal).
if tone in self.aliases:
tone = self.aliases[tone]
elif tone in range(1, 33):
tone -= 1
else:
logging.debug("setTone: invalid tone number or alias")
return -1
# Pause alarm if needed.
if self.go:
self.stop()
self.go = True
# Write tone selector bits.
if (tone & 0x1):
GPIO.output(self.pins_selector[0], GPIO.HIGH)
else:
GPIO.output(self.pins_selector[0], GPIO.LOW)
if (tone & 0x2):
GPIO.output(self.pins_selector[1], GPIO.HIGH)
else:
GPIO.output(self.pins_selector[1], GPIO.LOW)
if (tone & 0x4):
GPIO.output(self.pins_selector[2], GPIO.HIGH)
else:
GPIO.output(self.pins_selector[2], GPIO.LOW)
if (tone & 0x8):
GPIO.output(self.pins_selector[3], GPIO.HIGH)
else:
GPIO.output(self.pins_selector[3], GPIO.LOW)
if (tone & 0x10):
GPIO.output(self.pins_selector[4], GPIO.HIGH)
else:
GPIO.output(self.pins_selector[4], GPIO.LOW)
logging.debug("setTone: %d" %(tone))
# Resume alarm if needed.
if self.go:
self.start()
def toggle(self):
"""
toggle: Toggle alarm state.
"""
if self.go:
self.stop()
else:
self.start()
def stop(self):
"""
stop: Stop the alarm.
"""
logging.debug("alarm: stopped")
self.go = False
GPIO.output(self.pin_enable, GPIO.LOW)
| gpl-3.0 | -8,399,425,728,887,080,000 | 24.938776 | 104 | 0.520325 | false |
tianon/moxie | moxie/models.py | 1 | 4573 | # Copyright (c) Paul R. Tagliamonte <[email protected]>, 2015
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import (Table, Column, ForeignKey, UniqueConstraint,
Integer, String, DateTime, Boolean, MetaData,
Interval, DateTime, Text)
metadata = MetaData()
Base = declarative_base(metadata=metadata)
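# Schema overview: a Job references shared EnvSet/VolumeSet/LinkSet collections and a
# Maintainer; each execution is recorded as a Run holding the log and start/end times.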
class Job(Base):
__tablename__ = 'job'
id = Column(Integer, primary_key=True)
name = Column(String(255), unique=True)
description = Column(String(255))
command = Column(String(255))
image = Column(String(255))
scheduled = Column(DateTime)
interval = Column(Interval)
active = Column(Boolean)
manual = Column(Boolean)
env_id = Column(Integer, ForeignKey('env_set.id'))
env = relationship(
"EnvSet",
foreign_keys=[env_id],
backref='jobs'
)
volumes_id = Column(Integer, ForeignKey('volume_set.id'))
volumes = relationship(
"VolumeSet",
foreign_keys=[volumes_id],
backref='jobs'
)
link_id = Column(Integer, ForeignKey('link_set.id'))
links = relationship(
"LinkSet",
foreign_keys=[link_id],
backref='jobs'
)
maintainer_id = Column(Integer, ForeignKey('maintainer.id'))
maintainer = relationship(
"Maintainer",
foreign_keys=[maintainer_id],
backref='jobs'
)
class Maintainer(Base):
__tablename__ = 'maintainer'
id = Column(Integer, primary_key=True)
name = Column(String(255))
email = Column(String(255), unique=True)
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
name = Column(String(255))
email = Column(String(255), unique=True)
fingerprint = Column(String(255), unique=True)
class Run(Base):
__tablename__ = 'run'
id = Column(Integer, primary_key=True)
failed = Column(Boolean)
job_id = Column(Integer, ForeignKey('job.id'))
job = relationship("Job", foreign_keys=[job_id], backref='runs')
log = Column(Text)
start_time = Column(DateTime)
end_time = Column(DateTime)
class LinkSet(Base):
__tablename__ = 'link_set'
id = Column(Integer, primary_key=True)
name = Column(String(255), unique=True)
class Link(Base):
__tablename__ = 'link'
id = Column(Integer, primary_key=True)
link_set_id = Column(Integer, ForeignKey('link_set.id'))
link_set = relationship("LinkSet", foreign_keys=[link_set_id], backref='links')
remote = Column(String(255))
alias = Column(String(255))
class EnvSet(Base):
__tablename__ = 'env_set'
id = Column(Integer, primary_key=True)
name = Column(String(255), unique=True)
class VolumeSet(Base):
__tablename__ = 'volume_set'
id = Column(Integer, primary_key=True)
name = Column(String(255), unique=True)
class Env(Base):
__tablename__ = 'env'
id = Column(Integer, primary_key=True)
env_set_id = Column(Integer, ForeignKey('env_set.id'))
env_set = relationship("EnvSet", foreign_keys=[env_set_id], backref='values')
key = Column(String(255))
value = Column(String(255))
class Volume(Base):
__tablename__ = 'volume'
id = Column(Integer, primary_key=True)
volume_set_id = Column(Integer, ForeignKey('volume_set.id'))
volume_set = relationship("VolumeSet", foreign_keys=[volume_set_id], backref='values')
host = Column(String(255))
container = Column(String(255))
| mit | 3,312,162,262,708,882,000 | 30.321918 | 90 | 0.67002 | false |
bderembl/mitgcm_configs | eddy_iwave/analysis/azimuthal_average.py | 1 | 9428 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import scipy.interpolate as spint
import scipy.spatial.qhull as qhull
import itertools
import MITgcmutils as mit
import f90nml
plt.ion()
def interp_weights(xyz, uvw):
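    # precompute linear (barycentric) interpolation weights from scattered source
    # points xyz to query points uvw: triangulate once, locate the simplex that
    # contains each query point, and return its vertex indices and weights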
naux,d = xyz.shape
tri = qhull.Delaunay(xyz)
simplex = tri.find_simplex(uvw)
vertices = np.take(tri.simplices, simplex, axis=0)
temp = np.take(tri.transform, simplex, axis=0)
delta = uvw - temp[:, d]
bary = np.einsum('njk,nk->nj', temp[:, :d, :], delta)
return vertices, np.hstack((bary, 1 - bary.sum(axis=1, keepdims=True)))
def interpolate(values, vtx, wts, fill_value=np.nan):
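    # weighted sum of the vertex values; query points outside the triangulation
    # (any negative weight) are filled with fill_value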
ret = np.einsum('nj,nj->n', np.take(values, vtx), wts)
ret[np.any(wts < 0, axis=1)] = fill_value
return ret
matplotlib.rcParams['ps.useafm'] = True
matplotlib.rcParams['pdf.use14corefonts'] = True
matplotlib.rcParams['text.usetex'] = True
dir0 = '/run/media/bderembl/girtab/eddy-iwave/run13/'
#dir0 = '/home/bderembl/work/MITgcm/mitgcm_configs/eddy_iwave/run/'
file1 = 'diagU*'
file2 = 'diagV*'
file3 = 'diagSurf*'
file4 = 'U*'
file5 = 'V*'
file6 = 'W*'
#%==================== LOAD GRID ===================================
nml = f90nml.read(dir0+'data')
nmldiag = f90nml.read(dir0+'data.diagnostics')
# load grid
XC = mit.rdmds(dir0+'XC*')
YC = mit.rdmds(dir0+'YC*')
XG = mit.rdmds(dir0+'XG*')
YG = mit.rdmds(dir0+'YG*')
hFacC = mit.rdmds(dir0+'hFacC*')
hFacS = mit.rdmds(dir0+'hFacS*')
hFacW = mit.rdmds(dir0+'hFacW*')
RAS = mit.rdmds(dir0+'RAS*')
RAW = mit.rdmds(dir0+'RAW*')
RAC = mit.rdmds(dir0+'RAC*')
RAZ = mit.rdmds(dir0+'RAZ*')
RC = mit.rdmds(dir0+'RC*')
RF = mit.rdmds(dir0+'RF*')
DRC = mit.rdmds(dir0+'DRC*')
DRF = mit.rdmds(dir0+'DRF*')
Depth = mit.rdmds(dir0+'Depth*')
dt = nml['parm03']['deltat']
f0 = nml['parm01']['f0']
hFacC2 = np.where(hFacC != 1, np.nan,1.0)
hFacS2 = np.where(hFacS != 1, np.nan,1.0)
hFacW2 = np.where(hFacW != 1, np.nan,1.0)
iz = np.argmin(np.abs(RC+np.min(Depth)))
si_z,si_y,si_x = hFacC.shape
Lx = XC[-1,-1] + XC[0,0]
Ly = YC[-1,-1] + YC[0,0]
dx = 2*XC[0,0]
dy = 2*YC[0,0]
xy_g = np.vstack((XG.flatten(), YG.flatten())).T
xy_u = np.vstack((XG.flatten(), YC.flatten())).T
xy_v = np.vstack((XC.flatten(), YG.flatten())).T
xy_c = np.vstack((XC.flatten(), YC.flatten())).T
iters1 = mit.mds.scanforfiles(dir0 + file1)
iters4 = mit.mds.scanforfiles(dir0 + file4)
# ==== eddy parameters (cf. mygendata) ========
x_c = Lx/2
y_c = Ly/2
R0 = 14e3
velmax = 0.1
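# build a polar (r, theta) grid centred on the eddy and precompute interpolation
# weights from each staggered C-grid point set (G, U, V and cell centres) onto it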
rr = np.linspace(0.0,0.5*Lx, np.int(0.5*si_x)+1)
theta = np.linspace(0.0,2*np.pi, np.int(np.pi*Lx/dx))
theta = theta[:-1]
rg,tg = np.meshgrid(rr,theta)
si_t,si_r = rg.shape
dr = rr[1] - rr[0]
rr2 = rr[:-1] + rr[1:]
x_rt = rg*np.cos(tg) + x_c
y_rt = rg*np.sin(tg) + y_c
xy_rt = np.vstack((x_rt.flatten(), y_rt.flatten())).T
vtx_g, wts_g = interp_weights(xy_g, xy_rt)
vtx_u, wts_u = interp_weights(xy_u, xy_rt)
vtx_v, wts_v = interp_weights(xy_v, xy_rt)
vtx_c, wts_c = interp_weights(xy_c, xy_rt)
# grid at U,V,T points
rad_gg = np.sqrt((XG-x_c)**2 + (YG-y_c)**2)
rad_cc = np.sqrt((XC-x_c)**2 + (YC-y_c)**2)
rad_gu = np.sqrt((XG-x_c)**2 + (YC-y_c)**2)
rad_gv = np.sqrt((XC-x_c)**2 + (YG-y_c)**2)
theta_gg = np.arctan2(YG-y_c,XG-x_c)
theta_cc = np.arctan2(YC-y_c,XC-x_c)
theta_gu = np.arctan2(YC-y_c,XG-x_c)
theta_gv = np.arctan2(YG-y_c,XC-x_c)
# vortex
def vel_rankine(rr):
v = -velmax*np.tanh(rr/R0)/(np.cosh(rr/R0))**2/(np.tanh(1.0)/(np.cosh(1.0))**2)
v = np.where(rr == 0, 0.0,v)
return v
#%==================== LOAD FIELDS ===================================
i = 1
udissv = mit.rdmds(dir0 + file1,iters1[i],rec=1)
vdissv = mit.rdmds(dir0 + file2,iters1[i],rec=1)
uvel = mit.rdmds(dir0 + file4,iters4[i])
vvel = mit.rdmds(dir0 + file5,iters4[i])
wvel = mit.rdmds(dir0 + file6,iters4[i])
#uvel0 = mit.rdmds(dir0 + file4,iters4[i-1])
#vvel0 = mit.rdmds(dir0 + file5,iters4[i-1])
ur_me = np.zeros((si_z,si_r))
ut_me = np.zeros((si_z,si_r))
ur_me1 = np.zeros((si_z,si_r))
ut_me1 = np.zeros((si_z,si_r))
w_me1 = np.zeros((si_z,si_r))
#ut0_me = np.zeros((si_z,si_r))
urdissv_me = np.zeros((si_z,si_r))
utdissv_me = np.zeros((si_z,si_r))
stress1 = np.zeros((si_z+1,si_r))
stress2 = np.zeros((si_z,si_r))
stress3 = np.zeros((si_z+1,si_r))
# set topography points to nans
uvel = uvel*hFacW2
vvel = vvel*hFacS2
wvel = wvel*hFacC2
for k in range(0,si_z-1):
udissv[k,:,:] = (udissv[k+1,:,:] - udissv[k,:,:])/(RAW*DRF[k]*hFacW[k,:,:])
vdissv[k,:,:] = (vdissv[k+1,:,:] - vdissv[k,:,:])/(RAS*DRF[k]*hFacS[k,:,:])
udissv[si_z-1,:,:] = 0.0
vdissv[si_z-1,:,:] = 0.0
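# main loop: interpolate each level onto the polar grid, rotate (u, v) into radial
# and azimuthal components, and accumulate azimuthal means and the eddy momentum
# fluxes stress1 ~ -<u_theta' w'> (at cell faces) and stress2 ~ -r <u_theta' u_r'>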
for k in range(0,si_z-1):
uvel_pol = interpolate(uvel[k,:,:], vtx_u, wts_u).reshape((si_t,si_r))
vvel_pol = interpolate(vvel[k,:,:], vtx_v, wts_v).reshape((si_t,si_r))
uvel_pol1 = interpolate(uvel[k+1,:,:], vtx_u, wts_u).reshape((si_t,si_r))
vvel_pol1 = interpolate(vvel[k+1,:,:], vtx_v, wts_v).reshape((si_t,si_r))
wvel_pol1 = interpolate(wvel[k+1,:,:], vtx_c, wts_c).reshape((si_t,si_r))
udissv_pol = interpolate(udissv[k,:,:], vtx_u, wts_u).reshape((si_t,si_r))
vdissv_pol = interpolate(vdissv[k,:,:], vtx_v, wts_v).reshape((si_t,si_r))
# u and v at vertical cell face
uvel_pol1 = 0.5*(uvel_pol + uvel_pol1)
vvel_pol1 = 0.5*(vvel_pol + vvel_pol1)
ur = np.cos(tg)*uvel_pol + np.sin(tg)*vvel_pol
ut = -np.sin(tg)*uvel_pol + np.cos(tg)*vvel_pol
ur1 = np.cos(tg)*uvel_pol1 + np.sin(tg)*vvel_pol1
ut1 = -np.sin(tg)*uvel_pol1 + np.cos(tg)*vvel_pol1
urdissv = np.cos(tg)*udissv_pol + np.sin(tg)*vdissv_pol
utdissv = -np.sin(tg)*udissv_pol + np.cos(tg)*vdissv_pol
ur_me[k,:] = np.nanmean(ur,axis=0)
ut_me[k,:] = np.nanmean(ut,axis=0)
ur_me1[k,:] = np.nanmean(ur1,axis=0)
ut_me1[k,:] = np.nanmean(ut1,axis=0)
w_me1 [k,:] = np.nanmean(wvel_pol1,axis=0)
urdissv_me[k,:] = np.nanmean(urdissv,axis=0)
utdissv_me[k,:] = np.nanmean(utdissv,axis=0)
# uvel_pol = interpolate(uvel0[k,:,:], vtx_u, wts_u).reshape((si_t,si_r))
# vvel_pol = interpolate(vvel0[k,:,:], vtx_v, wts_v).reshape((si_t,si_r))
# ut0 = -np.sin(tg)*uvel_pol + np.cos(tg)*vvel_pol
# ut0_me[k,:] = np.nanmean(ut0,axis=0)
stress1[k+1,:] = -np.nanmean((ut1 - ut_me1[k,:])*(wvel_pol1 - w_me1[k,:]),axis=0)
stress2[k,:] = -np.nanmean(rr.reshape((1,si_r))*(ut - ut_me[k,:])*(ur - ur_me[k,:]),axis=0)
# minus DRF because the vertical diff is taken downward
stressdiv1 = np.diff(stress1,1,0)/(-DRF[:,0,:])
stressdiv2 = 1/rr2.reshape((1,si_r-1))*np.diff(stress2,1,1)/dr
stressdiv = stressdiv1[:,1:] + stressdiv2
dutdz = np.diff(ut_me,1,0)/(-DRF[:-1,0,:])
#================ Plot part ================
def rzplot(psi,*args, **kwargs):
vmax = np.max(np.abs((psi)))
vmax = kwargs.get('vmax', vmax)
vmin = -vmax
psi = np.where(psi<vmin,vmin,psi)
psi = np.where(psi>vmax,vmax,psi)
title = kwargs.get('title',None)
plt.figure()
plt.contourf(rr*1e-3,RC[:,0,0],psi,100,cmap=plt.cm.seismic,vmin=vmin,vmax=vmax,extend='both')
plt.colorbar(format='%.0e')
plt.contour(rr*1e-3,RC[:,0,0],ut_me,np.linspace(-0.2,0.2,17),colors='k',linewidths=0.5)
plt.xlabel('r (km)')
plt.ylabel('z (m)')
plt.title(title)
vmaxall = 3e-7
psi = -f0*ur_me
rzplot(psi,title=r"$-fU_r$ (m\,s$^{-2}$)",vmax=vmaxall)
plt.savefig('ucori.png',bbox_inches='tight')
# psi = rr*ut_me + 0.5*f0*rr**2
# rzplot(psi,title=r"$\lambda$ (m$^2$\,s$^{-1}$)")
# #psi = (ut_me-vel_rankine(rr))/(iters4[1]*dt)
# psi = (ut_me-ut0_me)/((iters4[1]-iters4[0])*dt)
# rzplot(psi,title=r"$du_\theta/dt$ (m\,s$^{-2}$)",vmax=vmaxall)
psi = stressdiv1
rzplot(psi,title=r"$\partial \overline{u'_\theta w'}/\partial z$ (m\,s$^{-2}$)",vmax=vmaxall)
plt.savefig('dupwpdz.png',bbox_inches='tight')
psi = utdissv_me
rzplot(psi,title=r"$\nu d^2 u_\theta/dz^2$ (m\,s$^{-2}$)",vmax=vmaxall)
plt.savefig('uvisc.png',bbox_inches='tight')
# vmin = -3e-7
# vmax = -vmin
# psi = stressdiv1[:iz-1,:]
# psi = np.where(psi<vmin,vmin,psi)
# psi = np.where(psi>vmax,vmax,psi)
# plt.figure()
# plt.contourf(rr*1e-3,RC[:iz-1,0,0],psi,100,cmap=plt.cm.seismic,vmin=vmin,vmax=vmax)
# plt.colorbar(format='%.0e',label='m/s2')
# plt.contour(rr*1e-3,RC[:,0,0],ut_me,5,colors='k',linewidths=0.5)
# plt.xlabel('r (km)')
# plt.ylabel('z (m)')
# plt.title(r"$\partial \overline{u'_\theta w'}/\partial z$")
# vmin = -3e-7
# vmax = -vmin
# #psi = (ut_me[:iz-1,:]-vel_rankine(rr))/(iters1[1]*dt)
# #psi = -(uvel[:iz-1,499:,500]-uvel0[:iz-1,499:,500])/(iters1[1]*dt)
# #psi = -(udiss[:iz-1,499:,500])
# plt.figure()
# plt.contourf(rr*1e-3,RC[:iz-1,0,0],psi,100,cmap=plt.cm.seismic,vmin=vmin,vmax=vmax,extend='both')
# plt.colorbar(format='%.0e',label='m/s2')
# plt.contour(rr*1e-3,RC[:,0,0],ut_me,5,colors='k',linewidths=0.5)
# plt.xlabel('r (km)')
# plt.ylabel('z (m)')
# plt.title(r"$\nu d^2 u_\theta/dz^2$")
# # fit
# # itaudrag = stressdiv1[:iz-1,:]/ut_me[:iz-1,:]
# # # mean over the eddy
# # itaudrag_me = itaudrag[:,50:250].mean(axis=1)
# # #plt.contourf(rr*1e-3,RC[:iz-1,0,0],itaudrag,100,cmap=plt.cm.seismic)
# # #plt.colorbar(format='%.0e',label='1/s')
# # plt.figure();
# # plt.plot(RC[:iz-1,0,0],1/(itaudrag_me*86400))
# # # fit
# # plt.plot(RC[:iz-1,0,0],1/(-0.3*(np.exp((-RC[:iz-1,0,0]-3800)/100)-1.6e-5*RC[:iz-1,0,0])))
# d2udz2 = np.diff(np.diff(uvel,1,0),1,0)/DRF[1:-1]**2
# nu = 1e-2
# plt.figure()
# plt.pcolormesh(nu*d2udz2[:iz-1,499:,500],cmap=plt.cm.seismic,vmin=vmin,vmax=vmax)
# plt.colorbar(format='%.0e',label='m/s2')
| mit | -4,567,548,868,622,510,000 | 28.098765 | 99 | 0.599703 | false |
Grumbel/scatterbackup | scatterbackup/cmd_du.py | 1 | 2219 | # ScatterBackup - A chaotic backup solution
# Copyright (C) 2016 Ingo Ruhnke <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import os
import scatterbackup
import scatterbackup.database
import scatterbackup.config
from scatterbackup.units import bytes2human_decimal
from scatterbackup.util import sb_init
def parse_args():
parser = argparse.ArgumentParser(description='Print disk usage')
parser.add_argument('PATH', action='store', type=str, nargs='*',
help='Path to process')
parser.add_argument('-d', '--database', type=str, default=None,
help="Store results in database")
parser.add_argument('-c', '--config', type=str, default=None,
help="Load configuration file")
parser.add_argument('-s', '--summarize', action='store_true', default=False,
help="Only display summary")
return parser.parse_args()
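# Illustrative invocation (added as an example; the command name is a
# placeholder -- only the flags come from parse_args() above, and the path
# argument is a glob understood by Database.get_by_glob):
#
#   $ <scatterbackup-du> --summarize '/home/user/Photos/**'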
def main():
sb_init()
args = parse_args()
cfg = scatterbackup.config.Config()
cfg.load(args.config)
db = scatterbackup.database.Database(args.database or scatterbackup.util.make_default_database())
file_count = 0
total_bytes = 0
for path in args.PATH:
path = os.path.abspath(path)
print(path)
for fileinfo in db.get_by_glob(path):
if not args.summarize:
print("{:10} {}".format(fileinfo.size, fileinfo.path))
file_count += 1
total_bytes += fileinfo.size
print("Total: {} in {} files".format(bytes2human_decimal(total_bytes), file_count))
# EOF #
| gpl-3.0 | 2,194,490,886,791,235,600 | 33.138462 | 101 | 0.671474 | false |
thefirstwind/s3qloss | src/s3ql/statfs.py | 1 | 2913 | '''
statfs.py - this file is part of S3QL (http://s3ql.googlecode.com)
Copyright (C) 2008-2009 Nikolaus Rath <[email protected]>
This program can be distributed under the terms of the GNU GPLv3.
'''
from __future__ import division, print_function, absolute_import
from .common import CTRL_NAME, QuietError, setup_logging
from .parse_args import ArgumentParser
import llfuse
import logging
import os
import posixpath
import struct
import sys
log = logging.getLogger("stat")
def parse_args(args):
'''Parse command line'''
parser = ArgumentParser(
description="Print file system statistics.")
parser.add_debug()
parser.add_quiet()
parser.add_version()
parser.add_argument("mountpoint", metavar='<mountpoint>',
type=(lambda x: x.rstrip('/')),
help='Mount point of the file system to examine')
return parser.parse_args(args)
def main(args=None):
'''Print file system statistics to sys.stdout'''
if args is None:
args = sys.argv[1:]
options = parse_args(args)
setup_logging(options)
mountpoint = options.mountpoint
# Check if it's a mount point
if not posixpath.ismount(mountpoint):
raise QuietError('%s is not a mount point' % mountpoint)
# Check if it's an S3QL mountpoint
ctrlfile = os.path.join(mountpoint, CTRL_NAME)
if not (CTRL_NAME not in llfuse.listdir(mountpoint)
and os.path.exists(ctrlfile)):
raise QuietError('%s is not a mount point' % mountpoint)
if os.stat(ctrlfile).st_uid != os.geteuid() and os.geteuid() != 0:
raise QuietError('Only root and the mounting user may run s3qlstat.')
    # Use a decent-sized buffer, otherwise the statistics have to be
    # calculated three(!) times because we need to invoke getxattr
    # three times.
buf = llfuse.getxattr(ctrlfile, b's3qlstat', size_guess=256)
(entries, blocks, inodes, fs_size, dedup_size,
compr_size, db_size) = struct.unpack('QQQQQQQ', buf)
p_dedup = dedup_size * 100 / fs_size if fs_size else 0
p_compr_1 = compr_size * 100 / fs_size if fs_size else 0
p_compr_2 = compr_size * 100 / dedup_size if dedup_size else 0
mb = 1024 ** 2
print ('Directory entries: %d' % entries,
'Inodes: %d' % inodes,
'Data blocks: %d' % blocks,
'Total data size: %.2f MiB' % (fs_size / mb),
'After de-duplication: %.2f MiB (%.2f%% of total)'
% (dedup_size / mb, p_dedup),
'After compression: %.2f MiB (%.2f%% of total, %.2f%% of de-duplicated)'
% (compr_size / mb, p_compr_1, p_compr_2),
'Database size: %.2f MiB (uncompressed)' % (db_size / mb),
'(some values do not take into account not-yet-uploaded dirty blocks in cache)',
sep='\n')
if __name__ == '__main__':
main(sys.argv[1:])
| gpl-3.0 | 4,703,652,213,013,564,000 | 33.270588 | 91 | 0.621009 | false |
stadtgestalten/stadtgestalten | grouprise/features/polls/forms.py | 1 | 6496 | import django
from django import forms
from django.core.exceptions import ObjectDoesNotExist
from grouprise.features.content import forms as content
from grouprise.features.content.models import Content
from . import models
SimpleOptionFormSet = forms.modelformset_factory(
models.SimpleOption, fields=('title',), labels={'title': 'Antwort'}, min_num=1,
validate_min=True, can_delete=True)
EventOptionFormSet = forms.modelformset_factory(
models.EventOption, fields=('time', 'until_time'),
labels={'time': 'Datum / Zeit', 'until_time': 'Ende'},
min_num=1, validate_min=True, can_delete=True)
class OptionMixin:
def is_valid(self):
return super().is_valid() and self.options.is_valid()
def save_content_relations(self, commit):
# FIXME: remove when django bug #28988 is fixed
if not self.instance.container.poll:
self.instance.container.poll = models.WorkaroundPoll.objects.create()
self.instance.container.save()
for form in self.options.forms:
form.instance.poll = self.instance.container.poll
self.options.save(commit)
class Create(OptionMixin, content.Create):
# FIXME: replace by models.Poll when django bug #28988 is fixed
container_class = Content
text = forms.CharField(label='Beschreibung / Frage', widget=forms.Textarea({'rows': 2}))
poll_type = forms.ChoiceField(
label='Art der Antwortmöglichkeiten',
choices=[('simple', 'einfacher Text'), ('event', 'Datum / Zeit')],
initial='simple', widget=forms.Select({'data-poll-type': ''}))
vote_type = forms.ChoiceField(
label='Art der Abstimmmöglichkeiten',
choices=[('simple', 'Ja/Nein/Vielleicht'),
('condorcet', 'Stimmen ordnen (rangbasiert)')],
initial='simple', widget=forms.Select({'data-poll-vote-type': ''}))
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.is_type_change = self.data.get('change_type')
# init options
if self.data.get('poll_type') == 'event':
self.options = EventOptionFormSet(
data=kwargs.get('data'), queryset=models.EventOption.objects.none())
else:
self.options = SimpleOptionFormSet(
data=kwargs.get('data'), queryset=models.SimpleOption.objects.none())
self.options.extra = 0
# permit empty form in case of type change
if self.is_type_change:
self.empty_permitted = True
for form in self.options.forms:
form.empty_permitted = True
def is_valid(self):
# prevent saving in case of type change
return False if self.is_type_change else super().is_valid()
def save(self, commit=True):
association = super().save(commit)
association.container.poll.condorcet = self.cleaned_data['vote_type'] == 'condorcet'
association.container.poll.save()
if commit:
self.send_post_create()
return association
class Update(OptionMixin, content.Update):
text = forms.CharField(label='Beschreibung / Frage', widget=forms.Textarea({'rows': 2}))
poll_type = forms.CharField(widget=forms.HiddenInput({'data-poll-type': ''}))
vote_type = forms.CharField(widget=forms.HiddenInput({'data-poll-vote-type': ''}))
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
poll = self.instance.container.poll
self.fields['vote_type'].initial = str(poll.vote_type)
try:
models.Option.objects.filter(poll=poll).first().eventoption
self.options = EventOptionFormSet(
data=kwargs.get('data'),
queryset=models.EventOption.objects.filter(poll=poll))
self.fields['poll_type'].initial = 'event'
except ObjectDoesNotExist:
self.options = SimpleOptionFormSet(
data=kwargs.get('data'),
queryset=models.SimpleOption.objects.filter(poll=poll))
self.fields['poll_type'].initial = 'simple'
self.options.extra = 0
def get_initial_for_field(self, field, field_name):
if field_name == 'poll_type':
return {
EventOptionFormSet: 'event',
SimpleOptionFormSet: 'simple'
}[type(self.options)]
else:
return super().get_initial_for_field(field, field_name)
SimpleVoteFormSet = forms.modelformset_factory(
models.SimpleVote, fields=('endorse',), labels={'endorse': 'Zustimmung'},
widgets={'endorse': forms.RadioSelect(
choices=[(True, 'Ja'), (False, 'Nein'), (None, 'Vielleicht')])})
CondorcetVoteFormSet = forms.modelformset_factory(
models.CondorcetVote, fields=('rank',), labels={'rank': 'Rang / Platz'})
class Vote(forms.ModelForm):
class Meta:
model = models.SimpleVote
fields = ('anonymous',)
labels = {'anonymous': 'Name/Alias'}
def __init__(self, poll, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.instance.voter and self.instance.voter.user.is_authenticated:
del self.fields['anonymous']
else:
self.fields['anonymous'].required = True
self.poll = poll
options = poll.options.all()
if self.poll.condorcet:
self.votes = CondorcetVoteFormSet(
data=kwargs.get('data'), queryset=models.SimpleVote.objects.none())
else:
self.votes = SimpleVoteFormSet(
data=kwargs.get('data'), queryset=models.SimpleVote.objects.none())
self.votes.extra = len(options)
for i, form in enumerate(self.votes.forms):
form.instance.option = options[i]
def clean_anonymous(self):
anon = self.cleaned_data['anonymous']
if models.SimpleVote.objects.filter(option__poll=self.poll, anonymous=anon).exists():
raise django.forms.ValidationError('%s hat bereits abgestimmt.' % anon)
return anon
def is_valid(self):
return super().is_valid() and self.votes.is_valid()
def save(self, commit=True):
vote = super().save(False)
for form in self.votes.forms:
form.instance.anonymous = self.instance.anonymous
form.instance.voter = self.instance.voter
form.save(commit)
return vote
| agpl-3.0 | 1,499,144,372,932,176,100 | 38.120482 | 93 | 0.615491 | false |
sdenton4/pipic | loadout.py | 1 | 2850 | #!/usr/bin/python
import sys, getopt
import subprocess
import random
import os, shutil
usageString='loadOut.py -r <target root directory> -n <hostname>'
def placefile( filename, targetdir ):
command='cp '+filename+' '+targetdir
subprocess.call(command, shell=True)
hostcolors=['red','blue','green','yellow','orange','purple','brown',]
hostcolors+=['white','black','cyan','burgundy','vermillion','aqua','maroon',]
hostcolors+=['teal','taupe','neon']
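# Note (added for clarity): the hostname doubles as a static-IP assignment --
# main() writes "address 192.168.0.<index in hostcolors>" into etc/networks,
# so e.g. 'blue' (index 1) becomes 192.168.0.1.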
def main(argv):
try:
opts, args = getopt.getopt(argv,"hr:n:", ["root=", "hostname=",])
except getopt.GetoptError:
print usageString
sys.exit(2)
#default value for pi root is None
piroot=None
hostname=""
for opt, arg in opts:
if opt == '-h':
print usageString
sys.exit()
elif opt in ("-r", "--root"):
print opt, arg.strip()
piroot = arg.strip()
            if piroot[-1]!='/': piroot += '/'
try:
os.listdir(piroot)
except:
print "Root directory not found."
sys.exit(2)
elif opt in ("-n", "--hostname"):
hostname = arg.strip()
if hostname not in hostcolors:
print "Not a defined hostname. Try one of these, or specify none and I'll pick one at random:"
print hostcolors
return False
    if hostname=="": hostname=random.choice(hostcolors)
#Place configuration files and scripts.
files={
'config/crontab': 'etc/crontab',
'config/networks': 'etc/networks',
'config/bash.bashrc': 'etc/bash.bashrc',
'config/wpa_supplicant.conf': 'etc/wpa_supplicant/wpa_supplicant.conf',
'photoscript.py': 'home/pi/photoscript.py',
'timelapse.py': 'home/pi/timelapse.py',
'deflicker.py': 'home/pi/deflicker.py',
'tempgauge.py': 'home/pi/tempgauge.py',
}
for f in files.keys():
placefile(f, piroot + files[f])
#Copy over program archives.
for x in os.listdir('archives'):
shutil.copy('archives/'+x, piroot+'var/cache/apt/archives/')
#Write network config for eth0.
f=open(piroot+'etc/networks', 'a')
f.write('\n\n iface eth0 inet static\n')
f.write(' address 192.168.0.'+str(hostcolors.index(hostname))+'\n')
f.write(' netmask 255.255.255.0\n')
f.write(' gateway 192.168.0.254\n')
f.close()
#Change time zone.
f=open(piroot+'etc/timezone', 'w')
f.write('America/Toronto\n')
f.close()
#Change hostname.
f=open(piroot+'etc/hostname', 'w')
f.write(hostname+'\n')
f.close()
#Make pictures directory.
try:
subprocess.call('mkdir '+piroot+'home/pi/pictures', shell=True)
except:
pass
if __name__ == "__main__":
main(sys.argv[1:])
| gpl-2.0 | 5,719,356,166,680,423,000 | 29.645161 | 107 | 0.583509 | false |
junpenglao/GLMM-in-Python | misc/pymc3_hierarchical_example.py | 1 | 2566 | # -*- coding: utf-8 -*-
"""
example from pymc-dev
"""
import pymc3 as pm
import theano.tensor as T
from numpy import random, sum as nsum, ones, concatenate, newaxis, dot, arange
import numpy as np
random.seed(1)
n_groups = 10
no_pergroup = 30
n_observed = no_pergroup * n_groups
n_group_predictors = 1
n_predictors = 3
group = concatenate([[i] * no_pergroup for i in range(n_groups)])
group_predictors = random.normal(size=(n_groups, n_group_predictors)) # random.normal(size = (n_groups, n_group_predictors))
predictors = random.normal(size=(n_observed, n_predictors))
group_effects_a = random.normal(size=(n_group_predictors, n_predictors))
effects_a = random.normal(
size=(n_groups, n_predictors)) + dot(group_predictors, group_effects_a)
y = nsum(
effects_a[group, :] * predictors, 1) + random.normal(size=(n_observed))
model = pm.Model()
with model:
# m_g ~ N(0, .1)
group_effects = pm.Normal(
"group_effects", 0, .1, shape=(n_group_predictors, n_predictors))
gp = pm.Normal("gp", 0, .1, shape=(n_groups,1))
# gp = group_predictors
# sg ~ Uniform(.05, 10)
sg = pm.Uniform("sg", .05, 10, testval=2.)
# m ~ N(mg * pg, sg)
effects = pm.Normal("effects",
T.dot(gp, group_effects), sg ** -2,
shape=(n_groups, n_predictors))
s = pm.Uniform("s", .01, 10, shape=n_groups)
g = T.constant(group)
# y ~ Normal(m[g] * p, s)
mu_est = pm.Deterministic("mu_est",T.sum(effects[g] * predictors, 1))
yd = pm.Normal('y',mu_est , s[g] ** -2, observed=y)
start = pm.find_MAP()
#h = find_hessian(start)
step = pm.NUTS(model.vars, scaling=start)
with model:
trace = pm.sample(3000, step, start)
#%%
pm.traceplot(trace)
dftmp = pm.df_summary(trace,varnames=['group_effects'])
print(dftmp['mean'])
import statsmodels.formula.api as smf
# from patsy import dmatrices
import pandas as pd
tbl = pd.DataFrame(predictors,columns=['C1','C2','C3'])
tbl['group'] = pd.Series(group, dtype="category")
tbl['yd'] = y
md2 = smf.mixedlm("yd ~ -1 + C1 + C2 + C3", tbl, groups=tbl["group"])
mdf2= md2.fit()
print(mdf2.summary())
#%%
X = np.tile(group_predictors[group],(1,3)) * predictors
beta0 = np.linalg.lstsq(X,y)
fitted = np.dot(X,beta0[0])
import matplotlib.pyplot as plt
plt.figure()
plt.plot(y,'k')
plt.plot(fitted,'g')
dftmp = pm.df_summary(trace[1000:],varnames=['mu_est'])
testdf = np.asarray(dftmp['mean'])
plt.plot(testdf,'r')
plt.legend(['observed',str(np.mean(np.square(y-fitted))),str(np.mean(np.square(y-testdf)))])
| gpl-3.0 | 3,814,466,537,459,863,000 | 27.511111 | 125 | 0.63523 | false |
jddixon/rnglib | setup.py | 1 | 1249 | #!/usr/bin/python3
# rnglib/setup.py
""" Setuptools project configuration for rnglib. """
from os.path import exists
from setuptools import setup
long_desc = None
if exists('README.md'):
with open('README.md', 'r') as file:
long_desc = file.read()
setup(name='rnglib',
version='1.3.10',
author='Jim Dixon',
author_email='[email protected]',
long_description=long_desc,
packages=['rnglib'],
package_dir={'': 'src'},
py_modules=[],
include_package_data=False,
zip_safe=False,
scripts=[],
ext_modules=[],
description='random number generator library',
url='https://jddixon.github.io/rnglib',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python 2',
'Programming Language :: Python 2.7',
'Programming Language :: Python 3',
'Programming Language :: Python 3.5',
'Programming Language :: Python 3.6',
'Topic :: Software Development :: Libraries :: Python Modules',
],)
| mit | 6,274,665,253,406,631,000 | 30.225 | 73 | 0.590072 | false |
eduNEXT/edunext-ecommerce | ecommerce/enterprise/views.py | 1 | 3252 | # TODO: Refactor this to consolidate it with `ecommerce.programs.views`.
from __future__ import absolute_import
import logging
from django.contrib import messages
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from django.views.generic import CreateView, ListView, TemplateView, UpdateView
from oscar.core.loading import get_model
from ecommerce.core.views import StaffOnlyMixin
from ecommerce.enterprise.forms import EnterpriseOfferForm
from ecommerce.enterprise.utils import get_enterprise_customer
Benefit = get_model('offer', 'Benefit')
ConditionalOffer = get_model('offer', 'ConditionalOffer')
logger = logging.getLogger(__name__)
class EnterpriseOfferViewMixin(StaffOnlyMixin):
model = ConditionalOffer
def get_context_data(self, **kwargs):
context = super(EnterpriseOfferViewMixin, self).get_context_data(**kwargs)
context['admin'] = 'enterprise_offers'
return context
def get_queryset(self):
return super(EnterpriseOfferViewMixin, self).get_queryset().filter(
partner=self.request.site.siteconfiguration.partner,
condition__enterprise_customer_uuid__isnull=False,
offer_type=ConditionalOffer.SITE
)
class EnterpriseOfferProcessFormViewMixin(EnterpriseOfferViewMixin):
form_class = EnterpriseOfferForm
success_message = _('Enterprise offer updated!')
def get_form_kwargs(self):
kwargs = super(EnterpriseOfferProcessFormViewMixin, self).get_form_kwargs()
kwargs.update({'request': self.request})
return kwargs
def get_context_data(self, **kwargs):
context = super(EnterpriseOfferProcessFormViewMixin, self).get_context_data(**kwargs)
context.update({
'editing': False,
})
return context
def get_success_url(self):
messages.add_message(self.request, messages.SUCCESS, self.success_message)
return reverse('enterprise:offers:edit', kwargs={'pk': self.object.pk})
class EnterpriseOfferCreateView(EnterpriseOfferProcessFormViewMixin, CreateView):
initial = {
'benefit_type': Benefit.PERCENTAGE,
}
success_message = _('Enterprise offer created!')
template_name = 'enterprise/enterpriseoffer_form.html'
class EnterpriseOfferUpdateView(EnterpriseOfferProcessFormViewMixin, UpdateView):
template_name = 'enterprise/enterpriseoffer_form.html'
def get_context_data(self, **kwargs):
context = super(EnterpriseOfferUpdateView, self).get_context_data(**kwargs)
context.update({
'editing': True,
'enterprise_customer': get_enterprise_customer(
self.request.site,
self.object.condition.enterprise_customer_uuid
)
})
return context
class EnterpriseOfferListView(EnterpriseOfferViewMixin, ListView):
template_name = 'enterprise/enterpriseoffer_list.html'
class EnterpriseCouponAppView(StaffOnlyMixin, TemplateView):
template_name = 'enterprise/enterprise_coupon_app.html'
def get_context_data(self, **kwargs):
context = super(EnterpriseCouponAppView, self).get_context_data(**kwargs)
context['admin'] = 'enterprise_coupons'
return context
| agpl-3.0 | 7,264,125,542,828,652,000 | 34.736264 | 93 | 0.714637 | false |
gyoerkaa/mdltools | neverblender/nvb_aabb.py | 1 | 4220 | """TODO: DOC."""
import mathutils
def generate_tree(aabb_tree, face_list, rlevel=0):
"""TODO: DOC."""
if (rlevel > 128):
        print('Neverblender - ERROR: Could not generate aabb. \
Recursion level exceeds 128')
aabb_tree = []
return
if not face_list:
# We are finished with the generation
return
# Calculate Bounding box centers and min/max coordinates
bb_min = mathutils.Vector((100000.0, 100000.0, 100000.0))
bb_max = mathutils.Vector((-100000.0, -100000.0, -100000.0))
bb_avgcentroid = mathutils.Vector((0.0, 0.0, 0.0))
for face in face_list:
face_vertices = face[1]
# Every vertex in the face
for vertex in face_vertices:
# We have to check 2x3 coordinates (min and max)
for ax in range(3):
# First the min
if bb_min[ax] > vertex[ax]:
bb_min[ax] = vertex[ax]
# Then the max
if bb_max[ax] < vertex[ax]:
bb_max[ax] = vertex[ax]
face_centroid = face[2]
bb_avgcentroid = bb_avgcentroid + face_centroid
bb_avgcentroid = bb_avgcentroid / len(face_list)
# bb_centroid = (bb_min + bb_max) / 2
if (len(face_list) == 1):
# Only one face left in face list
# This node is a leaf, save the face in the leaf
linked_face_idx = face_list[0][0]
aabb_treenode = [bb_min.x, bb_min.y, bb_min.z,
bb_max.x, bb_max.y, bb_max.z,
linked_face_idx]
aabb_tree.append(aabb_treenode)
else:
# This is a node in the tree
linked_face_idx = -1 # -1 indicates nodes
aabb_treenode = [bb_min.x, bb_min.y, bb_min.z,
bb_max.x, bb_max.y, bb_max.z,
linked_face_idx]
aabb_tree.append(aabb_treenode)
# Size of bounding box
bb_size = bb_max - bb_min
# Longest axis of bounding box
split_axis = 0 # x
if (bb_size.y > bb_size.x):
split_axis = 1 # y
if (bb_size.z > bb_size.y):
split_axis = 2 # z
# Change axis in case points are coplanar with
# the split plane
change_axis = True
for face in face_list:
face_centroid = face[2]
change_axis = change_axis and \
(face_centroid[split_axis] == bb_avgcentroid[split_axis])
if (change_axis):
split_axis += 1
if (split_axis >= 3):
split_axis = 0
# Put items on the left- and rightside of the splitplane
# into sperate lists
face_list_left = []
face_list_right = []
found_split = False
tested_axes = 1
while not found_split:
# Sort faces by side
face_list_left = []
face_list_right = []
leftside = True
for face in face_list:
face_centroid = face[2]
leftside = \
(face_centroid[split_axis] < bb_avgcentroid[split_axis])
if leftside:
face_list_left.append(face)
else:
face_list_right.append(face)
# Try to prevent tree degeneration
if (face_list_left) and (face_list_right):
# Neither list is empty, this split will do just fine
found_split = True
else:
# At least one of the list is empty
# Try another axis to prevent degeneration
tested_axes += 1
split_axis += 1
if (split_axis >= 3):
split_axis = 0
if (tested_axes >= 3):
# We have tried all axes, but
# the tree degenerates with each of them
# Just take the degenerate one
print('WARNING: Error generating aabb. Split problem.')
aabb_tree = []
return
generate_tree(aabb_tree, face_list_left, rlevel+1)
generate_tree(aabb_tree, face_list_right, rlevel+1)
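# Illustrative call (added; the single triangle below is made up for the
# example, vertices and centroid are mathutils.Vector as in the exporter):
#
#   verts = [mathutils.Vector((0.0, 0.0, 0.0)),
#            mathutils.Vector((1.0, 0.0, 0.0)),
#            mathutils.Vector((0.0, 1.0, 0.0))]
#   centroid = (verts[0] + verts[1] + verts[2]) / 3
#   tree = []
#   generate_tree(tree, [(0, verts, centroid)])
#   # tree == [[0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0]]  -- a single leaf node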
| gpl-2.0 | -1,740,367,169,041,450,500 | 33.308943 | 76 | 0.50237 | false |
janhui/test_engine | release/mesh_netcdf/PreMeshingFunctions.py | 1 | 9107 | """
This class includes all of the functions that are used by MeshOperations.py for the creation of the mesh.
"""
from StandardModules import *
import sys
from PyQt4.QtGui import *
from export_geo import *
class PreMesh(DefineDomain):
"""
    Once the user has selected something from the drop-downs and clicked OK, these functions
    retrieve the layer names as well as the source files of the layers.
"""
def getNetCDFDropDownOptions(self):
self.singleNetCDFLayerText = self.dlg.ui.singleNetCDFLayerDropDown.currentText()
self.singleNetCDFLayerIndex = self.dlg.ui.singleNetCDFLayerDropDown.findText(self.singleNetCDFLayerText)
self.singleNetCDFLayerFileName = self.dlg.ui.singleNetCDFLayerDropDown.itemData(self.singleNetCDFLayerIndex).toString()
def getShapeDropDownOptions(self):
self.domainShapefileLayerText = self.dlg.ui.domainShapefileLayerDropDown.currentText()
self.domainShapefileLayerIndex = self.dlg.ui.domainShapefileLayerDropDown.findText(self.domainShapefileLayerText)
self.domainShapefileLayerFileName = self.dlg.ui.domainShapefileLayerDropDown.itemData(self.domainShapefileLayerIndex).toString()
def getMeshingAlgorithm(self):
self.meshingAlgorithmText = self.dlg.ui.meshingAlgorithmDropDown.currentText()
"""
Uses getGeoFile to convert the given domain Shapefile layer into a .geo file and edits its name.
"""
def convertShape(self):
getGeoFile(str(self.domainShapefileLayerFileName), str(self.domainShapefileLayerFileName[:-4]))
self.geoFileName = '%s.geo' % self.domainShapefileLayerFileName[:-4]
def define_bounds(self, ok):
DefineDomain.define_bounds(self, ok)
'''
    Runs all the modules for ID definition and runs an export module to create the .geo file.
    Organises all the data for the ID definitions and the export; exports to either a sphere
    or a plane.
'''
def runIdDef(self):
self.defID = int(str(self.dlg.ui.Default_Id.text()))
self.domainSavePath = '%s_idBoundary' % self.domainShapefileLayerFileName[:-4]
self.domainText = self.domainShapefileLayerFileName[:-4]
idText = self.dlg.ui.IdDropdown.currentText()
idIndex = self.dlg.ui.IdDropdown.findText(idText)
self.idFilePath = self.dlg.ui.IdDropdown.itemData(idIndex).toString()
self.threshold = 0.0
if self.dlg.ui.define_th.isChecked():
self.threshold = float(str(self.dlg.ui.Threshold.text()))
self.define_bounds(self.dlg.ui.grpDefID.isChecked())
# Write the Geo.
data = [self.domainData.regionIDs,self.domainData.shapes,self.boundaryIDList,self.domainData.points]
write_geo_file(self.domainSavePath,data)
"""
Retrieve the information from the drop-down boxes.
"""
def getFiles(self):
if self.dlg.ui.singleNetCDFChooseFilesRadioButton.isChecked():
self.singleNetCDFLayerFileName = self.dlg.ui.singleNetCDFChooseFilesLineEdit.text()
if ".nc" in str(self.singleNetCDFLayerFileName):
self.singleNetCDFLayerFileName = '%s' % self.singleNetCDFLayerFileName
else:
self.singleNetCDFLayerFileName = '%s.nc' % self.singleNetCDFLayerFileName
else:
self.getNetCDFDropDownOptions()
self.postviewFileName = '%s_meshing_posfile.pos' % self.singleNetCDFLayerFileName[:-3]
if self.dlg.ui.chooseGeoFileRadioButton.isChecked():
self.geoFileName = self.dlg.ui.chooseGeoFileLineEdit.text()
else:
self.getShapeDropDownOptions()
self.runIdDef()
self.geoFileName = '%s.geo' % self.domainSavePath
"""
    Generates a PostView file for use as a mesh-size metric for planar domains. The three
    nested functions handle the three types of coordinate system used in NetCDF files:
    lat-lon, x-y, and x/y start/stop with x/y step.
"""
def writePosFile(self):
input_file = str(self.singleNetCDFLayerFileName)
output_file = str(self.postviewFileName)
# Lon-lat.
def create_pos(netcdf_file):
file = NetCDF.NetCDFFile(netcdf_file, 'r')
lon = file.variables['lon'][:]
lat = file.variables['lat'][:]
field = file.variables['z'][:, :]
pos_string = """View "background_edgelength" {\n"""
for i in range(0,len(lon)):
for j in range(0,len(lat)):
lat_p1 = lat[j]
lon_p1 = lon[i]
depth = abs(field[j][i])
# If a NetCDF has 0 value elements Gmsh will attempt to create an impossibly small mesh resulting in slow
# operation. This ensures that the .pos file created is usable.
if depth == 0:
depth = 0.001
line = "SP("+str(lon_p1)+","+str(lat_p1)+",0){"+str(depth)+"};\n"
pos_string = pos_string+line
pos_string = pos_string+"};"
return pos_string
# X/Y range.
def create_pos_xyrange(netcdf_file):
file = NetCDF.NetCDFFile(netcdf_file, 'r')
xMin = file.variables['x_range'][0]; xMax = file.variables['x_range'][1]
yMin = file.variables['y_range'][0]; yMax = file.variables['y_range'][1]
xSpace = file.variables['spacing'][0]; ySpace = file.variables['spacing'][1]
field = file.variables['z']
pos_string = """View "background_edgelength" {\n"""
y = yMax; count = 0; step = 1
xList = linspace(xMin, xMax, (1/xSpace)); yList = linspace(yMin, yMax, (1/ySpace))
while y >= yMin:
x = xMin
while x <= xMax and count < len(field):
depth = abs(field[count])
if depth == 0:
depth = 0.001
line = "SP("+str(x)+","+str(y)+",0){"+str(depth)+"};\n"
pos_string = pos_string+line
x += step*xSpace; count += step
y -= step*ySpace
pos_string = pos_string+"};"
return pos_string
# X-Y.
def create_pos_xy(netcdf_file):
# read netcdf file
file = NetCDF.NetCDFFile(netcdf_file, 'r')
x = file.variables['x'][:]
y = file.variables['y'][:]
field = file.variables['z'][:, :]
pos_string = """View "background_edgelength" {\n"""
for i in range(len(x)):
for j in range(len(y)):
y_p1 = y[j]
x_p1 = x[i]
depth = abs(field[j][i])
if depth == 0:
depth = 0.001
line = "SP("+str(x_p1)+","+str(y_p1)+",0){"+str(depth)+"};\n"
pos_string = pos_string+line
pos_string = pos_string+"};"
return pos_string
print "Writing PostView File..."
# Check the file variables so that the appropriate function can be called.
file = NetCDF.NetCDFFile(input_file, 'r')
variableNames = file.variables.keys()
if 'lon' in variableNames:
pos_string = create_pos(input_file)
elif 'x_range' in variableNames:
pos_string = create_pos_xyrange(input_file)
elif 'x' in variableNames:
pos_string = create_pos_xy(input_file)
else:
raise ErrorMessages.UnsuportedRasterVariableError(variableNames) #should work
f = open(output_file,'w')
f.write(pos_string)
f.close()
print "PostView File Written."
"""
Not in use. This functionality is now possible within RasterCalc.
Performed the calculation of the minimum of multiple NetCDF files using grdmath and imported the resulting file into QGIS
    in pseudocolour.
"""
def calculateMinimum(self):
# Get all of the active NetCDF layers.
self.activeNetCDFs = []
for layer in self.activeLayers:
if '.nc' in str(layer.source()):
self.activeNetCDFs.append([layer.name(), QVariant(str(layer.source()))])
for i in range(len(list(self.activeNetCDFs)) - 1):
# For the first iteration we need to use the top layer and the layer below and output to /tmp/tmp.tif.
if i == 0:
# Min of overlapping regions.
call (["/usr/lib/gmt/bin/grdmath", str(list(self.activeNetCDFs)[i][1].toString()), str(list(self.activeNetCDFs)[i + 1][1].toString()) \
, "MIN", "=", "/tmp/tmp.tif"])
# After the first iteration we want to use the newly created tmp file and the next layer down.
if i > 0 and i < range(len(list(self.activeNetCDFs)) - 1)[-1]:
# Min of the newly created tmp and the next layer.
call (["/usr/lib/gmt/bin/grdmath", "/tmp/tmp.tif", str(list(self.activeNetCDFs)[i + 1][1].toString()) \
, "MIN", "=", "/tmp/tmp.tif"])
# For the last iteration we need to convert the .tif to a .nc with the correct filename rather than tmp.tif. Uses the bottom layers name
# plus -minimum.nc.
if i == range(len(list(self.activeNetCDFs)) - 1)[-1]:
saveName = str(list(self.activeNetCDFs)[i + 1][1].toString())
saveName = saveName.replace(".nc", "-minimum.nc")
call (["/usr/lib/gmt/bin/grdmath", "/tmp/tmp.tif", str(list(self.activeNetCDFs)[i + 1][1].toString()) \
, "MIN", "=", saveName])
# If check box is selected it will add the layer to canvas as pseudocolour.
if self.dlg.ui.addLayerToCanvasCheckBox.isChecked():
# Add the layer and convert it to pseudocolour.
fileInfo = QFileInfo(saveName)
baseName = fileInfo.baseName()
self.iface.addRasterLayer(saveName, baseName)
self.qgisCanvas = qgis.utils.iface.mapCanvas()
self.activeLayers = self.qgisCanvas.layers()
for layer in self.activeLayers:
if saveName in str(layer.source()):
layer.setDrawingStyle(QgsRasterLayer.SingleBandPseudoColor)
layer.setColorShadingAlgorithm(QgsRasterLayer.PseudoColorShader)
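# Note (added for clarity): writePosFile() above picks its reader from the
# variables present in the NetCDF file -- 'lon'/'lat', 'x_range'/'y_range'/
# 'spacing', or 'x'/'y' -- and writes Gmsh "SP(x,y,0){size};" records,
# clamping zero depths to 0.001 so Gmsh never receives a zero mesh size.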
| lgpl-2.1 | 4,693,597,142,777,207,000 | 36.264706 | 148 | 0.679807 | false |
smahs/euler-py | 25.py | 1 | 1244 | #!/usr/bin/python2
"""
Statement:
The Fibonacci sequence is defined by the recurrence relation:
Fn = F(n-1) + F(n-2), where F1 = 1 and F2 = 1.
Hence the first 12 terms will be:
F1 = 1
F2 = 1
F3 = 2
F4 = 3
F5 = 5
F6 = 8
F7 = 13
F8 = 21
F9 = 34
F10 = 55
F11 = 89
F12 = 144
The 12th term, F12, is the first term to contain three digits.
What is the index of the first term in the Fibonacci sequence
to contain 1000 digits?
"""
from unittest import TestCase, main
from decimal import Decimal
class Problem25(object):
def __init__(self, bound):
self.bound = bound
def fibn(self, n):
n = Decimal(n)
sqroot = Decimal(0.5)
return int(((1 + (5**sqroot)) ** n - (1 - (5**sqroot)) ** n) /
((2 ** n) * 5**sqroot))
def fn(self):
counter, length = 0, 0
while length < self.bound:
counter += 1
length = len(str(self.fibn(counter)))
return counter
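# Editor's sketch: an iterative cross-check of the Binet closed form used in
# Problem25.fibn(); not referenced by the tests below, included for clarity.
def first_index_with_digits(bound):
    """Return the index of the first Fibonacci term with `bound` digits."""
    previous, current, index = 1, 1, 2
    while len(str(current)) < bound:
        previous, current, index = current, previous + current, index + 1
    return index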
class TestProblem25(TestCase):
def setUp(self):
self.bound = 1000
self.answer = 4782
def test_main(self):
self.assertEqual(Problem25(self.bound).fn(), self.answer)
if __name__ == '__main__':
main()
| mit | 3,202,610,991,643,990,500 | 18.138462 | 70 | 0.55627 | false |
alexandru/django-asynctasks | src/django_asynctasks/tasks.py | 1 | 1787 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
from functools import wraps
from inspect import getargspec
from django_asynctasks.models import AsyncTask
HIGH=1
NORMAL=2
LOW=3
def define(label, schedule=None, bucket=None, priority=2):
if not schedule:
schedule = 'onetime'
if not bucket:
bucket = '__default__'
def wrap(f):
function_namespace = f.__module__ + "." + f.__name__
@wraps(f)
def delay(self, *args, **kwargs):
override_priority = kwargs.get('priority') or priority or 2
override_bucket = kwargs.get('bucket') or bucket or None
when = 'onetime'
if kwargs.get('when'):
when = kwargs.pop('when')
if not kwargs.get('priority'): kwargs['priority'] = override_priority
if not kwargs.get('bucket'): kwargs['bucket'] = override_bucket
return AsyncTask.schedule(function_namespace, args=args, kwargs=kwargs,
label=label,
when=when,
bucket=override_bucket,
priority=override_priority)
delay.argspec = getargspec(f)
@wraps(f)
def run(self, *args, **kwargs):
return f(*args, **kwargs)
run.argspec = getargspec(f)
cls_dict = dict(run=run, delay=delay, __module__=f.__module__, schedule=schedule, label=label)
return type(f.__name__, (Task,), cls_dict)()
return wrap
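# Illustrative usage of the decorator above (added; the task name and function
# are made up for the example):
#
#   @define('send-newsletter', schedule='daily', priority=HIGH)
#   def send_newsletter(edition_id):
#       ...
#
#   send_newsletter.delay(42, when='onetime')  # schedules an AsyncTask row
#   send_newsletter(42)                        # runs the body synchronously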
class Task(object):
def run(self, *args, **kwargs):
pass
def delay(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return self.run(*args, **kwargs)
| bsd-2-clause | 3,137,311,310,393,275,000 | 30.350877 | 102 | 0.543369 | false |
toslunar/chainerrl | tests/misc_tests/test_copy_param.py | 1 | 3183 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases() # NOQA
import unittest
import chainer
from chainer import links as L
import numpy as np
from chainerrl.misc import copy_param
class TestCopyParam(unittest.TestCase):
def test_copy_param(self):
a = L.Linear(1, 5)
b = L.Linear(1, 5)
s = chainer.Variable(np.random.rand(1, 1).astype(np.float32))
a_out = list(a(s).array.ravel())
b_out = list(b(s).array.ravel())
self.assertNotEqual(a_out, b_out)
# Copy b's parameters to a
copy_param.copy_param(a, b)
a_out_new = list(a(s).array.ravel())
b_out_new = list(b(s).array.ravel())
self.assertEqual(a_out_new, b_out)
self.assertEqual(b_out_new, b_out)
def test_copy_param_scalar(self):
a = chainer.Chain()
with a.init_scope():
a.p = chainer.Parameter(np.array(1))
b = chainer.Chain()
with b.init_scope():
b.p = chainer.Parameter(np.array(2))
self.assertNotEqual(a.p.array, b.p.array)
# Copy b's parameters to a
copy_param.copy_param(a, b)
self.assertEqual(a.p.array, b.p.array)
def test_copy_param_type_check(self):
a = L.Linear(None, 5)
b = L.Linear(1, 5)
with self.assertRaises(TypeError):
            # Copy b's parameters to a, but since a's parameters are not
            # initialized, it should raise an error.
copy_param.copy_param(a, b)
def test_soft_copy_param(self):
a = L.Linear(1, 5)
b = L.Linear(1, 5)
a.W.array[:] = 0.5
b.W.array[:] = 1
# a = (1 - tau) * a + tau * b
copy_param.soft_copy_param(target_link=a, source_link=b, tau=0.1)
np.testing.assert_almost_equal(a.W.array, np.full(a.W.shape, 0.55))
np.testing.assert_almost_equal(b.W.array, np.full(b.W.shape, 1.0))
copy_param.soft_copy_param(target_link=a, source_link=b, tau=0.1)
np.testing.assert_almost_equal(a.W.array, np.full(a.W.shape, 0.595))
np.testing.assert_almost_equal(b.W.array, np.full(b.W.shape, 1.0))
def test_soft_copy_param_scalar(self):
a = chainer.Chain()
with a.init_scope():
a.p = chainer.Parameter(np.array(0.5))
b = chainer.Chain()
with b.init_scope():
b.p = chainer.Parameter(np.array(1))
# a = (1 - tau) * a + tau * b
copy_param.soft_copy_param(target_link=a, source_link=b, tau=0.1)
np.testing.assert_almost_equal(a.p.array, 0.55)
np.testing.assert_almost_equal(b.p.array, 1.0)
copy_param.soft_copy_param(target_link=a, source_link=b, tau=0.1)
np.testing.assert_almost_equal(a.p.array, 0.595)
np.testing.assert_almost_equal(b.p.array, 1.0)
def test_soft_copy_param_type_check(self):
a = L.Linear(None, 5)
b = L.Linear(1, 5)
with self.assertRaises(TypeError):
copy_param.soft_copy_param(target_link=a, source_link=b, tau=0.1)
| mit | 1,693,299,567,431,025,700 | 30.514851 | 77 | 0.593465 | false |
texttheater/produce | doc/samples/tokenization/src/scripts/tables.py | 1 | 2032 | """Library for pretty-printing tables."""
import util
from util import out, nl
def print_table(table, rowsortkey=None, columnsortkey=None, defaultvalue=''):
# Get the row and column heads:
row_heads = list(table.keys())
column_heads = util.list_union(map(lambda x: x.keys(), table.values()))
if rowsortkey:
row_heads.sort(key=rowsortkey)
if columnsortkey:
column_heads.sort(key=columnsortkey)
# Determine the width of each column:
column_widths = {}
for column_head in column_heads:
column_widths[column_head] = max(len(str(column_head)),
len(str(defaultvalue)))
for row_head, row in table.items():
if column_head in row:
column_widths[column_head] = max(column_widths[column_head],
len(str(row[column_head])))
# Determine the width of the head column:
head_column_width = max(map(len, map(str, row_heads)))
# Print the table:
print_head_row(column_heads, head_column_width, column_widths)
for row_head in row_heads:
print_row(row_head, table[row_head], head_column_width, column_heads,
column_widths, defaultvalue)
def print_head_row(column_heads, head_column_width, column_widths):
out(' ' * head_column_width)
for column_head in column_heads:
width = column_widths[column_head]
print_cell(column_head, width)
print
def print_row(row_head, row, head_column_width, column_heads, column_widths,
defaultvalue):
print_cell(row_head, head_column_width, leftmargin=0)
for column_head in column_heads:
try:
content = row[column_head]
except KeyError:
content = defaultvalue
print_cell(content, column_widths[column_head])
nl()
def print_cell(content, width, leftmargin=1):
out(' ' * leftmargin)
string = str(content)
pad = (width - len(string)) * ' '
if util.isnumber(content):
out(pad + string)
else:
out(string + pad)
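# Example (added for illustration): `table` maps row heads to dicts that map
# column heads to cell values; missing cells fall back to `defaultvalue`.
#
#   print_table({'gold': {'precision': 0.9, 'recall': 0.8},
#                'system': {'precision': 0.7}},
#               defaultvalue='n/a')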
| mit | 4,710,433,475,010,807,000 | 34.649123 | 77 | 0.629429 | false |
markroxor/gensim | gensim/models/wrappers/varembed.py | 1 | 4389 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2017 Anmol Gulati <[email protected]>
# Copyright (C) 2017 Radim Rehurek <[email protected]>
"""
Python wrapper around word representation learning from Varembed models,
a library for learning word representations informed by morphological priors [1].
This module makes it possible to obtain word vectors for out-of-vocabulary words
for the Varembed model [2].
The wrapped model can not be updated with new documents for online training.
.. [1] https://github.com/rguthrie3/MorphologicalPriorsForWordEmbeddings
.. [2] http://arxiv.org/pdf/1608.01056.pdf
"""
import logging
import numpy as np
from gensim import utils
from gensim.models.keyedvectors import KeyedVectors
from gensim.models.word2vec import Vocab
logger = logging.getLogger(__name__)
class VarEmbed(KeyedVectors):
"""
Class for word vectors using Varembed models. Contains methods to load a varembed model and implements
functionality like `most_similar`, `similarity` by extracting vectors into numpy matrix.
Refer to [Varembed]https://github.com/rguthrie3/MorphologicalPriorsForWordEmbeddings for
implementation of Varembed models.
"""
def __init__(self):
self.vector_size = 0
self.vocab_size = 0
@classmethod
def load_varembed_format(cls, vectors, morfessor_model=None):
"""
        Load the word vectors into a matrix from the varembed output vector files.
        Using morphemes requires Python 2.7 or above.
        'vectors' is the pickle file containing the word vectors.
        'morfessor_model' is the path to the trained morfessor model; when given,
        morpheme embeddings are added to the word vectors.
"""
result = cls()
if vectors is None:
raise Exception("Please provide vectors binary to load varembed model")
d = utils.unpickle(vectors)
word_to_ix = d['word_to_ix']
morpho_to_ix = d['morpho_to_ix']
word_embeddings = d['word_embeddings']
morpho_embeddings = d['morpheme_embeddings']
result.load_word_embeddings(word_embeddings, word_to_ix)
if morfessor_model:
try:
import morfessor
morfessor_model = morfessor.MorfessorIO().read_binary_model_file(morfessor_model)
result.add_morphemes_to_embeddings(morfessor_model, morpho_embeddings, morpho_to_ix)
except ImportError:
# Morfessor Package not found.
logger.error('Could not import morfessor. Not using morpheme embeddings')
raise ImportError('Could not import morfessor.')
logger.info('Loaded varembed model vectors from %s', vectors)
return result
def load_word_embeddings(self, word_embeddings, word_to_ix):
""" Loads the word embeddings """
logger.info("Loading the vocabulary")
self.vocab = {}
self.index2word = []
counts = {}
for word in word_to_ix:
counts[word] = counts.get(word, 0) + 1
self.vocab_size = len(counts)
self.vector_size = word_embeddings.shape[1]
self.syn0 = np.zeros((self.vocab_size, self.vector_size))
self.index2word = [None] * self.vocab_size
logger.info("Corpus has %i words", len(self.vocab))
for word_id, word in enumerate(counts):
self.vocab[word] = Vocab(index=word_id, count=counts[word])
self.syn0[word_id] = word_embeddings[word_to_ix[word]]
self.index2word[word_id] = word
assert((len(self.vocab), self.vector_size) == self.syn0.shape)
logger.info("Loaded matrix of %d size and %d dimensions", self.vocab_size, self.vector_size)
def add_morphemes_to_embeddings(self, morfessor_model, morpho_embeddings, morpho_to_ix):
""" Method to include morpheme embeddings into varembed vectors
Allowed only in Python versions 2.7 and above.
"""
for word in self.vocab:
morpheme_embedding = np.array(
[
morpho_embeddings[morpho_to_ix.get(m, -1)]
for m in morfessor_model.viterbi_segment(word)[0]
]
).sum(axis=0)
self.syn0[self.vocab[word].index] += morpheme_embedding
logger.info("Added morphemes to word vectors")
| lgpl-2.1 | 5,230,811,754,571,886,000 | 40.018692 | 106 | 0.652313 | false |
kasioumis/invenio | invenio/ext/session/model.py | 1 | 2073 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011, 2012, 2013, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Example implementation of SQLAlchemy session model backend."""
from datetime import datetime
from flask.ext.login import current_user
from invenio.ext.sqlalchemy import db
class Session(db.Model):
"""Represent Session record."""
__tablename__ = 'session'
session_key = db.Column(db.String(32), nullable=False,
server_default='', primary_key=True)
session_expiry = db.Column(db.DateTime, nullable=True, index=True)
session_object = db.Column(db.LargeBinary, nullable=True)
uid = db.Column(db.Integer(15, unsigned=True), nullable=False, index=True)
def get_session(self, name, expired=False):
"""Return an instance of :class:`Session`."""
where = Session.session_key == name
if expired:
where = db.and_(
where, Session.session_expiry >= db.func.current_timestamp())
return self.query.filter(where).one()
def set_session(self, name, value, timeout=None):
"""Store value in database."""
uid = current_user.get_id()
session_expiry = datetime.utcnow() + timeout
return Session(session_key=name,
session_object=value,
session_expiry=session_expiry,
uid=uid)
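# Illustrative use of the model above (added; assumes an application context
# with the Invenio database initialised and `timeout` given as a timedelta):
#
#   from datetime import timedelta
#   sess = Session().set_session('f3a1...', pickled_data, timeout=timedelta(days=1))
#   db.session.merge(sess)
#   db.session.commit()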
| gpl-2.0 | -5,836,776,987,267,945,000 | 38.113208 | 78 | 0.667149 | false |
IntersectAustralia/asvo-tao | web/tao/deployment_tests/cone_basic.py | 1 | 3740 | """
Tests:
1. Galaxy positions are all within the light-cone geometry
2. The number of Galaxies in Catalogue 0 matches the expected number (optional)
3. The galaxy ids are unique across all light-cones (optional)
"""
import logging
from tao_validate import ValidateJob
logger = logging.getLogger('detest.'+__name__)
class Validator(ValidateJob):
def __init__(self):
self.doc = __doc__
super(Validator, self).__init__()
def validate(self, args, job_params):
super(Validator, self).validate(args, job_params)
self.catalogues = []
for i in range(self.job_params.NUMBER_OF_CONES):
self.catalogues.append(self.load_csv(i,
usecols=['Galaxy_ID', 'Right_Ascension', 'Declination',
'Redshift_Cosmological']))
self.check_geometry(self.catalogues[i])
self.check_galaxy_count(self.catalogues[0])
if getattr(self.job_params, 'CHECK_UNIQUE', False):
logger.info("Checking Galaxy IDs are unique")
for i in range(self.job_params.NUMBER_OF_CONES-1):
for j in range(i+1, self.job_params.NUMBER_OF_CONES):
logger.debug("Unique Galaxies between catalogues {0} and {1}".format(
i, j))
self.unique_galaxies(self.catalogues[i], self.catalogues[j])
logger.info("Finished Cone Basic Checks.")
return
def check_galaxy_count(self, cat):
"Check the number of galaxies in the supplied catalogue"
if getattr(self.job_params, 'GALAXY_COUNT', None) is None:
logger.info("Skipping galaxy check count")
return
logger.info("Checking galaxy count")
self.assert_true(len(cat) == self.job_params.GALAXY_COUNT,
"Galaxy counts don't match: got {0}, expected {1}".format(
len(cat), self.job_params.GALAXY_COUNT))
return
def unique_galaxies(self, cat1, cat2):
"Check that galaxies only appear in 1 catalogue"
gid1 = set(cat1['Galaxy_ID'].values)
gid2 = set(cat2['Galaxy_ID'].values)
common = gid1 & gid2
self.assert_true(len(common) == 0,
"Galaxy IDs are not unique: {0} in common".format(
len(common)))
return
def check_geometry(self, cat):
"Check that RA, Dec and Redshift are withing the catalogue geometry"
stats = cat['Right_Ascension'].describe()
self.assert_true(stats['max'] <= self.job_params.RA,
"Expected max RA of {0}, got {1}".format(
self.job_params.RA, stats['max']))
self.assert_true(stats['min'] >= 0.0,
"Negative RA: {0}".format(
stats['min']))
stats = cat['Declination'].describe()
self.assert_true(stats['max'] <= self.job_params.DEC,
"Expected max Dec of {0}, got {1}".format(
self.job_params.DEC, stats['max']))
self.assert_true(stats['min'] >= 0.0,
"Negative Dec: {0}".format(
stats['min']))
stats = cat['Redshift_Cosmological'].describe()
self.assert_true(stats['max'] <= self.job_params.REDSHIFT_MAX,
"Expected max Redshift of {0}, got {1}".format(
self.job_params.REDSHIFT_MAX, stats['max']))
self.assert_true(stats['min'] >= self.job_params.REDSHIFT_MIN,
"Expected min Redshift of {0}, got {1}".format(
self.job_params.REDSHIFT_MIN, stats['min']))
return | gpl-3.0 | -6,566,078,525,154,567,000 | 38.797872 | 89 | 0.552139 | false |
NorfairKing/sus-depot | shared/shared/vim/dotvim/bundle/YouCompleteMe/third_party/ycmd/ycmd/tests/clang/flags_test.py | 1 | 6781 | # Copyright (C) 2011, 2012 Google Inc.
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from nose.tools import eq_
from nose.tools import ok_
from ycmd.completers.cpp import flags
def SanitizeFlags_Passthrough_test():
eq_( [ '-foo', '-bar' ],
list( flags._SanitizeFlags( [ '-foo', '-bar' ] ) ) )
def SanitizeFlags_ArchRemoved_test():
expected = [ '-foo', '-bar' ]
to_remove = [ '-arch', 'arch_of_evil' ]
eq_( expected,
list( flags._SanitizeFlags( expected + to_remove ) ) )
eq_( expected,
list( flags._SanitizeFlags( to_remove + expected ) ) )
eq_( expected,
list( flags._SanitizeFlags(
expected[ :1 ] + to_remove + expected[ -1: ] ) ) )
def RemoveUnusedFlags_Passthrough_test():
eq_( [ '-foo', '-bar' ],
flags._RemoveUnusedFlags( [ '-foo', '-bar' ], 'file' ) )
def RemoveUnusedFlags_RemoveDashC_test():
expected = [ '-foo', '-bar' ]
to_remove = [ '-c' ]
filename = 'file'
eq_( expected,
flags._RemoveUnusedFlags( expected + to_remove, filename ) )
eq_( expected,
flags._RemoveUnusedFlags( to_remove + expected, filename ) )
eq_( expected,
flags._RemoveUnusedFlags(
expected[ :1 ] + to_remove + expected[ -1: ], filename ) )
def RemoveUnusedFlags_RemoveColor_test():
expected = [ '-foo', '-bar' ]
to_remove = [ '--fcolor-diagnostics' ]
filename = 'file'
eq_( expected,
flags._RemoveUnusedFlags( expected + to_remove, filename ) )
eq_( expected,
flags._RemoveUnusedFlags( to_remove + expected, filename ) )
eq_( expected,
flags._RemoveUnusedFlags(
expected[ :1 ] + to_remove + expected[ -1: ], filename ) )
def RemoveUnusedFlags_RemoveDashO_test():
expected = [ '-foo', '-bar' ]
to_remove = [ '-o', 'output_name' ]
filename = 'file'
eq_( expected,
flags._RemoveUnusedFlags( expected + to_remove, filename ) )
eq_( expected,
flags._RemoveUnusedFlags( to_remove + expected, filename ) )
eq_( expected,
flags._RemoveUnusedFlags(
expected[ :1 ] + to_remove + expected[ -1: ], filename ) )
def RemoveUnusedFlags_RemoveMP_test():
expected = [ '-foo', '-bar' ]
to_remove = [ '-MP' ]
filename = 'file'
eq_( expected,
flags._RemoveUnusedFlags( expected + to_remove, filename ) )
eq_( expected,
flags._RemoveUnusedFlags( to_remove + expected, filename ) )
eq_( expected,
flags._RemoveUnusedFlags(
expected[ :1 ] + to_remove + expected[ -1: ], filename ) )
def RemoveUnusedFlags_RemoveFilename_test():
expected = [ 'foo', '-bar' ]
to_remove = [ 'file' ]
filename = 'file'
eq_( expected,
flags._RemoveUnusedFlags( expected + to_remove, filename ) )
eq_( expected,
flags._RemoveUnusedFlags( expected[ :1 ] + to_remove + expected[ 1: ],
filename ) )
eq_( expected,
flags._RemoveUnusedFlags(
expected[ :1 ] + to_remove + expected[ -1: ], filename ) )
def RemoveUnusedFlags_RemoveFlagWithoutPrecedingDashFlag_test():
expected = [ 'g++', '-foo', '-x', 'c++', '-bar', 'include_dir' ]
to_remove = [ 'unrelated_file' ]
filename = 'file'
eq_( expected,
flags._RemoveUnusedFlags( expected + to_remove, filename ) )
eq_( expected,
flags._RemoveUnusedFlags( expected[ :1 ] + to_remove + expected[ 1: ],
filename ) )
def RemoveUnusedFlags_RemoveFilenameWithoutPrecedingInclude_test():
def tester( flag ):
expected = [ 'clang', flag, '/foo/bar', '-isystem/zoo/goo' ]
eq_( expected,
flags._RemoveUnusedFlags( expected + to_remove, filename ) )
eq_( expected,
flags._RemoveUnusedFlags( expected[ :1 ] + to_remove + expected[ 1: ],
filename ) )
eq_( expected + expected[ 1: ],
flags._RemoveUnusedFlags( expected + to_remove + expected[ 1: ],
filename ) )
include_flags = [ '-isystem', '-I', '-iquote', '-isysroot', '--sysroot',
'-gcc-toolchain', '-include', '-iframework', '-F', '-imacros' ]
to_remove = [ '/moo/boo' ]
filename = 'file'
for flag in include_flags:
yield tester, flag
def RemoveXclangFlags_test():
expected = [ '-I', '/foo/bar', '-DMACRO=Value' ]
to_remove = [ '-Xclang', 'load', '-Xclang', 'libplugin.so',
'-Xclang', '-add-plugin', '-Xclang', 'plugin-name' ]
eq_( expected,
flags._RemoveXclangFlags( expected + to_remove ) )
eq_( expected,
flags._RemoveXclangFlags( to_remove + expected ) )
eq_( expected + expected,
flags._RemoveXclangFlags( expected + to_remove + expected ) )
def CompilerToLanguageFlag_Passthrough_test():
eq_( [ '-foo', '-bar' ],
flags._CompilerToLanguageFlag( [ '-foo', '-bar' ] ) )
def _ReplaceCompilerTester( compiler, language ):
to_removes = [
[],
[ '/usr/bin/ccache' ],
[ 'some_command', 'another_command' ]
]
expected = [ '-foo', '-bar' ]
for to_remove in to_removes:
eq_( [ compiler, '-x', language ] + expected,
flags._CompilerToLanguageFlag( to_remove + [ compiler ] + expected ) )
def CompilerToLanguageFlag_ReplaceCCompiler_test():
compilers = [ 'cc', 'gcc', 'clang', '/usr/bin/cc',
'/some/other/path', 'some_command' ]
for compiler in compilers:
yield _ReplaceCompilerTester, compiler, 'c'
def CompilerToLanguageFlag_ReplaceCppCompiler_test():
compilers = [ 'c++', 'g++', 'clang++', '/usr/bin/c++',
'/some/other/path++', 'some_command++',
'c++-5', 'g++-5.1', 'clang++-3.7.3', '/usr/bin/c++-5',
'c++-5.11', 'g++-50.1.49', 'clang++-3.12.3', '/usr/bin/c++-10',
'/some/other/path++-4.9.3', 'some_command++-5.1',
'/some/other/path++-4.9.31', 'some_command++-5.10' ]
for compiler in compilers:
yield _ReplaceCompilerTester, compiler, 'c++'
def ExtraClangFlags_test():
flags_object = flags.Flags()
num_found = 0
for flag in flags_object.extra_clang_flags:
if flag.startswith( '-resource-dir=' ):
ok_( flag.endswith( 'clang_includes' ) )
num_found += 1
eq_( 1, num_found )
| gpl-2.0 | -3,441,176,706,693,774,000 | 28.872247 | 83 | 0.596667 | false |
deepmind/interval-bound-propagation | examples/language/utils.py | 1 | 10260 | # coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for sentence representation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
from absl import logging
import sonnet as snt
import tensorflow as tf
from tensorflow.contrib import lookup as contrib_lookup
def get_padded_embeddings(embeddings,
vocabulary_table,
tokens, batch_size,
token_indexes=None):
"""Reshapes and pads 'raw' word embeddings.
  Say we have a batch of B tokenized sentences, of variable length, with a total
of W tokens. For example, B = 2 and W = 3 + 4 = 7:
[['The', 'cat', 'eats'],
[ 'A', 'black', 'cat', 'jumps']]
Since rows have variable length, this cannot be represented as a tf.Tensor.
It is represented as a tf.SparseTensor, with 7 values & indexes:
indices: [[0,0], [0,1], [0,2], [1,0], [1,1], [1,2], [1,3]]
values: ['The', 'cat', 'eats', 'A', 'black', 'cat', 'jumps']
We have also built a vocabulary table:
vocabulary table: ['cat', 'The', 'A', 'black', 'eats', 'jumps']
  We also have the embeddings: a matrix of floats with one D-dimensional row
  per word in the vocabulary table, stored as a normal tf.Tensor.
For example, with D=3, embeddings could be:
[[0.4, 0.5, -0.6], # This is the embedding for word 0 = 'cat'
[0.1, -0.3, 0.6], # This is the embedding for word 1 = 'The''
[0.7, 0.8, -0.9], # This is the embedding for word 2 = 'A'
[-0.1, 0.9, 0.7], # This is the embedding for word 3 = 'black'
[-0.2, 0.4, 0.7], # This is the embedding for word 4 = 'eats
[0.3, -0.5, 0.2]] # This is the embedding for word 5 = 'jumps'
This function builds a normal tf.Tensor containing the embeddings for the
tokens provided, in the correct order, with appropriate 0 padding.
In our example, the returned tensor would be:
[[[0.1, -0.3, 0.6], [0.4, 0.5, -0.6], [-0.2, 0.4, 0.7], [0.0, 0.0, 0.0]],
[[0.7, 0.8, -0.9], [-0.1, 0.9, 0.7], [0.4, 0.5, -0.6], [0.3, -0.5, 0.2]]]
Note that since the first sentence has only 3 words, the 4th embedding gets
replaced by a D-dimensional vector of 0.
Args:
    embeddings: [V, D] Tensor of floats (V = vocabulary size), containing the
      embeddings, initialized with the same vocabulary file as vocabulary_table.
vocabulary_table: a tf.contrib.lookup.LookupInterface,
containing the vocabulary, initialized with the same vocabulary file as
embeddings.
tokens: [B, ?] SparseTensor of strings, the tokens.
batch_size: Python integer.
token_indexes: A Boolean, indicating whether the input tokens are
token ids or string.
Returns:
[B, L, D] Tensor of floats: the embeddings in the correct order,
appropriately padded with 0.0, where L = max(num_tokens) and B = batch_size
"""
embedding_dim = embeddings.get_shape()[1].value # D in docstring above.
num_tokens_in_batch = tf.shape(tokens.indices)[0] # W in the docstring above.
max_length = tokens.dense_shape[1] # This is L in the docstring above.
# Get indices of tokens in vocabulary_table.
if token_indexes is not None:
indexes = token_indexes
else:
indexes = vocabulary_table.lookup(tokens.values)
# Get word embeddings.
tokens_embeddings = tf.gather(embeddings, indexes)
# Shape of the return tensor.
new_shape = tf.cast(
tf.stack([batch_size, max_length, embedding_dim], axis=0), tf.int32)
# Build the vector of indices for the return Tensor.
# In the example above, indices_final would be:
# [[[0,0,0], [0,0,1], [0,0,2]],
# [[0,1,0], [0,1,1], [0,1,2]],
# [[0,2,0], [0,2,1], [0,2,2]],
# [[1,0,0], [1,0,1], [1,0,2]],
# [[1,1,0], [1,1,1], [1,1,2]],
# [[1,2,0], [1,2,1], [1,2,2]],
# [[1,3,0], [1,3,1], [1,3,2]]]
tiled = tf.tile(tokens.indices, [1, embedding_dim])
indices_tiled = tf.cast(
tf.reshape(tiled, [num_tokens_in_batch * embedding_dim, 2]), tf.int32)
indices_linear = tf.expand_dims(
tf.tile(tf.range(0, embedding_dim), [num_tokens_in_batch]), axis=1)
indices_final = tf.concat([indices_tiled, indices_linear], axis=1)
# Build the dense Tensor.
embeddings_padded = tf.sparse_to_dense(
sparse_indices=indices_final,
output_shape=new_shape,
sparse_values=tf.reshape(tokens_embeddings,
[num_tokens_in_batch * embedding_dim]))
embeddings_padded.set_shape((batch_size, None, embedding_dim))
return embeddings_padded
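# Illustrative usage sketch (added for clarity; not part of the original
# module). It rebuilds the docstring example above with B=2, D=3. The
# vocabulary and embedding values are assumptions for demonstration only.
def _example_get_padded_embeddings():
  """Builds the docstring example; returns a [2, 4, 3] padded Tensor."""
  vocab = tf.constant(['cat', 'The', 'A', 'black', 'eats', 'jumps'])
  embeddings = tf.constant(
      [[0.4, 0.5, -0.6], [0.1, -0.3, 0.6], [0.7, 0.8, -0.9],
       [-0.1, 0.9, 0.7], [-0.2, 0.4, 0.7], [0.3, -0.5, 0.2]])
  vocabulary_table = contrib_lookup.index_table_from_tensor(vocab)
  tokens = tf.SparseTensor(
      indices=[[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [1, 3]],
      values=['The', 'cat', 'eats', 'A', 'black', 'cat', 'jumps'],
      dense_shape=[2, 4])
  return get_padded_embeddings(embeddings, vocabulary_table, tokens,
                               batch_size=2)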
def get_padded_indexes(vocabulary_table,
tokens, batch_size,
token_indexes=None):
"""Get the indices of tokens from vocabulary table.
Args:
vocabulary_table: a tf.contrib.lookup.LookupInterface,
containing the vocabulary, initialized with the same vocabulary file as
embeddings.
tokens: [B, ?] SparseTensor of strings, the tokens.
batch_size: Python integer.
    token_indexes: Optional Tensor of int token ids. If provided, it is used
      directly and the lookup of tokens.values in vocabulary_table is skipped.
Returns:
[B, L] Tensor of integers: indices of tokens in the correct order,
appropriately padded with 0, where L = max(num_tokens) and B = batch_size
"""
num_tokens_in_batch = tf.shape(tokens.indices)[0]
max_length = tokens.dense_shape[1]
# Get indices of tokens in vocabulary_table.
if token_indexes is not None:
indexes = token_indexes
else:
indexes = vocabulary_table.lookup(tokens.values)
# Build the dense Tensor.
indexes_padded = tf.sparse_to_dense(
sparse_indices=tokens.indices,
output_shape=[batch_size, max_length],
sparse_values=tf.reshape(indexes,
[num_tokens_in_batch]))
indexes_padded.set_shape((batch_size, None))
return indexes_padded
class EmbedAndPad(snt.AbstractModule):
"""Embed and pad tokenized words.
  This class's primary functionality is similar to get_padded_embeddings.
It stores references to the embeddings and vocabulary table for convenience,
so that the user does not have to keep and pass them around.
"""
def __init__(self,
batch_size,
vocabularies,
embedding_dim,
num_oov_buckets=1000,
fine_tune_embeddings=False,
padded_token=None,
name='embed_and_pad'):
super(EmbedAndPad, self).__init__(name=name)
self._batch_size = batch_size
vocab_file, vocab_size = get_merged_vocabulary_file(vocabularies,
padded_token)
self._vocab_size = vocab_size
self._num_oov_buckets = num_oov_buckets
# Load vocabulary table for index lookup.
self._vocabulary_table = contrib_lookup.index_table_from_file(
vocabulary_file=vocab_file,
num_oov_buckets=num_oov_buckets,
vocab_size=self._vocab_size)
def create_initializer(initializer_range=0.02):
"""Creates a `truncated_normal_initializer` with the given range."""
# The default value is chosen from language/bert/modeling.py.
return tf.truncated_normal_initializer(stddev=initializer_range)
self._embeddings = tf.get_variable('embeddings_matrix',
[self._vocab_size + num_oov_buckets,
embedding_dim],
trainable=fine_tune_embeddings,
initializer=create_initializer())
def _build(self, tokens):
padded_embeddings = get_padded_embeddings(
self._embeddings, self._vocabulary_table, tokens, self._batch_size)
return padded_embeddings
@property
def vocab_table(self):
return self._vocabulary_table
@property
def vocab_size(self):
return self._vocab_size + self._num_oov_buckets
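# Illustrative usage sketch (added for clarity). The vocabularies and the
# [B, ?] SparseTensor of tokens are assumed to come from the input pipeline;
# the values below are placeholders only.
def _example_embed_and_pad(tokens_sparse):
  """Returns [2, L, 16] padded embeddings for a [2, ?] SparseTensor of tokens."""
  embed_and_pad = EmbedAndPad(
      batch_size=2,
      vocabularies=[['the', 'cat', 'eats'], ['a', 'black', 'cat', 'jumps']],
      embedding_dim=16,
      num_oov_buckets=10,
      fine_tune_embeddings=False)
  return embed_and_pad(tokens_sparse)  # snt module: __call__ invokes _build().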
def get_accuracy(logits, labels):
"""Top 1 accuracy from logits and labels."""
return tf.reduce_mean(tf.cast(tf.nn.in_top_k(logits, labels, 1), tf.float32))
def get_num_correct_predictions(logits, labels):
"""Get the number of correct predictions over a batch."""
predictions = tf.cast(tf.argmax(logits, axis=1), tf.int64)
evals = tf.equal(predictions, labels)
num_correct = tf.reduce_sum(tf.cast(evals, tf.float64))
return num_correct
def get_merged_vocabulary_file(vocabularies, padded_token=None):
"""Merges several vocabulary files into one temporary file.
The TF object that loads the embedding expects a vocabulary file, to know
which embeddings it should load.
See tf.contrib.embedding.load_embedding_initializer.
When we want to train/test on several datasets simultaneously we need to merge
their vocabulary files into a single file.
Args:
vocabularies: Iterable of vocabularies. Each vocabulary should be
a list of tokens.
padded_token: If not None, add the padded_token to the first index.
Returns:
outfilename: Name of the merged file. Contains the union of all tokens in
      the given vocabularies, without duplicates, one token per line.
vocabulary_size: Count of tokens in the merged file.
"""
uniques = [set(vocabulary) for vocabulary in vocabularies]
unique_merged = frozenset().union(*uniques)
unique_merged_sorted = sorted(unique_merged)
if padded_token is not None:
# Add padded token as 0 index.
unique_merged_sorted = [padded_token] + unique_merged_sorted
vocabulary_size = len(unique_merged_sorted)
outfile = tempfile.NamedTemporaryFile(delete=False)
outfile.write(b'\n'.join(unique_merged_sorted))
outfilename = outfile.name
logging.info('Merged vocabulary file with %d tokens: %s', vocabulary_size,
outfilename)
outfile.close()
return outfilename, vocabulary_size
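# Example (added sketch): merging the two toy vocabularies used above,
#   get_merged_vocabulary_file([['the', 'cat', 'eats'],
#                               ['a', 'black', 'cat', 'jumps']],
#                              padded_token='<pad>')
# writes '<pad>' followed by the sorted union of the six distinct tokens to a
# temporary file and returns (its name, 7).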
| apache-2.0 | 4,491,862,746,991,396,000 | 37.716981 | 80 | 0.658285 | false |
fastcoding/sirrannon | python/newGUI/canvas.py | 1 | 36746 | import pygtk
pygtk.require("2.0")
import gtk
import cairo
import crcanvas
import math
import util
import gobject
import pango
from base import SirannonBase
# Constants
MODE_RESIZE = 0
MODE_ROTATE = 1
MODE_CONNECT = 2
MODE_MOVE = 3
ARROW_H = 0
ARROW_V = 1
ARROWS = {}
SOUTH, WEST, NORTH, EAST = 0, 1, 2, 3
# Form factors
MODEL_DEFAULT_WIDTH = 208
MODEL_DEFAULT_HEIGHT = 117
MODEL_BORDER_COLOR = 'black'
MODEL_COLOR = 'grey'
MODEL_TEXT_COLOR = 'black'
MODEL_BORDER_NORMAL = 1
MODEL_BORDER_HIGHLIGHT = 3
TEXT_PADDING = 5
CONTROLLER_DEFAULT = 6
CONTROLLER_COLOR = 'red'
CONTROLLER_WIDTH = 7
CONTROLLER_HEIGHT = 7
CONTROLLER_BORDER_NORMAL = 1
CONTROLLER_COORDS = ((-1,-1), (-1,1), (1,-1), (1,1), (0,-1), (1,0), (0,1), (-1,0), (0.8,0.8))
CONTROLLER_ORIENTATION = ( NORTH, NORTH, NORTH, NORTH, NORTH, EAST, SOUTH, WEST, NORTH )
CONTROLLER_FORM = (crcanvas.Rectangle, crcanvas.Rectangle, crcanvas.Rectangle, crcanvas.Rectangle,
crcanvas.Rectangle, crcanvas.Rectangle, crcanvas.Rectangle, crcanvas.Rectangle,
crcanvas.Ellipse)
CONTROLLER_NORTH = 4
CONTROLLER_EAST = 5
CONTROLLER_SOUTH = 6
CONTROLLER_WEST = 7
CONTROLLER_COLOR = ('red', 'red', 'red', 'red', 'blue', 'blue', 'blue', 'blue', 'green')
CONTROLLER_MODE = (MODE_RESIZE, MODE_RESIZE, MODE_RESIZE, MODE_RESIZE,
MODE_CONNECT, MODE_CONNECT, MODE_CONNECT, MODE_CONNECT,
MODE_ROTATE)
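# Added note: layout of the nine controllers around the model rectangle,
# assuming the canvas y axis points down. Indexes 0-3 are the corner resize
# handles, 4-7 the mid-edge connect handles, and 8 the rotate handle (an
# ellipse just inside the bottom-right corner):
#
#            4 (connect)
#     0 ------------------ 2
#     |                    |
#     7 (connect)          5 (connect)
#     |                    |
#     1 ------------------ 3
#            6 (connect)       8 = rotate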
ARROW_COLOR = 'blue'
ARROW_WIDTH = 2
ARROW_WIDTH_HIGHLIGHT = 3
ARROW_LENGTH = CONTROLLER_HEIGHT
ARROW_HEAD_WIDTH = 50
ARROWS[ARROW_H,ARROW_H] = ((0.0,0.0), (0.5,0.0), (0.5,1.0), (1.0,1.0))
ARROWS[ARROW_H,ARROW_V] = ((0.0,0.0), (1.0,0.0), (1.0,1.0))
ARROWS[ARROW_V,ARROW_V] = ((0.0,0.0), (0.0,0.5), (1.0,0.5), (1.0,1.0))
ARROWS[ARROW_V,ARROW_H] = ((0.0,0.0), (0.0,1.0), (1.0,1.0))
PROPERTIES_YPAD = 5
PROPERTIES_XPAD = 3
class SirannonArrow(SirannonBase):
def __init__(self, ctx, widget):
SirannonBase.__init__(self, ctx)
self.registerEvents()
self.widget = widget
self.h0 = None
self.h1 = None
self.line = crcanvas.Line(line_width=ARROW_WIDTH, outline_color=ARROW_COLOR, close=False)
self.widget.root.add(self.line)
self.arrow = crcanvas.Arrow(point_id=-2, fatness=7, scaleable=True, angle=0, length=ARROW_LENGTH, depth=0, outline_color=ARROW_COLOR, line_width=ARROW_WIDTH)
self.line.add(self.arrow)
self.arrow.connect_parent(self.line)
self.line.connect('event', self.event)
def event(self, widget, event, matrix, pick_item):
if event.type == gtk.gdk.ENTER_NOTIFY:
self.line.props.line_width = ARROW_WIDTH_HIGHLIGHT
elif event.type == gtk.gdk.LEAVE_NOTIFY:
self.line.props.line_width = ARROW_WIDTH
elif event.type == gtk.gdk.BUTTON_PRESS:
self.ctx.getProperties().show_route(self.begin.master.name, self.end.master.name)
return False
def clear(self):
self.widget.root.remove(self.line)
self.begin.disconnect(self.h0)
if self.h1:
self.end.disconnect(self.h1)
def set_begin(self, controller):
if self.h0:
self.begin.disconnect(self.h0)
self.begin = controller
self.h0 = controller.connect("request-update", self.update)
def set_end(self, controller):
if self.h1:
self.end.disconnect(self.h1)
self.end = controller
self.h1 = controller.connect("request-update", self.update)
self.update_points(self.begin.props.x, self.begin.props.y, self.begin.master.getOrientation(self.begin),
self.end.props.x, self.end.props.y, self.end.master.getOrientation(self.end),
self.end.master.model.props.width, self.end.master.model.props.height)
def set_refined(self, begin_component, end_component):
w = begin_component.model.props.width
dx = end_component.model.props.x - begin_component.model.props.x
dy = end_component.model.props.y - begin_component.model.props.y
h = begin_component.model.props.height
        # Ugly heuristic: if a component has only one outgoing route, lead it
        # out horizontally (east/west); with several routes, use north/south.
taper = len(self.ctx.getConfig().getRoutesFrom(begin_component.name)) == 1
if dx > w:
if dy > h:
if taper:
controllers = CONTROLLER_EAST, CONTROLLER_NORTH
else:
controllers = CONTROLLER_SOUTH, CONTROLLER_WEST
elif dy > -h:
controllers = CONTROLLER_EAST, CONTROLLER_WEST
else:
if taper:
controllers = CONTROLLER_EAST, CONTROLLER_SOUTH
else:
controllers = CONTROLLER_NORTH, CONTROLLER_WEST
elif dx > -w:
if dy > h:
controllers = CONTROLLER_SOUTH, CONTROLLER_NORTH
            elif dy > -h:
controllers = CONTROLLER_SOUTH, CONTROLLER_NORTH
else:
controllers = CONTROLLER_NORTH, CONTROLLER_SOUTH
else:
if dy > h:
if taper:
controllers = CONTROLLER_WEST, CONTROLLER_NORTH
else:
controllers = CONTROLLER_SOUTH, CONTROLLER_EAST
elif dy > -h:
controllers = CONTROLLER_WEST, CONTROLLER_EAST
else:
if taper:
controllers = CONTROLLER_WEST, CONTROLLER_SOUTH
else:
controllers = CONTROLLER_NORTH, CONTROLLER_EAST
begin, end = controllers
self.set_begin(begin_component.controllers[begin])
self.set_end(end_component.controllers[end])
def set_middle(self, x1, y1, m1):
self.update_points(self.begin.props.x, self.begin.props.y, self.begin.master.getOrientation(self.begin), x1, y1, m1, MODEL_DEFAULT_WIDTH, MODEL_DEFAULT_HEIGHT)
def update(self, event):
self.update_points(self.begin.props.x, self.begin.props.y, self.begin.master.getOrientation(self.begin),
self.end.props.x, self.end.props.y, self.end.master.getOrientation(self.end),
self.end.master.model.props.width, self.end.master.model.props.height)
def update_points(self, x0, y0, m0, x1, y1, m1, w1, h1):
import arrow
w0 = self.begin.master.model.props.width
h0 = self.begin.master.model.props.height
form = arrow.getPoints(x0, y0, m0, w0, h0, x1, y1, m1, w1, h1)
d = (arrow.Vector(x1, y1) - arrow.Vector(x0, y0)).abs() # absolute difference vector
points = []
points.append(x0)
points.append(y0)
p = arrow.Vector(x0, y0)
for a, b, r in form:
p = p + d * r * a + r * b
points.append(p.x)
points.append(p.y)
x2, y2 = points[-2], points[-1]
u = (arrow.Vector(x2, y2) - arrow.Vector(x1, y1)).unitize()
v = arrow.Vector(x1, y1) + u * ARROW_LENGTH
points.append(v.x)
points.append(v.y)
points.append(x1)
points.append(y1)
self.line.props.points = points
def save(self, gfx):
gfx.setAttribute('from', str(self.begin.id))
gfx.setAttribute('to', str(self.end.id))
class SirannonComponent(SirannonBase):
def __init__(self, ctx, widget, name, type, x=0, y=0, w=MODEL_DEFAULT_WIDTH, h=MODEL_DEFAULT_HEIGHT, r=0.0):
# Master
SirannonBase.__init__(self, ctx)
self.registerEvents()
self.widget = widget
self.name = name
self.type = type
self.sync = True
# The model will be a rectangle.
self.model = crcanvas.Rectangle(x=x, y=y, width=w, height=h, outline_color=MODEL_BORDER_COLOR, fill_color=MODEL_COLOR)
self.text = crcanvas.Text(fill_color=MODEL_TEXT_COLOR, anchor=gtk.ANCHOR_CENTER, use_markup=True)
self.model.add(self.text)
self.text.props.text = '{0}\n(<i>{1}</i>)'.format(self.name, self.type)
self.model.mode = MODE_MOVE
self.model.id = -1
self.model.connect('event', self.event)
matrix = self.model.matrix
matrix.rotate(r)
self.model.matrix = matrix
# The controllers
self.controllers = [form(width=CONTROLLER_WIDTH,
height=CONTROLLER_HEIGHT,
outline_color=color,
line_width=CONTROLLER_BORDER_NORMAL,
test_fill=True) \
for form, color in zip(CONTROLLER_FORM, CONTROLLER_COLOR)]
# Position the controllers
for i, controller in enumerate(self.controllers):
controller.id = i
controller.mode = CONTROLLER_MODE[i]
controller.orientation = CONTROLLER_ORIENTATION[i]
controller.master = self
controller.connect('event', self.event)
# Check
w, h = self.min_size()
self.model.props.width = max(self.model.props.width, w)
self.model.props.height = max(self.model.props.height, h)
self.update_controllers()
# Add
self.widget.canvas.props.root.add(self.model)
for controller in self.controllers:
self.widget.canvas.props.root.add(controller)
def rename(self, new):
self.name = new
self.text.props.text = '{0}\n(<i>{1}</i>)'.format(self.name, self.type)
def clear(self):
self.widget.root.remove(self.model)
for controller in self.controllers:
self.widget.root.remove(controller)
def bounds(self):
x = self.model.props.x
y = self.model.props.y
w = self.model.props.width
h = self.model.props.height
return x - w/2 - CONTROLLER_WIDTH, y - h/2 - CONTROLLER_HEIGHT, x + w/2 + CONTROLLER_WIDTH, y + h/2 + CONTROLLER_HEIGHT
def get_size(self):
return self.model.props.width, self.model.props.height
def update_controllers(self):
w, h = self.model.props.width, self.model.props.height
for controller, (sx, sy) in zip(self.controllers, CONTROLLER_COORDS):
matrix = self.model.matrix
matrix.translate(sx*w/2, sy*h/2)
controller.matrix = matrix
def set_model(self, x=None, y=None):
if x is not None:
self.model.props.x = x
if y is not None:
self.model.props.y = y
self.update_controllers()
def move_model(self, dx, dy):
dx, dy = self.model.matrix.transform_distance(dx, dy)
self.model.props.x += dx
self.model.props.y += dy
self.ctx.getCanvas().updateScrollRegion()
def rotate_model(self, dx, dy):
import math
item = self.controllers[MODE_ROTATE]
angle = math.atan2(item.props.y + dy, item.props.x + dx) - \
math.atan2(item.props.y, item.props.x)
matrix = self.model.matrix
matrix.rotate(angle)
self.model.matrix = matrix
def resize_model(self, controller, dx, dy):
dx, dy = self.model.matrix.transform_distance(dx, dy)
sx, sy = CONTROLLER_COORDS[controller.id]
self.model.props.width += sx * dx
self.model.props.height += sy * dy
wmin, hmin = self.min_size()
if self.model.props.width < wmin:
self.model.props.width = wmin
else:
self.model.props.x += dx / 2
if self.model.props.height < hmin:
self.model.props.height = hmin
else:
self.model.props.y += dy / 2
def match(self, x2, y2):
for controller in self.controllers:
x0, y0, x1, y1 = controller.get_bounds()
x0, y0 = controller.matrix.transform_point(x0, y0)
x1, y1 = controller.matrix.transform_point(x1, y1)
if x0 <= x2 and x2 <= x1 and y0 <= y2 and y2 <= y1:
return controller
return None
def event(self, widget, event, matrix, pick_item):
if event.type == gtk.gdk.ENTER_NOTIFY:
self.model.props.line_width = MODEL_BORDER_HIGHLIGHT
elif event.type == gtk.gdk.BUTTON_PRESS:
widget.init_x = event.x
widget.init_y = event.y
self.ctx.getProperties().show_component(self.name)
elif event.type == gtk.gdk.BUTTON_RELEASE:
self.sync = True
elif event.type == gtk.gdk.MOTION_NOTIFY:
dx = event.x - widget.init_x
dy = event.y - widget.init_y
if widget.mode not in (MODE_MOVE, MODE_ROTATE, MODE_RESIZE):
return False
if self.sync:
self.ctx.sync()
self.sync = False
if widget.mode == MODE_MOVE:
self.move_model(dx, dy)
elif widget.mode == MODE_ROTATE:
self.rotate_model(dx, dy)
elif widget.mode == MODE_RESIZE:
self.resize_model(widget, dx, dy)
self.update_controllers()
elif event.type == gtk.gdk.LEAVE_NOTIFY:
self.model.props.line_width = MODEL_BORDER_NORMAL
return False
def getOrientation(self, controller):
import math, arrow
xx, yx, xy, yy, tx, ty = self.model.matrix
angle = math.atan2(xy, xx)
shift = - angle / math.pi * 2. - 0.5
return arrow.rot(controller.orientation, math.ceil(shift))
def save(self, gfx):
gfx.setAttribute('x', str(self.model.props.x))
gfx.setAttribute('y' ,str(self.model.props.y))
gfx.setAttribute('w', str(self.model.props.width))
gfx.setAttribute('h', str(self.model.props.height))
gfx.setAttribute('r', str(math.acos(self.model.matrix.transform_distance(1, 0)[0])))
def min_size(self):
local = crcanvas.Bounds()
device = crcanvas.DeviceBounds()
self.text.calculate_bounds(local, device)
w = local.x2 - local.x1
h = local.y2 - local.y1
return w + TEXT_PADDING, h + TEXT_PADDING
def get_angle(self):
xx, yx, xy, yy, tx, ty = self.model.matrix
return math.atan2(xy, xx)
class SirannonProperties(SirannonBase):
def __init__(self, ctx, widget):
SirannonBase.__init__(self, ctx)
self.registerEvents()
self.widget = widget
self.name = ()
self.button = None
def get_value(self, obj):
if isinstance(obj, gtk.ComboBox):
if obj.get_active():
return 'True'
else:
return 'False'
else:
return obj.get_text()
def clear(self):
for child in self.widget.get_children():
self.widget.remove(child)
def getObj(self):
if len(self.name) == 1:
return self.ctx.getConfig().getComponent(*self.name)
else:
return self.ctx.getConfig().getRoute(*self.name)
def show_component(self, name):
self.save()
self.clear()
config = self.ctx.getConfig()
component = config.getComponent(name)
self.name = (name,)
type = component.getAttribute('type')
template_params = util.dictify(config.getTemplateParams(type))
params = config.getParams(component)
self.widget.resize(len(params)+4, 2)
i = 0
label = gtk.Label('name')
label.set_alignment(0.,0.5)
self.widget.attach(label, 0, 1, i, i+1, ypadding=PROPERTIES_YPAD)
self.button = gtk.Button()
self.button.set_label(name)
self.button.connect('clicked', self.eventRename)
self.widget.attach(self.button, 1, 2, i, i+1, xpadding=PROPERTIES_XPAD, ypadding=PROPERTIES_YPAD)
i += 1
label = gtk.Label('type')
label.set_alignment(0.,0.5)
self.widget.attach(label, 0, 1, i, i+1, xpadding=PROPERTIES_XPAD, ypadding=PROPERTIES_YPAD)
entry = gtk.Button()
entry.set_label(type)
self.widget.attach(entry, 1, 2, i, i+1, xpadding=PROPERTIES_XPAD, ypadding=PROPERTIES_YPAD)
i += 1
sep = gtk.HSeparator()
self.widget.attach(sep, 0, 2, i, i+1, xpadding=PROPERTIES_XPAD, ypadding=PROPERTIES_YPAD)
i += 1
for param in params:
name = param.getAttribute('name')
ptype = param.getAttribute('type')
str = '{0} ({1})'.format(name, ptype)
label = gtk.Label(str)
label.set_alignment(0.,0.5)
#label.set_ellipsize(pango.ELLIPSIZE_END)
if name in template_params:
label.set_tooltip_text(template_params[name].getAttribute('desc'))
else:
label.set_tooltip_text('No description available')
self.widget.attach(label, 0, 1, i, i+1, xpadding=PROPERTIES_XPAD, ypadding=PROPERTIES_YPAD)
if ptype == 'bool':
entry = gtk.combo_box_new_text()
entry.append_text('False')
entry.append_text('True')
val = util.bool(param.getAttribute('val'))
if val:
entry.set_active(1)
else:
entry.set_active(0)
entry.connect('changed', self.eventEdit, name)
else:
entry = gtk.Entry()
entry.set_alignment(0.)
entry.set_text(param.getAttribute('val'))
entry.connect('changed', self.eventEdit, name)
self.widget.attach(entry, 1, 2, i, i+1, xpadding=PROPERTIES_XPAD, ypadding=PROPERTIES_YPAD)
i += 1
sep = gtk.HSeparator()
self.widget.attach(sep, 0, 2, i, i+1, xpadding=PROPERTIES_XPAD, ypadding=PROPERTIES_YPAD)
i += 1
self.desc = gtk.TextView()
self.desc.set_wrap_mode(gtk.WRAP_WORD)
str = self.ctx.getConfig().getComponentDesc(type)
if not str:
str = 'No description available.'
self.desc.get_buffer().set_text(str)
self.desc.set_editable(False)
self.widget.attach(self.desc, 0, 2, i, i+1, xpadding=PROPERTIES_XPAD, ypadding=PROPERTIES_YPAD )
self.widget.show_all()
self.widget.set_reallocate_redraws(True)
        gobject.timeout_add(100, self.widget.check_resize)
def show_route(self, begin, end):
self.save()
self.clear()
component = self.ctx.getConfig().getRoute(begin, end)
self.name = (begin, end)
self.widget.resize(3, 2)
for i,str in enumerate(('from', 'to')):
label = gtk.Label(str)
label.set_alignment(0.,0.5)
self.widget.attach(label, 0, 1, i, i+1, xpadding=PROPERTIES_XPAD, ypadding=PROPERTIES_YPAD)
entry = gtk.Entry()
entry.set_alignment(0.)
entry.set_text(component.getAttribute(str))
entry.editable = False
self.widget.attach(entry, 1, 2, i, i+1, xpadding=PROPERTIES_XPAD, ypadding=PROPERTIES_YPAD)
label = gtk.Label('xroute')
label.set_alignment(0.,0.5)
self.widget.attach(label, 0, 1, 2, 3, xpadding=PROPERTIES_XPAD, ypadding=PROPERTIES_YPAD)
entry = gtk.Entry()
entry.set_text(component.getAttribute('xroute'))
entry.connect('changed', self.eventEditRoute)
self.widget.attach(entry, 1, 2, 2, 3, xpadding=PROPERTIES_XPAD, ypadding=PROPERTIES_YPAD)
self.widget.show_all()
def load(self):
self.clear()
# Restore previous
if len(self.name) == 2:
route = self.ctx.getConfig().getRoute(*self.name)
if route:
self.show_route(*self.name)
elif len(self.name) == 1:
component = self.ctx.getConfig().getComponent(*self.name)
if component:
self.show_component(*self.name)
def save(self):
pass
def eventClear(self, widget):
pass
def eventDelete(self, widget):
if self.name:
self.ctx.sync()
if len(self.name) == 1:
self.ctx.getCanvas().removeComponent(*self.name)
else:
self.ctx.getCanvas().removeRoute(*self.name)
self.clear()
return True
def eventEdit(self, widget, name):
config = self.ctx.getConfig()
param = config.getParam(self.getObj(), name)
new = self.get_value(widget)
old = param.getAttribute('val')
if new != old:
config.sync() # CAVEAT INVALIDATES CURRENT PARAM
param = config.getParam(self.getObj(), name)
param.setAttribute('val', new)
def eventEditRoute(self, widget):
config = self.ctx.getConfig()
old = self.getObj().getAttribute('xroute')
new = widget.get_text()
if new != old:
config.sync()
self.getObj().setAttribute('xroute', new)
def eventRename(self, widget):
dialog = self.ctx.getWidget('RenameDialog')
dialog.set_markup('New name for the component.')
entry = self.ctx.getRenameEntry()
old = self.name[0]
entry.set_text(old)
entry.show()
while(True):
status = dialog.run()
dialog.hide()
if status == gtk.RESPONSE_OK:
new = entry.get_text()
if new != old:
if self.ctx.getCanvas().renameComponent(old, new) < 0:
dialog.set_markup('Name already exists. Choose a new unique name for the component.')
continue
else:
self.button.set_label(new)
self.button.pressed()
self.button.released()
self.name = (new,)
break
class SirannonMenu(SirannonBase):
def __init__(self, ctx):
SirannonBase.__init__(self, ctx)
self.registerEvents()
self.menu = gtk.Menu()
self.reload()
def reload(self):
for child in self.menu.get_children():
self.menu.remove(child)
templates = self.ctx.getConfig().getTemplates()
abstracts = [item for item in sorted(templates, key=lambda x: x.getAttribute('name')) if item.getAttribute('abstract').lower() == 'true']
for i, abstract in enumerate(abstracts):
type = abstract.getAttribute('name')
if type == 'core': # Do not display core
continue
item = gtk.MenuItem(label=type)
submenu = gtk.Menu()
item.set_submenu(submenu)
self.menu.attach(item, 0, 1, i, i+1)
components = [item for item in sorted(templates, key=lambda x: x.getAttribute('name')) if item.getAttribute('type') == type]
for j, component in enumerate(sorted(components)):
name = component.getAttribute('name')
item = gtk.MenuItem(label=name)
submenu.attach(item, 0, 1, j, j+1)
item.connect('activate', self.newComponent, name)
self.menu.show_all()
def newComponent(self, widget, type):
self.ctx.getCanvas().createComponent(type)
def eventNewComponent(self, widget):
self.menu.popup(None, None, None, 0, 0)
def show(self):
self.menu.popup(None, None, None, 0, 0)
class SirannonCanvas(SirannonBase):
def __init__(self, ctx, master, widget):
SirannonBase.__init__(self, ctx)
self.registerEvents()
self.master = master
self.widget = widget
self.components = {}
self.arrows = {}
self.line = None
self.canvas = crcanvas.Canvas(auto_scale=True, maintain_center=True, maintain_aspect=True, show_less=False)
self.widget.add(self.canvas)
self.root = self.canvas.props.root
self.root.connect('event', self.eventClickComponent)
self.canvas.connect('event', self.eventClickCanvas)
#self.canvas.set_repaint_mode(True)
self.fresh = True
self.canvas.set_max_scale_factor(5.0, 5.0)
self.canvas.set_min_scale_factor(0.2, 0.2)
def bounds(self):
# True bounds
xmin, ymin = float("inf"), float("inf")
xmax, ymax = -float("inf"), -float("inf")
for component in self.components.values():
x1, y1, x2, y2 = component.bounds()
if x1 < xmin: xmin = x1
if y1 < ymin: ymin = y1
if x2 > xmax: xmax = x2
if y2 > ymax: ymax = y2
# Convert into centered coords
w = xmax - xmin
h = ymax - ymin
cx = (xmin + xmax) * .5
cy = (ymin + ymax) * .5
# Force into widget aspect
x, y, w_out, h_out = self.widget.get_allocation()
scale = float(w_out) / float(h_out) * h / w
if scale > 1.0:
w *= scale
else:
h /= scale
# Add some padding
w *= 1.2
h *= 1.2
return cx, cy, w, h
def eventAlignVer(self, button):
if self.components:
self.ctx.sync()
xtot = sum([c.model.props.x for c in self.components.itervalues()])
xtot /= len(self.components)
for c in self.components.itervalues():
c.set_model(x=xtot)
self.canvas.queue_repaint()
return True
def eventAlignHor(self, button):
if self.components:
self.ctx.sync()
ytot = sum([c.model.props.y for c in self.components.itervalues()])
ytot /= len(self.components)
for c in self.components.itervalues():
c.set_model(y=ytot)
self.canvas.queue_repaint()
return True
def eventZoomNormal(self, widget):
if not self.components:
return
cx, cy, w, h = self.bounds()
self.canvas.center_scale(cx, cy, w, h)
self.canvas.set_scroll_region(cx-2*w, cy-2*h, cx+2*w, cy+2*h)
def eventZoomIn(self, widget):
self.canvas.zoom(1.5, 0.)
def eventZoomOut(self, widget):
self.canvas.zoom(2./3., 0.)
def updateScrollRegion(self):
cx, cy, w, h = self.bounds()
x1, y1, x2, y2 = self.canvas.get_viewport()
w = max(w, x2-x1)
h = max(h, y2-y1)
self.canvas.set_scroll_region(cx-2*w, cy-2*h, cx+2*w, cy+2*h)
def eventClickCanvas(self, widget, event):
if event.type == gtk.gdk.BUTTON_PRESS and event.button == 3:
print 'Right click'
self.master.getMenu().show()
def eventClickComponent(self, widget, event, matrix, pick_item):
if event.type == gtk.gdk.BUTTON_PRESS:
if hasattr(pick_item, 'mode') and pick_item.mode == MODE_CONNECT:
self.begin_line(pick_item, event)
print pick_item
if not pick_item:
print 'Empty'
if pick_item == self.canvas:
                print 'Self'
elif event.type == gtk.gdk.MOTION_NOTIFY:
if self.line:
self.move_line(event)
elif event.type == gtk.gdk.BUTTON_RELEASE:
if self.line:
self.end_line(event)
return False
def clear(self):
for component in self.components.values():
component.clear()
self.components.clear()
for arrow in self.arrows.values():
arrow.clear()
self.arrows.clear()
self.canvas.queue_repaint()
def begin_line(self, controller, event):
self.begin, controller = self.match(event.x, event.y)
if not controller:
return
self.line = SirannonArrow(self.ctx, self)
self.line.set_begin(controller)
def move_line(self, event):
import arrow
e = arrow.Vector(event.x, event.y)
d_min = 1.e+99
closest = None
        # Determine the controller orientation based on the closest controller
for component in self.components.values():
for controller in component.controllers:
d = (arrow.Vector(controller.props.x, controller.props.y) - e).norm()
if controller.mode != MODE_CONNECT:
continue
if d < d_min:
closest = controller
d_min = d
if closest == self.line.begin or d_min > 10000.:
# We are near the original, take its opposite
o = arrow.flip(closest.orientation)
self.line.set_middle(event.x, event.y, o)
else:
self.line.set_middle(event.x, event.y, closest.orientation)
def end_line(self, event):
self.end, controller = self.match(event.x, event.y)
if controller and controller.mode == MODE_CONNECT:
if (self.begin.name, self.end.name) in self.arrows:
self.line.clear()
elif self.begin.name == self.end.name:
self.line.clear()
else:
self.line.set_end(controller)
self.createRoute(self.begin.name, self.end.name, self.line)
else:
self.line.clear()
self.line = None
def load(self):
self.clear()
print 'Load canvas'
self.old = False
for component in self.ctx.getConfig().getElements('component'):
self.drawComponent(component)
for route in self.ctx.getConfig().getElements('route'):
self.drawRoute(route)
#util.warning('Ignoring invalid route')
if self.old:
#util.warning(self.ctx, 'Upgraded XML to new GUI')
self.ctx.getConfig().forceDirty()
if self.fresh:
gobject.timeout_add(100, self.eventZoomNormal, None)
self.fresh = False
self.canvas.show_all()
def save(self):
for name in self.components.keys():
component = self.ctx.getConfig().getComponent(name)
gfx = self.ctx.getConfig().getChild(component, 'gfx')
self.components[name].save(gfx)
for begin, end in self.arrows.keys():
route = self.ctx.getConfig().getRoute(begin, end)
gfx = self.ctx.getConfig().getChild(route, 'gfx')
self.arrows[begin, end].save(gfx)
self.ctx.getProperties().save()
def match(self, x, y):
for component in self.components.values():
controller = component.match(x,y)
if controller:
return component, controller
return None, None
def renameComponent(self, name, new):
if new in self.components.keys():
return -1
self.ctx.sync()
component = self.components[name]
del self.components[name]
self.components[new] = component
xml_component = self.ctx.getConfig().getComponent(name)
xml_component.setAttribute('name', new)
component.rename(new)
for (begin, end), route in self.arrows.iteritems():
if begin == name:
del self.arrows[begin, end]
self.arrows[new, end] = route
xml_route = self.ctx.getConfig().getRoute(begin, end)
xml_route.setAttribute('from', new)
elif end == name:
del self.arrows[begin, end]
self.arrows[begin, new] = route
xml_route = self.ctx.getConfig().getRoute(begin, end)
xml_route.setAttribute('to', new)
return 0
def removeComponent(self, name):
# Remove from GUI
component = self.components[name]
component.clear()
del self.components[name]
# Remove from config
config = self.ctx.getConfig()
xml_component = config.getComponent(name)
config.delElement(xml_component)
# Clear arrows
for begin, end in self.arrows.keys():
if begin == name or end == name:
self.removeRoute(begin, end)
self.canvas.queue_repaint()
def removeRoute(self, begin, end):
# Remove from GUI
arrow = self.arrows[begin, end]
arrow.clear()
del self.arrows[begin, end]
# Remove from config
config = self.ctx.getConfig()
xml_route = config.getRoute(begin, end)
config.delElement(xml_route)
self.canvas.queue_repaint()
def drawComponent(self, component):
# Dimensions
gfxs = component.getElementsByTagName("gfx")
if not gfxs:
x, y, w, h, r = 0., 0., MODEL_DEFAULT_WIDTH, MODEL_DEFAULT_HEIGHT, 0.
self.old = True
else:
gfx = gfxs[0]
x = float(gfx.getAttribute("x"))
y = float(gfx.getAttribute("y"))
w = float(gfx.getAttribute("w"))
h = float(gfx.getAttribute("h"))
r = 0.
if gfx.hasAttribute("r"):
r = float(gfx.getAttribute("r"))
else:
x += w/2
y += h/2
name = str(component.getAttribute("name"))
type = str(component.getAttribute("type"))
# Draw
self.components[name] = SirannonComponent(self.ctx, self, name, type, x, y, w, h, r)
def drawRoute(self, route):
begin = str(route.getAttribute('from'))
end = str(route.getAttribute('to'))
begin_component = self.components[begin]
end_component = self.components[end]
xroute = int(route.getAttribute('xroute'))
gfx = route.getElementsByTagName("gfx")
arrow = SirannonArrow(self.ctx, self)
if gfx:
begin_controller_id = int(gfx[0].getAttribute('from'))
end_controller_id = int(gfx[0].getAttribute('to'))
arrow.set_begin(self.components[begin].controllers[begin_controller_id])
arrow.set_end(self.components[end].controllers[end_controller_id])
else:
arrow.set_refined(begin_component, end_component)
self.old = True
self.arrows[begin, end] = arrow
def createRoute(self, begin, end, arrow):
self.ctx.sync()
self.arrows[begin, end] = arrow
self.ctx.getConfig().createRoute(begin, end, 0)
def createComponent(self, type):
self.ctx.sync()
screen, x, y, mask = gtk.gdk.display_get_default().get_pointer()
x0, y0, w, h = self.canvas.get_allocation()
config = self.ctx.getConfig()
matrix = self.root.matrix
matrix.invert()
x, y = matrix.transform_point(x - x0, y - y0)
name = config.uniqueName(type)
self.components[name] = SirannonComponent(self.ctx, self, name, type, x, y)
config.createComponent(name, type)
class SirannonDraw(SirannonBase):
def __init__(self, ctx, canvas_widget, properties_widget):
SirannonBase.__init__(self, ctx)
self.registerEvents()
self.__props = SirannonProperties(ctx, properties_widget)
self.__menu = SirannonMenu(ctx)
self.__canvas = SirannonCanvas(ctx, self, canvas_widget)
def getMenu(self):
return self.__menu
def getProperties(self):
return self.__props
def getCanvas(self):
return self.__canvas
def save(self):
self.__canvas.save()
self.__props.save()
def load(self):
self.__canvas.load()
self.__props.load()
| gpl-3.0 | 5,787,790,298,413,505,000 | 37.681053 | 167 | 0.548713 | false |
marcos-sb/quick-openstacked-hadoop | Alba/albaproject/mapred/fabric/fabfile.py | 1 | 8199 | from fabric.api import run, sudo, env, roles, settings, hide, execute, local, put, get, show
import re, pdb
import time
env.connection_attempts = 250
env.timeout = 1
env.abort_on_prompts = True
env.disable_known_hosts = True
env.no_keys = True
env.no_agent = True
##################### move to mapred environment
env.user = 'hduser'
env.password = 'hduser'
#####################
# SET I/O, KEY, RAM, MASTER AND SLAVES BEFORE USING #################################
#env.roledefs = {
# 'master':['192.100.0.1'],
# 'slaves':['192.100.0.1']
#}
#env.roledefs = None
#env.role2priv_ip = {
# 'master':['172.100.0.3'],
# 'slaves':['172.100.0.3']
#}
env.role2priv_ip = dict()
env.key_filename = None
#env.key_filename = '~/Documents/test.pem'
env.input_filename = None
#env.input_filename = '/home/marcos/Documents/words.tar.gz'
env.mapred_job_filename = None
env.mapred_job_impl_class = None
env.output_path = None
#env.output_path = '/home/marcos/Documents'
###################################### move to mapred environment
env.remote_input_path = '~/hadoop/input'
env.remote_mapred_job_path = '~/hadoop/mapred'
env.remote_output_path = '~/hadoop/output'
###############################
env.remote_mapred_job_filename = None
env.mb_ram = None
# AGAIN, SET I/O, KEY, RAM, MASTER AND SLAVES BEFORE USING #################################
def set_input_filename(abs_file_path=None):
env.input_filename = abs_file_path
def set_mapred_job_filename(abs_file_path=None):
env.mapred_job_filename = abs_file_path
def set_mapred_job_impl_class(fq_class_name=None):
env.mapred_job_impl_class = fq_class_name
def set_output_path(output_path=None):
env.output_path = output_path
def set_key(abs_file_path=None, file_name=None, priv_key=None):
env.key_filename = abs_file_path + file_name;
with open(env.key_filename, 'w') as f:
f.write(priv_key)
local('chmod 0600 ' + env.key_filename)
def set_master_ips(priv_ipv4=None, pub_ipv4=None):
env.roledefs['master'] = [pub_ipv4]
env.role2priv_ip['master'] = [priv_ipv4]
def set_slaves_ips(priv_ipv4_list=None, pub_ipv4_list=None):
env.roledefs['slaves'] = pub_ipv4_list
env.role2priv_ip['slaves'] = priv_ipv4_list
def set_hadoop_ram(mb_ram=None):
env.mb_ram = mb_ram
########################################################################
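# Illustrative call order (added sketch). A driver script is expected to feed
# this module through the setters above before invoking the `start` task
# defined at the bottom of this file; all values below are placeholders.
#
#   set_key('/tmp/', 'job.pem', private_key_text)
#   set_master_ips(priv_ipv4='172.100.0.3', pub_ipv4='192.100.0.1')
#   set_slaves_ips(priv_ipv4_list=['172.100.0.4'], pub_ipv4_list=['192.100.0.2'])
#   set_hadoop_ram(1024)
#   set_input_filename('/data/words.tar.gz')
#   set_mapred_job_filename('/data/wordcount.jar')
#   set_mapred_job_impl_class('org.example.WordCount')
#   set_output_path('/data/results')
#   start(timer)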
def clean_file(file_name):
sudo('rm -rf ' + file_name)
sudo('touch ' + file_name)
sudo('chmod 664 ' + file_name)
sudo('chown :hadoop ' + file_name)
def _set_hadoop_ram():
env_file = '$HADOOP_CONF_DIR/hadoop-env.sh'
run("sed -ri 's_-Xmx[0-9]+m_-Xmx{0}m_' {1}".format(env.mb_ram, env_file))
def set_hadoop_master():
masters_file = '$HADOOP_CONF_DIR/masters'
clean_file(masters_file)
run('echo {0} > {1}'.format(env.role2priv_ip['master'][0], masters_file))
def set_hadoop_slaves():
slaves_file = '$HADOOP_CONF_DIR/slaves'
clean_file(slaves_file)
command = '"{0}\\n'.format(env.role2priv_ip['master'][0])
for slave in env.role2priv_ip['slaves']:
command += slave + '\\n'
command = command[:-2] + '"' #"-2" as the '\' is escaped
run('echo -e {0} > {1}'.format(command, slaves_file))
def set_hadoop_core_site():
core_site_file = '$HADOOP_CONF_DIR/core-site.xml'
run("sed -ri 's_//[a-z0-9]+:_//{0}:_' {1}" \
.format(env.role2priv_ip['master'][0], core_site_file))
def set_hadoop_mapred_site():
mapred_site_file = '$HADOOP_CONF_DIR/mapred-site.xml'
run("sed -ri 's_>[a-z0-9]+:_>{0}:_' {1}" \
.format(env.role2priv_ip['master'][0], mapred_site_file))
#@roles('master')
#def set_hadoop_hdfs_site():
# hdfs_site_file = '$HADOOP_CONF_DIR/hdfs-site.xml'
# run("sed -ir 's_>[a-z0-9]+:_>{0}:_' {1}" \
# .format(env.role2priv_ip['master'][0], hdfs_site_file))
@roles('master','slaves')
def ping_all():
while True:
try:
run('echo "$(hostname) is up"')
return
except:
time.sleep(1)
@roles('master')
def configure_master():
_set_hadoop_ram()
set_hadoop_master()
set_hadoop_slaves()
set_hadoop_core_site()
set_hadoop_mapred_site()
@roles('slaves')
def configure_slaves():
_set_hadoop_ram()
set_hadoop_core_site()
set_hadoop_mapred_site()
@roles('master')
def format_hdfs():
hdfs_site_file_name = '$HADOOP_CONF_DIR/hdfs-site.xml'
hdfs_site_file = run('cat ' + hdfs_site_file_name)
match = re.search(r'>(?P<dfs_name_dir>.+name)<', hdfs_site_file)
    dfs_name_dir = match.groupdict()['dfs_name_dir']
sudo('rm -rf ' + dfs_name_dir + '/../*') #remove all folders from a previous hdfs formatting
run('hadoop namenode -format')
@roles('master')
def start_hdfs():
put(local_path=env.key_filename,
remote_path='~/.ssh/id_rsa',
mode=0600)
run('start-dfs.sh')
@roles('master')
def stop_hdfs():
run('stop-dfs.sh')
@roles('master')
def start_mapred():
run('start-mapred.sh')
@roles('master')
def stop_mapred():
run('stop-mapred.sh')
@roles('master')
def put_input():
run('rm -rf ' + env.remote_input_path)
run('mkdir -p ' + env.remote_input_path)
put(local_path=env.input_filename, remote_path=env.remote_input_path)
##################################################################################
match = re.search(r'.*/(?P<file_name>.+)(?P<file_ext>\..+$)', env.input_filename)
    file_ext = match.groupdict()['file_ext']
    file_name = match.groupdict()['file_name'] + file_ext
##################################################################################
cmd = {}
cmd['.tbz'] = cmd['.bz2'] = \
cmd['.tgz'] = cmd['.gz'] = 'tar -C {0} -xvf'.format(env.remote_input_path)
#cmd['.zip'] = 'unzip' ...
#pdb.set_trace()
run('{0} {1}/{2}'.format(
cmd[file_ext], env.remote_input_path, file_name))
run('rm -f {0}/{1}'.format(env.remote_input_path, file_name))
#with settings(warn_only=True):
# run('hadoop dfs -rmr input output')
run('hadoop dfs -mkdir input')
run('hadoop dfs -put {0}/* input'.format(
env.remote_input_path))
@roles('master')
def put_mapred_job():
run('rm -rf ' + env.remote_mapred_job_path)
run('mkdir -p ' + env.remote_mapred_job_path)
put(local_path=env.mapred_job_filename, remote_path=env.remote_mapred_job_path)
##################################################################################
match = re.search(r'.*/(?P<file_name>.+)(?P<file_ext>\..+$)', env.mapred_job_filename)
    file_ext = match.groupdict()['file_ext']
    file_name = match.groupdict()['file_name'] + file_ext
##################################################################################
env.remote_mapred_job_filename = '{0}/{1}'.format(env.remote_mapred_job_path, file_name)
@roles('master')
def run_comp():
run('hadoop jar {0} {1} input output'.format(
env.remote_mapred_job_filename, env.mapred_job_impl_class))
@roles('master')
def get_output():
run('rm -rf ' + env.remote_output_path)
run('mkdir -p ' + env.remote_output_path)
run('hadoop dfs -get output ' + env.remote_output_path)
run('tar -C {0} -czvf {0}/hadoop.out.tar.gz output'.format(env.remote_output_path))
local('mkdir -p ' + env.output_path)
get(remote_path='{0}/hadoop.out.tar.gz'.format(env.remote_output_path),
local_path=env.output_path)
def stop():
execute(stop_hdfs)
execute(stop_mapred)
def delete_keypair():
local('rm -rf ' + env.key_filename)
def get_output_file_name():
return 'hadoop.out.tar.gz'
def start(timer):
with show('debug'):
execute(ping_all)
timer.hadoopwfconfstart = time.time()
execute(configure_master)
execute(configure_slaves)
execute(format_hdfs)
execute(start_hdfs)
execute(start_mapred)
execute(put_input)
execute(put_mapred_job)
timer.hadoopmapredstart = time.time()
execute(run_comp)
timer.hadoopmapredend = time.time()
execute(get_output)
#execute(stop)
#execute()
| apache-2.0 | 5,768,290,729,784,638,000 | 28.282143 | 96 | 0.570679 | false |
codeforamerica/comport | comport/admin/views.py | 1 | 4390 | # -*- coding: utf-8 -*-
from flask import Blueprint, render_template, request, redirect, url_for, flash, abort
from comport.utils import flash_errors
from flask.ext.login import login_required
from .forms import NewDepartmentForm, NewInviteForm, EditUserForm, EditExtractorForm
from comport.department.models import Department, Extractor
from comport.user.models import Invite_Code, User
from comport.interest.models import Interested
from comport.decorators import requires_roles
import uuid
blueprint = Blueprint("admin", __name__, url_prefix='/admin',
static_folder="../static")
@blueprint.route("/")
@login_required
@requires_roles(["admin"])
def admin_dashboard():
interesteds = Interested.query.all()
invites = Invite_Code.query.filter_by(used=False)
users = User.query.filter_by(active=True)
extractors = Extractor.query.all()
departments = Department.query.all()
return render_template("admin/dashboard.html", interesteds=interesteds, invites=invites, users=users, extractors=extractors, departments=departments)
@blueprint.route("/department/new", methods=["GET", "POST"])
@login_required
@requires_roles(["admin"])
def add_department():
form = NewDepartmentForm(request.form)
if request.method == 'POST':
if form.validate_on_submit():
Department.create(name=form.department_name.data, short_name=form.department_short_name.data)
flash('Department %s created.' % form.department_name.data, 'info')
return redirect(url_for('admin.admin_dashboard'))
else:
flash_errors(form)
return render_template("admin/newDepartment.html", form=form)
@blueprint.route("/invite/new", methods=["GET", "POST"])
@login_required
@requires_roles(["admin"])
def new_invite_code():
form = NewInviteForm(request.form)
form.department_id.choices = [(d.id, d.name) for d in Department.query.order_by('name')]
if request.method == 'POST':
if form.validate_on_submit():
invite = Invite_Code.create(department_id=form.department_id.data, code=str(uuid.uuid4()), used=False)
flash('Invite Code for {0}: {1} created.'.format(invite.department.name, invite.code), 'info')
return redirect(url_for('admin.admin_dashboard'))
else:
flash_errors(form)
return render_template("admin/newInvite.html", form=form)
@blueprint.route("/user/<int:user_id>/edit", methods=["GET", "POST"])
@login_required
@requires_roles(["admin"])
def edit_user(user_id):
user = User.get_by_id(user_id)
if not user:
abort(404)
form = EditUserForm(request.form, departments=[d.id for d in user.departments])
form.departments.choices = [(d.id, d.name) for d in Department.query.order_by('name')]
if request.method == 'POST':
user.departments = [Department.get_by_id(int(d)) for d in form.departments.data]
user.save()
flash('User updated.', 'info')
return redirect(url_for('admin.admin_dashboard'))
return render_template("admin/editUser.html", form=form, user=user)
@blueprint.route("/user/<int:user_id>/passwordReset", methods=["GET", "POST"])
@login_required
@requires_roles(["admin"])
def start_password_reset(user_id):
user = User.get_by_id(user_id)
if not user:
abort(404)
if request.method == 'POST':
user.password_reset_uuid = str(uuid.uuid4())
user.save()
flash('User password reset engaged.', 'info')
return redirect(url_for('admin.edit_user', user_id=user_id))
return redirect(url_for('admin.edit_user', user_id=user_id))
@blueprint.route("/extractor/<int:extractor_id>/edit", methods=["GET", "POST"])
@login_required
@requires_roles(["admin"])
def edit_extractor(extractor_id):
extractor = Extractor.get_by_id(extractor_id)
if not extractor:
abort(404)
form = EditExtractorForm(request.form, departments=[d.id for d in extractor.departments])
form.departments.choices = [(d.id, d.name) for d in Department.query.order_by('name')]
if request.method == 'POST':
extractor.departments = [Department.get_by_id(int(d)) for d in form.departments.data]
extractor.save()
flash('Extractor updated.', 'info')
return redirect(url_for('admin.admin_dashboard'))
return render_template("admin/editExtractor.html", form=form, extractor=extractor)
| bsd-3-clause | 5,361,072,941,567,465,000 | 39.275229 | 153 | 0.682005 | false |
anuragjain67/django-sql-reports | sqlreports/core.py | 1 | 3453 | from collections import OrderedDict
from django.http import HttpResponse
from django.template import Context, Template
from django.utils.html import escape
from sqlreports.utils import CSVWriter, get_db_connection
from sqlreports.models import SQLReport
def dictfetchall(cursor):
"Returns all rows from a cursor as a dict"
desc = cursor.description
return [
OrderedDict(zip([col[0] for col in desc], row))
for row in cursor.fetchall()
]
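# Example (added): for "SELECT id, email FROM auth_user" the helper returns
# [OrderedDict([('id', 1), ('email', 'a@example.org')]), ...], i.e. one
# ordered dict per row, keyed by the column names in SELECT order.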
class ReportFormatter(object):
def filename(self):
return self.filename_template
class ReportCSVFormatter(ReportFormatter):
filename_template = 'sqlreports.csv'
def get_csv_writer(self, file_handle, **kwargs):
return CSVWriter(open_file=file_handle, **kwargs)
def generate_response(self, headers, objects, **kwargs):
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=%s' \
% self.filename(**kwargs)
self.generate_csv(response, headers, objects)
return response
def generate_csv(self, response, headers, objects):
writer = self.get_csv_writer(response)
# Write a first row with header information
writer.writerow(headers)
# Write data rows
for data_obj in objects:
writer.writerow([data_obj[header] for header in headers])
return response
class ReportHTMLFormatter(ReportFormatter):
def generate_response(self, headers, objects, **kwargs):
return objects
class ReportGenerator(object):
formatters = {
'CSV_formatter': ReportCSVFormatter,
'HTML_formatter': ReportHTMLFormatter
}
def __init__(self, **kwargs):
formatter_name = '%s_formatter' % kwargs['formatter']
self.formatter = self.formatters[formatter_name]()
def generate(self, report_id, params):
records = self.get_report_data(report_id, params)
headers = records[0].keys()
return self.formatter.generate_response(headers, records)
def get_report_query(self, report_id, params_dict):
""" QueryExample:
select id, checkin_time from auth_user where email = '{{EMAIL_ID}}'
"""
# FIXME: Need to include MySQL Escape
query = SQLReport.objects.get(id=report_id).query
t = Template(query)
# Escaping Params
escaped_params = {}
for item in params_dict.items():
escaped_params[item[0]] = escape(item[1])
c = Context(escaped_params)
return t.render(c)
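    # Example (added sketch): with the stored query
    #   select id, checkin_time from auth_user where email = '{{EMAIL_ID}}'
    # and params_dict = {'EMAIL_ID': 'a@example.org'}, the rendered SQL is
    #   select id, checkin_time from auth_user where email = 'a@example.org'
    # Values are HTML-escaped before substitution, which is only a partial
    # defence against injection (see the FIXME above).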
def get_report_data(self, report_id, params):
""" For given sqlreports id and params it return the sqlreports data"""
# FIXME: Connection should have only read only permission
query = self.get_report_query(report_id, params)
cursor = get_db_connection().cursor()
cursor.execute(query)
return dictfetchall(cursor)
def is_available_to(self, user, report):
"""
Checks whether this report is available to this user
"""
if user.is_superuser:
# Super users are allowed everything
return True
if not user.is_staff:
            # Non-staff users are never allowed access to reports
return False
        # Allowed only if the report is flagged as available to non-superusers
if not report.user_allowed:
return False
return True
| mit | -7,402,006,128,133,347,000 | 31.271028 | 79 | 0.646394 | false |
sirex/atviriduomenys.lt | adlt/frontpage/api.py | 1 | 1171 | from django.http import Http404
from django.contrib.auth.decorators import login_required
from django.db.models import F
from adlt.common.helpers import ajax
import adlt.core.models as core_models
@ajax.request('GET')
def agent_list(request): # pylint: disable=unused-argument
return [
{
'pk': agent.pk,
'title': agent.title,
}
for agent in core_models.Agent.objects.all()
]
@login_required
@ajax.request('GET')
def like_toggle(request, object_type, object_id):
object_types = {
'dataset': core_models.Dataset,
'project': core_models.Project,
}
if object_type not in object_types:
raise Http404
qs = core_models.Likes.objects.filter(user=request.user, object_type=object_type, object_id=object_id)
if qs.exists():
object_types[object_type].objects.filter(pk=object_id).update(likes=F('likes') - 1)
qs.delete()
else:
object_types[object_type].objects.filter(pk=object_id).update(likes=F('likes') + 1)
core_models.Likes.objects.create(user=request.user, object_type=object_type, object_id=object_id)
return {'status': 'ok'}
| agpl-3.0 | 8,084,578,697,310,724,000 | 29.025641 | 106 | 0.660974 | false |
mikeshultz/ethsnap | ethsnap/scripts/ethsnap-snapshot.py | 1 | 2864 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import subprocess
import argparse
import configparser
import datetime
import tempfile
import sqlite3
import hashlib
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--ini", help="The ethsnap ini configuration", \
metavar="FILE", nargs=1, default="/etc/ethsnap.ini")
args = parser.parse_args()
# open and parse config file
config = configparser.ConfigParser()
config.read(args.ini)
# Store what we need
OUT_DIR = config.get('default', 'archivedir')
DATA_DIR = config.get('default', 'datadir', \
fallback=os.path.expanduser("~/.ethereum"))
SQLITE_FILE = config.get('default', 'sqlite', fallback="/var/ethsnap/ethsnap.db")
TIMEOUT = config.getint('default', 'timeout', fallback=3600)
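# Example /etc/ethsnap.ini (added sketch; every path below is a placeholder):
#
#   [default]
#   archivedir = /var/ethsnap/archives
#   datadir = /home/geth/.ethereum
#   sqlite = /var/ethsnap/ethsnap.db
#   timeout = 3600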
# sanity checks
if not os.path.isdir(OUT_DIR):
print("%s is not a directory" % OUT_DIR, file=sys.stderr)
sys.exit(1)
if not os.path.isdir(DATA_DIR):
print("%s is not a directory" % DATA_DIR, file=sys.stderr)
sys.exit(1)
# function to hash files
def hash_file(filename):
BLOCKSIZE = 65536
hashy = hashlib.sha1()
with open(filename, 'rb') as f:
buf = f.read(BLOCKSIZE)
while len(buf) > 0:
hashy.update(buf)
buf = f.read(BLOCKSIZE)
return hashy.hexdigest()
# Set our original directory
orig_pwd = os.getcwd()
# Set the location of the DB, but figure out if it's relative or absolute
if SQLITE_FILE[0] == "/":
sqlite_db = SQLITE_FILE
else:
sqlite_db = os.path.join(orig_pwd, SQLITE_FILE)
# Compile the output filename we need
output_filename = datetime.datetime.now().strftime("ethereum-chaindata-%Y-%m-%d-%H%M%S.tar.gz")
output_file = os.path.join(OUT_DIR, output_filename)
# Switch to the chaindata directory
os.chdir(os.path.join(DATA_DIR, "geth/chaindata"))
# Run the archive command
process = subprocess.Popen(['tar', '-czf', output_file, '.'], \
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate(timeout=TIMEOUT)
returncode = process.returncode
# Check its return and abort if the archive failed
if returncode != 0:
    print(err, file=sys.stderr)
    sys.exit(returncode)
print("Archive complete")
# Get info on new file
archive_stat = os.stat(output_file)
archive_size = archive_stat.st_size
sha1sum = hash_file(output_file)
# Connect to sqlite db
conn = sqlite3.connect(sqlite_db)
# get the cursor
cur = conn.cursor()
# Create the table if necessary
cur.execute("""CREATE TABLE IF NOT EXISTS snapshots
(id integer primary key, timestamp int, sha1 text, size int, filename text)""")
# insert this new record
cur.execute("""INSERT INTO snapshots (timestamp, sha1, size, filename)
VALUES (:stamp, :sha1, :size, :file)""",
{
'stamp': datetime.datetime.utcnow().timestamp(),
'sha1': sha1sum,
'size': archive_size,
'file': output_filename
}
)
conn.commit() | gpl-3.0 | -3,445,864,085,058,283,000 | 26.815534 | 95 | 0.683659 | false |
dpa-newslab/livebridge-liveblog | tests/test_post.py | 1 | 5419 | # -*- coding: utf-8 -*-
#
# Copyright 2016 dpa-infocom GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asynctest
from datetime import datetime
from livebridge_liveblog import LiveblogPost
from tests import load_json
class LiveblogPostTest(asynctest.TestCase):
def setUp(self):
self.post = load_json('post_to_convert.json')
self.images = ["/tmp/one.jpg"]
self.content= "foobaz"
self.lp = LiveblogPost(self.post, content=self.content, images=self.images)
@asynctest.fail_on(unused_loop=False)
def test_init(self):
assert self.lp.data == self.post
assert hasattr(self.lp, "is_deleted") == True
assert hasattr(self.lp, "is_update") == True
assert hasattr(self.lp, "is_sticky") == True
assert self.lp.id == "urn:newsml:localhost:2016-04-28T11:24:22.973191:666890f6-9054-4f81-81ac-cc6d5f02b2c9"
assert self.lp.source_id == "56fceedda505e600f71959c8"
assert type(self.lp.updated) == datetime
assert type(self.lp.created) == datetime
assert self.lp.created.year == 2016
assert self.lp.created.minute == 24
assert self.lp.updated.year == 2016
assert self.lp.updated.second == 22
assert self.lp.images == self.images
assert self.lp.content == self.content
@asynctest.fail_on(unused_loop=False)
def test_get_action(self):
# ignore/submitted
self.lp._existing = None
self.post["post_status"] = "submitted"
assert self.lp.get_action() == "ignore"
# ignore/draft
self.post["post_status"] = "draft"
assert self.lp.get_action() == "ignore"
# no ignore, post is known
self.lp._existing = {"foo":"baz"}
assert self.lp.get_action() != "ignore"
# should be update
self.post["post_status"] = ""
assert self.lp.get_action() == "update"
# test delete
self.lp._deleted = True
assert self.lp.get_action() == "delete"
# test ignore for unknown
self.lp._deleted = None
self.lp._existing = None
assert self.lp.get_action() == "create"
# test ignore for deleted
self.lp._deleted = True
assert self.lp.get_action() == "ignore"
@asynctest.fail_on(unused_loop=False)
def test_is_not_delete(self):
assert self.lp.is_deleted == False
@asynctest.fail_on(unused_loop=False)
def test_is_deleted(self):
self.lp.data["deleted"] = True
assert self.lp.is_deleted == True
self.lp._deleted = False
assert self.lp.is_deleted == False
@asynctest.fail_on(unused_loop=False)
def test_is_deleted_unpublished(self):
self.lp.data["unpublished_date"] = "2016-05-06T15:00:59+00:00"
self.lp.data["published_date"] = "2016-05-06T15:00:39+00:00"
assert self.lp.is_deleted == True
@asynctest.fail_on(unused_loop=False)
def test_is_sticky(self):
assert self.lp.is_sticky == False
self.lp.data["sticky"] = True
assert self.lp.is_sticky == True
@asynctest.fail_on(unused_loop=False)
def test_is_highlighted(self):
assert self.lp.is_highlighted == False
self.lp.data["lb_highlight"] = True
assert self.lp.is_highlighted == True
@asynctest.fail_on(unused_loop=False)
def test_is_submitted(self):
assert self.lp.is_submitted == False
self.lp.data["post_status"] = "submitted"
assert self.lp.is_submitted == True
@asynctest.fail_on(unused_loop=False)
def test_is_draft(self):
assert self.lp.is_draft == False
self.lp.data["post_status"] = "draft"
assert self.lp.is_draft == True
@asynctest.fail_on(unused_loop=False)
def test_is_update(self):
self.lp.data["_created"] = "new"
self.lp.data["_updated"] = "new"
assert self.lp.is_update == False
self.lp.data["_updated"] = "new2"
assert self.lp.is_update == True
@asynctest.fail_on(unused_loop=False)
def test_existing(self):
assert self.lp.get_existing() == None
assert self.lp.is_known == False
self.lp.set_existing({"foo": "baz"})
assert self.lp.get_existing() == {"foo": "baz"}
assert self.lp.is_known == True
@asynctest.fail_on(unused_loop=False)
def test_target_doc(self):
assert self.lp.target_doc == None
self.lp._existing = {"target_doc": {"doc": "foo"}}
assert self.lp.target_doc == self.lp._existing["target_doc"]
@asynctest.fail_on(unused_loop=False)
def test_target_id(self):
assert self.lp._target_id == None
self.lp._target_id = "foobaz"
assert self.lp.target_id == "foobaz"
@asynctest.fail_on(unused_loop=False)
def test_target_id_from_existing(self):
self.lp.set_existing({"target_id": "foobaz"})
assert self.lp.target_id == "foobaz"
| apache-2.0 | -704,934,657,416,254,800 | 35.126667 | 116 | 0.627422 | false |
jbkalmbach/kbmod | analysis/create_stamps.py | 1 | 11661 | import os
import random
import csv
import numpy as np
import matplotlib.pyplot as plt
from skimage import measure
from astropy.io import fits
class create_stamps(object):
def __init__(self):
return
def load_lightcurves(self, lc_filename, lc_index_filename):
lc = []
lc_index = []
with open(lc_filename, 'r') as f:
reader = csv.reader(f)
for row in reader:
lc.append(np.array(row, dtype=np.float))
with open(lc_index_filename, 'r') as f:
reader = csv.reader(f)
for row in reader:
lc_index.append(np.array(row, dtype=np.int))
return lc, lc_index
def load_psi_phi(self, psi_filename, phi_filename, lc_index_filename):
psi = []
phi = []
lc_index = []
with open(psi_filename, 'r') as f:
reader = csv.reader(f)
for row in reader:
psi.append(np.array(row, dtype=np.float))
with open(phi_filename, 'r') as f:
reader = csv.reader(f)
for row in reader:
phi.append(np.array(row, dtype=np.float))
with open(lc_index_filename, 'r') as f:
reader = csv.reader(f)
for row in reader:
lc_index.append(np.array(row, dtype=np.int))
return(psi, phi, lc_index)
def load_times(self, time_filename):
times = []
with open(time_filename, 'r') as f:
reader = csv.reader(f)
for row in reader:
times.append(np.array(row, dtype=np.float))
return times
def load_stamps(self, stamp_filename):
stamps = np.genfromtxt(stamp_filename)
if len(np.shape(stamps)) < 2:
stamps = np.array([stamps])
stamp_normalized = stamps/np.sum(stamps, axis=1).reshape(len(stamps), 1)
return stamp_normalized
def stamp_filter(self, stamps, center_thresh, verbose=True):
keep_stamps = np.where(np.max(stamps, axis=1) > center_thresh)[0]
if verbose:
print('Center filtering keeps %i out of %i stamps.'
% (len(keep_stamps), len(stamps)))
return keep_stamps
def load_results(self, res_filename):
results = np.genfromtxt(res_filename, usecols=(1,3,5,7,9,11,13),
names=['lh', 'flux', 'x', 'y', 'vx', 'vy', 'num_obs'])
return results
def plot_all_stamps(
self, results, lc, lc_index, coadd_stamp, stamps, sample=False):
"""Plot the coadded and individual stamps of the candidate object
along with its lightcurve.
"""
# Set the rows and columns for the stamp subplots.
# These will affect the size of the lightcurve subplot.
numCols=5
# Find the number of subplots to make.
numPlots = len(stamps)
# Compute number of rows for the plot
numRows = numPlots // numCols
# Add a row if numCols doesn't divide evenly into numPlots
if (numPlots % numCols):
numRows+=1
# Add a row if numRows=1. Avoids an error caused by ax being 1D.
if (numRows==1):
numRows+=1
# Add a row for the lightcurve subplots
numRows+=1
if sample:
numRows=4
# Plot the coadded stamp and the lightcurve
# Generate the stamp plots, setting the size with figsize
fig,ax = plt.subplots(nrows=numRows,ncols=numCols,
figsize=[3.5*numCols,3.5*numRows])
# In the first row, we only want the coadd and the lightcurve.
# Delete all other axes.
for i in range(numCols):
if i>1:
fig.delaxes(ax[0,i])
# Plot coadd and lightcurve
x_values = np.linspace(1,len(lc),len(lc))
coadd_stamp = coadd_stamp.reshape(21,21)
ax[0,0].imshow(coadd_stamp)
ax[0,1] = plt.subplot2grid((numRows,numCols), (0,1),colspan=4,rowspan=1)
ax[0,1].plot(x_values,lc,'b')
ax[0,1].plot(x_values[lc==0],lc[lc==0],'g',lw=4)
ax[0,1].plot(x_values[lc_index],lc[lc_index],'r.',ms=15)
ax[0,1].xaxis.set_ticks(x_values)
res_line = results
ax[0,1].set_title('Pixel (x,y) = (%i, %i), Vel. (x,y) = (%f, %f), Lh = %f' %
(res_line['x'], res_line['y'], res_line['vx'],
res_line['vy'], res_line['lh']))
plt.xticks(np.arange(min(x_values), max(x_values)+1, 5.0))
# Turn off all axes. They will be turned back on for proper plots.
for row in ax[1:]:
for column in row:
column.axis('off')
size = 21
sigma_x = 1.4
sigma_y = 1.4
x = np.linspace(-10, 10, size)
y = np.linspace(-10, 10, size)
x, y = np.meshgrid(x, y)
gaussian_kernel = (1/(2*np.pi*sigma_x*sigma_y)
* np.exp(-(x**2/(2*sigma_x**2) + y**2/(2*sigma_y**2))))
sum_pipi = np.sum(gaussian_kernel**2)
noise_kernel = np.zeros((21,21))
x_mask = np.logical_or(x>5, x<-5)
y_mask = np.logical_or(y>5, y<-5)
mask = np.logical_or(x_mask,y_mask)
noise_kernel[mask] = 1
SNR = np.zeros(len(stamps))
signal = np.zeros(len(stamps))
noise = np.zeros(len(stamps))
# Plot stamps of individual visits
axi=1
axj=0
if sample:
mask = np.array(random.sample(range(1,len(stamps)),15))
else:
mask = np.linspace(0,len(stamps),len(stamps)+1)
for j,stamp in enumerate(stamps):
signal[j] = np.sum(stamp*gaussian_kernel)
noise[j] = np.var(stamp*noise_kernel)
SNR[j] = signal[j]/np.sqrt(noise[j]*sum_pipi)
if (mask == j).any():
im = ax[axi,axj].imshow(stamp)
ax[axi,axj].set_title(
'visit={0:d} | SNR={1:.2f}'.format(j+1,SNR[j]))
ax[axi,axj].axis('on')
# If KBMOD says the index is valid, highlight in red
if (lc_index==j).any():
for axis in ['top','bottom','left','right']:
ax[axi,axj].spines[axis].set_linewidth(4)
ax[axi,axj].spines[axis].set_color('r')
ax[axi,axj].tick_params(axis='x', colors='red')
ax[axi,axj].tick_params(axis='y', colors='red')
# Compute the axis indexes for the next iteration
if axj<numCols-1:
axj += 1
else:
axj = 0
axi += 1
coadd_signal = np.sum(coadd_stamp*gaussian_kernel)
coadd_noise = np.var(coadd_stamp*noise_kernel)
coadd_SNR = coadd_signal/np.sqrt(coadd_noise*sum_pipi)
Psi = np.sum(signal[lc_index]/noise[lc_index])
Phi = np.sum(sum_pipi/noise[lc_index])
summed_SNR = Psi/np.sqrt(Phi)
ax[0,0].set_title(
'Total SNR={:.2f}'.format(coadd_SNR))
#ax[0,0].set_title(
# 'Total SNR={:.2f}\nSummed SNR={:.2f}'.format(coadd_SNR,summed_SNR))
for axis in ['top','bottom','left','right']:
ax[0,0].spines[axis].set_linewidth(4)
ax[0,0].spines[axis].set_color('r')
ax[0,0].tick_params(axis='x', colors='red')
ax[0,0].tick_params(axis='y', colors='red')
return(fig)
def plot_stamps(self, results, lc, lc_index, stamps, center_thresh, fig=None):
keep_idx = self.stamp_filter(stamps, center_thresh)
if fig is None:
fig = plt.figure(figsize=(12, len(lc_index)*2))
for i,stamp_idx in enumerate(keep_idx):
current_lc = lc[stamp_idx]
current_lc_index = lc_index[stamp_idx]
x_values = np.linspace(1,len(current_lc),len(current_lc))
fig.add_subplot(len(keep_idx),2,(i*2)+1)
plt.imshow(stamps[stamp_idx].reshape(21,21))
fig.add_subplot(len(keep_idx),2,(i*2)+2)
plt.plot(x_values,current_lc,'b')
plt.plot(x_values[current_lc==0],current_lc[current_lc==0],'g',lw=4)
plt.plot(x_values[current_lc_index],current_lc[current_lc_index],'r.',ms=15)
plt.xticks(x_values)
res_line = results[stamp_idx]
plt.title('Pixel (x,y) = (%i, %i), Vel. (x,y) = (%f, %f), Lh = %f, index = %i' %
(res_line['x'], res_line['y'], res_line['vx'],
res_line['vy'], res_line['lh'], stamp_idx))
plt.tight_layout()
return fig
def target_stamps(
self, results, lc, lc_index, stamps, center_thresh,
target_xy, target_vel=None, vel_tol=5, atol=10,
title_info=None):
keep_idx = self.stamp_filter(stamps, center_thresh, verbose=False)
# Count the number of objects within atol of target_xy
count=0
object_found=False
for i,stamp_idx in enumerate(keep_idx):
res_line = results[stamp_idx]
if target_vel is not None:
vel_truth = (
np.isclose(res_line['vx'], target_vel[0], atol=vel_tol) and
np.isclose(res_line['vy'], target_vel[1], atol=vel_tol))
else:
vel_truth = True
if (np.isclose(res_line['x'],target_xy[0],atol=atol)
and np.isclose(res_line['y'],target_xy[1],atol=atol)
and vel_truth):
count+=1
# Plot lightcurves of objects within atol of target_xy
if count>0:
object_found=True
else:
return(0,False)
y_size = count
fig = plt.figure(figsize=(12, 2*y_size))
count=0
for i,stamp_idx in enumerate(keep_idx):
res_line = results[stamp_idx]
if target_vel is not None:
vel_truth = (
np.isclose(res_line['vx'], target_vel[0], atol=vel_tol) and
np.isclose(res_line['vy'], target_vel[1], atol=vel_tol))
else:
vel_truth = True
if (np.isclose(res_line['x'],target_xy[0],atol=atol)
and np.isclose(res_line['y'],target_xy[1],atol=atol)
and vel_truth):
current_lc = lc[stamp_idx]
current_lc_index = lc_index[stamp_idx]
x_values = np.linspace(1,len(current_lc),len(current_lc))
fig.add_subplot(y_size,2,(count*2)+1)
plt.imshow(stamps[stamp_idx].reshape(21,21))
fig.add_subplot(y_size,2,(count*2)+2)
plt.plot(x_values,current_lc,'b')
plt.plot(x_values[current_lc==0],current_lc[current_lc==0],'g',lw=4)
plt.plot(x_values[current_lc_index],current_lc[current_lc_index],'r.',ms=15)
plt.xticks(x_values)
title = 'Pixel (x,y) = ({}, {}), Vel. (x,y) = ({}, {}), Lh = {}, index = {}'
if title_info is not None:
title = title_info+'\n'+title
plt.title(title.format(
res_line['x'], res_line['y'], res_line['vx'],
res_line['vy'], res_line['lh'], stamp_idx))
count+=1
plt.tight_layout()
return(fig, object_found)
def calc_mag(self, image_files, lc, idx_list):
flux_vals = []
for filenum, lc_val in zip(idx_list, lc):
hdulist = fits.open(image_files[int(filenum)])
j_flux = lc_val/hdulist[0].header['FLUXMAG0']
flux_vals.append(j_flux)
return -2.5*np.log10(np.mean(flux_vals))
| bsd-2-clause | -6,406,296,244,992,474,000 | 38.934932 | 93 | 0.522425 | false |
hdknr/django-mediafiles | src/mediafiles/tests.py | 1 | 7095 | # -*- coding: utf-8 -*-
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase,client
from django.core.urlresolvers import reverse,resolve
from django.conf import settings
from django.http import HttpResponse,HttpResponseRedirect,Http404
from urllib import urlparse,urlencode
from models import *
import os
class RootTest(TestCase):
def setUp(self):
#: site admins
self.admins_name=[ 'admin1','admin2']
self.admins = [ User.objects.get_or_create(
username=u,
is_staff=True,is_superuser=True,)[0]
for u in self.admins_name ]
#: users
self.users_name=['member1','member2']
self.users = [ User.objects.get_or_create(
username=u)[0]
for u in self.users_name ]
#: password
map(lambda u: u.set_password(u.username), self.users )
map(lambda u: u.save(), self.users )
map(lambda u: u.set_password(u.username), self.admins)
map(lambda u: u.save(), self.admins)
def auth(self,user=None):
if user:
self.assertTrue( User.objects.filter(id =user.id).exists() )
self.assertTrue( self.client.login(username=user.username, password=user.username,) )
def get(self,name,status_code=200,user=None,msg="Exception",query={},**kwargs):
''' response object
'''
self.auth( user )
self.last_url = reverse( name,kwargs=kwargs )
if query:
self.last_url = self.last_url +"?" + urlencode(query)
response = self.client.get( self.last_url )
self.assertEqual(response.status_code,status_code,msg)
return response
def post(self,name,status_code=200,user=None,msg="Exception",query={},form={},**kwargs):
''' response object
'''
self.auth( user )
self.last_url = reverse( name,kwargs=kwargs )
if query:
self.last_url = self.last_url +"?" + urlencode(query)
response = self.client.post( self.last_url,form )
self.assertEqual(response.status_code,status_code,msg)
return response
class MediaFileTest(RootTest):
def upload_file(self,gallery_id,image_file):
response = None
with open(image_file) as fp :
response = self.post('gallery_admin_media_create',
user =self.users[0], form={ 'data': fp, } ,id=gallery_id)
return response
def download(self,url,target='/tmp', update=False):
import os
import requests
fname = os.path.join(target,url.split('/')[-1:][0] )
if update or not os.path.isfile(fname):
with open(fname, 'wb') as f:
r = requests.get(url,stream=True)
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
return fname
def test_upload(self):
''' python manage.py test mediafiles.MediaFileTest.test_upload'''
image_file = os.path.join(
os.path.dirname(
os.path.abspath( __file__)), 'fixtures/papi.jpg')
g = Gallery()
g.save()
self.assertEqual( g.medias.count(),0 )
self.assertEqual( MediaFile.objects.count() ,0)
response = self.upload_file( g.id, image_file )
self.assertEqual( g.medias.count(),1 )
self.assertEqual( MediaFile.objects.count() ,1)
mediafile = MediaFile.objects.all()[0]
self.assertEqual( mediafile.mimetype, 'image/jpeg')
media_path = mediafile.data.path
mediafile.delete()
self.assertFalse( os.path.isfile( media_path ) )
def test_pdf(self):
''' python manage.py test mediafiles.MediaFileTest.test_pdf'''
url = "http://www.city.shibuya.tokyo.jp/city/saiyo/pdf/saiyo_annai.pdf"
pdf_file =self.download(url)
g = Gallery()
g.save()
self.assertEqual( g.medias.count(),0 )
self.assertEqual( MediaFile.objects.count() ,0)
response = self.upload_file( g.id, pdf_file )
self.assertEqual( g.medias.count(),1 )
self.assertEqual( MediaFile.objects.count() ,1)
mediafile = MediaFile.objects.all()[0]
self.assertEqual( mediafile.mimetype, 'application/pdf')
media_path = mediafile.data.path
print mediafile.data.path
mediafile.pdf_to_images()
import glob
from models import UPLOAD_TMP_DIR
ext = "jpg"
jpgs = glob.glob( os.path.join(UPLOAD_TMP_DIR, "pdf.*.jpg" ))
print jpgs
jpg_medias = MediaFile.objects.filter(mimetype="image/jpeg" )
#:ファイル名ルールチェック
import re
self.assertEqual(jpg_medias.count(), len(jpgs) )
for p in jpg_medias:
self.assertIsNotNone( re.search(r"pdf.%d.\d+.jpg" % mediafile.id, p.name ) )
#:削除
mediafile.delete()
self.assertFalse( os.path.isfile( media_path ) )
def save_pdf(self):
''' python manage.py test mediafiles.MediaFileTest.save_pdf'''
url = "http://www.city.shibuya.tokyo.jp/city/saiyo/pdf/saiyo_annai.pdf"
pdf_file =self.download(url)
m=MediaFile.create(pdf_file)
self.assertEqual(MediaFile.objects.count(),1)
self.assertEqual( m.mimetype, 'application/pdf')
print m.mimetype, m.data.path
def test_thumbnail(self):
''' python manage.py test mediafiles.MediaFileTest.test_thumbnail'''
image_file = os.path.join(
os.path.dirname(
os.path.abspath( __file__)), 'fixtures/papi.jpg')
g = Gallery()
g.save()
response = self.upload_file( g.id, image_file )
mediafile = MediaFile.objects.all()[0]
from thumbs import cached_thumb
ret = cached_thumb( mediafile.data )
response = self.get("gallery_admin_media_thumb",
id=g.id ,mid=mediafile.id,query={'width':100, 'height':30,})
self.assertEqual(response['Content-Type'],"image/jpeg" )
import hashlib
hash_content = hashlib.md5( response.content).hexdigest()
hash_file = hashlib.md5( open( mediafile.thumb_path(size=(100,30))).read() ).hexdigest()
self.assertEqual( hash_content,hash_file )
response = self.get("mediafiles_thumbnail",id=mediafile.id, width=100, height=30)
hash_content_2 = hashlib.md5( response.content).hexdigest()
self.assertEqual( hash_content,hash_content_2)
self.assertEqual(self.last_url , mediafile.get_thumbnail_url(size=(100,30) ) )
#: clean image files
mediafile.delete()
| mit | -5,386,137,962,503,259,000 | 34.335 | 97 | 0.583699 | false |
williewonka/CoTC_Tools | Check_Data.py | 1 | 1151 | __author__ = 'williewonka'
#this tool checks if the provinces title exists and if this title has an owner
import json
# import random
#dictionary that keeps track of the number of existing members of a dynastie, that way no conflicts will emerge
existing_dynasties = {}
#id numbering: 001 + dynasty id
jsonfile = open("provinces.json", "r")
provinces = json.loads(jsonfile.readlines()[0])
# jsonfile = open("dynasties_per_culture.json", "r")
# dyna_per_culture = json.loads(jsonfile.readlines()[0])
#iterate through all the provinces
for province in provinces:
#open the title document to see if the title is already to assigned to someone
try:#check if the stream throws error, to see if the title exists yet
filestream = open("titles/" + province["title"] + ".txt")
except:
print("title " + province["title"] + " does not exists")
holder = False
#iterate through file and look for a holder of the title
for l in filestream:
line = l.split("#")[0]
if "holder" in line:
holder = True
if not holder:
print("title " + province['title'] + " has no holder")
print("done") | gpl-2.0 | 4,775,298,376,574,635,000 | 36.16129 | 111 | 0.676803 | false |
minlexx/pyevemon | esi_client/models/put_fleets_fleet_id_wings_wing_id_naming.py | 1 | 3247 | # coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online
OpenAPI spec version: 0.4.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class PutFleetsFleetIdWingsWingIdNaming(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, name=None):
"""
PutFleetsFleetIdWingsWingIdNaming - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'name': 'str'
}
self.attribute_map = {
'name': 'name'
}
self._name = name
@property
def name(self):
"""
Gets the name of this PutFleetsFleetIdWingsWingIdNaming.
name string
:return: The name of this PutFleetsFleetIdWingsWingIdNaming.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this PutFleetsFleetIdWingsWingIdNaming.
name string
:param name: The name of this PutFleetsFleetIdWingsWingIdNaming.
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
if name is not None and len(name) > 10:
raise ValueError("Invalid value for `name`, length must be less than or equal to `10`")
self._name = name
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, PutFleetsFleetIdWingsWingIdNaming):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| gpl-3.0 | 5,606,025,348,482,912,000 | 25.834711 | 99 | 0.530336 | false |
purpleidea/gedit-plugins | plugins/bracketcompletion/bracketcompletion.py | 1 | 10661 | # -*- coding: utf-8 -*-
#
# bracketcompletion.py - Bracket completion plugin for gedit
#
# Copyright (C) 2006 - Steve Frécinaux
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
from gi.repository import GObject, Gtk, Gdk, Gedit
common_brackets = {
'(' : ')',
'[' : ']',
'{' : '}',
'"' : '"',
"'" : "'",
}
close_brackets = {
')' : '(',
']' : '[',
'}' : '{',
}
language_brackets = {
'changelog': { '<' : '>' },
'html': { '<' : '>' },
'ruby': { '|' : '|', 'do': 'end' },
'sh': { '`' : '`' },
'xml': { '<' : '>' },
'php': { '<' : '>' },
}
class BracketCompletionPlugin(GObject.Object, Gedit.ViewActivatable):
__gtype_name__ = "BracketCompletion"
view = GObject.property(type=Gedit.View)
def __init__(self):
GObject.Object.__init__(self)
def do_activate(self):
self._doc = self.view.get_buffer()
self._last_iter = None
self._stack = []
self._relocate_marks = True
self.update_language()
# Add the markers to the buffer
insert = self._doc.get_iter_at_mark(self._doc.get_insert())
self._mark_begin = self._doc.create_mark(None, insert, True)
self._mark_end = self._doc.create_mark(None, insert, False)
self._handlers = [
None,
None,
self.view.connect('notify::editable', self.on_notify_editable),
self._doc.connect('notify::language', self.on_notify_language),
None,
]
self.update_active()
def do_deactivate(self):
if self._handlers[0]:
self.view.disconnect(self._handlers[0])
self.view.disconnect(self._handlers[1])
self._doc.disconnect(self._handlers[4])
self.view.disconnect(self._handlers[2])
self._doc.disconnect(self._handlers[3])
self._doc.delete_mark(self._mark_begin)
self._doc.delete_mark(self._mark_end)
def update_active(self):
# Don't activate the feature if the buffer isn't editable or if
# there are no brackets for the language
active = self.view.get_editable() and \
self._brackets is not None
if active and self._handlers[0] is None:
self._handlers[0] = self.view.connect('event-after',
self.on_event_after)
self._handlers[1] = self.view.connect('key-press-event',
self.on_key_press_event)
self._handlers[4] = self._doc.connect('delete-range',
self.on_delete_range)
elif not active and self._handlers[0] is not None:
self.view.disconnect(self._handlers[0])
self._handlers[0] = None
self.view.disconnect(self._handlers[1])
self._handlers[1] = None
self._doc.disconnect(self._handlers[4])
self._handlers[4] = None
def update_language(self):
lang = self._doc.get_language()
if lang is None:
self._brackets = None
return
lang_id = lang.get_id()
if lang_id in language_brackets:
self._brackets = language_brackets[lang_id]
# we populate the language-specific brackets with common ones lazily
self._brackets.update(common_brackets)
else:
self._brackets = common_brackets
# get the corresponding keyvals
self._bracket_keyvals = set()
for b in self._brackets:
kv = Gdk.unicode_to_keyval(ord(b[-1]))
if (kv):
self._bracket_keyvals.add(kv)
for b in close_brackets:
kv = Gdk.unicode_to_keyval(ord(b[-1]))
if (kv):
self._bracket_keyvals.add(kv)
def get_current_token(self):
end = self._doc.get_iter_at_mark(self._doc.get_insert())
start = end.copy()
word = None
if end.ends_word() or (end.inside_word() and not end.starts_word()):
start.backward_word_start()
word = self._doc.get_text(start, end)
if not word and start.backward_char():
word = start.get_char()
if word.isspace():
word = None
if word:
return word, start, end
else:
return None, None, None
def get_next_token(self):
start = self._doc.get_iter_at_mark(self._doc.get_insert())
end = start.copy()
word = None
if start.ends_word() or (start.inside_word() and not start.starts_word()):
end.forward_word_end()
word = self._doc.get_text(start, end)
if not word:
word = start.get_char()
if word.isspace():
word = None
if word:
return word, start, end
else:
return None, None, None
def compute_indentation (self, cur):
"""
Compute indentation at the given iterator line
view : gtk.TextView
cur : gtk.TextIter
"""
start = self._doc.get_iter_at_line(cur.get_line())
end = start.copy();
c = end.get_char()
while c.isspace() and c not in ('\n', '\r') and end.compare(cur) < 0:
if not end.forward_char():
break
c = end.get_char()
if start.equal(end):
return ''
return start.get_slice(end)
def on_notify_language(self, view, pspec):
self.update_language()
self.update_active()
def on_notify_editable(self, view, pspec):
self.update_active()
def on_key_press_event(self, view, event):
if event.state & (Gdk.ModifierType.CONTROL_MASK | Gdk.ModifierType.MOD1_MASK):
return False
if event.keyval in (Gdk.KEY_Left, Gdk.KEY_Right):
self._stack = []
if event.keyval == Gdk.KEY_BackSpace:
self._stack = []
if self._last_iter == None:
return False
iter = self._doc.get_iter_at_mark(self._doc.get_insert())
iter.backward_char()
self._doc.begin_user_action()
self._doc.delete(iter, self._last_iter)
self._doc.end_user_action()
self._last_iter = None
return True
if event.keyval in (Gdk.KEY_Return, Gdk.KEY_KP_Enter) and \
view.get_auto_indent() and self._last_iter != None:
# This code has barely been adapted from gtksourceview.c
# Note: it might break IM!
mark = self._doc.get_insert()
iter = self._doc.get_iter_at_mark(mark)
indent = self.compute_indentation(iter)
indent = "\n" + indent
# Insert new line and auto-indent.
self._doc.begin_user_action()
self._doc.insert(iter, indent)
self._doc.insert(iter, indent)
self._doc.end_user_action()
# Leave the cursor where we want it to be
iter.backward_chars(len(indent))
self._doc.place_cursor(iter)
self.view.scroll_mark_onscreen(mark)
self._last_iter = None
return True
self._last_iter = None
return False
def on_event_after(self, view, event):
if event.type != Gdk.EventType.KEY_PRESS or \
event.state & (Gdk.ModifierType.CONTROL_MASK | Gdk.ModifierType.MOD1_MASK) or \
event.keyval not in self._bracket_keyvals:
return
# Check if the insert mark is in the range of mark_begin to mark_end
# if not we free the stack
insert = self._doc.get_insert()
iter_begin = self._doc.get_iter_at_mark(self._mark_begin)
iter_end = self._doc.get_iter_at_mark(self._mark_end)
insert_iter = self._doc.get_iter_at_mark(insert)
if not iter_begin.equal(iter_end):
if not insert_iter.in_range(iter_begin, iter_end):
self._stack = []
self._relocate_marks = True
# Check if the word is not in our brackets
word, start, end = self.get_current_token()
if word not in self._brackets and word not in close_brackets:
return
# If we didn't insert brackets yet we insert them in the insert mark iter
if self._relocate_marks == True:
insert_iter = self._doc.get_iter_at_mark(insert)
self._doc.move_mark(self._mark_begin, insert_iter)
self._doc.move_mark(self._mark_end, insert_iter)
self._relocate_marks = False
# Depending on having close bracket or a open bracket we get the opposed
# bracket
bracket = None
bracket2 = None
if word not in close_brackets:
self._stack.append(word)
bracket = self._brackets[word]
else:
bracket2 = close_brackets[word]
word2, start2, end2 = self.get_next_token()
# Check to skip the closing bracket
# Example: word = ) and word2 = )
if word == word2:
if bracket2 != None and self._stack != [] and \
self._stack[len(self._stack) - 1] == bracket2:
self._stack.pop()
self._doc.handler_block(self._handlers[4])
self._doc.delete(start, end)
self._doc.handler_unblock(self._handlers[4])
end.forward_char()
self._doc.place_cursor(end)
return
# Insert the closing bracket
if bracket != None:
self._doc.begin_user_action()
self._doc.insert(end, bracket)
self._doc.end_user_action()
# Leave the cursor when we want it to be
self._last_iter = end.copy()
end.backward_chars(len(bracket))
self._doc.place_cursor(end)
def on_delete_range(self, doc, start, end):
self._stack = []
# ex:ts=4:et:
| gpl-2.0 | -3,347,244,695,238,270,000 | 32.734177 | 90 | 0.54925 | false |
Spirotot/py3status | py3status/modules/twitch_streaming.py | 1 | 2788 | """
Checks if a Twitch streamer is online.
Checks if a streamer is online using the Twitch Kraken API to see
if a channel is currently streaming or not.
Configuration parameters
cache_timeout: how often we refresh this module in seconds
(default 10)
format: Display format when online
(default "{stream_name} is live!")
format_offline: Display format when offline
(default "{stream_name} is offline.")
format_invalid: Display format when streamer does not exist
(default "{stream_name} does not exist!")
stream_name: name of streamer(twitch.tv/<stream_name>)
(default None)
Format of status string placeholders
{stream_name}: name of the streamer
@author Alex Caswell [email protected]
@license BSD
"""
import requests
class Py3status:
# available configuration parameters
# can be customized in i3status.conf
cache_timeout = 10
format = "{stream_name} is live!"
format_offline = "{stream_name} is offline."
format_invalid = "{stream_name} does not exist!"
stream_name = None
def __init__(self):
self._display_name = None
def _get_display_name(self):
url = 'https://api.twitch.tv/kraken/users/' + self.stream_name
display_name_request = requests.get(url)
self._display_name = display_name_request.json().get('display_name')
def is_streaming(self, i3s_output_list, i3s_config):
if self.stream_name is None:
return {
'full_text': 'stream_name missing',
'cached_until': self.py3.CACHE_FOREVER
}
r = requests.get('https://api.twitch.tv/kraken/streams/' + self.stream_name)
if not self._display_name:
self._get_display_name()
if 'error' in r.json():
colour = i3s_config['color_bad']
full_text = self.format_invalid.format(stream_name=self.stream_name)
elif r.json().get('stream') is None:
colour = i3s_config['color_bad']
full_text = self.format_offline.format(stream_name=self._display_name)
elif r.json().get('stream') is not None:
colour = i3s_config['color_good']
full_text = self.format.format(stream_name=self._display_name)
else:
colour = i3s_config['color_bad']
full_text = "An unknown error has occurred."
response = {
'cached_until': self.py3.time_in(self.cache_timeout),
'full_text': full_text,
'color': colour
}
return response
if __name__ == "__main__":
"""
Run module in test mode.
"""
config = {
'stream_name': 'moo'
}
from py3status.module_test import module_test
module_test(Py3status, config=config)
| bsd-3-clause | -2,127,391,554,352,208,100 | 32.190476 | 84 | 0.616212 | false |
blissland/blissflixx | chls/bfch_itv_player/__init__.py | 1 | 2847 | from chanutils import get_doc, select_all, select_one, get_json
from chanutils import get_attr, get_text, get_text_content
from playitem import PlayItem, PlayItemList, MoreEpisodesAction
_FEEDLIST = [
{'title':'Popular', 'url':'http://www.itv.com'},
{'title':'All Shows', 'url':'http://www.itv.com/hub/shows'},
{'title':'Children', 'url':'http://www.itv.com/hub/categories/children'},
{'title':'Comedy', 'url':'http://www.itv.com/hub/categories/comedy'},
{'title':'Drama & Soaps', 'url':'http://www.itv.com/hub/categories/drama-soaps'},
{'title':'Entertainment', 'url':'http://www.itv.com/hub/categories/entertainment'},
{'title':'Factual', 'url':'http://www.itv.com/hub/categories/factual'},
{'title':'Films', 'url':'http://www.itv.com/hub/categories/films'},
{'title':'Sport', 'url':'http://www.itv.com/hub/categories/sport'},
]
_ALL_SHOWS_URL = "https://www.itv.com/hub/api/sayt"
def name():
return 'ITV Player'
def image():
return 'icon.png'
def description():
return "ITV Player Channel (<a target='_blank' href='https://www.itv.com/hub'>https://www.itv.com/hub</a>). Geo-restricted to UK."
def feedlist():
return _FEEDLIST
def feed(idx):
url = _FEEDLIST[idx]['url']
doc = get_doc(url)
rtree = select_all(doc, "a.complex-link")
results = PlayItemList()
for l in rtree:
url = get_attr(l, 'href')
el = select_one(l, '.tout__title')
if el is None:
el = select_one(l, '.slice__title')
if el is None:
continue
title = get_text(el)
el = select_one(l, 'img.fluid-media__media')
img = get_attr(el, 'src')
el = select_one(l, 'p.tout__meta')
if el is None:
el = select_one(l, 'p.slice__meta')
subtitle = get_text_content(el)
if subtitle == 'No episodes available':
continue
item = PlayItem(title, img, url, subtitle)
if subtitle != '1 episode':
item.add_action(MoreEpisodesAction(url, title))
results.add(item)
return results
def search(q):
shows = get_json(_ALL_SHOWS_URL)
results = PlayItemList()
for i in shows:
if not 'episode' in i['url']:
continue
if q.lower() in i['title'].lower():
results.add(PlayItem(i['title'], i['image']['jelly'], i['url']['episode'], i['synopses']))
return results
def showmore(link):
doc = get_doc(link)
rtree = select_all(doc, "a.complex-link")
results = PlayItemList()
for l in rtree:
url = get_attr(l, 'href')
el = select_one(l, 'img.fluid-media__media')
img = get_attr(el, 'src')
el = select_one(l, 'h3')
title = get_text(el)
el = select_one(l, 'time')
subtitle = ""
if el is not None and el.text is not None:
subtitle = get_text(el)
el = select_one(l, 'p.tout__summary')
synopsis = get_text(el)
item = PlayItem(title, img, url, subtitle, synopsis)
results.add(item)
return results
| gpl-2.0 | -5,759,560,689,227,298,000 | 32.104651 | 133 | 0.626273 | false |
deapplegate/wtgpipeline | advanced_calc.py | 1 | 10292 |
# entry point for the input form to pass values back to this script
def setValues(tH0,tWM,tWV,tz,tmnue,tmnumu,tmnutau,tw,twp,tT0):
H0 = tH0
h = H0/100
WM = tWM
WV = tWV
z = tz
WR = 2.477E-5/(h*h) # does not include neutrinos, T0 = 2.72528
WK = 1-WM-WR-WV
mnue = tmnue
mnumu = tmnumu
mnutau = tmnutau
w = tw
wp = twp
T0 = tT0
compute()
# tangential comoving distance
def DCMT(WK,DCMR):
import math
ratio = 1.00
x = math.sqrt(abs(WK))*DCMR
# document.writeln("DCMR = " + DCMR + "<BR>")
# document.writeln("x = " + x + "<BR>")
if (x > 0.1):
if (WK > 0) : ratio = 0.5*(math.exp(x)-math.exp(-x))/x
else: ratio = math.sin(x)/x
# document.writeln("ratio = " + ratio + "<BR>")
y = ratio*DCMR
return y
y = x*x
# statement below fixed 13-Aug-03 to correct sign error in expansion
if (WK < 0): y = -y
ratio = 1 + y/6 + y*y/120
# document.writeln("ratio = " + ratio + "<BR>")
y= ratio*DCMR
return y
# comoving volume computation
def VCM(WK,DCMR):
import math
ratio = 1.00
x = math.sqrt(abs(WK))*DCMR
if (x > 0.1) :
if (WK > 0) : ratio = (0.125*(math.exp(2*x)-math.exp(-2*x))-x/2)/(x*x*x/3)
else: ratio =(x/2 - math.sin(2*x)/4)/(x*x*x/3)
y = ratio*DCMR*DCMR*DCMR/3
return y
y = x*x
# statement below fixed 13-Aug-03 to correct sign error in expansion
if (WK < 0): y = -y
ratio = 1 + y/5 + (2/105)*y*y
y = ratio*DCMR*DCMR*DCMR/3
return y
# function to give neutrino density over rest mass density
def nurho(mnurel,mnu):
import math
y = math.pow(1+math.pow(mnurel/mnu,1.842),1.0/1.842)
return y
# calculate the actual results
def compute(z,w,WM=0.27,WV=0.73):
i=0 # index
n=1000 # number of points in integrals
nda = 1 # number of digits in angular size distance
H0 = 71. # Hubble constant
#WM = 0.27 # Omega(matter)
#WV = 0.73 # Omega(vacuum) or lambda
WR = 0. # Omega(radiation)
WK = 0. # Omega curvaturve = 1-Omega(total)
Wnu = 0. # Omega from massive neutrinos
#z = 3.0 # redshift of the object
h = 0.71 # H0/100
mnue = 0.001 # mass of electron neutrino in eV
mnumu = 0.009 # mass of muon neutrino in eV
mnutau = 0.049 # mass of tau neutrino in eV
we = mnue/93. # Omega(nu(e))h^2
wmu = mnumu/93. # Omega(nu(mu))h^2
wtau = mnutau/93. # Omega(nu(tau))h^2
mnurel = 0.0005 # mass of neutrino that is just now relativistic in eV
T0 = 2.72528 # CMB temperature in K
c = 299792.458 # velocity of light in km/sec
Tyr = 977.8 # coefficent for converting 1/H into Gyr
DTT = 0.5 # time from z to now in units of 1/H0
DTT_Gyr = 0.0 # value of DTT in Gyr
age = 0.5 # age of Universe in units of 1/H0
age_Gyr = 0.0 # value of age in Gyr
zage = 0.1 # age of Universe at redshift z in units of 1/H0
zage_Gyr = 0.0 # value of zage in Gyr
DCMR = 0.0 # comoving radial distance in units of c/H0
DCMR_Mpc = 0.0
DCMR_Gyr = 0.0
DA = 0.0 # angular size distance
DA_Mpc = 0.0
DA_Gyr = 0.0
kpc_DA = 0.0
DL = 0.0 # luminosity distance
DL_Mpc = 0.0
DL_Gyr = 0.0 # DL in units of billions of light years
V_Gpc = 0.0
a = 1.0 # 1/(1+z), the scale factor of the Universe
az = 0.5 # 1/(1+z(object))
#w = -1. # equation of state, w = P/(rno*c^2)
wp = 0. # rate of change of equation of state, w(a) = w+2*wp*(1-a)
# following Linder, astro-ph/040250
import math
h = H0/100.
WR = 2.477E-5*math.pow(T0/2.72528,4)/(h*h) # no neutrinos
# avoid dividing by zero neutrino mass
if (mnue < 0.00001): mnue = 0.00001
if (mnumu < 0.00001): mnumu = 0.00001
if (mnutau < 0.00001): mnutau = 0.00001
# rest mass omega*h^2 for the three neutrino types
we = (mnue/93.64)*math.pow(T0/2.72528,3)
wmu = (mnumu/93.90)*math.pow(T0/2.72528,3)
wtau = (mnutau/93.90)*math.pow(T0/2.72528,3)
# mass of nu that is just now relativistic
# evaluates at 3.151*kT with T = (4/11)^(1/3)*To and To=2.72528
# This is 6.13 K, and 1 eV is 11604.5 K
mnurel = 6.13*(T0/2.72528)/11604.5
Wnu = (we*nurho(mnurel,mnue)+wmu*nurho(mnurel,mnumu)+wtau*nurho(mnurel,mnutau))/(h*h)
WK = 1-WM-WR-WV
WM = WM-Wnu
az = 1.0/(1+1.0*z)
age = 0
# do integral over a=1/(1+z) from 0 to az in n steps, midpoint rule
for i in range(n): #(i = 0 i != n i++) {
a = az*(i+0.5)/n
# rho(DE) = a^{-3-3*w_o-6*w'}*exp(6*w'*(a-1))*rho_o(DE)
# based on w = w_o+w_a*(1-a) with w_a = 2*w': Linder astro-ph/0402503
rhoV = WV*math.pow(a,-3-3*w-6*wp)*math.exp(6*wp*(a-1))
# get neutrino density corrected for kT/mc^2 by using lower mass
# instead of higher T:
Wnu = (we*nurho(mnurel,mnue*a)+wmu*nurho(mnurel,mnumu*a)+wtau*nurho(mnurel,mnutau*a))/(h*h)
adot = math.sqrt(WK+((WM+Wnu)/a)+(WR/(a*a))+(rhoV*a*a))
age = age + 1/adot
zage = az*age/n
# correction for annihilations of particles not present now like e+/e-
# added 13-Aug-03 based on T_vs_t.f
lpz = math.log((1+1.0*z))/math.log(10.0)
dzage = 0
if (lpz > 7.500): dzage = 0.002 * (lpz - 7.500)
if (lpz > 8.000): dzage = 0.014 * (lpz - 8.000) + 0.001
if (lpz > 8.500): dzage = 0.040 * (lpz - 8.500) + 0.008
if (lpz > 9.000): dzage = 0.020 * (lpz - 9.000) + 0.028
if (lpz > 9.500): dzage = 0.019 * (lpz - 9.500) + 0.039
if (lpz > 10.000): dzage = 0.048
if (lpz > 10.775): dzage = 0.035 * (lpz - 10.775) + 0.048
if (lpz > 11.851): dzage = 0.069 * (lpz - 11.851) + 0.086
if (lpz > 12.258): dzage = 0.461 * (lpz - 12.258) + 0.114
if (lpz > 12.382): dzage = 0.024 * (lpz - 12.382) + 0.171
if (lpz > 13.055): dzage = 0.013 * (lpz - 13.055) + 0.188
if (lpz > 14.081): dzage = 0.013 * (lpz - 14.081) + 0.201
if (lpz > 15.107): dzage = 0.214
zage = zage*10.0**dzage
#
zage_Gyr = (Tyr/H0)*zage
DTT = 0.0
DCMR = 0.0
# do integral over a=1/(1+z) from az to 1 in n steps, midpoint rule
for i in range(n):
a = az+(1-az)*(i+0.5)/n
rhoV = WV*math.pow(a,-3-3*w-6*wp)*math.exp(6*wp*(a-1))
Wnu = (we*nurho(mnurel,mnue*a)+wmu*nurho(mnurel,mnumu*a)+wtau*nurho(mnurel,mnutau*a))/(h*h)
adot = math.sqrt(WK+((WM+Wnu)/a)+(WR/(a*a))+(rhoV*a*a))
DTT = DTT + 1/adot
DCMR = DCMR + 1/(a*adot)
#print az
DTT = (1-az)*DTT/n
DCMR = (1-az)*DCMR/n
age = DTT+zage
age_Gyr = age*(Tyr/H0)
DTT_Gyr = (Tyr/H0)*DTT
DCMR_Gyr = (Tyr/H0)*DCMR
DCMR_Mpc = (c/H0)*DCMR
DA = az*DCMT(WK,DCMR)
DA_Mpc = (c/H0)*DA
kpc_DA = DA_Mpc/206.264806
DA_Gyr = (Tyr/H0)*DA
DL = DA/(az*az)
DL_Mpc = (c/H0)*DL
DL_Gyr = (Tyr/H0)*DL
V_Gpc = 4*math.pi*math.pow(0.001*c/H0,3)*VCM(WK,DCMR)
#print 'z',z,'DA_Mpc',DA_Mpc
return DCMR
if __name__ == '__main__':
import pylab
cluster_z_low = 0.2
cluster_z_high = 0.6
for cluster_z in [0.2,0.3,0.55]: #,1.2]:
for w in [-1]: #.5,-1,-0.5,]:
d_cluster_low = compute(cluster_z_low,w)
d_cluster_high = compute(cluster_z_high,w)
d_cluster = compute(cluster_z,w)
refer = (compute(0.8,w) - d_cluster)/compute(0.8,w)
import scipy
ratios_save = []
zs = []
for z in scipy.arange(cluster_z,3.,0.1):
zs.append(z)
s = compute(z,w)
#ratio = (d_cluster_high/(1+cluster_z_high))/(d_cluster_low/(1+cluster_z_low))*(s - d_cluster_high)/(s - d_cluster_low)
ratio = (d_cluster_high/(1+cluster_z_high))/(d_cluster_low/(1+cluster_z_low))*(s - d_cluster_high)/(s - d_cluster_low)
#nprint ratio, s, d_cluster, z
#ratios.append(ratio)
ratios_save.append((compute(z,w) - d_cluster)/compute(z,w)/refer)
for w in [-1.5,-1,-0.5,]:
d_cluster_low = compute(cluster_z_low,w)
d_cluster_high = compute(cluster_z_high,w)
d_cluster = compute(cluster_z,w)
refer = (compute(0.8,w) - d_cluster)/compute(0.8,w)
import scipy
ratios = []
zs = []
i = 0
for z in scipy.arange(cluster_z,3.,0.1):
zs.append(z)
s = compute(z,w)
#ratio = (d_cluster_high/(1+cluster_z_high))/(d_cluster_low/(1+cluster_z_low))*(s - d_cluster_high)/(s - d_cluster_low)
ratio = (d_cluster_high/(1+cluster_z_high))/(d_cluster_low/(1+cluster_z_low))*(s - d_cluster_high)/(s - d_cluster_low)
#print ratio, s, d_cluster, z
#ratios.append(ratio)
ratios.append((compute(z,w) - d_cluster)/compute(z,w)/refer/ratios_save[i])
i += 1
pylab.plot(scipy.array(zs), scipy.array(ratios))
pylab.savefig('shearratio.pdf')
pylab.show()
def compute_cube():
import scipy
dict = {}
for w in [-1]: #scipy.arange(-2,2,0.1):
for WM in [0.3]: #scipy.arange(0,1,0.1#):
WV = 1 - WM
for z in scipy.arange(0,2.5,0.01):
d = compute(z,w,WM,WV)
dict['%.2f' % z + '_' + '%.2f' % w + '_' + '%.2f' % WM] = d #str(z) + '_' + str(w) + '_' + str(WM)] = d
print d, z, w, WM, WV
print dict.keys()
import pickle
f = open('DA.pickle','w')
m = pickle.Pickler(f)
pickle.dump(dict,m)
f.close()
def dist_ratio(zs,cluster_z=0.55,w=-1.,omega_m=0.27,omega_lambda=0.73):
import pylab
#cluster_z = 0.55
ratios = []
for z in zs:
d_cluster = compute(cluster_z,w,omega_m,omega_lambda)
ratios.append((compute(z,w) - d_cluster)/compute(z,w,omega_m,omega_lambda))
import scipy
return scipy.array(ratios)
| mit | -1,979,014,363,781,253,600 | 34.489655 | 137 | 0.518947 | false |
szaghi/MaTiSSe | release/MaTiSSe-0.3.4/matisse/presentation/toc.py | 1 | 7542 | #!/usr/bin/env python
"""
toc.py, module definition of TOC class.
This defines the Table of Contents of the presentation.
"""
# modules loading
# standard library modules: these should be present in any recent python distribution
import re
# modules not in the standard library
from yattag import Doc
# global variables
# regular expressions
__retoc__ = re.compile(r"\$toc(\((?P<deep>[1-3])\))*",re.DOTALL)
# class definition
class TOC(object):
"""
Object handling the table of contents of presntation.
"""
def __init__(self,deep=3):
"""
Parameters
----------
deep : {1,2,3}, optional
depth of printed TOC; 1 => print only sections, 2 => print sections and subsections, 3=> print sections, subsections and slides
Attributes
----------
sections : list
list of sections with the following form
sections = [ #section_number, 'section title', [list of subsections], #slide_number_at_which_section_starts ];
the subsections list has the following form
subsections = [ #subsection_number, 'subsection title', [list of slides], #local_subsection_number, #slide_number_at_which_subsection_starts ];
the slides list has the following form
subsections = [ #slide_number, 'slide title', #local_slide_number ];
deep : {1,2,3}
depth of printed TOC; 1 => print only sections, 2 => print sections and subsections, 3=> print sections, subsections and slides
"""
self.sections = []
self.deep = deep
return
def __str__(self):
return self.pstr()
@staticmethod
def print_section(section,html=False,current=None):
"""Method for printing section data.
Parameters
----------
section: list
section data
html: bool, optional
activate html tags
current: list, optional
list containing current section, subsection and slide number used
for highlighting current slide into the TOC
Returns
-------
str
string containing the pretty printed section data
"""
if html:
doc, tag, text = Doc().tagtext()
with tag('a',href='#slide-'+str(section[3])):
if current and current[0] == section[0]:
with tag('span',klass='toc-section emph'):
text(str(section[0])+' '+section[1])
else:
with tag('span',klass='toc-section'):
text(str(section[0])+' '+section[1])
string = '\n'+doc.getvalue()
else:
string = '\n'+str(section[0])+' '+section[1]
return string
@staticmethod
def print_subsection(section,subsection,html=False,current=None):
"""Method for printing subsection data.
Parameters
----------
section: list
section data
subsection: list
subsection data
html: bool, optional
activate html tags
current: list, optional
list containing current section, subsection and slide number used
for highlighting current slide into the TOC
Returns
-------
str
string containing the pretty printed subsection data
"""
if html:
doc, tag, text = Doc().tagtext()
with tag('a',href='#slide-'+str(subsection[4])):
if current and current[0] == section[0] and current[1] == subsection[0]:
with tag('span',klass='toc-subsection emph'):
text(' '+str(section[0])+'.'+str(subsection[3])+' '+subsection[1])
else:
with tag('span',klass='toc-subsection'):
text(' '+str(section[0])+'.'+str(subsection[3])+' '+subsection[1])
string = '\n'+doc.getvalue()
else:
string = '\n'+' '+str(section[0])+'.'+str(subsection[3])+' '+subsection[1]
return string
@staticmethod
def print_slide(section,subsection,slide,html=False,current=None):
"""Method for printing slideta.
Parameters
----------
section: list
section data
subsection: list
subsection data
slide: list
slide data
html: bool, optional
activate html tags
current: list, optional
list containing current section, subsection and slide number used
for highlighting current slide into the TOC
Returns
-------
str
string containing the pretty printed slide data
"""
if html:
doc, tag, text = Doc().tagtext()
with tag('a',href='#slide-'+str(slide[0])):
if current and current[0] == section[0] and current[1] == subsection[0] and current[2] == slide[0]:
with tag('span',klass='toc-slide emph'):
text(' '+str(section[0])+'.'+str(subsection[3])+'.'+str(slide[2])+' '+slide[1])
else:
with tag('span',klass='toc-slide'):
text(' '+str(section[0])+'.'+str(subsection[3])+'.'+str(slide[2])+' '+slide[1])
string = '\n'+doc.getvalue()
else:
string = '\n'+' '+str(section[0])+'.'+str(subsection[3])+'.'+str(slide[2])+' '+slide[1]
return string
def pstr(self,html=False,current=None,deep=None):
"""Method powering __str__ obtaining a customizable pretty printer.
Parameters
----------
html: bool, optional
activate html tags
current: list, optional
list containing current section, subsection and slide number used
for highlighting current slide into the TOC
deep : {1,2,3}, optional
depth of printed TOC; 1 => print only sections, 2 => print sections and subsections, 3=> print sections, subsections and slides
"""
deep = deep
if not deep:
deep = self.deep
string = ''
for section in self.sections:
string += self.print_section(section=section,html=html,current=current)
if deep > 1:
for subsection in section[2]:
string += self.print_subsection(section=section,subsection=subsection,html=html,current=current)
if deep > 2:
for slide in subsection[2]:
string += self.print_slide(section=section,subsection=subsection,slide=slide,html=html,current=current)
return string
def get(self,sections):
"""
Method for building TOC from presentation sections.
"""
for sec,section in enumerate(sections):
self.sections.append([ int(section.number),section.title,[],0 ])
for sub,subsection in enumerate(section.subsections):
self.sections[sec][2].append([ int(subsection.number),subsection.title,[],int(subsection.local_number),0 ])
for slide in subsection.slides:
self.sections[sec][2][sub][2].append([ int(slide.number),slide.title,int(slide.local_number) ])
if slide.data['first_of_sec']:
self.sections[sec][3] = slide.number
if slide.data['first_of_subsec']:
self.sections[sec][2][sub][4] = slide.number
return
def parse(self, source, current=None):
"""Method for substituting $toc with its pretty printed version.
Parameters
----------
source : str
string (as single stream) containing the source
current: list, optional
list containing current section, and subsection number used
for highlighting current slide into the TOC
"""
parsed_source = source
for match in re.finditer(__retoc__, parsed_source):
deep = match.group('deep')
if deep:
deep = int(deep)
doc = Doc()
with doc.tag('div', klass='toc'):
doc.asis(self.pstr(html=True, current=current, deep=deep))
# parsed_source = re.sub(__retoc__, lambda x: doc.getvalue(), parsed_source, 1)
parsed_source = re.sub(__retoc__, doc.getvalue(), parsed_source, 1)
doc = None
return parsed_source
| gpl-3.0 | -6,060,174,641,161,162,000 | 34.07907 | 149 | 0.623309 | false |
dr-slump/bajawa | scripts/python/discover-slony-cluster.py | 1 | 1432 | #------------------------------------------------------
# $Date: 2014-06-23 08:03:15 +0000 (21 Aug 2014) $
# $Revision: 1 $
# $Author: rikih.gunawan $
#------------------------------------------------------
#
# /var/lib/zabbix/scripts/discover-slony-cluster.py
#
#!/usr/bin/env python
# -rikih.gunawan
# 20140729
import psycopg2
cmd = "select nspname from pg_catalog.pg_namespace where nspname ~ '^_';"
output=[]
conn = psycopg2.connect("dbname=db user=user")
cur = conn.cursor()
cur.execute(cmd)
out = cur.fetchall()
for cluster in out:
cur.execute("select a.st_received, b.no_comment from " + cluster[0] + ".sl_status as a, " + cluster[0] + ".sl_node as b where a.st_received=b.no_id;")
outtemp = cur.fetchall()
for out in outtemp:
if out not in output:
out = (cluster[0],) + out
output.append(out)
cur.close()
conn.close()
print """
{
"data": [
{
"""
max=len(output)
i=0
for x in output:
print ' "{#SLONYCLUSTER}": "' + str(x[0]).rstrip() + '",'
print ' "{#SLONYNODEID}": "' + str(x[1]).rstrip() + '",'
print ' "{#SLONYNODE}": "' + str(x[2]).rstrip() + '"'
if i <> max - 1:
print """
},
{
"""
else:
print """
}
"""
i+=1
print """
]
}
"""
| mit | 6,982,002,341,199,823,000 | 21.375 | 153 | 0.441341 | false |
RincewindLangner/Game_database | V12/gamefile_load.py | 1 | 1469 | import sqlite3 # sqlite 3 database
import file_check_ex # to check if there is a file
import database # creating, editing, deleting the database
import intCheck # check that it is an integar
option = None # choose what manu option they want
sqlite_file = "" # the name of the sqlite3 file
option_list = (0, 1, 2, 3) # the menu list option to use with option variable
#option_tuple = None # Info from the intCheck held in a tuple. Use var[0]
# print opening line
print('\nWelcome to the texted based game database of your games')
print('\nPlease enter a number to select an option')
# user selection of options
while option != 0:
print('\nExit program = 0 \nNew database = 1\nEdit database = 2 \nDelete database = 3')
# remember the sqlit3 file name
sqlite_file = sqlite_file
#Get user input for menu select
option = intCheck.int_check(input('\nEnter number: ')) # get user input and check it is an intager
if option in option_list:
option = option
if option == 1:
sqlite_file = database.createDatabase()
elif option == 2:
database.editDatabase(sqlite_file)
elif option == 3:
print("\nThis is where you would delete a database, but not yet.")
elif option == 0:
print("\nThanks for playing")
else:
print("\nother option") # temp line
else:
print("\nPlease re-enter a valid number")
# add to the columes in the database to make them equel for the game and game expasion so I can use the same gameVaule to input both.
| gpl-3.0 | 1,675,010,851,396,724,500 | 33.162791 | 134 | 0.71341 | false |
pipermerriam/flex | tests/validation/parameter/test_min_max_items_validation.py | 1 | 3262 | import pytest
from flex.exceptions import ValidationError
from flex.loading.schema.paths.path_item.operation.parameters import (
parameters_validator,
)
from flex.validation.parameter import (
validate_parameters,
)
from flex.constants import (
PATH,
ARRAY,
STRING,
)
from flex.error_messages import MESSAGES
from tests.utils import assert_message_in_errors
#
# minimum validation tests
#
@pytest.mark.parametrize(
'min_items,value',
(
(1, []),
(2, ['a']),
(5, ['1', '2', '3', '4']),
),
)
def test_min_items_on_values_with_too_few_items(min_items, value):
parameters = parameters_validator([
{
'name': 'id',
'in': PATH,
'description':'id',
'type': ARRAY,
'required': True,
'minItems': min_items,
'items': {'type': STRING},
},
])
parameter_values = {
'id': value,
}
with pytest.raises(ValidationError) as err:
validate_parameters(parameter_values, parameters, {})
assert_message_in_errors(
MESSAGES['min_items']['invalid'],
err.value.detail,
'id.minItems',
)
@pytest.mark.parametrize(
'min_items,value',
(
(1, ['a']),
(1, ['a', 'b']),
(3, ['1', '2', '3', '4']),
),
)
def test_min_items_on_values_with_valid_array_length(min_items, value):
parameters = parameters_validator([
{
'name': 'id',
'in': PATH,
'description':'id',
'type': ARRAY,
'required': True,
'minItems': min_items,
'items': {'type': STRING},
},
])
parameter_values = {
'id': value,
}
validate_parameters(parameter_values, parameters, {})
#
# maximum validation tests
#
@pytest.mark.parametrize(
'max_items,value',
(
(1, ['a', 'b']),
(2, ['1', '2', '3']),
(5, ['1', '2', '3', '4', '5', '6']),
),
)
def test_max_items_on_values_with_too_many_items(max_items, value):
parameters = parameters_validator([
{
'name': 'id',
'in': PATH,
'description':'id',
'type': ARRAY,
'required': True,
'maxItems': max_items,
'items': {'type': STRING},
},
])
parameter_values = {
'id': value,
}
with pytest.raises(ValidationError) as err:
validate_parameters(parameter_values, parameters, {})
assert_message_in_errors(
MESSAGES['max_items']['invalid'],
err.value.detail,
'id.maxItems',
)
@pytest.mark.parametrize(
'max_items,value',
(
(1, []),
(1, ['a']),
(2, ['a', 'b']),
(5, ['1', '2', '3', '4']),
),
)
def test_max_items_on_values_with_valid_array_length(max_items, value):
parameters = parameters_validator([
{
'name': 'id',
'in': PATH,
'description':'id',
'type': ARRAY,
'required': True,
'maxItems': max_items,
'items': {'type': STRING},
},
])
parameter_values = {
'id': value,
}
validate_parameters(parameter_values, parameters, {})
| mit | 3,032,155,428,753,175,000 | 21.342466 | 71 | 0.497854 | false |
adityabansal/newsAroundMe | newsApp/jobManager.py | 1 | 1205 | import os
from .queueHelper import *
from .workerJob import WorkerJob
class JobManager:
"""
Helper class to enqueue and dequeue jobs to the job queue.
"""
def __init__(self, connectionStringKey):
"""
Instantiates the job manager.
'connectionStringKey' : name of environment variable containing the
connection string to use.
"""
self.queue = getQueue(os.environ[connectionStringKey])
def enqueueJob(self, job):
"""
Enqueue the job into the jobs queue.
"""
enqueueMessage(self.queue, job.serializeToString())
def convertDequeuedMessageToJob(self, dequeuedMessage):
if dequeuedMessage is None:
return None
dequeuedJob = WorkerJob(None, None)
dequeuedJob.deserializeFromString(dequeuedMessage)
return dequeuedJob
def dequeueJob(self):
"""
Dequeue a job from the job queue.
"""
dequeuedMessage = dequeueMessage(self.queue)
return self.convertDequeuedMessageToJob(dequeuedMessage)
def count(self):
"""
Return the count of messages in queue.
"""
return self.queue.count()
| mit | -8,828,796,082,122,579,000 | 24.104167 | 75 | 0.629876 | false |
laborautonomo/leap_mail | src/leap/mail/smtp/tests/test_gateway.py | 1 | 13905 | # -*- coding: utf-8 -*-
# test_gateway.py
# Copyright (C) 2013 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
SMTP gateway tests.
"""
import re
from datetime import datetime
from twisted.test import proto_helpers
from twisted.mail.smtp import User, Address
from mock import Mock
from leap.mail.smtp.gateway import (
SMTPFactory,
EncryptedMessage,
)
from leap.mail.tests import (
TestCaseWithKeyManager,
ADDRESS,
ADDRESS_2,
)
from leap.keymanager import openpgp
# regexps used to match the gateway's IP address or hostname in SMTP replies
IP_REGEX = "(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}" + \
"([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])"
HOSTNAME_REGEX = "(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*" + \
"([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])"
IP_OR_HOST_REGEX = '(' + IP_REGEX + '|' + HOSTNAME_REGEX + ')'
class TestSmtpGateway(TestCaseWithKeyManager):
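    # Raw SMTP client dialog used by the tests below: handshake, envelope,
    # message headers and body, and finally QUIT.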
EMAIL_DATA = ['HELO gateway.leap.se',
'MAIL FROM: <%s>' % ADDRESS_2,
'RCPT TO: <%s>' % ADDRESS,
'DATA',
'From: User <%s>' % ADDRESS_2,
'To: Leap <%s>' % ADDRESS,
'Date: ' + datetime.now().strftime('%c'),
'Subject: test message',
'',
'This is a secret message.',
'Yours,',
'A.',
'',
'.',
'QUIT']
def assertMatch(self, string, pattern, msg=None):
if not re.match(pattern, string):
msg = self._formatMessage(msg, '"%s" does not match pattern "%s".'
% (string, pattern))
raise self.failureException(msg)
def test_openpgp_encrypt_decrypt(self):
"Test if openpgp can encrypt and decrypt."
text = "simple raw text"
pubkey = self._km.get_key(
ADDRESS, openpgp.OpenPGPKey, private=False)
encrypted = self._km.encrypt(text, pubkey)
self.assertNotEqual(
text, encrypted, "Ciphertext is equal to plaintext.")
privkey = self._km.get_key(
ADDRESS, openpgp.OpenPGPKey, private=True)
decrypted = self._km.decrypt(encrypted, privkey)
self.assertEqual(text, decrypted,
"Decrypted text differs from plaintext.")
def test_gateway_accepts_valid_email(self):
"""
Test if SMTP server responds correctly for valid interaction.
"""
SMTP_ANSWERS = ['220 ' + IP_OR_HOST_REGEX +
' NO UCE NO UBE NO RELAY PROBES',
'250 ' + IP_OR_HOST_REGEX + ' Hello ' +
IP_OR_HOST_REGEX + ', nice to meet you',
'250 Sender address accepted',
'250 Recipient address accepted',
'354 Continue']
# XXX this bit can be refactored away in a helper
# method...
proto = SMTPFactory(
            u'anotheruser@leap.se',
self._km, self._config['host'],
self._config['port'],
self._config['cert'], self._config['key'],
self._config['encrypted_only']).buildProtocol(('127.0.0.1', 0))
# snip...
transport = proto_helpers.StringTransport()
proto.makeConnection(transport)
for i, line in enumerate(self.EMAIL_DATA):
proto.lineReceived(line + '\r\n')
self.assertMatch(transport.value(),
'\r\n'.join(SMTP_ANSWERS[0:i + 1]),
'Did not get expected answer from gateway.')
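        # cancel the protocol's pending timeout so no DelayedCall is left
        # behind in the reactor after the test finishes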
proto.setTimeout(None)
def test_message_encrypt(self):
"""
Test if message gets encrypted to destination email.
"""
proto = SMTPFactory(
            u'anotheruser@leap.se',
self._km, self._config['host'],
self._config['port'],
self._config['cert'], self._config['key'],
self._config['encrypted_only']).buildProtocol(('127.0.0.1', 0))
fromAddr = Address(ADDRESS_2)
dest = User(ADDRESS, 'gateway.leap.se', proto, ADDRESS)
m = EncryptedMessage(
fromAddr, dest, self._km, self._config['host'],
self._config['port'], self._config['cert'], self._config['key'])
for line in self.EMAIL_DATA[4:12]:
m.lineReceived(line)
# m.eomReceived() # this includes a defer, so we avoid calling it here
m.lines.append('') # add a trailing newline
        # we need to call the following explicitly because it was deferred
# inside the previous method
m._maybe_encrypt_and_sign()
# assert structure of encrypted message
self.assertTrue('Content-Type' in m._msg)
self.assertEqual('multipart/encrypted', m._msg.get_content_type())
self.assertEqual('application/pgp-encrypted',
m._msg.get_param('protocol'))
self.assertEqual(2, len(m._msg.get_payload()))
self.assertEqual('application/pgp-encrypted',
m._msg.get_payload(0).get_content_type())
self.assertEqual('application/octet-stream',
m._msg.get_payload(1).get_content_type())
privkey = self._km.get_key(
ADDRESS, openpgp.OpenPGPKey, private=True)
decrypted = self._km.decrypt(
m._msg.get_payload(1).get_payload(), privkey)
self.assertEqual(
'\n' + '\r\n'.join(self.EMAIL_DATA[9:12]) + '\r\n\r\n--\r\n' +
'I prefer encrypted email - https://leap.se/key/anotheruser\r\n',
decrypted,
'Decrypted text differs from plaintext.')
def test_message_encrypt_sign(self):
"""
Test if message gets encrypted to destination email and signed with
sender key.
"""
proto = SMTPFactory(
u'[email protected]',
self._km, self._config['host'],
self._config['port'],
self._config['cert'], self._config['key'],
self._config['encrypted_only']).buildProtocol(('127.0.0.1', 0))
user = User(ADDRESS, 'gateway.leap.se', proto, ADDRESS)
fromAddr = Address(ADDRESS_2)
m = EncryptedMessage(
fromAddr, user, self._km, self._config['host'],
self._config['port'], self._config['cert'], self._config['key'])
for line in self.EMAIL_DATA[4:12]:
m.lineReceived(line)
# trigger encryption and signing
# m.eomReceived() # this includes a defer, so we avoid calling it here
m.lines.append('') # add a trailing newline
        # we need to call the following explicitly because it was deferred
# inside the previous method
m._maybe_encrypt_and_sign()
# assert structure of encrypted message
self.assertTrue('Content-Type' in m._msg)
self.assertEqual('multipart/encrypted', m._msg.get_content_type())
self.assertEqual('application/pgp-encrypted',
m._msg.get_param('protocol'))
self.assertEqual(2, len(m._msg.get_payload()))
self.assertEqual('application/pgp-encrypted',
m._msg.get_payload(0).get_content_type())
self.assertEqual('application/octet-stream',
m._msg.get_payload(1).get_content_type())
# decrypt and verify
privkey = self._km.get_key(
ADDRESS, openpgp.OpenPGPKey, private=True)
pubkey = self._km.get_key(ADDRESS_2, openpgp.OpenPGPKey)
decrypted = self._km.decrypt(
m._msg.get_payload(1).get_payload(), privkey, verify=pubkey)
self.assertEqual(
'\n' + '\r\n'.join(self.EMAIL_DATA[9:12]) + '\r\n\r\n--\r\n' +
'I prefer encrypted email - https://leap.se/key/anotheruser\r\n',
decrypted,
'Decrypted text differs from plaintext.')
def test_message_sign(self):
"""
Test if message is signed with sender key.
"""
# mock the key fetching
self._km.fetch_keys_from_server = Mock(return_value=[])
proto = SMTPFactory(
u'[email protected]',
self._km, self._config['host'],
self._config['port'],
self._config['cert'], self._config['key'],
self._config['encrypted_only']).buildProtocol(('127.0.0.1', 0))
user = User('[email protected]',
'gateway.leap.se', proto, ADDRESS)
fromAddr = Address(ADDRESS_2)
m = EncryptedMessage(
fromAddr, user, self._km, self._config['host'],
self._config['port'], self._config['cert'], self._config['key'])
for line in self.EMAIL_DATA[4:12]:
m.lineReceived(line)
# trigger signing
# m.eomReceived() # this includes a defer, so we avoid calling it here
m.lines.append('') # add a trailing newline
        # we need to call the following explicitly because it was deferred
# inside the previous method
m._maybe_encrypt_and_sign()
# assert structure of signed message
self.assertTrue('Content-Type' in m._msg)
self.assertEqual('multipart/signed', m._msg.get_content_type())
self.assertEqual('application/pgp-signature',
m._msg.get_param('protocol'))
self.assertEqual('pgp-sha512', m._msg.get_param('micalg'))
# assert content of message
self.assertEqual(
'\r\n'.join(self.EMAIL_DATA[9:13]) + '\r\n--\r\n' +
'I prefer encrypted email - https://leap.se/key/anotheruser\r\n',
m._msg.get_payload(0).get_payload(decode=True))
# assert content of signature
self.assertTrue(
m._msg.get_payload(1).get_payload().startswith(
'-----BEGIN PGP SIGNATURE-----\n'),
'Message does not start with signature header.')
self.assertTrue(
m._msg.get_payload(1).get_payload().endswith(
'-----END PGP SIGNATURE-----\n'),
'Message does not end with signature footer.')
# assert signature is valid
pubkey = self._km.get_key(ADDRESS_2, openpgp.OpenPGPKey)
# replace EOL before verifying (according to rfc3156)
signed_text = re.sub('\r?\n', '\r\n',
m._msg.get_payload(0).as_string())
self.assertTrue(
self._km.verify(signed_text,
pubkey,
detached_sig=m._msg.get_payload(1).get_payload()),
'Signature could not be verified.')
def test_missing_key_rejects_address(self):
"""
        Test that the server refuses to send unencrypted mail when
        'encrypted_only' is True.
"""
# remove key from key manager
pubkey = self._km.get_key(ADDRESS, openpgp.OpenPGPKey)
pgp = openpgp.OpenPGPScheme(
self._soledad, gpgbinary=self.GPG_BINARY_PATH)
pgp.delete_key(pubkey)
# mock the key fetching
self._km.fetch_keys_from_server = Mock(return_value=[])
# prepare the SMTP factory
proto = SMTPFactory(
u'[email protected]',
self._km, self._config['host'],
self._config['port'],
self._config['cert'], self._config['key'],
self._config['encrypted_only']).buildProtocol(('127.0.0.1', 0))
transport = proto_helpers.StringTransport()
proto.makeConnection(transport)
proto.lineReceived(self.EMAIL_DATA[0] + '\r\n')
proto.lineReceived(self.EMAIL_DATA[1] + '\r\n')
proto.lineReceived(self.EMAIL_DATA[2] + '\r\n')
# ensure the address was rejected
lines = transport.value().rstrip().split('\n')
self.assertEqual(
'550 Cannot receive for specified address',
lines[-1],
            'Address should have been rejected with appropriate message.')
def test_missing_key_accepts_address(self):
"""
        Test that the server agrees to send unencrypted mail when
        'encrypted_only' is False.
"""
# remove key from key manager
pubkey = self._km.get_key(ADDRESS, openpgp.OpenPGPKey)
pgp = openpgp.OpenPGPScheme(
self._soledad, gpgbinary=self.GPG_BINARY_PATH)
pgp.delete_key(pubkey)
# mock the key fetching
self._km.fetch_keys_from_server = Mock(return_value=[])
# prepare the SMTP factory with encrypted only equal to false
proto = SMTPFactory(
u'[email protected]',
self._km, self._config['host'],
self._config['port'],
self._config['cert'], self._config['key'],
False).buildProtocol(('127.0.0.1', 0))
transport = proto_helpers.StringTransport()
proto.makeConnection(transport)
proto.lineReceived(self.EMAIL_DATA[0] + '\r\n')
proto.lineReceived(self.EMAIL_DATA[1] + '\r\n')
proto.lineReceived(self.EMAIL_DATA[2] + '\r\n')
# ensure the address was accepted
lines = transport.value().rstrip().split('\n')
self.assertEqual(
'250 Recipient address accepted',
lines[-1],
'Address should have been accepted with appropriate message.')
| gpl-3.0 | -6,690,810,601,957,365,000 | 41.916667 | 79 | 0.565768 | false |
DanHoerst/notecard | notecard/settings.py | 1 | 3355 | # Django settings for notecard project.
import os
import sys
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('dan hoerst', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'notecard', # Or path to database file if using sqlite3.
'USER': 'root', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
MEDIA_ROOT = ''
MEDIA_URL = ''
STATIC_ROOT = ''
ADMIN_MEDIA_PREFIX = '/static/admin/'
STATICFILES_DIRS = (
'/app/notecard/templates/static/',
)
# Storage for S3
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
AWS_ACCESS_KEY_ID = os.environ.get('aws_KEYID')
AWS_SECRET_ACCESS_KEY = os.environ.get('aws_ACCESSKEY')
AWS_STORAGE_BUCKET_NAME = os.environ.get('s3_BUCKET')
STATIC_URL = os.environ.get('s3_URL')
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
SECRET_KEY = os.environ.get('secret_KEY')
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'notecard.urls'
TEMPLATE_DIRS = (
'/app/notecard/templates',
'/home/dan/notecard/notecard/templates/',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'notecards',
'registration',
'markdown',
'about',
'south',
'search',
'storages',
'django.contrib.admin',
'django.contrib.admindocs',
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
## Sendgrid
EMAIL_HOST = 'smtp.sendgrid.net'
EMAIL_HOST_USER = os.environ.get('SENDGRID_USERNAME')
EMAIL_HOST_PASSWORD = os.environ.get('SENDGRID_PASSWORD')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
## Memcached
CACHES = {
'default': {
'BACKEND': 'django_pylibmc.memcached.PyLibMCCache'
}
}
# Heroku deprecated settings.py injection
import dj_database_url
DATABASES = {'default': dj_database_url.config(default='postgres://localhost')}
try:
from local_settings import *
except ImportError:
pass
| gpl-3.0 | 4,785,925,985,693,658,000 | 22.137931 | 120 | 0.654545 | false |
libeanim/refuelking | refuelking/api.py | 1 | 3176 | import requests
import json
class Tankerkoenig():
"""
Wrapper for Tankerkönig-API calls.
* Parameters:
        :api_key: ``str``; Tankerkönig API key.
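    * Example:
        A minimal illustrative sketch; the API key and coordinates below are
        placeholder values, not real ones::

            tk = Tankerkoenig('your-api-key')
            nearby = tk.list(lat=52.52, lng=13.40, rad=5.0,
                             sort='price', type='e5')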
"""
def __init__(self, api_key):
self.api_key = api_key
def __check(self, r):
"""
Internal method to check whether a request was successful.
* Parameters:
:r: ``request``; http response of api call.
* Return:
            ``True`` if the request was successful; raises ``Exception`` otherwise.
"""
if r.status_code != 200 or \
'application/json' not in r.headers['content-type']:
raise Exception('Request failed.\n'
'Code: {}\nText: {}'.format(r.status_code, r.text))
return True
def list(self, lat, lng, rad, sort='dist', type='all'):
"""
        'List' request of the Tankerkoenig API. Returns a list of gas stations
        close to a given location, sorted by distance.
* Parameters:
:lat: ``float``; latitude of the chosen location.
:lng: ``float``; longitude of the chosen location.
:rad:
``float``;
max radius around the given location in which gas stations
should be searched.
:sort:
``str``, default: ``'dist'``, options: ``('dist', 'price')``;
sorts the results by either price ``('price')`` or distance
                ``('dist')``.
*Hint:* the sorting by price is only possible if the
chosen fuel type is ``('diesel', 'e10', 'e5')`` and not
``all``.
:type:
``str``, default: ``'all'``, options: ``('diesel', 'e10',
'e5', 'all')``;
can be set to return a specific fuel type ``('diesel', 'e10',
'e5')``.
*Hint:* the sorting by price is only possible if the
chosen fuel type is ``diesel``, ``e10`` or ``e5`` and not
``all``.
* Return:
``dict``;
returns a dictionary containing a list of stations
(id: 'all_stations').
"""
r = requests.get('https://creativecommons.tankerkoenig.de'
'/json/list.php',
params=dict(lat=lat, lng=lng, rad=rad, sort=sort,
type=type, apikey=self.api_key))
self.__check(r)
return json.loads(r.text)
def detail(self, id):
"""
'Detail' request of tankerkoenig api. Returns detailed information of
a specific gas station by id.
* Parameters:
:id: ``str``; gas station id provided by tankerkoenig.
        * Return:
``dict``;
returns a dictionary with detailed information of the gas station
corresponding to the given id.
"""
r = requests.get('https://creativecommons.tankerkoenig.de/'
'json/detail.php',
params=dict(id=id, apikey=self.api_key))
self.__check(r)
return json.loads(r.text)
| gpl-3.0 | 8,470,868,410,601,090,000 | 30.425743 | 79 | 0.495904 | false |
vrbacky/proceseq_16s | proceseq_16s/rdp.py | 1 | 7027 | #!/usr/bin/env python3
import csv
import pydoc
import subprocess
import tempfile
class Analyzer():
'''Analyze sequences in fasta file by RDP Classifier.
Parameters
----------
fasta_file : str
Path to the fasta file containing sequences to be analyzed (default: None)
analyzer_file : str
Path to the RDP Classifier java file (default: None)
taxonomy_ranks : tuple
Taxonomy ranks returned by *element_to_tsv* method
(default: ('kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'))
ram : int
Amount of RAM used by RDP Classifier in GB (default: 16)
Example
-------
::
from proceseq_16s import rdp
        rdp_analyzer = rdp.Analyzer(fasta_file='./sequences.fa',
                                    analyzer_file='./classifier.jar')
with open("taxonomizer_seqs.tsv", "w") as output_file:
for seq_id in rdp_analyzer.taxonomy:
taxonomy_for_tsv = rdp_analyzer.element_to_tsv(seq_id)
output_file.write('Sequence ID: {}\\n'
'Taxonomy: {}\\n'.format(seq_id, taxonomy_for_tsv))
Attributes
----------
analyzer_file : str
Path to the RDP Classifier java file
fasta_file : str
Path to the fasta file containing sequences to be analyzed
ram : int
Amount of RAM used by RDP Classifier in GB
taxonomy : dict
Dictionary mapping sequence identifiers as keys to taxonomy information.
Taxonomy information is presented as another dictionary mapping taxonomy ranks to
tuples containing name of taxon and confidence provided by RDP Classifier
{seq_id:{rank:(taxon, confidence)}}
taxonomy_ranks : tuple
Taxonomy ranks returned by *element_to_tsv* method
(default: ('kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'))
'''
def __init__(self, fasta_file=None, analyzer_file=None, taxonomy_ranks=None,
ram=16):
self.analyzer_file = analyzer_file
self.fasta_file = fasta_file
if taxonomy_ranks is None:
self.taxonomy_ranks = ('kingdom', 'phylum', 'class', 'order', 'family',
'genus', 'species')
else:
self.taxonomy_ranks = taxonomy_ranks
self.ram = ram
self._taxonomy = None
def _taxonomize(self, gene='16srrna'):
'''Analyze input fasta file by RDP Classifier and parse results
Parameters
----------
gene : str
Gene type (-g parameter of RDP Classifier) [16srrna, fungallsu,
fungalits_warcup, fungalits_unite] (default: '16srrna')
Returns
-------
dict
Dictionary mapping sequence identifiers as keys to taxonomy information.
Taxonomy information is presented as another dictionary mapping taxonomy
ranks to tuples containing name of taxon and confidence provided
by RDP Classifier {seq_id:{rank:(taxon, confidence)}}
'''
with tempfile.NamedTemporaryFile(suffix='-rdp.csv',
mode='w',
delete=False) as tmp_file:
subprocess.call(['java', '-Xmx{}g'.format(self.ram), '-jar',
self.analyzer_file, 'classify', '-c', '0.8',
'-g', str(gene), '-o', str(tmp_file.name), self.fasta_file])
self._taxonomized_file = tmp_file.name
return self.parse_results()
def parse_results(self, taxonomized_file=None, domain_to_kingdom=True):
'''Parse results of the RDP Classifier analysis
Parameters
----------
taxonomized_file : str
Path to a file with results of RDP Classifier analysis (required)
domain_to_kingdom : boolean
Copy 'domain' taxonomy ranks in RDP Classifier results as 'kingdom',
if no 'kingdom' rank is present (default: True)
Returns
-------
dict
Dictionary mapping sequence identifiers as keys to taxonomy information.
Taxonomy information is presented as another dictionary mapping taxonomy
ranks to tuples containing name of taxon and confidence provided
by RDP Classifier {seq_id:{rank:(taxon, confidence)}}
'''
if taxonomized_file is None:
try:
taxonomized_file = self._taxonomized_file
except AttributeError:
raise AttributeError('Missing file with RDP Classifier results!')
with open(taxonomized_file, 'r') as csv_file:
csv_reader = csv.reader(csv_file, delimiter='\t')
self._taxonomy = {}
            for row in csv_reader:
                if len(row) == 0:
                    continue
                parsed_taxonomy = {row[i + 1]: (row[i].strip(), row[i + 2].strip())
                                   for i in range(2, len(row), 3)}
                if domain_to_kingdom and 'kingdom' not in parsed_taxonomy:
                    parsed_taxonomy['kingdom'] = parsed_taxonomy.get('domain', ('NA', 0))
                self._taxonomy[row[0]] = parsed_taxonomy
return self._taxonomy
@property
def taxonomy(self):
'''Taxonomized sequences from input fasta file
Dictionary mapping sequence identifiers as keys to taxonomy information.
Taxonomy information is presented as another dictionary mapping taxonomy
ranks to tuples containing name of taxon and confidence provided
by RDP Classifier {seq_id:{rank:(taxon, confidence)}}
:type: dict
:getter: Returns taxonomized sequences
'''
self._taxonomize()
return self._taxonomy
def element_to_tsv(self, seq_id):
'''Format results from the last analyzed taxonomy for output to tsv file
Parameters
----------
seq_id : str
Results of the sequence with this identifier will be formated
and returned (required)
Returns
-------
str
Result of the analysis formated for saving to the output tsv file
(including trailing next line character)
'''
if self._taxonomy is None:
self._taxonomize()
seq_id = str(seq_id)
try:
extracted_taxonomy = [self._taxonomy[seq_id].get(taxon, ('NA', 'NA'))
for taxon in self.taxonomy_ranks]
transposed_taxonomy = list(zip(*extracted_taxonomy))
taxonomy_for_tsv = ('\tRDP-Taxonomy\t{}\n\tRDP-Confidence\t{}\n'.format(
'\t'.join(transposed_taxonomy[0]),
'\t'.join(transposed_taxonomy[1])))
except KeyError:
print('RDP Classifier - sequence ID {} not found'.format(seq_id))
taxonomy_for_tsv = ''
return taxonomy_for_tsv
if __name__ == '__main__':
exit(pydoc.render_doc(__name__))
| mit | -5,823,275,120,657,471,000 | 35.409326 | 89 | 0.574498 | false |
idanivanov/catdtree | tests/test_BaseDecisionTree.py | 1 | 1761 | from nose import with_setup
from nose.tools import assert_equal
import pandas as pd
from catdtree import BaseDecisionTree
class MockDecisionTree(BaseDecisionTree):
def __init__(self):
BaseDecisionTree.__init__(self)
def _choose_best_split(self, X_part, y_part):
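        # Mock split logic used only by these tests: split on Sex first
        # (Female vs. Male); otherwise split on Age >= 35 when both sides are
        # non-empty; return None when no further split is possible.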
if len(set(X_part['Sex'])) > 1:
branch_data_mask = X_part['Sex'] == 'Female'
best_split = [
(
u'Sex is Female',
lambda X, y: (X[branch_data_mask], y[branch_data_mask])
),
(
u'Sex is Male',
lambda X, y: (X[~branch_data_mask], y[~branch_data_mask])
)
]
return best_split
if any(X_part['Age'] >= 35) and any(X_part['Age'] < 35):
branch_data_mask = X_part['Age'] >= 35
best_split = [
(
u'Age is greater than 35',
lambda X, y: (X[branch_data_mask], y[branch_data_mask])
),
(
u'Age is less than 35',
lambda X, y: (X[~branch_data_mask], y[~branch_data_mask])
)
]
return best_split
else:
return None
def test_fit():
tree_str_exp = u'''Root
|--> Sex is Female
| |--> Age is greater than 35
| |--> Age is less than 35
|--> Sex is Male
| |--> Age is greater than 35
| |--> Age is less than 35
'''
hot_data = pd.read_csv('tests/hot.csv')
X, y = hot_data.drop('Hot', axis=1), hot_data['Hot']
mdt = MockDecisionTree()
mdt.fit(X, y)
tree_str = mdt.tree.show()
assert tree_str_exp == tree_str, 'The tree was not built as expected.'
| mit | 964,071,306,302,019,300 | 29.362069 | 77 | 0.478705 | false |
andreav/swgit | core/OldPython.py | 1 | 3064 | #!/usr/bin/env python
# Copyright (C) 2012 Andrea Valle
#
# This file is part of swgit.
#
# swgit is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# swgit is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with swgit. If not, see <http://www.gnu.org/licenses/>.
#
# IN THIS FILE WE PUT THINGS OLD PYTHONS DO NOT HAVE
#
import os
import sys
# Creates os.path.relpath for Python 2.4
if not hasattr(os.path, 'relpath'):
if os.path is sys.modules.get('ntpath'):
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = os.path.abspath(start).split(os.path.sep)
path_list = os.path.abspath(path).split(os.path.sep)
if start_list[0].lower() != path_list[0].lower():
unc_path, rest = os.path.splitunc(path)
unc_start, rest = os.path.splitunc(start)
if bool(unc_path) ^ bool(unc_start):
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
% (path, start))
else:
raise ValueError("path is on drive %s, start on drive %s"
% (path_list[0], start_list[0]))
# Work out how much of the filepath is shared by start and path.
for i in range(min(len(start_list), len(path_list))):
if start_list[i].lower() != path_list[i].lower():
break
else:
i += 1
rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return os.path.curdir
return os.path.join(*rel_list)
else:
# default to posixpath definition
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = os.path.abspath(start).split(os.path.sep)
path_list = os.path.abspath(path).split(os.path.sep)
# Work out how much of the filepath is shared by start and path.
i = len(os.path.commonprefix([start_list, path_list]))
rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return os.path.curdir
return os.path.join(*rel_list)
os.path.relpath = relpath
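# Illustrative usage (assuming POSIX-style paths):
#   os.path.relpath('/a/b/c', '/a')  ->  'b/c'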
| gpl-3.0 | 6,104,137,058,317,089,000 | 37.78481 | 88 | 0.559073 | false |