code | repo_name | path | language | license | size
---|---|---|---|---|---|
from utils import *
Clean()
GitUpdate33()
PatchAll()
Build_VC11Express_64()
RunAll()
Build_VC14Express_64()
RunAll()
Build_CodeBlocks()
RunAll()
Clean()
GitUpdate21()
PatchAll()
Build_VC14Express_64()
RunAll()
Clean()
GitUpdate33()
| NelsonBilber/cg.opengltutorial | distrib/tests_potiron.py | Python | mit | 272 |
import tensorflow as tf
import os
def create_single_queue(bucket_id, filename, batch_size, buckets):
"""
Return a shuffle_queue which outputs elements from the {bucket_id} bucket
:param bucket_id: int
:param filename: str
:param batch_size: int
:param buckets: list
:return: a tf.train.shuffle_batch op yielding (question, answer) batches
"""
file_name = os.path.dirname(os.path.abspath(__file__))
path_to_save_example = os.path.join(file_name, os.pardir, "Examples")
filename = os.path.join(path_to_save_example, "{}{}.tfrecords".format(filename, bucket_id))
filename_queue = tf.train.string_input_producer([filename])
reader = tf.TFRecordReader()
# Read a single example
_, serialized_example = reader.read(filename_queue)
# Scalar features
context_features = {
"length_question": tf.FixedLenFeature([], dtype=tf.int64),
"length_answer": tf.FixedLenFeature([], dtype=tf.int64)
}
# Tensor features
sequence_features = {
"question": tf.VarLenFeature(dtype=tf.int64),
"answer": tf.VarLenFeature(dtype=tf.int64)
}
# Parse a single example
context_parsed, sequence_parsed = tf.parse_single_sequence_example(
serialized=serialized_example,
context_features=context_features,
sequence_features=sequence_features
)
batch_size = batch_size
capacity = 10 * batch_size
min_after_dequeue = 9 * batch_size
# Basically, pad question with zeros if shorter than buckets[bucket_id][0]
length_question = context_parsed["length_question"]
question = sequence_parsed["question"]
question = tf.sparse_tensor_to_dense(question)
question = tf.reshape(question, [-1])
pad_question = tf.zeros(shape=[buckets[bucket_id][0] - tf.cast(length_question, tf.int32)], dtype=tf.int64)
question = tf.concat([question, pad_question], axis=0)
question.set_shape(buckets[bucket_id][0])
# Basically, pad answer with zeros if shorter than buckets[bucket_id][1]
length_answer = context_parsed["length_answer"]
answer = sequence_parsed["answer"]
answer = tf.sparse_tensor_to_dense(answer)
answer = tf.reshape(answer, [-1])
pad_answer = tf.zeros(shape=[buckets[bucket_id][1] - tf.cast(length_answer, tf.int32)], dtype=tf.int64)
answer = tf.concat([answer, pad_answer], axis=0)
answer.set_shape(buckets[bucket_id][1])
# Shuffle queue
return tf.train.shuffle_batch([question, answer],
batch_size,
capacity,
min_after_dequeue)
def create_queues_for_bucket(batch_size, filename, buckets):
"""
For every bucket, create a ShuffleQueue,
then create a FIFOQueue on top of these queues (used for filtering queues)
:param batch_size: int
:param filename: str
:param buckets: list
:return: (all_queues, enqueue_ops), one FIFOQueue and one enqueue op per bucket
"""
shuffle_queues = []
for bucket_id in range(len(buckets)):
shuffle_queues.append(create_single_queue(bucket_id, filename, batch_size, buckets))
capacity = 30 * batch_size
# For every bucket, create a queue which returns batch_size examples
# of that bucket
all_queues, enqueue_ops = [], []
for bucket_id in range(len(buckets)):
queue = tf.FIFOQueue(capacity=capacity, dtypes=[tf.int64, tf.int64])
all_queues.append(queue)
enqueue_op = queue.enqueue(shuffle_queues[bucket_id])
enqueue_ops.append(enqueue_op)
return all_queues, enqueue_ops
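# Illustrative usage sketch (not part of the original module). It assumes the
# TensorFlow 1.x queue-runner API and that "<filename><bucket_id>.tfrecords"
# files already exist under ../Examples; the bucket shapes below are made up.
if __name__ == "__main__":
    buckets = [(10, 15)]  # (max question length, max answer length)
    queues, enqueue_ops = create_queues_for_bucket(8, "train", buckets)
    runner = tf.train.QueueRunner(queues[0], [enqueue_ops[0]])
    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        threads += runner.create_threads(sess, coord=coord, start=True)
        questions, answers = sess.run(queues[0].dequeue())
        print(questions.shape, answers.shape)  # expected: (8, 10) and (8, 15)
        coord.request_stop()
        coord.join(threads)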
| XefPatterson/INF8225_Project | Model/queues.py | Python | mit | 3,465 |
__all__ = ["w_autocomplete",
"w_inv_item_select",
"w_supply_select",
"w_facility_select",
"w_gis_location",
]
# @todo There are performance issues: need to profile and find out which functions are the bottlenecks
import time
from gluon import current
# -----------------------------------------------------------------------------
def _autocomple_finish(el_id, browser):
giveup = 0.0
sleeptime = 0.2
el = browser.find_element_by_id(el_id)
while giveup < 60:
try:
if el.find_elements_by_tag_name("option")[0].text != "":
return
except: # StaleElementReferenceException
print "StaleElementReferenceException %s" % giveup
el = browser.find_element_by_id(el_id)
# The pack drop down hasn't been populated yet so sleep
time.sleep(sleeptime)
giveup += sleeptime
# -----------------------------------------------------------------------------
def w_autocomplete(search,
autocomplete,
needle = None,
quiet = True,
):
config = current.test_config
browser = config.browser
autocomplete_id = "dummy_%s" % autocomplete
throbber_id = "dummy_%s_throbber" % autocomplete
if needle == None:
needle = search
elem = browser.find_element_by_id(autocomplete_id)
elem.clear()
elem.send_keys(search)
# give time for the throbber to appear
time.sleep(1)
# now wait for throbber to close
giveup = 0.0
sleeptime = 0.2
while browser.find_element_by_id(throbber_id).is_displayed():
time.sleep(sleeptime)
giveup += sleeptime
if giveup > 60:
return False
# throbber has closed and data was found, return
for i in range(10):
# For each autocomplete on the form the menu will have an id starting from 0
automenu = 0
try:
menu = browser.find_element_by_id("ui-menu-%s" % automenu)
except:
menu = None
while menu:
# Try and get the value directly
menu_items = menu.text.splitlines()
autoitem = 0
for linkText in menu_items:
if needle in linkText:
# found the text need to click on it to get the db id
menuitem = browser.find_element_by_id("ui-menu-%s-%s" % (automenu,autoitem))
menuitem.click()
db_id = browser.find_element_by_id(autocomplete)
time.sleep(15)
# The id is copied into the value attribute so use that
return int(db_id.get_attribute("value"))
autoitem += 1
automenu += 1
try:
menu = browser.find_element_by_id("ui-menu-%s" % automenu)
except:
menu = None
# end of looping through each autocomplete menu
time.sleep(sleeptime)
# -----------------------------------------------------------------------------
def w_inv_item_select (item_repr,
tablename,
field,
quiet = True,
):
config = current.test_config
browser = config.browser
el_id = "%s_%s" % (tablename, field)
el = browser.find_element_by_id(el_id)
raw_value = None
for option in el.find_elements_by_tag_name("option"):
if option.text == item_repr:
option.click()
raw_value = int(option.get_attribute("value"))
break
# Now wait for the pack_item to be populated
el_id = "%s_%s" % (tablename, "item_pack_id")
_autocomple_finish(el_id, browser)
return raw_value
# -----------------------------------------------------------------------------
def w_supply_select(item_repr,
tablename,
field,
quiet = True,
):
el_id = "%s_%s" % (tablename, field)
raw_value = w_autocomplete(item_repr, el_id)
# Now wait for the pack_item to be populated
browser = current.test_config.browser
el_id = "%s_%s" % (tablename, "item_pack_id")
_autocomple_finish(el_id, browser)
return raw_value
# -----------------------------------------------------------------------------
def w_facility_select(item_repr,
tablename,
field,
quiet = True,
):
el_id = "%s_%s" % (tablename, field)
raw_value = w_autocomplete(item_repr, el_id)
# Now wait for the pack_item to be populated
browser = current.test_config.browser
el_id = "%s_%s" % (tablename, "site_id")
_autocomple_finish(el_id, browser)
return raw_value
# -----------------------------------------------------------------------------
def w_gis_location (item_repr,
field,
quiet = True,
):
config = current.test_config
browser = config.browser
if field == "L0":
el_id = "gis_location_%s" % field
el = browser.find_element_by_id(el_id)
for option in el.find_elements_by_tag_name("option"):
if option.text == item_repr:
option.click()
raw_value = int(option.get_attribute("value"))
break
elif field[0] == "L":
# @todo make this a proper autocomplete widget (select or add)
el_id = "gis_location_%s_ac" % field
el = browser.find_element_by_id(el_id)
el.send_keys(item_repr)
raw_value = None # can't get the id at the moment (see the todo)
else:
el_id = "gis_location_%s" % field
el = browser.find_element_by_id(el_id)
el.send_keys(item_repr)
raw_value = item_repr
return raw_value
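# -----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): how a Selenium
# test might drive these widget helpers. The field ids and search strings below
# are hypothetical, and current.test_config.browser must already hold a live,
# logged-in browser session.
def _example_fill_form():
    country_id = w_gis_location("Timor-Leste", "L0")
    org_id = w_autocomplete("Acme Relief", "org_organisation_id", needle="Acme")
    return country_id, org_id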
# END =========================================================================
| madhurauti/Map-Polygon | modules/tests/core/core_widgets.py | Python | mit | 5,983 |
# Copyright (c) 2009, 2012-2013, 2015 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
from m5.params import *
from System import System
class ArmMachineType(Enum):
map = {'RealView_EB' : 827,
'RealView_PBX' : 1901,
'VExpress_EMM' : 2272,
'VExpress_EMM64' : 2272}
class ArmSystem(System):
type = 'ArmSystem'
cxx_header = "arch/arm/system.hh"
load_addr_mask = 0xffffffff
multi_proc = Param.Bool(True, "Multiprocessor system?")
boot_loader = VectorParam.String([],
"File that contains the boot loader code. Zero or more files may be "
"specified. The first boot loader that matches the kernel's "
"architecture will be used.")
gic_cpu_addr = Param.Addr(0, "Address of the GIC CPU interface")
flags_addr = Param.Addr(0, "Address of the flags register for MP booting")
have_security = Param.Bool(False,
"True if Security Extensions are implemented")
have_virtualization = Param.Bool(False,
"True if Virtualization Extensions are implemented")
have_lpae = Param.Bool(False, "True if LPAE is implemented")
highest_el_is_64 = Param.Bool(False,
"True if the register width of the highest implemented exception level "
"is 64 bits (ARMv8)")
reset_addr_64 = Param.Addr(0x0,
"Reset address if the highest implemented exception level is 64 bits "
"(ARMv8)")
phys_addr_range_64 = Param.UInt8(40,
"Supported physical address range in bits when using AArch64 (ARMv8)")
have_large_asid_64 = Param.Bool(False,
"True if ASID is 16 bits in AArch64 (ARMv8)")
class GenericArmSystem(ArmSystem):
type = 'GenericArmSystem'
cxx_header = "arch/arm/system.hh"
load_addr_mask = 0x0fffffff
machine_type = Param.ArmMachineType('VExpress_EMM',
"Machine id from http://www.arm.linux.org.uk/developer/machines/")
atags_addr = Param.Addr("Address where default atags structure should " \
"be written")
dtb_filename = Param.String("",
"File that contains the Device Tree Blob. Don't use DTB if empty.")
early_kernel_symbols = Param.Bool(False,
"enable early kernel symbol tables before MMU")
enable_context_switch_stats_dump = Param.Bool(False, "enable stats/task info dumping at context switch boundaries")
panic_on_panic = Param.Bool(False, "Trigger a gem5 panic if the " \
"guest kernel panics")
panic_on_oops = Param.Bool(False, "Trigger a gem5 panic if the " \
"guest kernel oopses")
class LinuxArmSystem(GenericArmSystem):
type = 'LinuxArmSystem'
cxx_header = "arch/arm/linux/system.hh"
class FreebsdArmSystem(GenericArmSystem):
type = 'FreebsdArmSystem'
cxx_header = "arch/arm/freebsd/system.hh"
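# Illustrative sketch (not part of the original file): a gem5 full-system
# configuration script would typically instantiate one of these SimObjects.
# The machine type, atags address and DTB file name below are hypothetical.
def _example_system():
    return LinuxArmSystem(machine_type='VExpress_EMM',
                          atags_addr=0x8000000,
                          dtb_filename='vexpress.dtb')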
| Nirvedh/CoarseCoherence | src/arch/arm/ArmSystem.py | Python | bsd-3-clause | 4,862 |
from leapp.actors import Actor
from leapp.tags import ThirdPhaseTag, UnitTestWorkflowTag
class ThirdActor(Actor):
name = 'third_actor'
description = 'No description has been provided for the third_actor actor.'
consumes = ()
produces = ()
tags = (ThirdPhaseTag, UnitTestWorkflowTag)
def process(self):
from leapp.libraries.common.test_helper import log_execution
log_execution(self)
| vinzenz/prototype | tests/data/workflow-tests/actors/thirdactor/actor.py | Python | apache-2.0 | 426 |
# -*- coding: utf-8 -*-
#
# TwitterMonitor documentation build configuration file, created by
# sphinx-quickstart on Thu Nov 20 02:47:09 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'TwitterMonitor'
copyright = u'2014, Alisson R. Perez'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'TwitterMonitordoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'TwitterMonitor.tex', u'TwitterMonitor Documentation',
u'Alisson R. Perez', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'twittermonitor', u'TwitterMonitor Documentation',
[u'Alisson R. Perez'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'TwitterMonitor', u'TwitterMonitor Documentation',
u'Alisson R. Perez', 'TwitterMonitor', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| alissonperez/TwitterMonitor | docs/conf.py | Python | apache-2.0 | 8,278 |
#!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@license:
'''
SPELL=u'zhōngzhǔ'
CN=u'中渚'
NAME=u'zhongzhu13'
CHANNEL='sanjiao'
CHANNEL_FULLNAME='SanjiaoChannelofHand-Shaoyang'
SEQ='SJ3'
if __name__ == '__main__':
pass
| sinotradition/meridian | meridian/acupoints/zhongzhu13.py | Python | apache-2.0 | 237 |
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Osmo Salomaa
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Allow use of non-standard framerates."""
import aeidon
import gaupol
import os
from aeidon.i18n import _
from gi.repository import Gtk
class AddFramerateDialog(gaupol.BuilderDialog):
"""Dialog for entering a custom framerate value."""
_widgets = ["spin_button"]
def __init__(self, parent):
"""Initialize an :class:`AddFramerateDialog` instance."""
directory = os.path.abspath(os.path.dirname(__file__))
ui_file_path = os.path.join(directory, "add-framerate-dialog.ui")
gaupol.BuilderDialog.__init__(self, ui_file_path)
self.add_button(_("_Cancel"), Gtk.ResponseType.CANCEL)
self.add_button(_("_Add"), Gtk.ResponseType.OK)
self.set_default_response(Gtk.ResponseType.OK)
self.set_transient_for(parent)
self.set_modal(True)
def _on_response(self, *args):
"""Update spin button before dispatching response."""
self._spin_button.update()
def get_framerate(self):
"""Return framerate entered in the spin button."""
return float(self._spin_button.get_value())
class PreferencesDialog(gaupol.BuilderDialog):
"""Dialog for editing a list of custom framerates."""
_widgets = ["add_button", "remove_button", "toolbar", "tree_view"]
def __init__(self, framerates, parent):
"""Initialize a :class:`PreferencesDialog` instance."""
directory = os.path.abspath(os.path.dirname(__file__))
ui_file_path = os.path.join(directory, "preferences-dialog.ui")
gaupol.BuilderDialog.__init__(self, ui_file_path)
self.set_default_response(Gtk.ResponseType.CLOSE)
self.set_transient_for(parent)
self.set_modal(True)
self._init_toolbar()
self._init_tree_view(framerates)
self._remove_button.set_sensitive(False)
gaupol.util.scale_to_content(self._tree_view,
min_nchar=30,
max_nchar=60,
min_nlines=8,
max_nlines=16)
def get_framerates(self):
"""Return the defined custom framerates."""
framerates = []
store = self._tree_view.get_model()
for i in range(len(store)):
framerates.append(store[i][0])
return sorted(framerates)
def _get_selected_rows(self):
"""Return a sequence of the selected rows."""
selection = self._tree_view.get_selection()
paths = selection.get_selected_rows()[1]
return list(map(gaupol.util.tree_path_to_row, paths))
def _init_toolbar(self):
"""Initialize the tree view inline toolbar."""
self._toolbar.set_icon_size(Gtk.IconSize.MENU)
style = self._toolbar.get_style_context()
style.add_class(Gtk.STYLE_CLASS_INLINE_TOOLBAR)
theme = Gtk.IconTheme.get_default()
# Tool buttons in the UI file are specified as symbolic icons by name
# (found in adwaita-icon-theme); if they are missing in another theme,
# fall back to non-symbolic icons.
if not all((theme.has_icon(self._add_button.get_icon_name()),
theme.has_icon(self._remove_button.get_icon_name()))):
self._add_button.set_icon_name("list-add")
self._remove_button.set_icon_name("list-remove")
def _init_tree_view(self, framerates):
"""Initialize the tree view."""
selection = self._tree_view.get_selection()
selection.set_mode(Gtk.SelectionMode.MULTIPLE)
selection.connect("changed", self._on_tree_view_selection_changed)
store = Gtk.ListStore(float)
for framerate in framerates:
store.append((framerate,))
store.set_sort_column_id(0, Gtk.SortType.ASCENDING)
self._tree_view.set_model(store)
renderer = Gtk.CellRendererText()
renderer.props.xalign = 1
column = Gtk.TreeViewColumn("", renderer, text=0)
column.set_sort_column_id(0)
def format_framerate(column, renderer, store, itr, data):
renderer.props.text = "{:.6f}".format(store.get_value(itr, 0))
column.set_cell_data_func(renderer, format_framerate)
self._tree_view.append_column(column)
def _on_add_button_clicked(self, *args):
"""Add a new framerate."""
dialog = AddFramerateDialog(self._dialog)
response = gaupol.util.run_dialog(dialog)
framerate = dialog.get_framerate()
dialog.destroy()
if response != Gtk.ResponseType.OK: return
store = self._tree_view.get_model()
store.append((framerate,))
def _on_remove_button_clicked(self, *args):
"""Remove the selected framerate."""
rows = self._get_selected_rows()
store = self._tree_view.get_model()
for row in reversed(sorted(rows)):
path = gaupol.util.tree_row_to_path(row)
store.remove(store.get_iter(path))
if len(store) <= 0: return
self._tree_view.set_cursor(max(row-1, 0))
def _on_tree_view_selection_changed(self, *args):
"""Set the remove button sensitivity."""
selection = self._tree_view.get_selection()
n = selection.count_selected_rows()
self._remove_button.set_sensitive(n > 0)
class CustomFrameratesExtension(gaupol.Extension):
"""Allow use of non-standard framerates."""
def __init__(self):
"""Initialize a :class:`CustomFrameratesExtension` instance."""
self.application = None
self.conf = None
self.framerates = []
def _add_framerates(self):
"""Add custom framerates and corresponding UI elements."""
self.framerates = []
menu = self.application.get_menubar_section(
"custom-framerates-placeholder")
for value in sorted(self.conf.framerates):
name = "FPS_{:.3f}".format(value).replace(".", "_")
if hasattr(aeidon.framerates, name): continue
setattr(aeidon.framerates, name, aeidon.EnumerationItem())
framerate = getattr(aeidon.framerates, name)
framerate.label = _("{:.3f} fps").format(value)
framerate.value = float(value)
self.framerates.append(framerate)
with aeidon.util.silent(AttributeError):
# Menubar not available when running unit tests.
action = "win.set-framerate::{}".format(name)
menu.append(framerate.label, action)
def _remove_framerates(self):
"""Remove custom framerates and corresponding UI elements."""
fallback = aeidon.framerates.FPS_23_976
if gaupol.conf.editor.framerate in self.framerates:
gaupol.conf.editor.framerate = fallback
# Go through all application's pages and reset those set
# to a custom framerate back to the default framerate.
orig_page = self.application.get_current_page()
for page in self.application.pages:
if not page.project.framerate in self.framerates: continue
self.application.set_current_page(page)
action = self.application.get_action("set-framerate")
action.activate(str(fallback))
self.application.set_current_page(orig_page)
for framerate in self.framerates:
delattr(aeidon.framerates, str(framerate))
with aeidon.util.silent(AttributeError):
# Menubar not available when running unit tests.
self.application.get_menubar_section(
"custom-framerates-placeholder").remove_all()
def setup(self, application):
"""Setup extension for use with `application`."""
options = dict(framerates=[48.0])
gaupol.conf.register_extension("custom_framerates", options)
self.conf = gaupol.conf.extensions.custom_framerates
self.application = application
self.framerates = []
self._add_framerates()
def show_preferences_dialog(self, parent):
"""Show a dialog to edit list of custom framerates."""
dialog = PreferencesDialog(self.conf.framerates, parent)
gaupol.util.run_dialog(dialog)
self.conf.framerates = list(dialog.get_framerates())
dialog.destroy()
self._remove_framerates()
self._add_framerates()
def teardown(self, application):
"""End use of extension with `application`."""
self._remove_framerates()
self.application = None
self.conf = None
self.framerates = []
| otsaloma/gaupol | data/extensions/custom-framerates/custom-framerates.py | Python | gpl-3.0 | 9,196 |
#!/usr/bin/env python
# Copyright 2009-2014 Eucalyptus Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from config_manager.baseconfig import BaseConfig
from config_manager.eucalyptus.topology.cluster import Cluster
from config_manager.eucalyptus.topology.cloud_controller import CloudController
from config_manager.eucalyptus.topology.walrus import Walrus
from config_manager.eucalyptus.topology.ufs import UserFacingServices
class Topology(BaseConfig):
def __init__(self,
name=None,
description=None,
read_file_path=None,
write_file_path=None,
version=None):
description = description or "Eucalyptus Cloud Topology Configuration Block"
self.cloud_controllers = self.create_property('cloud_controller')
self.walrus = self.create_property('walrus')
self.user_facing_services = self.create_property('user_facing')
self.clusters_property = self.create_property('clusters', value={})
super(Topology, self).__init__(name=name,
description=description,
write_file_path=write_file_path,
read_file_path=read_file_path,
version=version)
def add_clusters(self, clusters):
if not clusters:
raise ValueError('add_clusters provided empty value: "{0}"'
.format(clusters))
if not isinstance(clusters, list):
clusters = [clusters]
for cluster in clusters:
assert isinstance(cluster, Cluster), 'add clusters passed non ' \
'cluster type, cluster:"{0}"' \
.format(cluster)
if self.get_cluster(cluster.name.value):
raise ValueError('Cluster with name:"{0}" already exists'
.format(cluster.name.value))
self.clusters_property.value[cluster.name.value] = cluster
def create_cluster(self, name, hypervisor, read_file_path=None, write_file_path=None):
cluster = Cluster(name=name, hypervisor=hypervisor, read_file_path=read_file_path,
write_file_path=write_file_path)
self.add_clusters(cluster)
return cluster
def get_cluster(self, clustername):
if clustername in self.clusters_property.value:
return self.clusters_property.value[clustername]
return None
def delete_cluster(self, clustername):
if clustername in self.clusters_property.value:
self.clusters_property.value.pop(clustername)
else:
print 'clustername:"{0}" not in cluster list'.format(clustername)
def add_cloud_controllers(self, clcs):
if clcs is None:
raise ValueError('add_cloud_controllers provided empty '
'value: "{0}"'.format(clcs))
if not isinstance(clcs, list):
clcs = [clcs]
if self.cloud_controllers.value is None:
self.cloud_controllers.value = []
for clc in clcs:
self.cloud_controllers.value.append(clc)
def add_walrus(self, walrus):
self.walrus = walrus
def add_user_facing_services(self, user_facing_services):
self.user_facing_services = user_facing_services
def _aggregate_eucalyptus_properties(self, show_all=False):
eucaproperties = {}
for key in self.clusters_property.value:
cluster = self.clusters_property.value[key]
eucaproperties.update(cluster._aggregate_eucalyptus_properties(show_all=show_all))
agg_dict = super(Topology, self)._aggregate_eucalyptus_properties(show_all=show_all)
eucaproperties.update(agg_dict)
return eucaproperties
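# Illustrative usage sketch (not part of the original module): assembling a
# minimal topology with a single cluster. The names and the 'kvm' hypervisor
# value are hypothetical, and BaseConfig may require further arguments.
def _example_topology():
    topo = Topology(name='example-topology')
    topo.create_cluster(name='one', hypervisor='kvm')
    return topo.get_cluster('one')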
| tbeckham/DeploymentManager | config_manager/eucalyptus/topology/__init__.py | Python | apache-2.0 | 4,399 |
'''
Created on Nov 11, 2013
@author: samriggs
'''
from collections import deque
class ClumpTracker():
'''
Contains:
* the particular k-mer to track
* number of sequences in the clump
* a queue that holds the indices of the subsequences
'''
def __init__(self, subSeq, subSeqIndex, subSeqLen, windowSize, maxNumSeq):
'''
Constructor
'''
self.subSeq = subSeq
self.currentClumpWidth = len(subSeq)
self.seqQueue = deque()
self.seqQueue.append(subSeqIndex)
self.seqLength = subSeqLen
self.window = windowSize
self.numSeq = maxNumSeq
self.fits = False
def __str__(self):
return str(self.seqQueue)
'''
subSeqStart is where the subsequence is in the given string
windowSize is L
maxNumSeq is t
'''
def updateClump(self, subSeq, subSeqIndex):
# base case #0: If the current clump fits, we should not update at all.
if (self.fits):
return 0
# induction step
else:
# part one: window size not reached yet.
if (len(self.seqQueue) < self.numSeq):
self.seqQueue.append(subSeqIndex)
elif (len(self.seqQueue) == self.numSeq):
self.seqQueue.popleft()
self.seqQueue.append(subSeqIndex)
'''
update the width of the current window:
most recent index - least recent index + sequence length
'''
self.currentClumpWidth = (
self.seqQueue[-1] - self.seqQueue[0] + self.seqLength)
self.fitCheck()
return self.currentClumpWidth
'''
Mark the sequence as fitting a (L, t) clump
(denoted by windowSize and maxNumSeq, respectively)
only if the batch of sequences are at most t and where the clump size is
within L.
'''
def fitCheck(self):
if ((len(self.seqQueue) == self.numSeq) and
(self.currentClumpWidth <= self.window)):
self.fits = True
else:
self.fits = False
| samriggs/bioinf | Homeworks/bi-Python/chapter1/ClumpTracker.py | Python | gpl-2.0 | 2,140 |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for process_sites.py"""
import os
import unittest
import pandas as pd
from pandas.testing import assert_frame_equal
from .process_sites import process_sites, check_geo_resolution
_EXPECTED_SITE_COUNT = 1
class ProcessTest(unittest.TestCase):
def test_e2e(self):
self.maxDiff = None
base_path = os.path.dirname(__file__)
base_path = os.path.join(base_path, './data/test_data')
processed_count = process_sites(base_path, base_path)
## validate the sites processed
self.assertEqual(_EXPECTED_SITE_COUNT, processed_count)
## validate the csvs
test_df = pd.read_csv(os.path.join(base_path, 'superfund_sites.csv'))
expected_df = pd.read_csv(
os.path.join(base_path, 'superfund_sites_expected.csv'))
assert_frame_equal(test_df, expected_df)
## clean up
os.remove(os.path.join(base_path, 'superfund_sites.csv'))
os.remove(os.path.join(base_path, 'superfund_sites.tmcf'))
if __name__ == '__main__':
unittest.main()
| datacommonsorg/data | scripts/us_epa/superfund/site_and_funding_status/process_sites_test.py | Python | apache-2.0 | 1,626 |
# -*- coding: Latin-1 -*-
"""Graphviz's dot language Python interface.
This module provides with a full interface to create handle modify
and process graphs in Graphviz's dot language.
References:
pydot Homepage: http://code.google.com/p/pydot/
Graphviz: http://www.graphviz.org/
DOT Language: http://www.graphviz.org/doc/info/lang.html
Programmed and tested with Graphviz 2.26.3 and Python 2.6 on OSX 10.6.4
Copyright (c) 2005-2011 Ero Carrera <[email protected]>
Distributed under MIT license [http://opensource.org/licenses/mit-license.html].
"""
__revision__ = "$LastChangedRevision: 28 $"
__author__ = 'Ero Carrera'
__version__ = '1.0.%d' % int( __revision__[21:-2] )
__license__ = 'MIT'
import os
import re
import subprocess
import tempfile
import copy
try:
import dot_parser
except Exception, e:
print "Couldn't import dot_parser, loading of dot files will not be possible."
GRAPH_ATTRIBUTES = set( ['Damping', 'K', 'URL', 'aspect', 'bb', 'bgcolor',
'center', 'charset', 'clusterrank', 'colorscheme', 'comment', 'compound',
'concentrate', 'defaultdist', 'dim', 'dimen', 'diredgeconstraints',
'dpi', 'epsilon', 'esep', 'fontcolor', 'fontname', 'fontnames',
'fontpath', 'fontsize', 'id', 'label', 'labeljust', 'labelloc',
'landscape', 'layers', 'layersep', 'layout', 'levels', 'levelsgap',
'lheight', 'lp', 'lwidth', 'margin', 'maxiter', 'mclimit', 'mindist',
'mode', 'model', 'mosek', 'nodesep', 'nojustify', 'normalize', 'nslimit',
'nslimit1', 'ordering', 'orientation', 'outputorder', 'overlap',
'overlap_scaling', 'pack', 'packmode', 'pad', 'page', 'pagedir',
'quadtree', 'quantum', 'rankdir', 'ranksep', 'ratio', 'remincross',
'repulsiveforce', 'resolution', 'root', 'rotate', 'searchsize', 'sep',
'showboxes', 'size', 'smoothing', 'sortv', 'splines', 'start',
'stylesheet', 'target', 'truecolor', 'viewport', 'voro_margin',
# for subgraphs
'rank' ] )
EDGE_ATTRIBUTES = set( ['URL', 'arrowhead', 'arrowsize', 'arrowtail',
'color', 'colorscheme', 'comment', 'constraint', 'decorate', 'dir',
'edgeURL', 'edgehref', 'edgetarget', 'edgetooltip', 'fontcolor',
'fontname', 'fontsize', 'headURL', 'headclip', 'headhref', 'headlabel',
'headport', 'headtarget', 'headtooltip', 'href', 'id', 'label',
'labelURL', 'labelangle', 'labeldistance', 'labelfloat', 'labelfontcolor',
'labelfontname', 'labelfontsize', 'labelhref', 'labeltarget',
'labeltooltip', 'layer', 'len', 'lhead', 'lp', 'ltail', 'minlen',
'nojustify', 'penwidth', 'pos', 'samehead', 'sametail', 'showboxes',
'style', 'tailURL', 'tailclip', 'tailhref', 'taillabel', 'tailport',
'tailtarget', 'tailtooltip', 'target', 'tooltip', 'weight',
'rank' ] )
NODE_ATTRIBUTES = set( ['URL', 'color', 'colorscheme', 'comment',
'distortion', 'fillcolor', 'fixedsize', 'fontcolor', 'fontname',
'fontsize', 'group', 'height', 'id', 'image', 'imagescale', 'label',
'labelloc', 'layer', 'margin', 'nojustify', 'orientation', 'penwidth',
'peripheries', 'pin', 'pos', 'rects', 'regular', 'root', 'samplepoints',
'shape', 'shapefile', 'showboxes', 'sides', 'skew', 'sortv', 'style',
'target', 'tooltip', 'vertices', 'width', 'z',
# The following are attributes used by dot2tex
'texlbl', 'texmode' ] )
CLUSTER_ATTRIBUTES = set( ['K', 'URL', 'bgcolor', 'color', 'colorscheme',
'fillcolor', 'fontcolor', 'fontname', 'fontsize', 'label', 'labeljust',
'labelloc', 'lheight', 'lp', 'lwidth', 'nojustify', 'pencolor',
'penwidth', 'peripheries', 'sortv', 'style', 'target', 'tooltip'] )
#
# Extended version of ASPN's Python Cookbook Recipe:
# Frozen dictionaries.
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/414283
#
# This version freezes dictionaries used as values within dictionaries.
#
class frozendict(dict):
def _blocked_attribute(obj):
raise AttributeError, "A frozendict cannot be modified."
_blocked_attribute = property(_blocked_attribute)
__delitem__ = __setitem__ = clear = _blocked_attribute
pop = popitem = setdefault = update = _blocked_attribute
def __new__(cls, *args, **kw):
new = dict.__new__(cls)
args_ = []
for arg in args:
if isinstance(arg, dict):
arg = copy.copy(arg)
for k, v in arg.iteritems():
if isinstance(v, frozendict):
arg[k] = v
elif isinstance(v, dict):
arg[k] = frozendict(v)
elif isinstance(v, list):
v_ = list()
for elm in v:
if isinstance(elm, dict):
v_.append( frozendict(elm) )
else:
v_.append( elm )
arg[k] = tuple(v_)
args_.append( arg )
else:
args_.append( arg )
dict.__init__(new, *args_, **kw)
return new
def __init__(self, *args, **kw):
pass
def __hash__(self):
try:
return self._cached_hash
except AttributeError:
h = self._cached_hash = hash(tuple(sorted(self.iteritems())))
return h
def __repr__(self):
return "frozendict(%s)" % dict.__repr__(self)
dot_keywords = ['graph', 'subgraph', 'digraph', 'node', 'edge', 'strict']
id_re_alpha_nums = re.compile('^[_a-zA-Z][a-zA-Z0-9_,]*$', re.UNICODE)
id_re_alpha_nums_with_ports = re.compile('^[_a-zA-Z][a-zA-Z0-9_,:\"]*[a-zA-Z0-9_,\"]+$', re.UNICODE)
id_re_num = re.compile('^[0-9,]+$', re.UNICODE)
id_re_with_port = re.compile('^([^:]*):([^:]*)$', re.UNICODE)
id_re_dbl_quoted = re.compile('^\".*\"$', re.S|re.UNICODE)
id_re_html = re.compile('^<.*>$', re.S|re.UNICODE)
def needs_quotes( s ):
"""Checks whether a string is a dot language ID.
It will check whether the string is solely composed
by the characters allowed in an ID or not.
If the string is one of the reserved keywords it will
need quotes too but the user will need to add them
manually.
"""
# If the name is a reserved keyword it will need quotes but pydot
# can't tell when it's being used as a keyword or when it's simply
# a name. Hence the user needs to supply the quotes when an element
# would use a reserved keyword as name. This function will return
# false indicating that a keyword string, if provided as-is, won't
# need quotes.
if s in dot_keywords:
return False
chars = [ord(c) for c in s if ord(c)>0x7f or ord(c)==0]
if chars and not id_re_dbl_quoted.match(s) and not id_re_html.match(s):
return True
for test_re in [id_re_alpha_nums, id_re_num, id_re_dbl_quoted, id_re_html, id_re_alpha_nums_with_ports]:
if test_re.match(s):
return False
m = id_re_with_port.match(s)
if m:
return needs_quotes(m.group(1)) or needs_quotes(m.group(2))
return True
def quote_if_necessary(s):
if isinstance(s, bool):
if s is True:
return 'True'
return 'False'
if not isinstance( s, basestring ):
return s
if not s:
return s
if needs_quotes(s):
replace = {'"' : r'\"',
"\n" : r'\n',
"\r" : r'\r'}
for (a,b) in replace.items():
s = s.replace(a, b)
return '"' + s + '"'
return s
def graph_from_dot_data(data):
"""Load graph as defined by data in DOT format.
The data is assumed to be in DOT format. It will
be parsed and a Dot class will be returned,
representing the graph.
"""
return dot_parser.parse_dot_data(data)
def graph_from_dot_file(path):
"""Load graph as defined by a DOT file.
The file is assumed to be in DOT format. It will
be loaded, parsed and a Dot class will be returned,
representing the graph.
"""
fd = file(path, 'rb')
data = fd.read()
fd.close()
return graph_from_dot_data(data)
def graph_from_edges(edge_list, node_prefix='', directed=False):
"""Creates a basic graph out of an edge list.
The edge list has to be a list of tuples representing
the nodes connected by the edge.
The values can be anything: bool, int, float, str.
If the graph is undirected by default, it is only
calculated from one of the symmetric halves of the matrix.
"""
if directed:
graph = Dot(graph_type='digraph')
else:
graph = Dot(graph_type='graph')
for edge in edge_list:
if isinstance(edge[0], str):
src = node_prefix + edge[0]
else:
src = node_prefix + str(edge[0])
if isinstance(edge[1], str):
dst = node_prefix + edge[1]
else:
dst = node_prefix + str(edge[1])
e = Edge( src, dst )
graph.add_edge(e)
return graph
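# Illustrative usage sketch (not part of the original module): build a small
# directed graph from an edge list and dump it as DOT text. The node names are
# arbitrary examples; Graph.to_string() is defined further down in this module.
def _example_graph_from_edges():
    graph = graph_from_edges([('a', 'b'), ('b', 'c')], directed=True)
    print graph.to_string()
    return graph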
def graph_from_adjacency_matrix(matrix, node_prefix= u'', directed=False):
"""Creates a basic graph out of an adjacency matrix.
The matrix has to be a list of rows of values
representing an adjacency matrix.
The values can be anything: bool, int, float, as long
as they can evaluate to True or False.
"""
node_orig = 1
if directed:
graph = Dot(graph_type='digraph')
else:
graph = Dot(graph_type='graph')
for row in matrix:
if not directed:
skip = matrix.index(row)
r = row[skip:]
else:
skip = 0
r = row
node_dest = skip+1
for e in r:
if e:
graph.add_edge(
Edge( node_prefix + node_orig,
node_prefix + node_dest) )
node_dest += 1
node_orig += 1
return graph
def graph_from_incidence_matrix(matrix, node_prefix='', directed=False):
"""Creates a basic graph out of an incidence matrix.
The matrix has to be a list of rows of values
representing an incidence matrix.
The values can be anything: bool, int, float, as long
as they can evaluate to True or False.
"""
node_orig = 1
if directed:
graph = Dot(graph_type='digraph')
else:
graph = Dot(graph_type='graph')
for row in matrix:
nodes = []
c = 1
for node in row:
if node:
nodes.append(c*node)
c += 1
nodes.sort()
if len(nodes) == 2:
graph.add_edge(
Edge( node_prefix + abs(nodes[0]),
node_prefix + nodes[1] ))
if not directed:
graph.set_simplify(True)
return graph
def __find_executables(path):
"""Used by find_graphviz
path - single directory as a string
If any of the executables are found, it will return a dictionary
containing the program names as keys and their paths as values.
Otherwise returns None
"""
success = False
progs = {'dot': '', 'twopi': '', 'neato': '', 'circo': '', 'fdp': '', 'sfdp': ''}
was_quoted = False
path = path.strip()
if path.startswith('"') and path.endswith('"'):
path = path[1:-1]
was_quoted = True
if os.path.isdir(path) :
for prg in progs.iterkeys():
if progs[prg]:
continue
if os.path.exists( os.path.join(path, prg) ):
if was_quoted:
progs[prg] = '"' + os.path.join(path, prg) + '"'
else:
progs[prg] = os.path.join(path, prg)
success = True
elif os.path.exists( os.path.join(path, prg + '.exe') ):
if was_quoted:
progs[prg] = '"' + os.path.join(path, prg + '.exe') + '"'
else:
progs[prg] = os.path.join(path, prg + '.exe')
success = True
if success:
return progs
else:
return None
# The multi-platform version of this 'find_graphviz' function was
# contributed by Peter Cock
#
def find_graphviz():
"""Locate Graphviz's executables in the system.
Tries three methods:
First: Windows Registry (Windows only)
This requires that Mark Hammond's pywin32 be installed.
Secondly: Search the path
It will look for 'dot', 'twopi' and 'neato' in all the directories
specified in the PATH environment variable.
Thirdly: Default install location (Windows only)
It will look for 'dot', 'twopi' and 'neato' in the default install
location under the "Program Files" directory.
It will return a dictionary containing the program names as keys
and their paths as values.
If this fails, it returns None.
"""
# Method 1 (Windows only)
#
if os.sys.platform == 'win32':
HKEY_LOCAL_MACHINE = 0x80000002
KEY_QUERY_VALUE = 0x0001
RegOpenKeyEx = None
RegQueryValueEx = None
RegCloseKey = None
try:
import win32api, win32con
RegOpenKeyEx = win32api.RegOpenKeyEx
RegQueryValueEx = win32api.RegQueryValueEx
RegCloseKey = win32api.RegCloseKey
except ImportError:
# Print a message suggesting they install these?
#
pass
try:
import ctypes
def RegOpenKeyEx(key, subkey, opt, sam):
result = ctypes.c_uint(0)
ctypes.windll.advapi32.RegOpenKeyExA(key, subkey, opt, sam, ctypes.byref(result))
return result.value
def RegQueryValueEx( hkey, valuename ):
data_type = ctypes.c_uint(0)
data_len = ctypes.c_uint(1024)
data = ctypes.create_string_buffer( 1024 )
res = ctypes.windll.advapi32.RegQueryValueExA(hkey, valuename, 0,
ctypes.byref(data_type), data, ctypes.byref(data_len))
return data.value
RegCloseKey = ctypes.windll.advapi32.RegCloseKey
except ImportError:
# Print a message suggesting they install these?
#
pass
if RegOpenKeyEx is not None:
# Get the GraphViz install path from the registry
#
hkey = None
potentialKeys = [
"SOFTWARE\\ATT\\Graphviz",
"SOFTWARE\\AT&T Research Labs\\Graphviz",
]
for potentialKey in potentialKeys:
try:
hkey = RegOpenKeyEx( HKEY_LOCAL_MACHINE,
potentialKey, 0, KEY_QUERY_VALUE )
if hkey is not None:
path = RegQueryValueEx( hkey, "InstallPath" )
RegCloseKey( hkey )
# The registry variable might exist, left by old installations
# but with no value; in those cases we keep searching...
if not path:
continue
# Now append the "bin" subdirectory:
#
path = os.path.join(path, "bin")
progs = __find_executables(path)
if progs is not None :
#print "Used Windows registry"
return progs
except Exception, excp:
#raise excp
pass
else:
break
# Method 2 (Linux, Windows etc)
#
if os.environ.has_key('PATH'):
for path in os.environ['PATH'].split(os.pathsep):
progs = __find_executables(path)
if progs is not None :
#print "Used path"
return progs
# Method 3 (Windows only)
#
if os.sys.platform == 'win32':
# Try and work out the equivalent of "C:\Program Files" on this
# machine (might be on drive D:, or in a different language)
#
if os.environ.has_key('PROGRAMFILES'):
# Note, we could also use the win32api to get this
# information, but win32api may not be installed.
path = os.path.join(os.environ['PROGRAMFILES'], 'ATT', 'GraphViz', 'bin')
else:
#Just in case, try the default...
path = r"C:\Program Files\att\Graphviz\bin"
progs = __find_executables(path)
if progs is not None :
#print "Used default install location"
return progs
for path in (
'/usr/bin', '/usr/local/bin',
'/opt/local/bin',
'/opt/bin', '/sw/bin', '/usr/share',
'/Applications/Graphviz.app/Contents/MacOS/' ):
progs = __find_executables(path)
if progs is not None :
#print "Used path"
return progs
# Failed to find GraphViz
#
return None
class Common:
"""Common information to several classes.
Should not be directly used, several classes are derived from
this one.
"""
def __getstate__(self):
dict = copy.copy(self.obj_dict)
return dict
def __setstate__(self, state):
self.obj_dict = state
def __get_attribute__(self, attr):
"""Look for default attributes for this node"""
attr_val = self.obj_dict['attributes'].get(attr, None)
if attr_val is None:
# get the defaults for nodes/edges
default_node_name = self.obj_dict['type']
# The defaults for graphs are set on a node named 'graph'
if default_node_name in ('subgraph', 'digraph', 'cluster'):
default_node_name = 'graph'
g = self.get_parent_graph()
if g is not None:
defaults = g.get_node( default_node_name )
else:
return None
# Multiple defaults could be set by having repeated 'graph [...]'
# 'node [...]', 'edge [...]' statements. In such case, if the
# same attribute is set in different statements, only the first
# will be returned. In order to get all, one would call the
# get_*_defaults() methods and handle those. Or go node by node
# (of the ones specifying defaults) and modify the attributes
# individually.
#
if not isinstance(defaults, (list, tuple)):
defaults = [defaults]
for default in defaults:
attr_val = default.obj_dict['attributes'].get(attr, None)
if attr_val:
return attr_val
else:
return attr_val
return None
def set_parent_graph(self, parent_graph):
self.obj_dict['parent_graph'] = parent_graph
def get_parent_graph(self):
return self.obj_dict.get('parent_graph', None)
def set(self, name, value):
"""Set an attribute value by name.
Given an attribute 'name' it will set its value to 'value'.
There's always the possibility of using the methods:
set_'name'(value)
which are defined for all the existing attributes.
"""
self.obj_dict['attributes'][name] = value
def get(self, name):
"""Get an attribute value by name.
Given an attribute 'name' it will get its value.
There's always the possibility of using the methods:
get_'name'()
which are defined for all the existing attributes.
"""
return self.obj_dict['attributes'].get(name, None)
def get_attributes(self):
""""""
return self.obj_dict['attributes']
def set_sequence(self, seq):
self.obj_dict['sequence'] = seq
def get_sequence(self):
return self.obj_dict['sequence']
def create_attribute_methods(self, obj_attributes):
#for attr in self.obj_dict['attributes']:
for attr in obj_attributes:
# Generate all the Setter methods.
#
self.__setattr__( 'set_'+attr, lambda x, a=attr : self.obj_dict['attributes'].__setitem__(a, x) )
# Generate all the Getter methods.
#
self.__setattr__('get_'+attr, lambda a=attr : self.__get_attribute__(a))
class Error(Exception):
"""General error handling class.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
class InvocationException(Exception):
"""To indicate that a ploblem occurred while running any of the GraphViz executables.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
class Node(object, Common):
"""A graph node.
This class represents a graph's node with all its attributes.
node(name, attribute=value, ...)
name: node's name
All the attributes defined in the Graphviz dot language should
be supported.
"""
def __init__(self, name = '', obj_dict = None, **attrs):
#
# Nodes will take attributes of all other types because the defaults
# for any GraphViz object are dealt with as if they were Node definitions
#
if obj_dict is not None:
self.obj_dict = obj_dict
else:
self.obj_dict = dict()
# Copy the attributes
#
self.obj_dict[ 'attributes' ] = dict( attrs )
self.obj_dict[ 'type' ] = 'node'
self.obj_dict[ 'parent_graph' ] = None
self.obj_dict[ 'parent_node_list' ] = None
self.obj_dict[ 'sequence' ] = None
# Remove the compass point
#
port = None
if isinstance(name, basestring) and not name.startswith('"'):
idx = name.find(':')
if idx > 0 and idx+1 < len(name):
name, port = name[:idx], name[idx:]
if isinstance(name, (long, int)):
name = str(name)
self.obj_dict['name'] = quote_if_necessary( name )
self.obj_dict['port'] = port
self.create_attribute_methods(NODE_ATTRIBUTES)
def set_name(self, node_name):
"""Set the node's name."""
self.obj_dict['name'] = node_name
def get_name(self):
"""Get the node's name."""
return self.obj_dict['name']
def get_port(self):
"""Get the node's port."""
return self.obj_dict['port']
def add_style(self, style):
styles = self.obj_dict['attributes'].get('style', None)
if not styles and style:
styles = [ style ]
else:
styles = styles.split(',')
styles.append( style )
self.obj_dict['attributes']['style'] = ','.join( styles )
def to_string(self):
"""Returns a string representation of the node in dot language.
"""
# RMF: special case defaults for node, edge and graph properties.
#
node = quote_if_necessary(self.obj_dict['name'])
node_attr = list()
for attr, value in self.obj_dict['attributes'].iteritems():
if value is not None:
node_attr.append( '%s=%s' % (attr, quote_if_necessary(value) ) )
else:
node_attr.append( attr )
# No point in having nodes setting any defaults if they don't set
# any attributes...
#
if node in ('graph', 'node', 'edge') and len(node_attr) == 0:
return ''
node_attr = ', '.join(node_attr)
if node_attr:
node += ' [' + node_attr + ']'
return node + ';'
class Edge(object, Common ):
"""A graph edge.
This class represents a graph's edge with all its attributes.
edge(src, dst, attribute=value, ...)
src: source node's name
dst: destination node's name
All the attributes defined in the Graphviz dot language should
be supported.
Attributes can be set through the dynamically generated methods:
set_[attribute name], i.e. set_label, set_fontname
or directly by using the instance's special dictionary:
Edge.obj_dict['attributes'][attribute name], i.e.
edge_instance.obj_dict['attributes']['label']
edge_instance.obj_dict['attributes']['fontname']
"""
def __init__(self, src='', dst='', obj_dict=None, **attrs):
if isinstance(src, (list, tuple)) and dst == '':
src, dst = src
if obj_dict is not None:
self.obj_dict = obj_dict
else:
self.obj_dict = dict()
# Copy the attributes
#
self.obj_dict[ 'attributes' ] = dict( attrs )
self.obj_dict[ 'type' ] = 'edge'
self.obj_dict[ 'parent_graph' ] = None
self.obj_dict[ 'parent_edge_list' ] = None
self.obj_dict[ 'sequence' ] = None
if isinstance(src, Node):
src = src.get_name()
if isinstance(dst, Node):
dst = dst.get_name()
points = ( quote_if_necessary( src) , quote_if_necessary( dst) )
self.obj_dict['points'] = points
self.create_attribute_methods(EDGE_ATTRIBUTES)
def get_source(self):
"""Get the edges source node name."""
return self.obj_dict['points'][0]
def get_destination(self):
"""Get the edge's destination node name."""
return self.obj_dict['points'][1]
def __hash__(self):
return hash( hash(self.get_source()) + hash(self.get_destination()) )
def __eq__(self, edge):
"""Compare two edges.
If the parent graph is directed, arcs linking
node A to B are considered equal and A->B != B->A
If the parent graph is undirected, any edge
connecting two nodes is equal to any other
edge connecting the same nodes, A->B == B->A
"""
if not isinstance(edge, Edge):
raise Error, "Can't compare and edge to a non-edge object."
if self.get_parent_graph().get_top_graph_type() == 'graph':
# If the graph is undirected, the edge has neither
# source nor destination.
#
if ( ( self.get_source() == edge.get_source() and self.get_destination() == edge.get_destination() ) or
( edge.get_source() == self.get_destination() and edge.get_destination() == self.get_source() ) ):
return True
else:
if self.get_source()==edge.get_source() and self.get_destination()==edge.get_destination() :
return True
return False
def parse_node_ref(self, node_str):
if not isinstance(node_str, str):
return node_str
if node_str.startswith('"') and node_str.endswith('"'):
return node_str
node_port_idx = node_str.rfind(':')
if node_port_idx>0 and node_str[0]=='"' and node_str[node_port_idx-1]=='"':
return node_str
if node_port_idx>0:
a = node_str[:node_port_idx]
b = node_str[node_port_idx+1:]
node = quote_if_necessary(a)
node += ':'+quote_if_necessary(b)
return node
return node_str
def to_string(self):
"""Returns a string representation of the edge in dot language.
"""
src = self.parse_node_ref( self.get_source() )
dst = self.parse_node_ref( self.get_destination() )
if isinstance(src, frozendict):
edge = [ Subgraph(obj_dict=src).to_string() ]
elif isinstance(src, (int, long)):
edge = [ str(src) ]
else:
edge = [ src ]
if (self.get_parent_graph() and
self.get_parent_graph().get_top_graph_type() and
self.get_parent_graph().get_top_graph_type() == 'digraph' ):
edge.append( '->' )
else:
edge.append( '--' )
if isinstance(dst, frozendict):
edge.append( Subgraph(obj_dict=dst).to_string() )
elif isinstance(dst, (int, long)):
edge.append( str(dst) )
else:
edge.append( dst )
edge_attr = list()
for attr, value in self.obj_dict['attributes'].iteritems():
if value is not None:
edge_attr.append( '%s=%s' % (attr, quote_if_necessary(value) ) )
else:
edge_attr.append( attr )
edge_attr = ', '.join(edge_attr)
if edge_attr:
edge.append( ' [' + edge_attr + ']' )
return ' '.join(edge) + ';'
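# Illustrative sketch (not part of the original module): creating an Edge from
# two node names; attribute values are arbitrary examples. Note that a
# standalone edge renders with '--' and only uses '->' once it belongs to a
# parent graph whose top-level type is 'digraph' (see to_string above).
#
#   edge = Edge('a', 'b', color='red')
#   edge.set_label('a to b')    # set_label is one of the generated attribute methods
#   print edge.to_string()      # -> something like: a -- b [color=red, label="a to b"];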
class Graph(object, Common):
"""Class representing a graph in Graphviz's dot language.
This class implements the methods to work on a representation
of a graph in Graphviz's dot language.
graph( graph_name='G', graph_type='digraph',
strict=False, suppress_disconnected=False, attribute=value, ...)
graph_name:
the graph's name
graph_type:
can be 'graph' or 'digraph'
suppress_disconnected:
         defaults to False. If set to True, any disconnected
         nodes will be removed from the graph.
simplify:
if True it will avoid displaying equal edges, i.e.
         only one edge between two nodes, removing the
duplicated ones.
All the attributes defined in the Graphviz dot language should
be supported.
Attributes can be set through the dynamically generated methods:
set_[attribute name], i.e. set_size, set_fontname
or using the instance's attributes:
Graph.obj_dict['attributes'][attribute name], i.e.
graph_instance.obj_dict['attributes']['label']
graph_instance.obj_dict['attributes']['fontname']
"""
def __init__(self, graph_name='G', obj_dict=None, graph_type='digraph', strict=False,
suppress_disconnected=False, simplify=False, **attrs):
if obj_dict is not None:
self.obj_dict = obj_dict
else:
self.obj_dict = dict()
self.obj_dict['attributes'] = dict(attrs)
if graph_type not in ['graph', 'digraph']:
            raise Error, 'Invalid type "%s". Accepted graph types are: graph, digraph' % graph_type
self.obj_dict['name'] = quote_if_necessary(graph_name)
self.obj_dict['type'] = graph_type
self.obj_dict['strict'] = strict
self.obj_dict['suppress_disconnected'] = suppress_disconnected
self.obj_dict['simplify'] = simplify
self.obj_dict['current_child_sequence'] = 1
self.obj_dict['nodes'] = dict()
self.obj_dict['edges'] = dict()
self.obj_dict['subgraphs'] = dict()
self.set_parent_graph(self)
self.create_attribute_methods(GRAPH_ATTRIBUTES)
def get_graph_type(self):
return self.obj_dict['type']
def get_top_graph_type(self):
parent = self
while True:
parent_ = parent.get_parent_graph()
if parent_ == parent:
break
parent = parent_
return parent.obj_dict['type']
def set_graph_defaults(self, **attrs):
self.add_node( Node('graph', **attrs) )
def get_graph_defaults(self, **attrs):
graph_nodes = self.get_node('graph')
if isinstance( graph_nodes, (list, tuple)):
return [ node.get_attributes() for node in graph_nodes ]
return graph_nodes.get_attributes()
def set_node_defaults(self, **attrs):
self.add_node( Node('node', **attrs) )
def get_node_defaults(self, **attrs):
graph_nodes = self.get_node('node')
if isinstance( graph_nodes, (list, tuple)):
return [ node.get_attributes() for node in graph_nodes ]
return graph_nodes.get_attributes()
def set_edge_defaults(self, **attrs):
self.add_node( Node('edge', **attrs) )
def get_edge_defaults(self, **attrs):
graph_nodes = self.get_node('edge')
if isinstance( graph_nodes, (list, tuple)):
return [ node.get_attributes() for node in graph_nodes ]
return graph_nodes.get_attributes()
def set_simplify(self, simplify):
"""Set whether to simplify or not.
If True it will avoid displaying equal edges, i.e.
        only one edge between two nodes, removing the
duplicated ones.
"""
self.obj_dict['simplify'] = simplify
def get_simplify(self):
"""Get whether to simplify or not.
Refer to set_simplify for more information.
"""
return self.obj_dict['simplify']
def set_type(self, graph_type):
"""Set the graph's type, 'graph' or 'digraph'."""
self.obj_dict['type'] = graph_type
def get_type(self):
"""Get the graph's type, 'graph' or 'digraph'."""
return self.obj_dict['type']
def set_name(self, graph_name):
"""Set the graph's name."""
self.obj_dict['name'] = graph_name
def get_name(self):
"""Get the graph's name."""
return self.obj_dict['name']
def set_strict(self, val):
"""Set graph to 'strict' mode.
This option is only valid for top level graphs.
"""
self.obj_dict['strict'] = val
def get_strict(self, val):
"""Get graph's 'strict' mode (True, False).
This option is only valid for top level graphs.
"""
return self.obj_dict['strict']
def set_suppress_disconnected(self, val):
"""Suppress disconnected nodes in the output graph.
This option will skip nodes in the graph with no incoming or outgoing
edges. This option works also for subgraphs and has effect only in the
current graph/subgraph.
"""
self.obj_dict['suppress_disconnected'] = val
def get_suppress_disconnected(self, val):
"""Get if suppress disconnected is set.
Refer to set_suppress_disconnected for more information.
"""
return self.obj_dict['suppress_disconnected']
def get_next_sequence_number(self):
seq = self.obj_dict['current_child_sequence']
self.obj_dict['current_child_sequence'] += 1
return seq
def add_node(self, graph_node):
"""Adds a node object to the graph.
It takes a node object as its only argument and returns
None.
"""
if not isinstance(graph_node, Node):
raise TypeError('add_node() received a non node class object: ' + str(graph_node))
node = self.get_node(graph_node.get_name())
if not node:
self.obj_dict['nodes'][graph_node.get_name()] = [ graph_node.obj_dict ]
#self.node_dict[graph_node.get_name()] = graph_node.attributes
graph_node.set_parent_graph(self.get_parent_graph())
else:
self.obj_dict['nodes'][graph_node.get_name()].append( graph_node.obj_dict )
graph_node.set_sequence(self.get_next_sequence_number())
def del_node(self, name, index=None):
"""Delete a node from the graph.
Given a node's name all node(s) with that same name
will be deleted if 'index' is not specified or set
to None.
If there are several nodes with that same name and
'index' is given, only the node in that position
will be deleted.
'index' should be an integer specifying the position
of the node to delete. If index is larger than the
number of nodes with that name, no action is taken.
If nodes are deleted it returns True. If no action
is taken it returns False.
"""
if isinstance(name, Node):
name = name.get_name()
if self.obj_dict['nodes'].has_key(name):
if index is not None and index < len(self.obj_dict['nodes'][name]):
del self.obj_dict['nodes'][name][index]
return True
else:
del self.obj_dict['nodes'][name]
return True
return False
def get_node(self, name):
"""Retrieve a node from the graph.
Given a node's name the corresponding Node
instance will be returned.
If one or more nodes exist with that name a list of
Node instances is returned.
An empty list is returned otherwise.
"""
match = list()
if self.obj_dict['nodes'].has_key(name):
match.extend( [ Node( obj_dict = obj_dict ) for obj_dict in self.obj_dict['nodes'][name] ])
return match
def get_nodes(self):
"""Get the list of Node instances."""
return self.get_node_list()
def get_node_list(self):
"""Get the list of Node instances.
This method returns the list of Node instances
composing the graph.
"""
node_objs = list()
for node, obj_dict_list in self.obj_dict['nodes'].iteritems():
node_objs.extend( [ Node( obj_dict = obj_d ) for obj_d in obj_dict_list ] )
return node_objs
def add_edge(self, graph_edge):
"""Adds an edge object to the graph.
        It takes an edge object as its only argument and returns
None.
"""
if not isinstance(graph_edge, Edge):
raise TypeError('add_edge() received a non edge class object: ' + str(graph_edge))
edge_points = ( graph_edge.get_source(), graph_edge.get_destination() )
if self.obj_dict['edges'].has_key(edge_points):
edge_list = self.obj_dict['edges'][edge_points]
edge_list.append(graph_edge.obj_dict)
else:
self.obj_dict['edges'][edge_points] = [ graph_edge.obj_dict ]
graph_edge.set_sequence( self.get_next_sequence_number() )
graph_edge.set_parent_graph( self.get_parent_graph() )
def del_edge(self, src_or_list, dst=None, index=None):
"""Delete an edge from the graph.
Given an edge's (source, destination) node names all
        matching edge(s) will be deleted if 'index' is not
specified or set to None.
If there are several matching edges and 'index' is
given, only the edge in that position will be deleted.
'index' should be an integer specifying the position
of the edge to delete. If index is larger than the
number of matching edges, no action is taken.
If edges are deleted it returns True. If no action
is taken it returns False.
"""
if isinstance( src_or_list, (list, tuple)):
if dst is not None and isinstance(dst, (int, long)):
index = dst
src, dst = src_or_list
else:
src, dst = src_or_list, dst
if isinstance(src, Node):
src = src.get_name()
if isinstance(dst, Node):
dst = dst.get_name()
if self.obj_dict['edges'].has_key( (src, dst) ):
if index is not None and index < len(self.obj_dict['edges'][(src, dst)]):
del self.obj_dict['edges'][(src, dst)][index]
return True
else:
del self.obj_dict['edges'][(src, dst)]
return True
return False
def get_edge(self, src_or_list, dst=None):
"""Retrieved an edge from the graph.
Given an edge's source and destination the corresponding
Edge instance(s) will be returned.
If one or more edges exist with that source and destination
a list of Edge instances is returned.
An empty list is returned otherwise.
"""
if isinstance( src_or_list, (list, tuple)) and dst is None:
edge_points = tuple(src_or_list)
edge_points_reverse = (edge_points[1], edge_points[0])
else:
edge_points = (src_or_list, dst)
edge_points_reverse = (dst, src_or_list)
match = list()
if self.obj_dict['edges'].has_key( edge_points ) or (
self.get_top_graph_type() == 'graph' and self.obj_dict['edges'].has_key( edge_points_reverse )):
edges_obj_dict = self.obj_dict['edges'].get(
edge_points,
self.obj_dict['edges'].get( edge_points_reverse, None ))
for edge_obj_dict in edges_obj_dict:
match.append( Edge( edge_points[0], edge_points[1], obj_dict = edge_obj_dict ) )
return match
def get_edges(self):
return self.get_edge_list()
def get_edge_list(self):
"""Get the list of Edge instances.
This method returns the list of Edge instances
composing the graph.
"""
edge_objs = list()
for edge, obj_dict_list in self.obj_dict['edges'].iteritems():
edge_objs.extend( [ Edge( obj_dict = obj_d ) for obj_d in obj_dict_list ] )
return edge_objs
def add_subgraph(self, sgraph):
"""Adds an subgraph object to the graph.
It takes a subgraph object as its only argument and returns
None.
"""
if not isinstance(sgraph, Subgraph) and not isinstance(sgraph, Cluster):
raise TypeError('add_subgraph() received a non subgraph class object:' + str(sgraph))
if self.obj_dict['subgraphs'].has_key(sgraph.get_name()):
sgraph_list = self.obj_dict['subgraphs'][ sgraph.get_name() ]
sgraph_list.append( sgraph.obj_dict )
else:
self.obj_dict['subgraphs'][ sgraph.get_name() ] = [ sgraph.obj_dict ]
sgraph.set_sequence( self.get_next_sequence_number() )
sgraph.set_parent_graph( self.get_parent_graph() )
def get_subgraph(self, name):
"""Retrieved a subgraph from the graph.
Given a subgraph's name the corresponding
Subgraph instance will be returned.
If one or more subgraphs exist with the same name, a list of
Subgraph instances is returned.
An empty list is returned otherwise.
"""
match = list()
if self.obj_dict['subgraphs'].has_key( name ):
sgraphs_obj_dict = self.obj_dict['subgraphs'].get( name )
for obj_dict_list in sgraphs_obj_dict:
#match.extend( Subgraph( obj_dict = obj_d ) for obj_d in obj_dict_list )
match.append( Subgraph( obj_dict = obj_dict_list ) )
return match
def get_subgraphs(self):
return self.get_subgraph_list()
def get_subgraph_list(self):
"""Get the list of Subgraph instances.
This method returns the list of Subgraph instances
in the graph.
"""
sgraph_objs = list()
for sgraph, obj_dict_list in self.obj_dict['subgraphs'].iteritems():
sgraph_objs.extend( [ Subgraph( obj_dict = obj_d ) for obj_d in obj_dict_list ] )
return sgraph_objs
def set_parent_graph(self, parent_graph):
self.obj_dict['parent_graph'] = parent_graph
for obj_list in self.obj_dict['nodes'].itervalues():
for obj in obj_list:
obj['parent_graph'] = parent_graph
for obj_list in self.obj_dict['edges'].itervalues():
for obj in obj_list:
obj['parent_graph'] = parent_graph
for obj_list in self.obj_dict['subgraphs'].itervalues():
for obj in obj_list:
Graph(obj_dict=obj).set_parent_graph(parent_graph)
def to_string(self):
"""Returns a string representation of the graph in dot language.
        It will return the graph and all its subelements in string form.
"""
graph = list()
if self.obj_dict.get('strict', None) is not None:
if self==self.get_parent_graph() and self.obj_dict['strict']:
graph.append('strict ')
if self.obj_dict['name'] == '':
if 'show_keyword' in self.obj_dict and self.obj_dict['show_keyword']:
graph.append( 'subgraph {\n' )
else:
graph.append( '{\n' )
else:
graph.append( '%s %s {\n' % (self.obj_dict['type'], self.obj_dict['name']) )
for attr in self.obj_dict['attributes'].iterkeys():
if self.obj_dict['attributes'].get(attr, None) is not None:
val = self.obj_dict['attributes'].get(attr)
if val is not None:
graph.append( '%s=%s' % (attr, quote_if_necessary(val)) )
else:
graph.append( attr )
graph.append( ';\n' )
edges_done = set()
edge_obj_dicts = list()
for e in self.obj_dict['edges'].itervalues():
edge_obj_dicts.extend(e)
if edge_obj_dicts:
edge_src_set, edge_dst_set = zip( *[obj['points'] for obj in edge_obj_dicts] )
edge_src_set, edge_dst_set = set(edge_src_set), set(edge_dst_set)
else:
edge_src_set, edge_dst_set = set(), set()
node_obj_dicts = list()
for e in self.obj_dict['nodes'].itervalues():
node_obj_dicts.extend(e)
sgraph_obj_dicts = list()
for sg in self.obj_dict['subgraphs'].itervalues():
sgraph_obj_dicts.extend(sg)
obj_list = [ (obj['sequence'], obj) for obj in (edge_obj_dicts + node_obj_dicts + sgraph_obj_dicts) ]
obj_list.sort()
for idx, obj in obj_list:
if obj['type'] == 'node':
node = Node(obj_dict=obj)
if self.obj_dict.get('suppress_disconnected', False):
if (node.get_name() not in edge_src_set and
node.get_name() not in edge_dst_set):
continue
graph.append( node.to_string()+'\n' )
elif obj['type'] == 'edge':
edge = Edge(obj_dict=obj)
if self.obj_dict.get('simplify', False) and edge in edges_done:
continue
graph.append( edge.to_string() + '\n' )
edges_done.add(edge)
else:
sgraph = Subgraph(obj_dict=obj)
graph.append( sgraph.to_string()+'\n' )
graph.append( '}\n' )
return ''.join(graph)
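# Illustrative sketch (not part of the original module): building a small graph
# with the API above; names and attributes are arbitrary examples.
#
#   g = Graph(graph_name='G', graph_type='digraph')
#   g.add_node(Node('a'))
#   g.add_node(Node('b'))
#   g.add_edge(Edge('a', 'b'))
#   print g.to_string()         # emits the graph as 'digraph G { ... }' in dot syntax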
class Subgraph(Graph):
"""Class representing a subgraph in Graphviz's dot language.
This class implements the methods to work on a representation
of a subgraph in Graphviz's dot language.
subgraph(graph_name='subG', suppress_disconnected=False, attribute=value, ...)
graph_name:
the subgraph's name
suppress_disconnected:
         defaults to False. If set to True, any disconnected
         nodes will be removed from the subgraph.
All the attributes defined in the Graphviz dot language should
be supported.
Attributes can be set through the dynamically generated methods:
set_[attribute name], i.e. set_size, set_fontname
or using the instance's attributes:
Subgraph.obj_dict['attributes'][attribute name], i.e.
subgraph_instance.obj_dict['attributes']['label']
subgraph_instance.obj_dict['attributes']['fontname']
"""
# RMF: subgraph should have all the attributes of graph so it can be passed
# as a graph to all methods
#
def __init__(self, graph_name='', obj_dict=None, suppress_disconnected=False,
simplify=False, **attrs):
Graph.__init__(self, graph_name=graph_name, obj_dict=obj_dict,
suppress_disconnected=suppress_disconnected, simplify=simplify, **attrs)
if obj_dict is None:
self.obj_dict['type'] = 'subgraph'
class Cluster(Graph):
"""Class representing a cluster in Graphviz's dot language.
This class implements the methods to work on a representation
of a cluster in Graphviz's dot language.
cluster(graph_name='subG', suppress_disconnected=False, attribute=value, ...)
graph_name:
the cluster's name (the string 'cluster' will be always prepended)
suppress_disconnected:
         defaults to False. If set to True, any disconnected
         nodes will be removed from the cluster.
All the attributes defined in the Graphviz dot language should
be supported.
Attributes can be set through the dynamically generated methods:
set_[attribute name], i.e. set_color, set_fontname
or using the instance's attributes:
Cluster.obj_dict['attributes'][attribute name], i.e.
cluster_instance.obj_dict['attributes']['label']
cluster_instance.obj_dict['attributes']['fontname']
"""
def __init__(self, graph_name='subG', obj_dict=None, suppress_disconnected=False,
simplify=False, **attrs):
Graph.__init__(self, graph_name=graph_name, obj_dict=obj_dict,
suppress_disconnected=suppress_disconnected, simplify=simplify, **attrs)
if obj_dict is None:
self.obj_dict['type'] = 'subgraph'
self.obj_dict['name'] = 'cluster_'+graph_name
self.create_attribute_methods(CLUSTER_ATTRIBUTES)
class Dot(Graph):
"""A container for handling a dot language file.
This class implements methods to write and process
a dot language file. It is a derived class of
the base class 'Graph'.
"""
def __init__(self, *argsl, **argsd):
Graph.__init__(self, *argsl, **argsd)
self.shape_files = list()
self.progs = None
self.formats = ['canon', 'cmap', 'cmapx', 'cmapx_np', 'dia', 'dot',
'fig', 'gd', 'gd2', 'gif', 'hpgl', 'imap', 'imap_np', 'ismap',
'jpe', 'jpeg', 'jpg', 'mif', 'mp', 'pcl', 'pdf', 'pic', 'plain',
'plain-ext', 'png', 'ps', 'ps2', 'svg', 'svgz', 'vml', 'vmlz',
'vrml', 'vtx', 'wbmp', 'xdot', 'xlib' ]
self.prog = 'dot'
# Automatically creates all the methods enabling the creation
# of output in any of the supported formats.
for frmt in self.formats:
self.__setattr__(
'create_'+frmt,
lambda f=frmt, prog=self.prog : self.create(format=f, prog=prog))
f = self.__dict__['create_'+frmt]
f.__doc__ = '''Refer to the docstring accompanying the 'create' method for more information.'''
for frmt in self.formats+['raw']:
self.__setattr__(
'write_'+frmt,
lambda path, f=frmt, prog=self.prog : self.write(path, format=f, prog=prog))
f = self.__dict__['write_'+frmt]
f.__doc__ = '''Refer to the docstring accompanying the 'write' method for more information.'''
def __getstate__(self):
dict = copy.copy(self.obj_dict)
return dict
def __setstate__(self, state):
self.obj_dict = state
def set_shape_files(self, file_paths):
"""Add the paths of the required image files.
        If the graph needs graphic objects to be used as shapes or otherwise,
        those need to be in the same folder from which the graph is going to
        be rendered. Alternatively, the absolute path to the files can be
        specified when including the graphics in the graph.
The files in the location pointed to by the path(s) specified as arguments
to this method will be copied to the same temporary location where the
graph is going to be rendered.
"""
if isinstance( file_paths, basestring ):
self.shape_files.append( file_paths )
if isinstance( file_paths, (list, tuple) ):
self.shape_files.extend( file_paths )
def set_prog(self, prog):
"""Sets the default program.
Sets the default program in charge of processing
the dot file into a graph.
"""
self.prog = prog
def set_graphviz_executables(self, paths):
"""This method allows to manually specify the location of the GraphViz executables.
The argument to this method should be a dictionary where the keys are as follows:
{'dot': '', 'twopi': '', 'neato': '', 'circo': '', 'fdp': ''}
and the values are the paths to the corresponding executable, including the name
of the executable itself.
"""
self.progs = paths
def write(self, path, prog=None, format='raw'):
"""Writes a graph to a file.
        Given a filename 'path', it will open/create and truncate
        that file and write to it a representation of the graph
defined by the dot object and in the format specified by
'format'.
The format 'raw' is used to dump the string representation
of the Dot object, without further processing.
        The output can be processed by any of the graphviz tools, defined
        in 'prog', which defaults to 'dot'.
Returns True or False according to the success of the write
operation.
There's also the preferred possibility of using:
write_'format'(path, prog='program')
which are automatically defined for all the supported formats.
[write_ps(), write_gif(), write_dia(), ...]
"""
if prog is None:
prog = self.prog
dot_fd = file(path, "w+b")
if format == 'raw':
data = self.to_string()
if isinstance(data, basestring):
if not isinstance(data, unicode):
try:
data = unicode(data, 'utf-8')
except:
pass
try:
data = data.encode('utf-8')
except:
pass
dot_fd.write(data)
else:
dot_fd.write(self.create(prog, format))
dot_fd.close()
return True
def create(self, prog=None, format='ps'):
"""Creates and returns a Postscript representation of the graph.
create will write the graph to a temporary dot file and process
        it with the program given by 'prog' (which defaults to 'dot'),
        reading the Postscript output and returning it as a string if the
        operation is successful.
On failure None is returned.
There's also the preferred possibility of using:
create_'format'(prog='program')
which are automatically defined for all the supported formats.
[create_ps(), create_gif(), create_dia(), ...]
        If 'prog' is a list instead of a string, the first item is expected
to be the program name, followed by any optional command-line
arguments for it:
[ 'twopi', '-Tdot', '-s10' ]
"""
if prog is None:
prog = self.prog
if isinstance(prog, (list, tuple)):
prog, args = prog[0], prog[1:]
else:
args = []
if self.progs is None:
self.progs = find_graphviz()
if self.progs is None:
raise InvocationException(
'GraphViz\'s executables not found' )
if not self.progs.has_key(prog):
raise InvocationException(
'GraphViz\'s executable "%s" not found' % prog )
if not os.path.exists( self.progs[prog] ) or not os.path.isfile( self.progs[prog] ):
raise InvocationException(
'GraphViz\'s executable "%s" is not a file or doesn\'t exist' % self.progs[prog] )
tmp_fd, tmp_name = tempfile.mkstemp()
os.close(tmp_fd)
self.write(tmp_name)
tmp_dir = os.path.dirname(tmp_name )
# For each of the image files...
#
for img in self.shape_files:
# Get its data
#
f = file(img, 'rb')
f_data = f.read()
f.close()
# And copy it under a file with the same name in the temporary directory
#
f = file( os.path.join( tmp_dir, os.path.basename(img) ), 'wb' )
f.write(f_data)
f.close()
cmdline = [self.progs[prog], '-T'+format, tmp_name] + args
p = subprocess.Popen(
cmdline,
cwd=tmp_dir,
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
stderr = p.stderr
stdout = p.stdout
stdout_output = list()
while True:
data = stdout.read()
if not data:
break
stdout_output.append(data)
stdout.close()
stdout_output = ''.join(stdout_output)
if not stderr.closed:
stderr_output = list()
while True:
data = stderr.read()
if not data:
break
stderr_output.append(data)
stderr.close()
if stderr_output:
stderr_output = ''.join(stderr_output)
#pid, status = os.waitpid(p.pid, 0)
status = p.wait()
if status != 0 :
raise InvocationException(
'Program terminated with status: %d. stderr follows: %s' % (
status, stderr_output) )
elif stderr_output:
print stderr_output
# For each of the image files...
#
for img in self.shape_files:
# remove it
#
os.unlink( os.path.join( tmp_dir, os.path.basename(img) ) )
os.unlink(tmp_name)
return stdout_output
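# Illustrative sketch (not part of the original module): rendering with the Dot
# container. The write_png()/create_ps() helpers are generated dynamically from
# self.formats above and require a working GraphViz installation on the system.
#
#   dot = Dot(graph_name='G', graph_type='digraph')
#   dot.add_edge(Edge('a', 'b'))
#   dot.write_png('example.png')    # or: ps_data = dot.create_ps()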
| margulies/topography | utils_py/pydot/pydot.py | Python | mit | 61,340 |
# coding=utf-8
"""InaSAFE Disaster risk tool by Australian Aid - Metadata for Flood Vector
Impact on OSM Buildings using QGIS libraries.
Contact : [email protected]
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'lucernae'
from safe.common.utilities import OrderedDict
from safe.defaults import building_type_postprocessor
from safe.impact_functions.impact_function_metadata import \
ImpactFunctionMetadata
from safe.utilities.i18n import tr
from safe.definitions import (
layer_mode_classified,
layer_geometry_polygon,
hazard_flood,
hazard_category_single_event,
exposure_structure,
flood_vector_hazard_classes,
layer_mode_none,
building_type_field,
affected_field,
affected_value
)
class FloodPolygonBuildingFunctionMetadata(ImpactFunctionMetadata):
"""Metadata for Flood Vector on Building Impact Function using QGIS.
.. versionadded:: 2.1
We only need to re-implement as_dict(), all other behaviours
are inherited from the abstract base class.
"""
@staticmethod
def as_dict():
"""Return metadata as a dictionary.
This is a static method. You can use it to get the metadata in
dictionary format for an impact function.
:returns: A dictionary representing all the metadata for the
concrete impact function.
:rtype: dict
"""
dict_meta = {
'id': 'FloodPolygonBuildingFunction',
'name': tr('Polygon flood on buildings'),
'impact': tr('Be flooded'),
'title': tr('Be flooded'),
'function_type': 'qgis2.0',
'author': 'Dmitry Kolesov',
'date_implemented': 'N/A',
'overview': tr('N/A'),
'detailed_description': tr('N/A'),
'hazard_input': '',
'exposure_input': '',
'output': '',
'actions': '',
'limitations': [],
'citations': [],
'layer_requirements': {
'hazard': {
'layer_mode': layer_mode_classified,
'layer_geometries': [layer_geometry_polygon],
'hazard_categories': [hazard_category_single_event],
'hazard_types': [hazard_flood],
'continuous_hazard_units': [],
'vector_hazard_classifications': [
flood_vector_hazard_classes],
'raster_hazard_classifications': [],
'additional_keywords': [affected_field, affected_value]
},
'exposure': {
'layer_mode': layer_mode_none,
'layer_geometries': [layer_geometry_polygon],
'exposure_types': [exposure_structure],
'exposure_units': [],
'additional_keywords': [building_type_field]
}
},
'parameters': OrderedDict([
# This field of the exposure layer contains
# information about building types
('building_type_field', 'TYPE'),
# This field of the hazard layer contains information
# about inundated areas
('affected_field', 'FLOODPRONE'),
# This value in 'affected_field' of the hazard layer
# marks the areas as inundated
('affected_value', 'YES'),
('postprocessors', OrderedDict([
('BuildingType', building_type_postprocessor())
]))
])
}
return dict_meta
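# Illustrative sketch (not part of the original module): since as_dict() is a
# staticmethod, the metadata can be obtained without instantiating the class.
#
#   metadata = FloodPolygonBuildingFunctionMetadata.as_dict()
#   function_id = metadata['id']    # -> 'FloodPolygonBuildingFunction'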
| Jannes123/inasafe | safe/impact_functions/inundation/flood_vector_building_impact/metadata_definitions.py | Python | gpl-3.0 | 3,884 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to parse perf data from Chrome Endure test executions, to be graphed.
This script connects via HTTP to a buildbot master in order to scrape and parse
perf data from Chrome Endure tests that have been run. The perf data is then
stored in local text files to be graphed by the Chrome Endure graphing code.
It is assumed that any Chrome Endure tests that show up on the waterfall have
names that are of the following form:
"endure_<webapp_name>_test <test_name>" (non-Web Page Replay tests)
or
"endure_<webapp_name>_wpr_test <test_name>" (Web Page Replay tests)
For example: "endure_gmail_wpr_test testGmailComposeDiscard"
"""
import getpass
import logging
import optparse
import os
import re
import simplejson
import socket
import sys
import time
import urllib
import urllib2
CHROME_ENDURE_SLAVE_NAMES = [
'Linux (perf0)',
'Linux (perf1)',
'Linux (perf2)',
'Linux (perf3)',
'Linux (perf4)',
]
BUILDER_URL_BASE = 'http://build.chromium.org/p/chromium.pyauto/builders/'
LAST_BUILD_NUM_PROCESSED_FILE = os.path.join(os.path.dirname(__file__),
'_parser_last_processed.txt')
LOCAL_GRAPH_DIR = '/home/%s/www/chrome_endure_clean' % getpass.getuser()
def SetupBaseGraphDirIfNeeded(webapp_name, test_name, dest_dir):
"""Sets up the directory containing results for a particular test, if needed.
Args:
webapp_name: The string name of the webapp associated with the given test.
test_name: The string name of the test.
dest_dir: The name of the destination directory that needs to be set up.
"""
if not os.path.exists(dest_dir):
os.mkdir(dest_dir) # Test name directory.
os.chmod(dest_dir, 0755)
# Create config file.
config_file = os.path.join(dest_dir, 'config.js')
if not os.path.exists(config_file):
with open(config_file, 'w') as f:
f.write('var Config = {\n')
f.write('buildslave: "Chrome Endure Bots",\n')
f.write('title: "Chrome Endure %s Test: %s",\n' % (webapp_name.upper(),
test_name))
f.write('};\n')
os.chmod(config_file, 0755)
# Set up symbolic links to the real graphing files.
link_file = os.path.join(dest_dir, 'index.html')
if not os.path.exists(link_file):
os.symlink('../../endure_plotter.html', link_file)
link_file = os.path.join(dest_dir, 'endure_plotter.js')
if not os.path.exists(link_file):
os.symlink('../../endure_plotter.js', link_file)
link_file = os.path.join(dest_dir, 'js')
if not os.path.exists(link_file):
os.symlink('../../js', link_file)
def WriteToDataFile(new_line, existing_lines, revision, data_file):
"""Writes a new entry to an existing perf data file to be graphed.
If there's an existing line with the same revision number, overwrite its data
with the new line. Else, prepend the info for the new revision.
Args:
new_line: A dictionary representing perf information for the new entry.
existing_lines: A list of string lines from the existing perf data file.
revision: The string revision number associated with the new perf entry.
data_file: The string name of the perf data file to which to write.
"""
overwritten = False
for i, line in enumerate(existing_lines):
line_dict = simplejson.loads(line)
if line_dict['rev'] == revision:
existing_lines[i] = simplejson.dumps(new_line)
overwritten = True
break
elif int(line_dict['rev']) < int(revision):
break
if not overwritten:
existing_lines.insert(0, simplejson.dumps(new_line))
with open(data_file, 'w') as f:
f.write('\n'.join(existing_lines))
os.chmod(data_file, 0755)
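# Illustrative (hypothetical values): each line handed to WriteToDataFile() is a
# JSON object keyed by revision, e.g. for a short-running result:
#   {"traces": {"vm_size_browser": ["123456", "0.0"]}, "rev": "150000"}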
def OutputPerfData(revision, graph_name, values, units, units_x, dest_dir,
is_stacked=False, stack_order=[]):
"""Outputs perf data to a local text file to be graphed.
Args:
revision: The string revision number associated with the perf data.
graph_name: The string name of the graph on which to plot the data.
values: A dict which maps a description to a value. A value is either a
single data value to be graphed, or a list of 2-tuples
representing (x, y) points to be graphed for long-running tests.
units: The string description for the y-axis units on the graph.
units_x: The string description for the x-axis units on the graph. Should
be set to None if the results are not for long-running graphs.
dest_dir: The name of the destination directory to which to write.
is_stacked: True to draw a "stacked" graph. First-come values are
stacked at bottom by default.
stack_order: A list that contains key strings in the order to stack values
in the graph.
"""
# Update graphs.dat, which contains metadata associated with each graph.
existing_graphs = []
graphs_file = os.path.join(dest_dir, 'graphs.dat')
if os.path.exists(graphs_file):
with open(graphs_file, 'r') as f:
existing_graphs = simplejson.loads(f.read())
is_new_graph = True
for graph in existing_graphs:
if graph['name'] == graph_name:
is_new_graph = False
break
if is_new_graph:
new_graph = {
'name': graph_name,
'units': units,
'important': False,
}
if units_x:
new_graph['units_x'] = units_x
existing_graphs.append(new_graph)
existing_graphs = sorted(existing_graphs, key=lambda x: x['name'])
with open(graphs_file, 'w') as f:
f.write(simplejson.dumps(existing_graphs, indent=2))
os.chmod(graphs_file, 0755)
# Update summary data file, containing the actual data to be graphed.
data_file_name = graph_name + '-summary.dat'
existing_lines = []
data_file = os.path.join(dest_dir, data_file_name)
if os.path.exists(data_file):
with open(data_file, 'r') as f:
existing_lines = f.readlines()
existing_lines = map(lambda x: x.strip(), existing_lines)
new_traces = {}
for description in values:
value = values[description]
if units_x:
points = []
for point in value:
points.append([str(point[0]), str(point[1])])
new_traces[description] = points
else:
new_traces[description] = [str(value), str(0.0)]
new_line = {
'traces': new_traces,
'rev': revision
}
if is_stacked:
new_line['stack'] = True
new_line['stack_order'] = stack_order
WriteToDataFile(new_line, existing_lines, revision, data_file)
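# Minimal illustrative call (hypothetical values), relying only on the signature
# above; dest_dir is expected to exist already (see SetupBaseGraphDirIfNeeded):
#   OutputPerfData('150000', 'vm_stats', {'browser_vm': 123.4}, 'MB', None,
#                  '/path/to/graphs/webapp/test')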
def OutputEventData(revision, event_dict, dest_dir):
"""Outputs event data to a local text file to be graphed.
Args:
revision: The string revision number associated with the event data.
event_dict: A dict which maps a description to an array of tuples
representing event data to be graphed.
dest_dir: The name of the destination directory to which to write.
"""
data_file_name = '_EVENT_-summary.dat'
existing_lines = []
data_file = os.path.join(dest_dir, data_file_name)
if os.path.exists(data_file):
with open(data_file, 'r') as f:
existing_lines = f.readlines()
existing_lines = map(lambda x: x.strip(), existing_lines)
new_events = {}
for description in event_dict:
event_list = event_dict[description]
value_list = []
for event_time, event_data in event_list:
value_list.append([str(event_time), event_data])
new_events[description] = value_list
new_line = {
'rev': revision,
'events': new_events
}
WriteToDataFile(new_line, existing_lines, revision, data_file)
def UpdatePerfDataFromFetchedContent(revision, content, webapp_name, test_name):
"""Update perf data from fetched stdio data.
Args:
revision: The string revision number associated with the new perf entry.
content: Fetched stdio data.
webapp_name: A name of the webapp.
test_name: A name of the test.
"""
perf_data_raw = []
def AppendRawPerfData(graph_name, description, value, units, units_x,
webapp_name, test_name, is_stacked=False):
perf_data_raw.append({
'graph_name': graph_name,
'description': description,
'value': value,
'units': units,
'units_x': units_x,
'webapp_name': webapp_name,
'test_name': test_name,
'stack': is_stacked,
})
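  # Illustrative (hypothetical) stdio lines matched by the scans below:
  #   RESULT vm_final_browser: vm_peak= 123456.0 kb
  #   RESULT total_dom_nodes: nodes= [(10, 500), (20, 512)] nodes seconds
  #   RESULT _EVENT_: TestEvents= [(15.0, 'open'), (30.2, 'close')]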
# First scan for short-running perf test results.
for match in re.findall(
r'RESULT ([^:]+): ([^=]+)= ([-\d\.]+) (\S+)', content):
AppendRawPerfData(match[0], match[1], eval(match[2]), match[3], None,
webapp_name, webapp_name)
# Next scan for long-running perf test results.
for match in re.findall(
r'RESULT ([^:]+): ([^=]+)= (\[[^\]]+\]) (\S+) (\S+)', content):
# TODO(dmikurube): Change the condition to use stacked graph when we
# determine how to specify it.
AppendRawPerfData(match[0], match[1], eval(match[2]), match[3], match[4],
webapp_name, test_name, match[0].endswith('-DMP'))
# Next scan for events in the test results.
for match in re.findall(
r'RESULT _EVENT_: ([^=]+)= (\[[^\]]+\])', content):
AppendRawPerfData('_EVENT_', match[0], eval(match[1]), None, None,
webapp_name, test_name)
# For each graph_name/description pair that refers to a long-running test
# result or an event, concatenate all the results together (assume results
# in the input file are in the correct order). For short-running test
# results, keep just one if more than one is specified.
perf_data = {} # Maps a graph-line key to a perf data dictionary.
for data in perf_data_raw:
key_graph = data['graph_name']
key_description = data['description']
if not key_graph in perf_data:
perf_data[key_graph] = {
'graph_name': data['graph_name'],
'value': {},
'units': data['units'],
'units_x': data['units_x'],
'webapp_name': data['webapp_name'],
'test_name': data['test_name'],
}
perf_data[key_graph]['stack'] = data['stack']
if 'stack_order' not in perf_data[key_graph]:
perf_data[key_graph]['stack_order'] = []
if (data['stack'] and
data['description'] not in perf_data[key_graph]['stack_order']):
perf_data[key_graph]['stack_order'].append(data['description'])
if data['graph_name'] != '_EVENT_' and not data['units_x']:
# Short-running test result.
perf_data[key_graph]['value'][key_description] = data['value']
else:
# Long-running test result or event.
if key_description in perf_data[key_graph]['value']:
perf_data[key_graph]['value'][key_description] += data['value']
else:
perf_data[key_graph]['value'][key_description] = data['value']
# Finally, for each graph-line in |perf_data|, update the associated local
# graph data files if necessary.
for perf_data_key in perf_data:
perf_data_dict = perf_data[perf_data_key]
dest_dir = os.path.join(LOCAL_GRAPH_DIR, perf_data_dict['webapp_name'])
if not os.path.exists(dest_dir):
os.mkdir(dest_dir) # Webapp name directory.
os.chmod(dest_dir, 0755)
dest_dir = os.path.join(dest_dir, perf_data_dict['test_name'])
SetupBaseGraphDirIfNeeded(perf_data_dict['webapp_name'],
perf_data_dict['test_name'], dest_dir)
if perf_data_dict['graph_name'] == '_EVENT_':
OutputEventData(revision, perf_data_dict['value'], dest_dir)
else:
OutputPerfData(revision, perf_data_dict['graph_name'],
perf_data_dict['value'],
perf_data_dict['units'], perf_data_dict['units_x'],
dest_dir,
perf_data_dict['stack'], perf_data_dict['stack_order'])
def UpdatePerfDataForSlaveAndBuild(slave_info, build_num):
"""Process updated perf data for a particular slave and build number.
Args:
slave_info: A dictionary containing information about the slave to process.
build_num: The particular build number on the slave to process.
Returns:
True if the perf data for the given slave/build is updated properly, or
False if any critical error occurred.
"""
logging.debug(' %s, build %d.', slave_info['slave_name'], build_num)
build_url = (BUILDER_URL_BASE + urllib.quote(slave_info['slave_name']) +
'/builds/' + str(build_num))
url_contents = ''
fp = None
try:
fp = urllib2.urlopen(build_url, timeout=60)
url_contents = fp.read()
except urllib2.URLError, e:
logging.exception('Error reading build URL "%s": %s', build_url, str(e))
return False
finally:
if fp:
fp.close()
# Extract the revision number for this build.
revision = re.findall(
r'<td class="left">got_revision</td>\s+<td>(\d+)</td>\s+<td>Source</td>',
url_contents)
if not revision:
logging.warning('Could not get revision number. Assuming build is too new '
'or was cancelled.')
return True # Do not fail the script in this case; continue with next one.
revision = revision[0]
# Extract any Chrome Endure stdio links for this build.
stdio_urls = []
links = re.findall(r'(/steps/endure[^/]+/logs/stdio)', url_contents)
for link in links:
link_unquoted = urllib.unquote(link)
found_wpr_result = False
match = re.findall(r'endure_([^_]+)_test ([^/]+)/', link_unquoted)
if not match:
match = re.findall(r'endure_([^_]+)_wpr_test ([^/]+)/', link_unquoted)
if match:
found_wpr_result = True
else:
logging.error('Test name not in expected format in link: ' +
link_unquoted)
return False
match = match[0]
webapp_name = match[0] + '_wpr' if found_wpr_result else match[0]
test_name = match[1]
stdio_urls.append({
'link': build_url + link + '/text',
'webapp_name': webapp_name,
'test_name': test_name,
})
# For each test stdio link, parse it and look for new perf data to be graphed.
for stdio_url_data in stdio_urls:
stdio_url = stdio_url_data['link']
url_contents = ''
fp = None
try:
fp = urllib2.urlopen(stdio_url, timeout=60)
# Since in-progress test output is sent chunked, there's no EOF. We need
# to specially handle this case so we don't hang here waiting for the
# test to complete.
start_time = time.time()
while True:
data = fp.read(1024)
if not data:
break
url_contents += data
if time.time() - start_time >= 30: # Read for at most 30 seconds.
break
except (urllib2.URLError, socket.error), e:
# Issue warning but continue to the next stdio link.
logging.warning('Error reading test stdio URL "%s": %s', stdio_url,
str(e))
finally:
if fp:
fp.close()
UpdatePerfDataFromFetchedContent(revision, url_contents,
stdio_url_data['webapp_name'],
stdio_url_data['test_name'])
return True
def UpdatePerfDataFiles():
"""Updates the Chrome Endure graph data files with the latest test results.
For each known Chrome Endure slave, we scan its latest test results looking
for any new test data. Any new data that is found is then appended to the
data files used to display the Chrome Endure graphs.
Returns:
True if all graph data files are updated properly, or
False if any error occurred.
"""
slave_list = []
for slave_name in CHROME_ENDURE_SLAVE_NAMES:
slave_info = {}
slave_info['slave_name'] = slave_name
slave_info['most_recent_build_num'] = None
slave_info['last_processed_build_num'] = None
slave_list.append(slave_info)
# Identify the most recent build number for each slave.
logging.debug('Searching for latest build numbers for each slave...')
for slave in slave_list:
slave_name = slave['slave_name']
slave_url = BUILDER_URL_BASE + urllib.quote(slave_name)
url_contents = ''
fp = None
try:
fp = urllib2.urlopen(slave_url, timeout=60)
url_contents = fp.read()
except urllib2.URLError, e:
logging.exception('Error reading builder URL: %s', str(e))
return False
finally:
if fp:
fp.close()
matches = re.findall(r'/(\d+)/stop', url_contents)
if matches:
slave['most_recent_build_num'] = int(matches[0])
else:
matches = re.findall(r'#(\d+)</a></td>', url_contents)
if matches:
slave['most_recent_build_num'] = sorted(map(int, matches),
reverse=True)[0]
else:
logging.error('Could not identify latest build number for slave %s.',
slave_name)
return False
logging.debug('%s most recent build number: %s', slave_name,
slave['most_recent_build_num'])
# Identify the last-processed build number for each slave.
logging.debug('Identifying last processed build numbers...')
if not os.path.exists(LAST_BUILD_NUM_PROCESSED_FILE):
for slave_info in slave_list:
slave_info['last_processed_build_num'] = 0
else:
with open(LAST_BUILD_NUM_PROCESSED_FILE, 'r') as fp:
file_contents = fp.read()
for match in re.findall(r'([^:]+):(\d+)', file_contents):
slave_name = match[0].strip()
last_processed_build_num = match[1].strip()
for slave_info in slave_list:
if slave_info['slave_name'] == slave_name:
slave_info['last_processed_build_num'] = int(
last_processed_build_num)
for slave_info in slave_list:
if not slave_info['last_processed_build_num']:
slave_info['last_processed_build_num'] = 0
logging.debug('Done identifying last processed build numbers.')
# For each Chrome Endure slave, process each build in-between the last
# processed build num and the most recent build num, inclusive. To process
# each one, first get the revision number for that build, then scan the test
# result stdio for any performance data, and add any new performance data to
# local files to be graphed.
for slave_info in slave_list:
logging.debug('Processing %s, builds %d-%d...',
slave_info['slave_name'],
slave_info['last_processed_build_num'],
slave_info['most_recent_build_num'])
curr_build_num = slave_info['last_processed_build_num']
while curr_build_num <= slave_info['most_recent_build_num']:
if not UpdatePerfDataForSlaveAndBuild(slave_info, curr_build_num):
return False
curr_build_num += 1
# Log the newly-processed build numbers.
logging.debug('Logging the newly-processed build numbers...')
with open(LAST_BUILD_NUM_PROCESSED_FILE, 'w') as f:
for slave_info in slave_list:
f.write('%s:%s\n' % (slave_info['slave_name'],
slave_info['most_recent_build_num']))
return True
def GenerateIndexPage():
"""Generates a summary (landing) page for the Chrome Endure graphs."""
logging.debug('Generating new index.html page...')
# Page header.
page = """
<html>
<head>
<title>Chrome Endure Overview</title>
<script language="javascript">
function DisplayGraph(name, graph) {
document.write(
'<td><iframe scrolling="no" height="438" width="700" src="');
document.write(name);
document.write('"></iframe></td>');
}
</script>
</head>
<body>
<center>
<h1>
Chrome Endure
</h1>
"""
# Print current time.
page += '<p>Updated: %s</p>\n' % (
time.strftime('%A, %B %d, %Y at %I:%M:%S %p %Z'))
# Links for each webapp.
webapp_names = [x for x in os.listdir(LOCAL_GRAPH_DIR) if
x not in ['js', 'old_data'] and
os.path.isdir(os.path.join(LOCAL_GRAPH_DIR, x))]
webapp_names = sorted(webapp_names)
page += '<p> ['
for i, name in enumerate(webapp_names):
page += '<a href="#%s">%s</a>' % (name.upper(), name.upper())
if i < len(webapp_names) - 1:
page += ' | '
page += '] </p>\n'
# Print out the data for each webapp.
for webapp_name in webapp_names:
page += '\n<h1 id="%s">%s</h1>\n' % (webapp_name.upper(),
webapp_name.upper())
# Links for each test for this webapp.
test_names = [x for x in
os.listdir(os.path.join(LOCAL_GRAPH_DIR, webapp_name))]
test_names = sorted(test_names)
page += '<p> ['
for i, name in enumerate(test_names):
page += '<a href="#%s">%s</a>' % (name, name)
if i < len(test_names) - 1:
page += ' | '
page += '] </p>\n'
# Print out the data for each test for this webapp.
for test_name in test_names:
# Get the set of graph names for this test.
graph_names = [x[:x.find('-summary.dat')] for x in
os.listdir(os.path.join(LOCAL_GRAPH_DIR,
webapp_name, test_name))
if '-summary.dat' in x and '_EVENT_' not in x]
graph_names = sorted(graph_names)
page += '<h2 id="%s">%s</h2>\n' % (test_name, test_name)
page += '<table>\n'
for i, graph_name in enumerate(graph_names):
if i % 2 == 0:
page += ' <tr>\n'
page += (' <script>DisplayGraph("%s/%s?graph=%s&lookout=1");'
'</script>\n' % (webapp_name, test_name, graph_name))
if i % 2 == 1:
page += ' </tr>\n'
if len(graph_names) % 2 == 1:
page += ' </tr>\n'
page += '</table>\n'
# Page footer.
page += """
</center>
</body>
</html>
"""
index_file = os.path.join(LOCAL_GRAPH_DIR, 'index.html')
with open(index_file, 'w') as f:
f.write(page)
os.chmod(index_file, 0755)
def main():
parser = optparse.OptionParser()
parser.add_option(
'-v', '--verbose', action='store_true', default=False,
help='Use verbose logging.')
parser.add_option(
'-s', '--stdin', action='store_true', default=False,
help='Input from stdin instead of slaves for testing this script.')
options, _ = parser.parse_args(sys.argv)
logging_level = logging.DEBUG if options.verbose else logging.INFO
logging.basicConfig(level=logging_level,
format='[%(asctime)s] %(levelname)s: %(message)s')
if options.stdin:
content = sys.stdin.read()
UpdatePerfDataFromFetchedContent('12345', content, 'webapp', 'test')
else:
success = UpdatePerfDataFiles()
if not success:
logging.error('Failed to update perf data files.')
sys.exit(0)
GenerateIndexPage()
logging.debug('All done!')
if __name__ == '__main__':
main()
| junmin-zhu/chromium-rivertrail | chrome/test/functional/perf/endure_result_parser.py | Python | bsd-3-clause | 22,820 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-12 20:02
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('myuser', '0003_auto_20160812_1501'),
]
operations = [
migrations.RemoveField(
model_name='myuser',
name='is_active',
),
migrations.RemoveField(
model_name='myuser',
name='is_admin',
),
]
| nessalc/django-basics | mysite/myuser/migrations/0004_auto_20160812_1502.py | Python | mit | 498 |
class Solution(object):
def mySqrt(self, x):
"""
:type x: int
:rtype: int
"""
        if x <= 1: return x
        # Binary search for the integer square root in the range [1, x].
        i, j = 1, x
        while i < j:
            m = (i + j) // 2
            if m * m == x:
                return m
            elif m * m < x:
                # Move up only if the next candidate still fits; otherwise
                # m is the floor of the square root.
                if (m + 1) * (m + 1) <= x:
                    i = m + 1
                else:
                    return m
            else:
                j = m - 1
        return i
print Solution().mySqrt(8) | xiaonanln/myleetcode-python | src/69. Sqrt(x).py | Python | apache-2.0 | 338 |
# -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2017) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from hpOneView.resources.resource import ResourceClient
class MetricStreaming(object):
"""
Metrics API client.
Metrics can be relayed from OneView for managed resources at a specified interval. The following steps can be
followed to enable the metric relay in OneView:
* Get the list of resource types and metrics which can be configured for live streaming
* Configure the live metric stream in OneView
* Receive the stream of metric on MSMB
    The list below describes the structure of the message relayed to MSMB:
startTime (str):
The starting time of the metric collection.
sampleIntervalInSeconds (int):
Interval between samples.
numberOfSamples (int):
Number of samples in the list for each metric type.
resourceType (str):
Identifies the resource type.
resourceDataList (list):
Metric sample list.
uri (str):
Canonical URI of the resource.
category (str):
Identifies the category of resource. The supported devices are server-hardware, enclosures, and
power-devices.
created (timestamp):
Date and time when the resource was created.
modified (timestamp):
Date and time when the resource was last modified.
eTag (str):
Entity tag/version ID of the resource, the same value that is returned in the ETag header on a GET of the
resource.
type (str):
Uniquely identifies the type of the JSON object.
"""
URI = '/rest/metrics'
def __init__(self, con):
self._connection = con
self._client = ResourceClient(con, self.URI)
def get_capability(self):
"""
Fetches the list of resource types and supported metrics that OneView is capable of relaying.
Returns:
list: List of resource types and supported metrics.
"""
return self._client.get(self.URI + "/capability")
def get_configuration(self):
"""
Fetches the current configuration for which metrics are being relayed.
Returns:
list: List of objects which contain frequency, sample interval, and source type for each resource-type.
"""
return self._client.get(self.URI + "/configuration")
def update_configuration(self, configuration):
"""
Updates the metrics configuration with the new values. Overwrites the existing configuration.
Args:
configuration (dict):
Dictionary with a list of objects which contain frequency, sample interval, and source type for each
resource-type.
Returns:
dict: The current configuration for which metrics are being relayed.
"""
return self._client.update(configuration, uri=self.URI + "/configuration")
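# Illustrative sketch (not part of the original module), assuming `connection`
# is an already-established OneView connection object:
#
#   metrics = MetricStreaming(connection)
#   capability = metrics.get_capability()     # resource types/metrics that can be relayed
#   current = metrics.get_configuration()     # metrics currently being relayed
#   updated = metrics.update_configuration(new_configuration)  # dict as described above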
| HewlettPackard/python-hpOneView | hpOneView/resources/data_services/metric_streaming.py | Python | mit | 4,307 |
# -*- coding: UTF-8 -*-
'''
Translate TextLeaf by Google Translate.
'''
__kupfer_name__ = _("Google Translate")
__kupfer_actions__ = ("Translate", "TranslateUrl", 'OpenTranslatePage')
__description__ = _("Translate text with Google Translate")
__version__ = "2010-09-06"
__author__ = "Karol Będkowski <[email protected]>"
import httplib
import locale
import urllib
import re
import socket
from kupfer.objects import Source, Action, TextLeaf, Leaf, UrlLeaf
from kupfer import icons, utils, pretty
try:
import cjson
json_decoder = cjson.decode
except ImportError:
import json
json_decoder = json.loads
_GOOGLE_TRANSLATE_HOST = 'ajax.googleapis.com'
_GOOGLE_TRANSLATE_PATH = '/ajax/services/language/translate?'
_GOOGLE_TRANS_LANG_PATH = '/#'
_GOOGLE_TRANS_LANG_HOST = 'translate.google.com'
_HEADER = {
'Content-type':'application/x-www-form-urlencoded',
'Accept': 'text/xml,application/xml,application/xhtml+xml,text/html',
'Accept-charset': 'utf-8;q=0.7'
}
def _parse_encoding_header(response, default="UTF-8"):
"""Parse response's header for an encoding, that is return 'utf-8' for:
text/html; charset=utf-8
"""
ctype = response.getheader("content-type", "")
parts = ctype.split("charset=", 1)
if len(parts) > 1:
return parts[-1]
return default
def _translate(text, lang):
''' Translate @text to @lang. '''
query_param = urllib.urlencode(dict(v="1.0",langpair="|"+lang,
q=text.encode('utf-8')))
word_classes = {
# TRANS: Dictionary lookup word classes
"noun": _("noun"),
"verb": _("verb"),
"adjective": _("adjective"),
}
try:
conn = httplib.HTTPConnection(_GOOGLE_TRANSLATE_HOST)
conn.connect()
conn.sock.settimeout(10) # set timeout to 10 sec
conn.request("POST", _GOOGLE_TRANSLATE_PATH, query_param, _HEADER)
resp = conn.getresponse()
if resp.status != 200:
raise ValueError('invalid response %d, %s' % (resp.status,
resp.reason))
response_data = resp.read()
encoding = _parse_encoding_header(resp)
response_data = response_data.decode(encoding, 'replace')
pretty.print_debug(__name__, "Translate response:", repr(response_data))
try:
resp = json_decoder(response_data)
yield resp['responseData']['translatedText'], ''
except:
pretty.print_exc(__name__)
yield text, ''
except socket.timeout:
yield _("Google Translate connection timed out"), ""
except (httplib.HTTPException, ValueError), err:
pretty.print_error(__name__, '_translate error', repr(text), lang, err)
yield _("Error connecting to Google Translate"), ""
finally:
conn.close()
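# Illustrative sketch (not part of the original plugin): _translate() is a
# generator yielding (translated_text, description) pairs; it performs a
# network request to the Google endpoint defined above.
#
#   for translated, desc in _translate(u"hello world", "es"):
#       print translated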
_RE_GET_LANG_SELECT = re.compile(
r'\<select[\w\d\s="\'-]*name=tl[\w\d\s="\']*\>(.*)\<\/select\>',
re.UNICODE|re.MULTILINE|re.IGNORECASE)
_RE_GET_LANG = re.compile(r"""\<option[ \w]+ value="([\w\-]+)"\> # code 'zh-TW'
([^<]+?) # match localized lang name
\</option\>
""", re.UNICODE|re.IGNORECASE|re.VERBOSE)
def _load_languages():
''' Load available languages from Google.
Generator: (lang_code, lang name)
'''
user_language = locale.getlocale(locale.LC_MESSAGES)[0]
pretty.print_debug(__name__, '_load_languages')
try:
conn = httplib.HTTPConnection(_GOOGLE_TRANS_LANG_HOST)
conn.connect()
conn.sock.settimeout(10) # set timeout to 10 sec
headers = {
"Accept-Language": "%s, en;q=0.7" % user_language,
}
conn.request("GET", _GOOGLE_TRANS_LANG_PATH, headers=headers)
resp = conn.getresponse()
if resp.status != 200:
raise ValueError('invalid response %d, %s' % (resp.status,
resp.reason))
result = resp.read().decode(_parse_encoding_header(resp), "replace")
result = _RE_GET_LANG_SELECT.findall(result)
if result:
for key, name in _RE_GET_LANG.findall(result[0]):
yield key, name
except socket.timeout:
pretty.print_error(__name__, 'Timed out when loading languages')
except (httplib.HTTPException, ValueError, socket.error), err:
pretty.print_error(__name__, '_load_languages error', type(err), err)
finally:
conn.close()
class Translate (Action):
def __init__(self):
Action.__init__(self, _("Translate To..."))
def activate(self, leaf, iobj):
text = unicode(leaf.object)
dest_lang = iobj.object
return _TranslateQuerySource(text, dest_lang, unicode(iobj))
def is_factory(self):
return True
def item_types(self):
yield TextLeaf
def valid_for_item(self, leaf):
return len(leaf.object.strip()) > 0
def get_description(self):
return _("Translate text with Google Translate")
def get_icon_name(self):
return "accessories-dictionary"
def requires_object(self):
return True
def object_types(self):
yield _Language
def object_source(self, for_item=None):
return _LangSource()
class TranslationLeaf(TextLeaf):
def __init__(self, translation, descr):
TextLeaf.__init__(self, translation)
		self._description = descr
	def get_description(self):
		return self._description or TextLeaf.get_description(self)
class _TranslateQuerySource(Source):
def __init__(self, text, lang, language_name):
Source.__init__(self, name=_("Translate into %s") % language_name)
self._text = text
self._lang = lang
def repr_key(self):
return (hash(self._text), self._lang)
def get_items(self):
for translation, desc in _translate(self._text, self._lang):
yield TranslationLeaf(translation.replace('\\n ', '\n'), desc)
class _Language(Leaf):
serializable = 1
def get_gicon(self):
return icons.ComposedIcon("text-x-generic","preferences-desktop-locale")
# cache for Languages (load it once)
_LANG_CACHE = None
class _LangSource(Source):
def __init__(self):
Source.__init__(self, _("Languages"))
def get_items(self):
global _LANG_CACHE
if not _LANG_CACHE:
_LANG_CACHE = tuple((
_Language(key, name.title())
for key, name in _load_languages()
))
return _LANG_CACHE
def provides(self):
yield _Language
def get_icon_name(self):
return "preferences-desktop-locale"
class TranslateUrl(Action):
def __init__(self):
Action.__init__(self, _("Translate To..."))
def activate(self, leaf, iobj):
dest_lang = iobj.object
params = urllib.urlencode(dict(u=leaf.object, sl='auto', tl=dest_lang ))
url = 'http://translate.google.com/translate?' + params
utils.show_url(url)
def item_types(self):
yield UrlLeaf
def valid_for_item(self, leaf):
return leaf.object.startswith('http://') or leaf.object.startswith('www.')
def get_description(self):
return _("Show translated page in browser")
def get_icon_name(self):
return "accessories-dictionary"
def requires_object(self):
return True
def object_types(self):
yield _Language
def object_source(self, for_item=None):
return _LangSource()
class OpenTranslatePage (Action):
def __init__(self):
Action.__init__(self, _("Show Translation To..."))
def activate(self, leaf, iobj):
text = urllib.quote(unicode(leaf.object).encode('utf-8'))
dest_lang = iobj.object
url = 'http://' + _GOOGLE_TRANSLATE_HOST + _GOOGLE_TRANS_LANG_PATH + \
"#auto|" + dest_lang + "|" + text
utils.show_url(url)
def item_types(self):
yield TextLeaf
def valid_for_item(self, leaf):
return len(leaf.object.strip()) > 0
def get_description(self):
return _("Show translation in browser")
def get_icon_name(self):
return "accessories-dictionary"
def requires_object(self):
return True
def object_types(self):
yield _Language
def object_source(self, for_item=None):
return _LangSource()
| cjparsons74/kupfer | kupfer/plugin/google_translate.py | Python | gpl-3.0 | 7,524 |
import numpy as np
from OpenGL.GL import *
from OpenGL.GLU import *
import time
import freenect
import calibkinect
import pykinectwindow as wxwindow
# I probably need more help with these!
try:
TEXTURE_TARGET = GL_TEXTURE_RECTANGLE
except:
TEXTURE_TARGET = GL_TEXTURE_RECTANGLE_ARB
if not 'win' in globals():
win = wxwindow.Window(size=(640, 480))
def refresh():
win.Refresh()
print type(win)
if not 'rotangles' in globals(): rotangles = [0,0]
if not 'zoomdist' in globals(): zoomdist = 1
if not 'projpts' in globals(): projpts = (None, None)
if not 'rgb' in globals(): rgb = None
def create_texture():
global rgbtex
rgbtex = glGenTextures(1)
glBindTexture(TEXTURE_TARGET, rgbtex)
glTexImage2D(TEXTURE_TARGET,0,GL_RGB,640,480,0,GL_RGB,GL_UNSIGNED_BYTE,None)
if not '_mpos' in globals(): _mpos = None
@win.eventx
def EVT_LEFT_DOWN(event):
global _mpos
_mpos = event.Position
@win.eventx
def EVT_LEFT_UP(event):
global _mpos
_mpos = None
@win.eventx
def EVT_MOTION(event):
global _mpos
if event.LeftIsDown():
if _mpos:
(x,y),(mx,my) = event.Position,_mpos
rotangles[0] += y-my
rotangles[1] += x-mx
refresh()
_mpos = event.Position
@win.eventx
def EVT_MOUSEWHEEL(event):
global zoomdist
dy = event.WheelRotation
zoomdist *= np.power(0.95, -dy)
refresh()
clearcolor = [0,0,0,0]
@win.event
def on_draw():
if not 'rgbtex' in globals():
create_texture()
xyz, uv = projpts
if xyz is None: return
if not rgb is None:
rgb_ = (rgb.astype(np.float32) * 4 + 70).clip(0,255).astype(np.uint8)
glBindTexture(TEXTURE_TARGET, rgbtex)
glTexSubImage2D(TEXTURE_TARGET, 0, 0, 0, 640, 480, GL_RGB, GL_UNSIGNED_BYTE, rgb_);
glClearColor(*clearcolor)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glEnable(GL_DEPTH_TEST)
# flush that stack in case it's broken from earlier
glPushMatrix()
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(60, 4/3., 0.3, 200)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def mouse_rotate(xAngle, yAngle, zAngle):
glRotatef(xAngle, 1.0, 0.0, 0.0);
glRotatef(yAngle, 0.0, 1.0, 0.0);
glRotatef(zAngle, 0.0, 0.0, 1.0);
glScale(zoomdist,zoomdist,1)
glTranslate(0, 0,-3.5)
mouse_rotate(rotangles[0], rotangles[1], 0);
glTranslate(0,0,1.5)
#glTranslate(0, 0,-1)
# Draw some axes
if 0:
glBegin(GL_LINES)
glColor3f(1,0,0); glVertex3f(0,0,0); glVertex3f(1,0,0)
glColor3f(0,1,0); glVertex3f(0,0,0); glVertex3f(0,1,0)
glColor3f(0,0,1); glVertex3f(0,0,0); glVertex3f(0,0,1)
glEnd()
# We can either project the points ourselves, or embed it in the opengl matrix
if 0:
dec = 4
    v,u = np.mgrid[:480,:640].astype(np.uint16)
points = np.vstack((u[::dec,::dec].flatten(),
v[::dec,::dec].flatten(),
depth[::dec,::dec].flatten())).transpose()
points = points[points[:,2]<2047,:]
glMatrixMode(GL_TEXTURE)
glLoadIdentity()
glMultMatrixf(calibkinect.uv_matrix().transpose())
glMultMatrixf(calibkinect.xyz_matrix().transpose())
glTexCoordPointers(np.array(points))
glMatrixMode(GL_MODELVIEW)
glPushMatrix()
glMultMatrixf(calibkinect.xyz_matrix().transpose())
glVertexPointers(np.array(points))
else:
glMatrixMode(GL_TEXTURE)
glLoadIdentity()
glMatrixMode(GL_MODELVIEW)
glPushMatrix()
glVertexPointerf(xyz)
glTexCoordPointerf(uv)
# Draw the points
glPointSize(2)
glEnableClientState(GL_VERTEX_ARRAY)
glEnableClientState(GL_TEXTURE_COORD_ARRAY)
glEnable(TEXTURE_TARGET)
glColor3f(1,1,1)
glDrawElementsui(GL_POINTS, np.array(range(xyz.shape[0])))
glDisableClientState(GL_VERTEX_ARRAY)
glDisableClientState(GL_TEXTURE_COORD_ARRAY)
glDisable(TEXTURE_TARGET)
glPopMatrix()
#
if 0:
inds = np.nonzero(xyz[:,2]>-0.55)
glPointSize(10)
glColor3f(0,1,1)
glEnableClientState(GL_VERTEX_ARRAY)
glDrawElementsui(GL_POINTS, np.array(inds))
glDisableClientState(GL_VERTEX_ARRAY)
if 0:
# Draw only the points in the near plane
glBlendFunc(GL_SRC_ALPHA,GL_ONE_MINUS_SRC_ALPHA)
glEnable(GL_BLEND)
glColor(0.9,0.9,1.0,0.8)
glPushMatrix()
glTranslate(0,0,-0.55)
glScale(0.6,0.6,1)
glBegin(GL_QUADS)
glVertex3f(-1,-1,0); glVertex3f( 1,-1,0);
glVertex3f( 1, 1,0); glVertex3f(-1, 1,0);
glEnd()
glPopMatrix()
glDisable(GL_BLEND)
glPopMatrix()
# A silly loop that shows you can busy the ipython thread while opengl runs
def playcolors():
while 1:
global clearcolor
clearcolor = [np.random.random(),0,0,0]
time.sleep(0.1)
refresh()
# Update the point cloud from the shell or from a background thread!
def update(dt=0):
global projpts, rgb, depth
depth,_ = freenect.sync_get_depth()
rgb,_ = freenect.sync_get_video()
q = depth
X,Y = np.meshgrid(range(640),range(480))
# YOU CAN CHANGE THIS AND RERUN THE PROGRAM!
# Point cloud downsampling
d = 4
projpts = calibkinect.depth2xyzuv(q[::d,::d],X[::d,::d],Y[::d,::d])
refresh()
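# Rough cost of the downsampling above (illustrative arithmetic): with d = 4
# the cloud keeps at most 160*120 = 19200 candidate points per frame instead
# of the full 640*480 = 307200.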
def update_join():
update_on()
try:
_thread.join()
except:
update_off()
def update_on():
global _updating
if not '_updating' in globals(): _updating = False
if _updating: return
_updating = True
from threading import Thread
global _thread
def _run():
while _updating:
update()
_thread = Thread(target=_run)
_thread.start()
def update_off():
global _updating
_updating = False
# Get frames in a loop and display with opencv
def loopcv():
import cv
while 1:
    cv.ShowImage('hi', freenect.sync_get_depth()[0].astype(np.uint8))
cv.WaitKey(10)
update()
#update_on()
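# A small driver sketch (assumes a Kinect is attached; not part of the original
# demo): refresh the cloud from the background thread for a few seconds, then
# stop.  Call it from the interactive shell while the window is open.
def demo_session(seconds=5):
  update_on()
  time.sleep(seconds)
  update_off()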
| Dining-Engineers/left-luggage-detection | misc/demo/ipython/demo_pclview.py | Python | gpl-2.0 | 5,742 |
import wx, os, sys
from wx.lib.mixins.listctrl import CheckListCtrlMixin, ColumnSorterMixin, ListCtrlAutoWidthMixin
from wx.lib.scrolledpanel import ScrolledPanel
from traceback import print_exc
from Tribler.Main.vwxGUI.GuiUtility import GUIUtility
from Tribler.Main.Dialogs.GUITaskQueue import GUITaskQueue
DEBUG = False
class tribler_topButton(wx.Panel):
"""
Button that changes the image shown if you move your mouse over it.
It redraws the background of the parent Panel, if this is an imagepanel with
a variable self.bitmap.
"""
__bitmapCache = {}
ENABLED = 0x1
SELECTED = 0x2
MOUSE_OVER = 0x4
TOGGLED = 0x8
def __init__(self, *args, **kw):
self.ready = False
if len(args) == 0:
self.backgroundColor = wx.WHITE
pre = wx.PrePanel()
# the Create step is done by XRC.
self.PostCreate(pre)
self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate)
else:
self.backgroundColor = ((230,230,230))
wx.Panel.__init__(self, *args, **kw)
self._PostInit()
def OnCreate(self, event):
self.Unbind(wx.EVT_WINDOW_CREATE)
wx.CallAfter(self._PostInit)
event.Skip()
return True
def _PostInit(self):
self.guiUtility = GUIUtility.getInstance()
self.utility = self.guiUtility.utility
self.location = None
self.state = tribler_topButton.ENABLED
self.loadBitmaps()
self.setParentBitmap()
self.SetMinSize(self.bitmaps[0].GetSize())
self.Bind(wx.EVT_MOUSE_EVENTS, self.mouseAction)
self.Bind(wx.EVT_MOVE, self.setParentBitmap)
self.Bind(wx.EVT_SIZE, self.setParentBitmap)
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Refresh()
self.ready = True
def loadBitmaps(self):
self.bitmaps = [None, None]
# get the image directory
self.imagedir = os.path.join(self.guiUtility.vwxGUI_path, 'images')
# find a file with same name as this panel
self.bitmapPath = [os.path.join(self.imagedir, self.GetName()+'.png'), os.path.join(self.imagedir, self.GetName()+'_clicked.png')]
i = 0
for img in self.bitmapPath:
if not os.path.isfile(img):
print >>sys.stderr,"TopButton: Could not find image:",img
try:
if img not in tribler_topButton.__bitmapCache:
tribler_topButton.__bitmapCache[img] = wx.Bitmap(img, wx.BITMAP_TYPE_ANY)
self.bitmaps[i] = tribler_topButton.__bitmapCache[img]
except:
print_exc()
i+=1
def setEnabled(self, enabled):
if enabled:
self.state = self.state | tribler_topButton.ENABLED
else:
            self.state = self.state & ~tribler_topButton.ENABLED
self.Refresh()
def IsEnabled(self):
return self.state & tribler_topButton.ENABLED
def mouseAction(self, event):
event.Skip()
if event.Entering():
self.state = self.state | tribler_topButton.MOUSE_OVER
self.Refresh()
elif event.Leaving():
self.state = self.state ^ tribler_topButton.MOUSE_OVER
self.Refresh()
def setParentBitmap(self, event = None):
try:
parent = self.GetParent()
bitmap = parent.bitmap
location = self.GetPosition()
if location != self.location:
rect = [location[0], location[1], self.GetClientSize()[0], self.GetClientSize()[1]]
bitmap = self.getBitmapSlice(bitmap, rect)
self.parentBitmap = bitmap
self.Refresh()
self.location = location
except:
self.parentBitmap = None
try:
parent = self.GetParent()
self.parentColor = parent.GetBackgroundColour()
except:
self.parentColor = None
def getBitmapSlice(self, bitmap, rect):
try:
bitmapSize = bitmap.GetSize()
rects = []
rect[0] = max(0, rect[0])
rect[1] = max(0, rect[1])
#this bitmap could be smaller than the actual requested rect, due to repeated background
#using % to modify start location
if rect[0] > bitmapSize[0] or rect[1] > bitmapSize[1]:
rect[0] %= bitmapSize[0]
rect[1] %= bitmapSize[1]
rect[2] = min(rect[2], bitmapSize[0])
rect[3] = min(rect[3], bitmapSize[1])
            #request one part of the background starting at the (possibly wrapped) offset
additionalWidth = rect[2]
additionalHeight = rect[3]
if rect[0] + rect[2] > bitmapSize[0]:
additionalWidth = bitmapSize[0] - rect[0]
if rect[1] + rect[3] > bitmapSize[1]:
additionalHeight = bitmapSize[1] - rect[1]
rects.append(((0,0),[rect[0], rect[1], additionalWidth, additionalHeight]))
#check if image is smaller than requested width
if rect[0] + rect[2] > bitmapSize[0]:
additionalWidth = rect[0]
additionalHeight = bitmapSize[1]
if rect[1] + rect[3] > bitmapSize[1]:
additionalHeight = bitmapSize[1] - rect[1]
rects.append(((bitmapSize[0]-rect[0], 0),[0, rect[1], additionalWidth, additionalHeight]))
#check if image is smaller than requested height
if rect[1]+ rect[3] > bitmapSize[1]:
additionalWidth = bitmapSize[0]
additionalHeight = rect[1]
if rect[0] + rect[2] > bitmapSize[0]:
additionalWidth = bitmapSize[0] - rect[0]
rects.append(((0,bitmapSize[1] - rect[1]),[rect[0], 0, additionalWidth, additionalHeight]))
#if both width and height were smaller
if rect[0] + rect[2] > bitmapSize[0] and rect[1] + rect[3] > bitmapSize[1]:
rects.append(((bitmapSize[0]-rect[0],bitmapSize[1] - rect[1]),[0,0,rect[0],rect[1]]))
bmp = wx.EmptyBitmap(rect[2], rect[3])
dc = wx.MemoryDC(bmp)
for location, rect in rects:
subbitmap = bitmap.GetSubBitmap(rect)
dc.DrawBitmapPoint(subbitmap, location)
dc.SelectObject(wx.NullBitmap)
return bmp
except:
if DEBUG:
print_exc()
return None
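    # Worked illustration of getBitmapSlice with invented numbers: for a
    # 100x100 parent bitmap and rect = [90, 90, 30, 30] it collects four
    # sub-rectangles (the 10x10 corner at the origin plus three wrapped
    # strips), so a repeated background still tiles seamlessly.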
def setBackground(self, wxColor):
self.backgroundColor = wxColor
self.Refresh()
def GetBitmap(self):
if (self.state & tribler_topButton.MOUSE_OVER) and self.bitmaps[1]:
return self.bitmaps[1]
return self.bitmaps[0]
def OnPaint(self, evt):
if self.ready:
dc = wx.BufferedPaintDC(self)
dc.SetBackground(wx.Brush(self.backgroundColor))
dc.Clear()
if self.parentBitmap:
dc.SetPen(wx.TRANSPARENT_PEN)
dc.SetBrush(wx.BrushFromBitmap(self.parentBitmap))
w, h = self.GetClientSize()
dc.DrawRectangle(0, 0, w, h)
elif self.parentColor:
dc.SetPen(wx.TRANSPARENT_PEN)
dc.SetBrush(wx.Brush(self.parentColor))
w, h = self.GetClientSize()
dc.DrawRectangle(0, 0, w, h)
if not self.IsEnabled():
return
bitmap = self.GetBitmap()
if bitmap:
dc.DrawBitmap(bitmap, 0,0, True)
class SwitchButton(tribler_topButton):
__bitmapCache = {}
def loadBitmaps(self):
self.bitmaps = [None, None, None, None]
# get the image directory
imagedir = os.path.join(self.guiUtility.vwxGUI_path, 'images')
# find a file with same name as this panel
bitmapPath = [os.path.join(imagedir, self.GetName()+'.png'),
os.path.join(imagedir, self.GetName()+'_clicked.png'),
os.path.join(imagedir, self.GetName()+'Enabled.png'),
os.path.join(imagedir, self.GetName()+'Enabled_clicked.png')
]
i = 0
for img in bitmapPath:
if not os.path.isfile(img):
print >>sys.stderr,"SwitchButton: Could not find image:",img
try:
if img not in SwitchButton.__bitmapCache:
SwitchButton.__bitmapCache[img] = wx.Bitmap(img, wx.BITMAP_TYPE_ANY)
self.bitmaps[i] = SwitchButton.__bitmapCache[img]
except:
print_exc()
i+=1
def setToggled(self, b):
if b:
self.state = self.state | tribler_topButton.TOGGLED
else:
            self.state = self.state & ~tribler_topButton.TOGGLED
self.Refresh()
def isToggled(self):
return self.state & tribler_topButton.TOGGLED
def GetBitmap(self):
add = 0
if self.isToggled():
add = 2
if (self.state & tribler_topButton.MOUSE_OVER) and self.bitmaps[1+add]:
return self.bitmaps[1+add]
return self.bitmaps[0+add]
class settingsButton(tribler_topButton):
"""
Button with three states in the settings overview
"""
__bitmapCache = {}
def __init__(self, *args, **kw):
tribler_topButton.__init__(self, *args, **kw)
self.selected = 1
def _PostInit(self):
tribler_topButton._PostInit(self)
def loadBitmaps(self):
self.bitmaps = [None, None, None]
# get the image directory
imagedir = os.path.join(self.guiUtility.vwxGUI_path, 'images')
# find a file with same name as this panel
bitmapPath = [os.path.join(imagedir, self.GetName()+'_state1.png'),
os.path.join(imagedir, self.GetName()+'_state2.png'),
os.path.join(imagedir, self.GetName()+'_state3.png')]
i = 0
for img in bitmapPath:
if not os.path.isfile(img):
print >>sys.stderr,"TopButton: Could not find image:",img
try:
if img not in settingsButton.__bitmapCache:
settingsButton.__bitmapCache[img] = wx.Bitmap(img, wx.BITMAP_TYPE_ANY)
self.bitmaps[i] = settingsButton.__bitmapCache[img]
except:
print_exc()
i+=1
def setSelected(self, sel):
self.selected = sel
self.Refresh()
def getSelected(self):
return self.selected
def mouseAction(self, event):
pass
def GetBitmap(self):
return self.bitmaps[self.selected]
class LinkStaticText(wx.Panel):
def __init__(self, parent, text, icon = "bullet_go.png", font_increment = 0):
wx.Panel.__init__(self, parent, style = wx.NO_BORDER)
self.SetBackgroundColour(parent.GetBackgroundColour())
hSizer = wx.BoxSizer(wx.HORIZONTAL)
self.text = wx.StaticText(self, -1, text)
font = self.text.GetFont()
font.SetUnderlined(True)
font.SetPointSize(font.GetPointSize() + font_increment)
self.text.SetFont(font)
self.text.SetForegroundColour('#0473BB')
self.text.SetCursor(wx.StockCursor(wx.CURSOR_HAND))
hSizer.Add(self.text, 0, wx.ALIGN_CENTER_VERTICAL|wx.RIGHT, 3)
if icon:
self.icon = wx.StaticBitmap(self, bitmap = wx.Bitmap(os.path.join(GUIUtility.getInstance().vwxGUI_path, 'images', icon), wx.BITMAP_TYPE_ANY))
self.icon.SetCursor(wx.StockCursor(wx.CURSOR_HAND))
hSizer.Add(self.icon, 0, wx.ALIGN_CENTER_VERTICAL)
self.SetSizer(hSizer)
self.SetCursor(wx.StockCursor(wx.CURSOR_HAND))
def SetToolTipString(self, tip):
wx.Panel.SetToolTipString(self, tip)
self.text.SetToolTipString(tip)
if getattr(self, 'icon', False):
self.icon.SetToolTipString(tip)
def SetLabel(self, text):
self.text.SetLabel(text)
def Bind(self, event, handler, source=None, id=-1, id2=-1):
wx.Panel.Bind(self, event, handler, source, id, id2)
self.text.Bind(event, handler, source, id, id2)
if getattr(self, 'icon', False):
self.icon.Bind(event, handler, source, id, id2)
class AutoWidthListCtrl(wx.ListCtrl, ListCtrlAutoWidthMixin):
def __init__(self, parent, style):
wx.ListCtrl.__init__(self, parent, style=style)
ListCtrlAutoWidthMixin.__init__(self)
class SortedListCtrl(wx.ListCtrl, ColumnSorterMixin, ListCtrlAutoWidthMixin):
def __init__(self, parent, numColumns, style = wx.LC_REPORT|wx.LC_NO_HEADER, tooltip = True):
wx.ListCtrl.__init__(self, parent, -1, style=style)
ColumnSorterMixin.__init__(self, numColumns)
ListCtrlAutoWidthMixin.__init__(self)
self.itemDataMap = {}
if tooltip:
self.Bind(wx.EVT_MOTION, self.OnMouseMotion)
def GetListCtrl(self):
return self
def OnMouseMotion(self, event):
tooltip = ''
row, _ = self.HitTest(event.GetPosition())
if row >= 0:
try:
for col in xrange(self.GetColumnCount()):
tooltip += self.GetItem(row, col).GetText() + "\t"
if len(tooltip) > 0:
tooltip = tooltip[:-1]
except:
pass
self.SetToolTipString(tooltip)
class SelectableListCtrl(SortedListCtrl):
def __init__(self, parent, numColumns, style = wx.LC_REPORT|wx.LC_NO_HEADER, tooltip = True):
SortedListCtrl.__init__(self, parent, numColumns, style, tooltip)
self.Bind(wx.EVT_KEY_DOWN, self._CopyToClipboard)
def _CopyToClipboard(self, event):
if event.ControlDown():
if event.GetKeyCode() == 67: #ctrl + c
data = ""
selected = self.GetFirstSelected()
while selected != -1:
for col in xrange(self.GetColumnCount()):
data += self.GetItem(selected, col).GetText() + "\t"
data += "\n"
selected = self.GetNextSelected(selected)
do = wx.TextDataObject()
do.SetText(data)
wx.TheClipboard.Open()
wx.TheClipboard.SetData(do)
wx.TheClipboard.Close()
elif event.GetKeyCode() == 65: #ctrl + a
for index in xrange(self.GetItemCount()):
self.Select(index)
class TextCtrlAutoComplete(wx.TextCtrl):
def __init__ (self, parent, choices = [], entrycallback = None, selectcallback = None, **therest):
'''
Constructor works just like wx.TextCtrl except you can pass in a list of choices.
You can also change the choice list at any time by calling SetChoices.
'''
if therest.has_key('style'):
therest['style']=wx.TE_PROCESS_ENTER|therest['style']
else:
therest['style']= wx.TE_PROCESS_ENTER
wx.TextCtrl.__init__(self , parent , **therest)
# we need the GUITaskQueue to offload database activity, otherwise we may lock the GUI
self.text = ""
self.guiserver = GUITaskQueue.getInstance()
self.screenheight = wx.SystemSettings.GetMetric(wx.SYS_SCREEN_Y)
self.dropdown = wx.PopupWindow(self)
self.dropdown.SetBackgroundColour(wx.WHITE)
sizer = wx.BoxSizer()
self.dropdownlistbox = AutoWidthListCtrl(self.dropdown, style=wx.LC_REPORT | wx.BORDER_NONE | wx.LC_SINGLE_SEL | wx.LC_NO_HEADER)
self.dropdownlistbox.Bind(wx.EVT_LEFT_DOWN, self.ListClick)
self.dropdownlistbox.Bind(wx.EVT_LEFT_DCLICK, self.ListClick)
sizer.Add(self.dropdownlistbox, 1, wx.EXPAND|wx.ALL, 3)
self.dropdown.SetSizer(sizer)
self.SetChoices(choices)
self.entrycallback = entrycallback
self.selectcallback = selectcallback
gp = self
        while gp is not None:
gp.Bind (wx.EVT_MOVE , self.ControlChanged, gp)
gp.Bind (wx.EVT_SIZE , self.ControlChanged, gp)
gp = gp.GetParent()
self.Bind (wx.EVT_KILL_FOCUS, self.ControlChanged, self)
self.Bind (wx.EVT_TEXT , self.EnteredText, self)
self.Bind (wx.EVT_KEY_DOWN , self.KeyDown, self)
self.Bind (wx.EVT_LEFT_DOWN , self.ClickToggleDown, self)
self.Bind (wx.EVT_LEFT_UP , self.ClickToggleUp, self)
self.dropdown.Bind (wx.EVT_LISTBOX , self.ListItemSelected, self.dropdownlistbox)
def ListClick(self, evt):
toSel, _ = self.dropdownlistbox.HitTest(evt.GetPosition())
if toSel == -1:
return
self.dropdownlistbox.Select(toSel)
self.SetValueFromSelected()
def SetChoices (self, choices = [""]) :
''' Sets the choices available in the popup wx.ListBox. '''
self.choices = choices
        #delete, if needed, all the previous data
if self.dropdownlistbox.GetColumnCount() != 0:
self.dropdownlistbox.DeleteAllColumns()
self.dropdownlistbox.DeleteAllItems()
self.dropdownlistbox.InsertColumn(0, "Select")
for num, it in enumerate(choices):
self.dropdownlistbox.InsertStringItem(num, it)
itemcount = min(len(choices), 7) + 2
charheight = self.dropdownlistbox.GetCharHeight()
self.popupsize = wx.Size(self.GetClientSize()[0], (charheight*itemcount) + 6)
self.dropdown.SetClientSize(self.popupsize)
self.dropdown.Layout()
def ControlChanged (self, event) :
self.ShowDropDown(False)
event.Skip()
def EnteredText (self, event):
text = event.GetString()
if text != self.text:
self.text = text
if self.entrycallback:
def wx_callback(choices):
"""
Will update the gui IF the user did not yet change the input text
"""
if text == self.text:
self.SetChoices(choices)
if len(self.choices) == 0:
self.ShowDropDown(False)
else:
self.ShowDropDown(True)
def db_callback():
"""
Will try to find completions in the database IF the user did not yet change the
input text
"""
if text == self.text:
choices = self.entrycallback(text)
wx.CallAfter(wx_callback, choices)
self.guiserver.add_task(db_callback)
event.Skip()
def KeyDown (self, event) :
skip = True
sel = self.dropdownlistbox.GetFirstSelected()
visible = self.dropdown.IsShown()
if event.GetKeyCode() == wx.WXK_DOWN :
if sel < (self.dropdownlistbox.GetItemCount () - 1) :
self.dropdownlistbox.Select (sel+1)
self.ListItemVisible()
self.ShowDropDown ()
skip = False
if event.GetKeyCode() == wx.WXK_UP :
if sel > 0 :
self.dropdownlistbox.Select (sel - 1)
self.ListItemVisible()
self.ShowDropDown ()
skip = False
if visible :
if event.GetKeyCode() == wx.WXK_RETURN or event.GetKeyCode() == wx.WXK_SPACE:
if sel > -1:
skip = event.GetKeyCode() == wx.WXK_RETURN
self.SetValueFromSelected()
if event.GetKeyCode() == wx.WXK_ESCAPE :
self.ShowDropDown(False)
skip = False
if skip:
event.Skip()
def ClickToggleDown (self, event) :
self.lastinsertionpoint = self.GetInsertionPoint()
event.Skip ()
def ClickToggleUp (self, event) :
if (self.GetInsertionPoint() == self.lastinsertionpoint) :
self.ShowDropDown (not self.dropdown.IsShown())
event.Skip ()
def SetValueFromSelected(self, doCallback = False) :
'''
Sets the wx.TextCtrl value from the selected wx.ListBox item.
Will do nothing if no item is selected in the wx.ListBox.
'''
sel = self.dropdownlistbox.GetFirstSelected()
if sel > -1 :
newval = self.dropdownlistbox.GetItemText(sel)
self.SetValue(newval)
self.SetInsertionPoint(len(newval))
self.selectcallback()
def ShowDropDown(self, show = True) :
''' Either display the drop down list (show = True) or hide it (show = False). '''
if show:
show = len(self.choices) > 0
if show:
focusWin = wx.Window.FindFocus()
show = focusWin == self
if show and not self.dropdown.IsShown():
size = self.dropdown.GetSize()
width, height = self.GetSizeTuple()
x, y = self.ClientToScreenXY (0, height)
            if size.GetWidth() != width:
size.SetWidth(width)
self.dropdown.SetSize(size)
if (y + size.GetHeight()) < self.screenheight :
self.dropdown.SetPosition (wx.Point(x, y))
else:
self.dropdown.SetPosition (wx.Point(x, y - height - size.GetHeight()))
self.dropdown.Show(show)
def ListItemVisible(self) :
''' Moves the selected item to the top of the list ensuring it is always visible. '''
self.dropdownlistbox.EnsureVisible(self.dropdownlistbox.GetFirstSelected())
def ListItemSelected (self, event) :
self.SetValueFromSelected()
event.Skip()
return self
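# A minimal usage sketch (illustrative only; 'parent' and 'terms' are not
# Tribler objects): entrycallback receives the typed text and returns the new
# completion list, selectcallback is called without arguments on selection.
def example_autocomplete(parent, terms):
    def complete(text):
        return [t for t in terms if t.lower().startswith(text.lower())]
    return TextCtrlAutoComplete(parent, choices=list(terms),
                                entrycallback=complete,
                                selectcallback=lambda: None)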
class ImageScrollablePanel(ScrolledPanel):
def __init__(self, parent, id=-1, pos=wx.DefaultPosition, size=wx.DefaultSize, style=wx.HSCROLL|wx.VSCROLL):
ScrolledPanel.__init__(self, parent, id, pos, size, style)
self.bitmap = None
wx.EVT_PAINT(self, self.OnPaint)
def OnPaint(self, evt):
if self.bitmap:
obj = evt.GetEventObject()
dc = wx.BufferedPaintDC(obj)
dc.SetPen(wx.TRANSPARENT_PEN)
dc.SetBrush(wx.BrushFromBitmap(self.bitmap))
w, h = self.GetClientSize()
dc.DrawRectangle(0, 0, w, h)
else:
evt.Skip()
def SetBitmap(self, bitmap):
self.bitmap = bitmap
self.Refresh()
| egbertbouman/tribler-g | Tribler/Main/vwxGUI/tribler_topButton.py | Python | lgpl-2.1 | 24,111 |
"""Video Analyzer"""
| peragro/peragro-at | src/damn_at/analyzers/video/__init__.py | Python | bsd-3-clause | 21 |
# coding=utf-8
# Copyright 2022 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluates an InfoGAN TFGAN trained MNIST model.
The image visualizations, as in https://arxiv.org/abs/1606.03657, show the
effect of varying a specific latent variable on the image. Each visualization
focuses on one of the three structured variables. Columns have two of the three
variables fixed, while the third one is varied. Different rows have different
random samples from the remaining latents.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import tensorflow.compat.v1 as tf
from tensorflow_gan.examples.mnist import infogan_eval_lib
flags.DEFINE_string('checkpoint_dir', '/tmp/mnist/',
'Directory where the model was written to.')
flags.DEFINE_string('eval_dir', '/tmp/mnist/',
'Directory where the results are saved to.')
flags.DEFINE_integer(
'noise_samples', 6,
'Number of samples to draw from the continuous structured '
'noise.')
flags.DEFINE_integer('unstructured_noise_dims', 62,
'The number of dimensions of the unstructured noise.')
flags.DEFINE_integer('continuous_noise_dims', 2,
'The number of dimensions of the continuous noise.')
flags.DEFINE_integer(
'max_number_of_evaluations', None,
'Number of times to run evaluation. If `None`, run '
'forever.')
flags.DEFINE_boolean('write_to_disk', True, 'If `True`, write images to disk.')
FLAGS = flags.FLAGS
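# Example invocation (paths are placeholders; all flags keep the defaults
# declared above unless overridden):
#
#   python infogan_eval.py --checkpoint_dir=/tmp/mnist/ --eval_dir=/tmp/mnist/ \
#     --noise_samples=6 --max_number_of_evaluations=1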
def main(_):
hparams = infogan_eval_lib.HParams(
FLAGS.checkpoint_dir, FLAGS.eval_dir, FLAGS.noise_samples,
FLAGS.unstructured_noise_dims, FLAGS.continuous_noise_dims,
FLAGS.max_number_of_evaluations,
FLAGS.write_to_disk)
infogan_eval_lib.evaluate(hparams, run_eval_loop=True)
if __name__ == '__main__':
tf.disable_v2_behavior()
app.run(main)
| tensorflow/gan | tensorflow_gan/examples/mnist/infogan_eval.py | Python | apache-2.0 | 2,490 |
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import os
from six.moves import configparser
import requests
import json
class Credentials(object):
"""
Holds the credentials needed to authenticate requests. In addition
the Credential object knows how to search for credentials and how
to choose the right credentials when multiple credentials are found.
"""
def __init__(self, access_key=None, secret_key=None, token=None):
self.access_key = access_key
self.secret_key = secret_key
self.token = token
def _search_md(url='http://169.254.169.254/latest/meta-data/iam/'):
d = {}
try:
r = requests.get(url, timeout=.1)
if r.content:
fields = r.content.split('\n')
for field in fields:
if field.endswith('/'):
                    # nested category: recurse into it to collect its values
                    d[field[0:-1]] = _search_md(url + field)
else:
val = requests.get(url + field).content
if val[0] == '{':
val = json.loads(val)
else:
p = val.find('\n')
if p > 0:
                            val = val.split('\n')
d[field] = val
except (requests.Timeout, requests.ConnectionError):
pass
return d
def search_metadata(**kwargs):
credentials = None
metadata = _search_md()
# Assuming there's only one role on the instance profile.
if metadata:
metadata = metadata['iam']['security-credentials'].values()[0]
credentials = Credentials(metadata['AccessKeyId'],
metadata['SecretAccessKey'],
metadata['Token'])
return credentials
def search_environment(**kwargs):
"""
Search for credentials in explicit environment variables.
"""
credentials = None
access_key = os.environ.get(kwargs['access_key_name'].upper(), None)
secret_key = os.environ.get(kwargs['secret_key_name'].upper(), None)
if access_key and secret_key:
credentials = Credentials(access_key, secret_key)
return credentials
def search_file(**kwargs):
"""
If the 'AWS_CREDENTIAL_FILE' environment variable exists, parse that
file for credentials.
"""
credentials = None
if 'AWS_CREDENTIAL_FILE' in os.environ:
persona = kwargs.get('persona', 'default')
access_key_name = kwargs['access_key_name']
secret_key_name = kwargs['secret_key_name']
access_key = secret_key = None
path = os.getenv('AWS_CREDENTIAL_FILE')
path = os.path.expandvars(path)
path = os.path.expanduser(path)
cp = configparser.RawConfigParser()
cp.read(path)
if not cp.has_section(persona):
raise ValueError('Persona: %s not found' % persona)
if cp.has_option(persona, access_key_name):
access_key = cp.get(persona, access_key_name)
else:
access_key = None
if cp.has_option(persona, secret_key_name):
secret_key = cp.get(persona, secret_key_name)
else:
secret_key = None
if access_key and secret_key:
credentials = Credentials(access_key, secret_key)
return credentials
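# The credential file parsed above is plain INI, keyed by persona section; a
# matching example (values are invented) would be:
#
#   [default]
#   access_key = AKIDEXAMPLE
#   secret_key = wJalrXUtnFEMI/K7MDENGbPxRfiCYEXAMPLEKEY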
def search_boto_config(**kwargs):
"""
Look for credentials in boto config file.
"""
credentials = access_key = secret_key = None
if 'BOTO_CONFIG' in os.environ:
paths = [os.environ['BOTO_CONFIG']]
else:
paths = ['/etc/boto.cfg', '~/.boto']
paths = [os.path.expandvars(p) for p in paths]
paths = [os.path.expanduser(p) for p in paths]
cp = configparser.RawConfigParser()
cp.read(paths)
if cp.has_section('Credentials'):
access_key = cp.get('Credentials', 'aws_access_key_id')
secret_key = cp.get('Credentials', 'aws_secret_access_key')
if access_key and secret_key:
credentials = Credentials(access_key, secret_key)
return credentials
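# A matching ~/.boto or /etc/boto.cfg fragment (values are invented) would be:
#
#   [Credentials]
#   aws_access_key_id = AKIDEXAMPLE
#   aws_secret_access_key = wJalrXUtnFEMI/K7MDENGbPxRfiCYEXAMPLEKEY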
AllCredentialFunctions = [search_environment,
search_file,
search_boto_config,
search_metadata]
def get_credentials(persona='default'):
for cred_fn in AllCredentialFunctions:
credentials = cred_fn(persona=persona,
access_key_name='access_key',
secret_key_name='secret_key')
if credentials:
break
return credentials
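# A small usage sketch (not part of boto): take whatever the first matching
# provider returned and fail loudly if nothing was found.
def example_usage():
    credentials = get_credentials()
    if credentials is None:
        raise RuntimeError('no credentials found in environment, files, '
                           'or instance metadata')
    return credentials.access_key, credentials.secret_key, credentials.token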
| Shouqun/node-gn | tools/depot_tools/third_party/boto/core/credentials.py | Python | mit | 5,644 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'AnswerReference.jurisdiction'
db.add_column('website_answerreference', 'jurisdiction', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Jurisdiction'], null=True, blank=True), keep_default=False)
# Changing field 'AnswerReference.template'
db.alter_column('website_answerreference', 'template_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Template'], null=True))
def backwards(self, orm):
# Deleting field 'AnswerReference.jurisdiction'
db.delete_column('website_answerreference', 'jurisdiction_id')
# Changing field 'AnswerReference.template'
db.alter_column('website_answerreference', 'template_id', self.gf('django.db.models.fields.related.ForeignKey')(default=0, to=orm['website.Template']))
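    # Typical South invocation for this schema change, assuming the project's
    # usual manage.py setup (migration numbers omitted):
    #
    #   ./manage.py migrate website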
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'subscribed_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'website.action': {
'Meta': {'object_name': 'Action'},
'action_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ActionCategory']", 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.actioncategory': {
'Meta': {'object_name': 'ActionCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.address': {
'Meta': {'object_name': 'Address'},
'address1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'website.answerchoice': {
'Meta': {'object_name': 'AnswerChoice'},
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']"}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'website.answerchoicegroup': {
'Meta': {'object_name': 'AnswerChoiceGroup'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'website.answerreference': {
'Meta': {'object_name': 'AnswerReference'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'file_upload': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']", 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.applicability': {
'Meta': {'object_name': 'Applicability'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.application': {
'Meta': {'object_name': 'Application'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'applicant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'current_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']", 'null': 'True', 'blank': 'True'})
},
'website.applicationanswer': {
'Meta': {'object_name': 'ApplicationAnswer'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Application']"}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'file_upload': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']"}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.applicationhistory': {
'Meta': {'object_name': 'ApplicationHistory'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Application']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'status_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'})
},
'website.document': {
'Meta': {'object_name': 'Document'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'file_path': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Region']", 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'website.documentcategory': {
'Meta': {'object_name': 'DocumentCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.jurisdiction': {
'Meta': {'object_name': 'Jurisdiction'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Region']", 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.organization': {
'Meta': {'object_name': 'Organization'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.OrganizationCategory']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'fax': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'parent_org': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'phone': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'website.organizationaddress': {
'Meta': {'object_name': 'OrganizationAddress'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'address_type': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'})
},
'website.organizationcategory': {
'Meta': {'object_name': 'OrganizationCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.organizationmember': {
'Meta': {'object_name': 'OrganizationMember'},
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Person']", 'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RoleType']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.organizationrating': {
'Meta': {'object_name': 'OrganizationRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.person': {
'Meta': {'object_name': 'Person'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'phone_primary': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'phone_secondary': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.personaddress': {
'Meta': {'object_name': 'PersonAddress'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'address_type': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Person']", 'null': 'True', 'blank': 'True'})
},
'website.question': {
'Meta': {'object_name': 'Question'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']", 'null': 'True', 'blank': 'True'}),
'applicability': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Applicability']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'default_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'form_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instruction': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.questioncategory': {
'Meta': {'object_name': 'QuestionCategory'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.ratingcategory': {
'Meta': {'object_name': 'RatingCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rating_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.ratinglevel': {
'Meta': {'object_name': 'RatingLevel'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rank': ('django.db.models.fields.PositiveSmallIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'website.reaction': {
'Meta': {'object_name': 'Reaction'},
'action': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Action']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ReactionCategory']", 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'reaction_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.reactioncategory': {
'Meta': {'object_name': 'ReactionCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.region': {
'Meta': {'object_name': 'Region'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.rewardcategory': {
'Meta': {'object_name': 'RewardCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.roletype': {
'Meta': {'object_name': 'RoleType'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.template': {
'Meta': {'object_name': 'Template'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template_type': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.templatequestion': {
'Meta': {'object_name': 'TemplateQuestion'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']"})
},
'website.userdetail': {
'Meta': {'object_name': 'UserDetail'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userrating': {
'Meta': {'object_name': 'UserRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userreward': {
'Meta': {'object_name': 'UserReward'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reward': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RewardCategory']", 'null': 'True', 'blank': 'True'}),
'reward_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.zipcode': {
'Meta': {'object_name': 'Zipcode'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '2', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'})
}
}
complete_apps = ['website']
| solarpermit/solarpermit | website/migrations/0007_auto__add_field_answerreference_jurisdiction__chg_field_answerreferenc.py | Python | bsd-3-clause | 37,751 |
'''
Subversion module Status class submodule
'''
import logging
import subprocess
import sys
from repoman._portage import portage
from portage import os
from portage.const import BASH_BINARY
from portage.output import red, green
from portage import _unicode_encode, _unicode_decode
from repoman._subprocess import repoman_popen
class Status(object):
'''Performs status checks on the svn repository'''
def __init__(self, qatracker, eadded):
'''Class init
@param qatracker: QATracker class instance
@param eadded: list
'''
self.qatracker = qatracker
self.eadded = eadded
def check(self, checkdir, checkdir_relative, xpkg):
'''Perform the svn status check
@param checkdir: string of the directory being checked
@param checkdir_relative: string of the relative directory being checked
@param xpkg: string of the package being checked
@returns: boolean
'''
try:
myf = repoman_popen(
"svn status --depth=files --verbose " +
portage._shell_quote(checkdir))
myl = myf.readlines()
myf.close()
except IOError:
raise
for l in myl:
if l[:1] == "?":
continue
if l[:7] == ' >':
# tree conflict, new in subversion 1.6
continue
l = l.split()[-1]
if l[-7:] == ".ebuild":
self.eadded.append(os.path.basename(l[:-7]))
try:
myf = repoman_popen(
"svn status " +
portage._shell_quote(checkdir))
myl = myf.readlines()
myf.close()
except IOError:
raise
for l in myl:
if l[0] == "A":
l = l.rstrip().split(' ')[-1]
if l[-7:] == ".ebuild":
self.eadded.append(os.path.basename(l[:-7]))
return True
@staticmethod
def detect_conflicts(options):
"""Determine if the checkout has problems like cvs conflicts.
If you want more vcs support here just keep adding if blocks...
This could be better.
TODO(antarus): Also this should probably not call sys.exit() as
repoman is run on >1 packages and one failure should not cause
subsequent packages to fail.
Args:
			options - the repoman options object (only options.pretend is used here)
Returns: boolean
(calls sys.exit on fatal problems)
"""
cmd = "svn status -u 2>&1 | egrep -v '^. +.*/digest-[^/]+' | head -n-1"
msg = ("Performing a %s with a little magic grep to check for updates."
% green("svn status -u"))
logging.info(msg)
# Use Popen instead of getstatusoutput(), in order to avoid
# unicode handling problems (see bug #310789).
args = [BASH_BINARY, "-c", cmd]
args = [_unicode_encode(x) for x in args]
proc = subprocess.Popen(
args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = _unicode_decode(proc.communicate()[0])
proc.wait()
mylines = out.splitlines()
myupdates = []
for line in mylines:
if not line:
continue
# [ ] Unmodified (SVN) [U] Updates [P] Patches
# [M] Modified [A] Added [R] Removed / Replaced
# [D] Deleted
if line[0] not in " UPMARD":
				# Stray Manifest is fine, we will re-add it anyway.
if line[0] == '?' and line[1:].lstrip() == 'Manifest':
continue
				logging.error(red(
					"!!! Please fix the following issues reported "
					"from svn: %s" % green("(U,P,M,A,R,D are ok)")))
logging.error(red(
"!!! Note: This is a pretend/no-modify pass..."))
logging.error(out)
sys.exit(1)
elif line[8] == '*':
myupdates.append(line[9:].lstrip(" 1234567890"))
if myupdates:
logging.info(green("Fetching trivial updates..."))
if options.pretend:
logging.info("(svn update " + " ".join(myupdates) + ")")
retval = os.EX_OK
else:
retval = os.system("svn update " + " ".join(myupdates))
if retval != os.EX_OK:
logging.fatal("!!! svn exited with an error. Terminating.")
sys.exit(retval)
return False
@staticmethod
def supports_gpg_sign():
'''Does this vcs system support gpg commit signatures
@returns: Boolean
'''
return False
@staticmethod
def isVcsDir(dirname):
'''Does the directory belong to the vcs system
@param dirname: string, directory name
@returns: Boolean
'''
return dirname in [".svn"]
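# Illustrative call sites (a sketch; repoman itself normally drives these methods):
#   Status.isVcsDir('.svn')       -> True
#   Status.supports_gpg_sign()    -> False
#   Status.detect_conflicts(opts) runs "svn status -u" and only needs a boolean
#   opts.pretend attribute; it calls sys.exit() on unexpected status codes.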
| hackers-terabit/portage | repoman/pym/repoman/modules/vcs/svn/status.py | Python | gpl-2.0 | 4,051 |
from random import choice
minusculas = "abcdefgh"
maiusculas = "ABCDEFGH"
senha = []
pos = 0
while pos < 8:
senha.append(choice(maiusculas))
senha.append(choice(minusculas))
pos += 1
print(''.join(senha))
#https://pt.stackoverflow.com/q/461052/101
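# Illustrative variation (not part of the original answer): the same
# alternating upper/lower pattern written with a generator expression and
# secrets.choice (Python 3.6+). Left commented out so the script still prints
# only one password when run.
# from secrets import choice as schoice
# print(''.join(schoice(maiusculas) + schoice(minusculas) for _ in range(8)))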
| bigown/SOpt | Python/String/PasswordGenerator.py | Python | mit | 262 |
"""
aiDistanceLocator v1.0.2
----------------------------------------------------------------------------------------------------
- create a locator as a reference to adjust camera's focus distance or light attenuation parameters
----------------------------------------------------------------------------------------------------
Tool by Aaron Ibanez
Last update on 25/04/2017
----------------------------------------------------------------------------------------------------
Put script in \Documents\maya\201X\scripts
In Python tab of Maya's script editor, execute code:
import aiDistanceLocator
aiDistanceLocator.UI()
----------------------------------------------------------------------------------------------------
"""
""" IMPORT MODULES """
from functools import partial
import maya.cmds as cmds
import sys
""" DEFINE FUNCTIONS """
####################################################################################################################################
#######################################################____CAMERA LOCATOR____#######################################################
cam = []
## define selection
def camSelection():
global sel, cam
sel = cmds.ls(selection = True, cameras = True, dagObjects = True)
cam = cmds.listRelatives(sel, parent = True, type = "transform")
if len(sel) == 0:
sys.stdout.write("\nNo cameras selected.\n")
## create camera locator
def createCamLocator():
camSelection()
if len(sel) > 0:
for c in cam:
if not cmds.objExists("grp_" + c + "_focusDistance"):
# create a locator and group it
cmds.spaceLocator(name = "lct_" + c + "_focusDistance", position = (0, 0, 0))
annotation = cmds.annotate("lct_" + c + "_focusDistance", text = c + "_focusDistance")
cmds.setAttr(annotation + ".displayArrow", 0)
ann = cmds.listRelatives(annotation, parent = True, type = "transform")
cmds.rename(ann, "ann_" + c + "_focusDistance")
cmds.parent("ann_" + c + "_focusDistance", "lct_" + c + "_focusDistance")
cmds.group("lct_" + c + "_focusDistance", name = "grp_" + c + "_focusDistance")
cmds.select(cam)
# constrain group's position and rotation to camera's
cmds.parentConstraint(c, "grp_" + c + "_focusDistance", name = "grp_" + c + "_focusDistance" + "_parentConstraint", maintainOffset = False, decompRotationToChild = False, skipTranslate = "none", skipRotate = "none")
				# connect camera's focus distance attribute to locator's Z position attribute
cmds.expression(string = "lct_" + c + "_focusDistance.translateZ = -(" + c + ".aiFocusDistance)", name = "expression_" + c + "_focusDistance")
# lock transformations
transformations = ["tx", "ty", "tz", "rx", "ry", "rz", "sx", "sy", "sz"]
for t in transformations:
cmds.setAttr("grp_" + c + "_focusDistance" + "." + t, lock = True)
cmds.setAttr("lct_" + c + "_focusDistance" + "." + t, lock = True)
## remove camera locator
def removeCamLocator():
camSelection()
if len(sel) > 0:
for c in cam:
if cmds.objExists("grp_" + c + "_focusDistance"):
cmds.delete("grp_" + c + "_focusDistance", "expression_" + c + "_focusDistance")
## camera locator scale
def camLocatorScale(camLocatorScaleValue, *args):
camSelection()
if len(sel) > 0:
for c in cam:
camLctScale = cmds.intSliderGrp(camLocatorScaleValue, query = True, value = True)
scale = ["localScaleX", "localScaleY"]
for s in scale:
if cmds.objExists("grp_" + c + "_focusDistance"):
cmds.setAttr("lct_" + c + "_focusDistance." + s, camLctScale)
####################################################################################################################################
#########################################################____LIGHT LOCATOR____######################################################
lgt = []
## define selection
def lgtSelection():
global sel, lgt
sel = cmds.ls(selection = True, exactType = ("areaLight", "pointLight", "spotLight", "aiAreaLight", "aiMeshLight", "aiPhotometricLight"), dagObjects = True)
lgt = cmds.listRelatives(sel, parent = True, type = "transform")
if len(sel) == 0:
sys.stdout.write("\nNo lights selected.\n")
## create light locators
def createLgtLocator():
lgtSelection()
if len(sel) > 0:
for l in lgt:
if not cmds.objExists("grp_" + l + "_lightDecay"):
# check for light decay filter used in the current light/s
if len(cmds.ls(exactType = "aiLightDecay")) > 0:
try:
for f in cmds.ls(exactType = "aiLightDecay"):
if f in cmds.listConnections(l + ".aiFilters"):
# create locators and group it
cmds.spaceLocator(name = "lct_" + l + "_nearStart", position = (0, 0, 0))
annotation1 = cmds.annotate("lct_" + l + "_nearStart", text = l + "_nearStart")
cmds.setAttr(annotation1 + ".displayArrow", 0)
ann1 = cmds.listRelatives(annotation1, parent = True, type = "transform")
cmds.rename(ann1, "ann_" + l + "_nearStart")
cmds.parent("ann_" + l + "_nearStart", "lct_" + l + "_nearStart")
cmds.spaceLocator(name = "lct_" + l + "_nearEnd", position = (0, 0, 0))
annotation2 = cmds.annotate("lct_" + l + "_nearEnd", text = l + "_nearEnd")
cmds.setAttr(annotation2 + ".displayArrow", 0)
ann2 = cmds.listRelatives(annotation2, parent = True, type = "transform")
cmds.rename(ann2, "ann_" + l + "_nearEnd")
cmds.parent("ann_" + l + "_nearEnd", "lct_" + l + "_nearEnd")
cmds.spaceLocator(name = "lct_" + l + "_farStart", position = (0, 0, 0))
annotation3 = cmds.annotate("lct_" + l + "_farStart", text = l + "_farStart")
cmds.setAttr(annotation3 + ".displayArrow", 0)
ann3 = cmds.listRelatives(annotation3, parent = True, type = "transform")
cmds.rename(ann3, "ann_" + l + "_farStart")
cmds.parent("ann_" + l + "_farStart", "lct_" + l + "_farStart")
cmds.spaceLocator(name = "lct_" + l + "_farEnd", position = (0, 0, 0))
annotation4 = cmds.annotate("lct_" + l + "_farEnd", text = l + "_farEnd")
cmds.setAttr(annotation4 + ".displayArrow", 0)
ann4 = cmds.listRelatives(annotation4, parent = True, type = "transform")
cmds.rename(ann4, "ann_" + l + "_farEnd")
cmds.parent("ann_" + l + "_farEnd", "lct_" + l + "_farEnd")
cmds.group("lct_" + l + "_nearStart", "lct_" + l + "_nearEnd", "lct_" + l + "_farStart", "lct_" + l + "_farEnd", name = "grp_" + l + "_lightDecay")
cmds.select(lgt)
# constrain group's position and rotation to light's
cmds.parentConstraint(l, "grp_" + l + "_lightDecay", name = "grp_" + l + "_lightDecay" + "_parentConstraint", maintainOffset = False, decompRotationToChild = False, skipTranslate = "none", skipRotate = "none")
								# connect light's decay attribute to locator's Z local position attribute
cmds.expression(string = "lct_" + l + "_nearStart.translateZ = -(" + f + ".nearStart)", name = "expression_" + l + "_nearStart")
cmds.expression(string = "lct_" + l + "_nearEnd.translateZ = -(" + f + ".nearEnd)", name = "expression_" + l + "_nearEnd")
cmds.expression(string = "lct_" + l + "_farStart.translateZ = -(" + f + ".farStart)", name = "expression_" + l + "_farStart")
cmds.expression(string = "lct_" + l + "_farEnd.translateZ = -(" + f + ".farEnd)", name = "expression_" + l + "_farEnd")
# lock transformations
transformations = ["tx", "ty", "tz", "rx", "ry", "rz", "sx", "sy", "sz"]
for t in transformations:
cmds.setAttr("grp_" + l + "_lightDecay" + "." + t, lock = True)
cmds.setAttr("lct_" + l + "_nearStart" + "." + t, lock = True)
cmds.setAttr("lct_" + l + "_nearEnd" + "." + t, lock = True)
cmds.setAttr("lct_" + l + "_farStart" + "." + t, lock = True)
cmds.setAttr("lct_" + l + "_farEnd" + "." + t, lock = True)
else:
sys.stdout.write("\nSelected lights are not using any aiLightDecay filter.\n")
except TypeError:
sys.stdout.write("\nSelected lights are not using any aiLightDecay filter.\n")
else:
sys.stdout.write("\nSelected lights are not using any aiLightDecay filter.\n")
## remove light locators
def removeLgtLocator():
lgtSelection()
if len(sel) > 0:
for l in lgt:
if cmds.objExists("grp_" + l + "_lightDecay"):
cmds.delete("grp_" + l + "_lightDecay", "expression_" + l + "_nearStart", "expression_" + l + "_nearEnd", "expression_" + l + "_farStart", "expression_" + l + "_farEnd")
## light locators scale
def lgtLocatorScale(lgtLocatorScaleValue, *args):
lgtSelection()
if len(sel) > 0:
for l in lgt:
if cmds.objExists("grp_" + l + "_lightDecay"):
lgtLctScale = cmds.intSliderGrp(lgtLocatorScaleValue, query = True, value = True)
scale = ["localScaleX", "localScaleY"]
for s in scale:
decayAttr = ["nearStart", "nearEnd", "farStart", "farEnd"]
for a in decayAttr:
cmds.setAttr("lct_" + l + "_" + a + "." + s, lgtLctScale)
## light locators visibility
def lgtLocatorVisibility(decayAttrClass, visibilityStatus):
lgtSelection()
if len(sel) > 0:
for l in lgt:
if cmds.objExists("grp_" + l + "_lightDecay"):
decayAttr = ["nearStart", "nearEnd", "farStart", "farEnd"]
cmds.setAttr("lct_" + l + "_" + decayAttr[decayAttrClass] + ".visibility", visibilityStatus)
""" DEFINE UI """
def UI():
####################################################################################################################################
##################################################____DEFINE GLOBAL VARIABLES____###################################################
global camLocatorScaleValue, lgtLocatorScaleValue
####################################################################################################################################
#########################################################____UI CREATION____########################################################
if cmds.window("aiDistanceLocatorWindow", exists = True):
cmds.deleteUI("aiDistanceLocatorWindow")
#cmds.windowPref("aiDistanceLocatorWindow", remove = True)
win = cmds.window("aiDistanceLocatorWindow", title = "aiDistanceLocator", width = 400, height = 175, sizeable = False)
mainLayout = cmds.columnLayout()
####################################################################################################################################
#######################################################____CAMERA LOCATOR____#######################################################
layA = cmds.rowColumnLayout(numberOfColumns = 3, columnWidth = ([1, 100], [2, 150], [3, 150]), parent = mainLayout)
## add/delete camera locator
cmds.text(label = " camera\n focusDistance", font = "boldLabelFont", align = "left")
cmds.button(label = "ADD", height = 40, command = "aiDistanceLocator.createCamLocator()")
cmds.button(label = "DEL", height = 40, command = "aiDistanceLocator.removeCamLocator()")
layB = cmds.rowColumnLayout(numberOfColumns = 2, columnWidth = ([1, 100], [2, 300]), parent = mainLayout)
## camera locator scale
cmds.text(label = " locator scale:", align = "left")
camLocatorScaleValue = cmds.intSliderGrp(field = True, value = 1, min = 1, max = 10, fieldMaxValue = 1000)
cmds.intSliderGrp(camLocatorScaleValue, edit = True, changeCommand = partial(camLocatorScale, camLocatorScaleValue))
cmds.separator(height = 20, style = "none", parent = mainLayout)
####################################################################################################################################
#########################################################____LIGHT LOCATOR____######################################################
layC = cmds.rowColumnLayout(numberOfColumns = 3, columnWidth = ([1, 100], [2, 150], [3, 150]), parent = mainLayout)
## add/delete light locator
cmds.text(label = " lightDecay", font = "boldLabelFont", align = "left")
cmds.button(label = "ADD", height = 40, command = "aiDistanceLocator.createLgtLocator()")
cmds.button(label = "DEL", height = 40, command = "aiDistanceLocator.removeLgtLocator()")
layD = cmds.rowColumnLayout(numberOfColumns = 2, columnWidth = ([1, 100], [2, 300]), parent = mainLayout)
## light locators scale
cmds.text(label = " locator scale:", align = "left")
lgtLocatorScaleValue = cmds.intSliderGrp(field = True, value = 1, min = 1, max = 10, fieldMaxValue = 1000)
cmds.intSliderGrp(lgtLocatorScaleValue, edit = True, changeCommand = partial(lgtLocatorScale, lgtLocatorScaleValue))
layE = cmds.rowColumnLayout(numberOfColumns = 5, columnWidth = ([1, 100], [2, 75], [3, 75], [4, 75], [5, 75]), parent = mainLayout)
## light locators visibility
cmds.text(label = " locator visibility:", align = "left")
cmds.checkBox(label = "nearStart", value = 1, onCommand = "aiDistanceLocator.lgtLocatorVisibility(0, 1)", offCommand = "aiDistanceLocator.lgtLocatorVisibility(0, 0)")
cmds.checkBox(label = "nearEnd", value = 1, onCommand = "aiDistanceLocator.lgtLocatorVisibility(1, 1)", offCommand = "aiDistanceLocator.lgtLocatorVisibility(1, 0)")
cmds.checkBox(label = "farStart", value = 1, onCommand = "aiDistanceLocator.lgtLocatorVisibility(2, 1)", offCommand = "aiDistanceLocator.lgtLocatorVisibility(2, 0)")
cmds.checkBox(label = "farEnd", value = 1, onCommand = "aiDistanceLocator.lgtLocatorVisibility(3, 1)", offCommand = "aiDistanceLocator.lgtLocatorVisibility(3, 0)")
# open UI if current renderer is Arnold
if cmds.getAttr("defaultRenderGlobals.currentRenderer") == "arnold":
cmds.showWindow(win)
else:
cmds.confirmDialog(title = "Warning", message = "Set current renderer to Arnold!", icon = "warning")
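# Illustrative script-only usage (assumes this runs inside Maya with Arnold as
# the current renderer and a camera or light selected; these are the same calls
# the UI buttons trigger):
#
#   import aiDistanceLocator
#   aiDistanceLocator.createCamLocator()   # add a focusDistance locator to selected cameras
#   aiDistanceLocator.createLgtLocator()   # add decay locators to lights using aiLightDecay
#   aiDistanceLocator.removeCamLocator()   # clean up again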
| aaibfer/mtoaUtils | scripts/aiDistanceLocator.py | Python | mit | 13,950 |
"""
Django settings for {{ project_name }} project.
Generated by 'django-admin startproject' using Django {{ django_version }}.
For more information on this file, see
https://docs.djangoproject.com/en/{{ docs_version }}/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(PROJECT_DIR)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/{{ docs_version }}/howto/deployment/checklist/
# Application definition
INSTALLED_APPS = [
'home',
'search',
'wagtail.contrib.forms',
'wagtail.contrib.redirects',
'wagtail.embeds',
'wagtail.sites',
'wagtail.users',
'wagtail.snippets',
'wagtail.documents',
'wagtail.images',
'wagtail.search',
'wagtail.admin',
'wagtail.core',
'modelcluster',
'taggit',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'wagtail.contrib.redirects.middleware.RedirectMiddleware',
]
ROOT_URLCONF = '{{ project_name }}.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(PROJECT_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
# Database
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/{{ docs_version }}/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/{{ docs_version }}/howto/static-files/
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
STATICFILES_DIRS = [
os.path.join(PROJECT_DIR, 'static'),
]
# ManifestStaticFilesStorage is recommended in production, to prevent outdated
# JavaScript / CSS assets being served from cache (e.g. after a Wagtail upgrade).
# See https://docs.djangoproject.com/en/{{ docs_version }}/ref/contrib/staticfiles/#manifeststaticfilesstorage
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Wagtail settings
WAGTAIL_SITE_NAME = "{{ project_name }}"
# Search
# https://docs.wagtail.org/en/stable/topics/search/backends.html
WAGTAILSEARCH_BACKENDS = {
'default': {
'BACKEND': 'wagtail.search.backends.database',
}
}
# Base URL to use when referring to full URLs within the Wagtail admin backend -
# e.g. in notification emails. Don't include '/admin' or a trailing slash
BASE_URL = 'http://example.com'
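# A minimal sketch (not part of this base module) of how a local settings file
# commonly extends it; the module name dev.py and the placeholder SECRET_KEY
# are assumptions for illustration only:
#
#   from .base import *  # noqa: F401,F403
#   DEBUG = True
#   SECRET_KEY = 'insecure-development-key'
#   ALLOWED_HOSTS = ['*']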
| zerolab/wagtail | wagtail/project_template/project_name/settings/base.py | Python | bsd-3-clause | 4,622 |
#
# Martin Gracik <[email protected]>
#
# Copyright 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
import unittest
from tests.baseclass import CommandTest
from pykickstart.errors import KickstartParseError, KickstartValueError
class F19_TestCase(CommandTest):
def runTest(self):
# pass
self.assert_parse("liveimg --url=http://someplace/somewhere --proxy=http://wherever/other "
"--noverifyssl --checksum=e7a9fe500330a1cae4ca114833bb3df014e6d14e63ea9566896a848f3832d0ba",
"liveimg --url=\"http://someplace/somewhere\" --proxy=\"http://wherever/other\" "
"--noverifyssl --checksum=\"e7a9fe500330a1cae4ca114833bb3df014e6d14e63ea9566896a848f3832d0ba\"\n")
self.assert_parse("liveimg --url=http://someplace/somewhere --proxy=http://wherever/other "
"--noverifyssl",
"liveimg --url=\"http://someplace/somewhere\" --proxy=\"http://wherever/other\" "
"--noverifyssl\n")
self.assert_parse("liveimg --url=http://someplace/somewhere --proxy=http://wherever/other ",
"liveimg --url=\"http://someplace/somewhere\" --proxy=\"http://wherever/other\"\n")
self.assert_parse("liveimg --url=http://someplace/somewhere",
"liveimg --url=\"http://someplace/somewhere\"\n")
# equality
self.assertEqual(self.assert_parse("liveimg --url=http://one"), self.assert_parse("liveimg --url=http://one"))
self.assertEqual(self.assert_parse("liveimg --url=http://one --proxy=http://wherever"), self.assert_parse("liveimg --url=http://one --proxy=http://wherever"))
self.assertEqual(self.assert_parse("liveimg --url=http://one --noverifyssl"), self.assert_parse("liveimg --url=http://one --noverifyssl"))
self.assertEqual(self.assert_parse("liveimg --url=http://one --checksum=deadbeef"), self.assert_parse("liveimg --url=http://one --checksum=deadbeef"))
self.assertNotEqual(self.assert_parse("liveimg --url=http://one"), self.assert_parse("liveimg --url=http://two"))
self.assertNotEqual(self.assert_parse("liveimg --url=http://one --proxy=http://wherever"), self.assert_parse("liveimg --url=http://two"))
self.assertNotEqual(self.assert_parse("liveimg --url=http://one --proxy=http://wherever"), self.assert_parse("liveimg --url=http://one, --proxy=http://somewhere"))
self.assertNotEqual(self.assert_parse("liveimg --url=http://one --noverifyssl"), self.assert_parse("liveimg --url=http://one"))
self.assertNotEqual(self.assert_parse("liveimg --url=http://one --checksum=deadbeef"), self.assert_parse("liveimg --url=http://one"))
self.assertNotEqual(self.assert_parse("liveimg --url=http://one --checksum=deadbeef"), self.assert_parse("liveimg --url=http://one --checksum=abababab"))
self.assertFalse(self.assert_parse("liveimg --url=http://one") == None)
# fail
self.assert_parse_error("liveimg", KickstartValueError)
self.assert_parse_error("liveimg --url", KickstartParseError)
self.assert_parse_error("liveimg --url=http://someplace/somewhere --proxy", KickstartParseError)
self.assert_parse_error("liveimg --proxy=http://someplace/somewhere", KickstartValueError)
self.assert_parse_error("liveimg --noverifyssl", KickstartValueError)
self.assert_parse_error("liveimg --checksum=e7a9fe500330a1cae4ca114833bb3df014e6d14e63ea9566896a848f3832d0ba", KickstartValueError)
if __name__ == "__main__":
unittest.main()
| pbokoc/pykickstart | tests/commands/liveimg.py | Python | gpl-2.0 | 4,453 |
import traceback
from couchpotato import get_db
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from .index import CategoryIndex, CategoryMediaIndex
log = CPLog(__name__)
class CategoryPlugin(Plugin):
_database = {
'category': CategoryIndex,
'category_media': CategoryMediaIndex,
}
def __init__(self):
addApiView('category.save', self.save)
addApiView('category.save_order', self.saveOrder)
addApiView('category.delete', self.delete)
addApiView('category.list', self.allView, docs = {
'desc': 'List all available categories',
'return': {'type': 'object', 'example': """{
'success': True,
'categories': array, categories
}"""}
})
addEvent('category.all', self.all)
addEvent('category.first', self.first)
addEvent('category.first_as_default', self.firstAsDefault)
def allView(self, **kwargs):
return {
'success': True,
'categories': self.all()
}
def all(self):
db = get_db()
categories = db.all('category', with_doc = True)
return [x['doc'] for x in categories]
def first(self):
db = get_db()
return list(db.all('category', limit = 1, with_doc = True))[0]['doc']
def firstAsDefault(self):
return self.conf('first_as_default', default = False, section = 'categories')
def save(self, **kwargs):
try:
db = get_db()
category = {
'_t': 'category',
'order': kwargs.get('order', 999),
'label': toUnicode(kwargs.get('label', '')),
'ignored': toUnicode(kwargs.get('ignored', '')),
'preferred': toUnicode(kwargs.get('preferred', '')),
'required': toUnicode(kwargs.get('required', '')),
'destination': toUnicode(kwargs.get('destination', '')),
}
try:
c = db.get('id', kwargs.get('id'))
category['order'] = c.get('order', category['order'])
c.update(category)
db.update(c)
except:
c = db.insert(category)
c.update(category)
return {
'success': True,
'category': c
}
except:
log.error('Failed: %s', traceback.format_exc())
return {
'success': False,
'category': None
}
def saveOrder(self, **kwargs):
try:
db = get_db()
order = 0
for category_id in kwargs.get('ids', []):
c = db.get('id', category_id)
c['order'] = order
db.update(c)
order += 1
return {
'success': True
}
except:
log.error('Failed: %s', traceback.format_exc())
return {
'success': False
}
def delete(self, id = None, **kwargs):
try:
db = get_db()
success = False
message = ''
try:
c = db.get('id', id)
db.delete(c)
# Force defaults on all empty category movies
self.removeFromMovie(id)
success = True
except:
message = log.error('Failed deleting category: %s', traceback.format_exc())
return {
'success': success,
'message': message
}
except:
log.error('Failed: %s', traceback.format_exc())
return {
'success': False
}
def removeFromMovie(self, category_id):
try:
db = get_db()
movies = [x['doc'] for x in db.get_many('category_media', category_id, with_doc = True)]
if len(movies) > 0:
for movie in movies:
movie['category_id'] = None
db.update(movie)
except:
log.error('Failed: %s', traceback.format_exc())
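# Illustrative payloads for the API views registered above (values are made
# up; the field names come straight from save() and delete()):
#
#   category.save   -> {'label': 'Kids', 'ignored': 'cam, ts', 'preferred': '720p',
#                       'required': '', 'destination': '/movies/kids', 'order': 1}
#   category.delete -> {'id': '<category id>'}  # also clears the id from linked media
#   category.list   -> {'success': True, 'categories': [...]}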
| gchaimovitz/CouchPotatoServer | couchpotato/core/plugins/category/main.py | Python | gpl-3.0 | 4,316 |
import re
from magichour.api.local.util.namedtuples import DistributedLogLine
from magichour.api.local.util.namedtuples import DistributedTransformLine
from magichour.api.local.util.log import log_time
def transformLine(line):
'''
process transformations into RDD format
Args:
        line(string): line from the transform definition file.
lines beginning with # are considered comments
and will need to be removed
Returns:
retval(TransformLine): namedTuple representation of the tasking
'''
if line.lstrip()[0] != '#':
# id,type,name,transform
l = line.lstrip().rstrip().split(',', 3)
return DistributedTransformLine(int(l[0]),
l[1],
l[2],
l[3],
re.compile(l[3]))
else:
return DistributedTransformLine('COMMENT',
'COMMENT',
'COMMENT',
'COMMENT',
'COMMENT')
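# Example of the transform-definition format parsed above (illustrative
# values): every non-comment line is "id,type,name,transform", e.g.
#
#   1,REPLACE,IPADDRESS,(\d{1,3}\.){3}\d{1,3}
#   # lines starting with '#' become 'COMMENT' placeholders and are dropped later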
def lineRegexReplacement(line, logTrans):
'''
    apply a list of regex replacements to a line, make note of
    all the replacements performed in a dictionary(list)
Args:
line(DistributedLogLine): logline to work on
Globals:
        transforms(RDD(TransformLine)): replacements to make with
Returns:
retval(DistributedLogLine): logline with the processed
and dictionary portions filled in
'''
text = line.text.strip()
replaceDict = dict()
for t in logTrans.value:
if t.type == 'REPLACE':
replaceList = t.compiled.findall(text)
if replaceList:
replaceDict[t.name] = replaceList
text = t.compiled.sub(t.name, text, 0)
if t.type == 'REPLACELIST':
print 'REPLACELIST not implemented yet'
processed = ' '.join(text.split())
retVal = DistributedLogLine(line.ts,
line.text.lstrip().rstrip(),
processed.lstrip().rstrip(),
replaceDict,
None,
None,
None)
return retVal
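# Illustrative effect of lineRegexReplacement with the IPADDRESS transform
# sketched above (made-up input): the text
#   "connection from 10.1.2.3 refused"
# becomes "connection from IPADDRESS refused", and the logline's dictionary
# holds {'IPADDRESS': ['10.1.2.3']}.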
def readTransforms(sc, transFile):
'''
returns a list of transforms for replacement processing
Args:
sc(sparkContext): spark context
transFile(string): uri to the transform file in HDFS
Returns:
retval(list(TransformLine))
'''
# map the transFile
simpleTransformations = sc.textFile(transFile)
# parse loglines
logTransforms = simpleTransformations.map(transformLine).cache()
trans = logTransforms.collect()
lTrans = list()
for t in trans:
if t.id != 'COMMENT':
lTrans.append(t)
return lTrans
def logPreProcess(sc, logTrans, rrdLogLine):
'''
take a series of loglines and pre-process the lines
replace ipaddresses, directories, urls, etc with constants
keep a dictionary of the replacements done to the line
Args:
sc(sparkContext): spark context
        logTrans(string): location of the transFile in HDFS
logFile(string): location of the log data in HDFS
Returns:
retval(RDD(DistributedLogLines)): preprocessed log lines ready for next
stage of processing
'''
# following done to make sure that the broadcast gets to the function
return rrdLogLine.map(lambda line: lineRegexReplacement(line, logTrans))
@log_time
def preprocess_rdd(sc, logTrans, rrdLogLine):
'''
make a rdd of preprocessed loglines
Args:
sc(sparkContext): sparkContext
        logTrans(string): location of the transFile in HDFS
logFile(string): location of the log data in HDFS
Returns:
retval(RDD(DistributedLogLines)): preprocessed log lines ready for
next stage of processing
'''
lTrans = readTransforms(sc, logTrans)
logTrans = sc.broadcast(lTrans)
return logPreProcess(sc, logTrans, rrdLogLine)
| d-grossman/magichour | magichour/api/dist/preprocess/preProcess.py | Python | apache-2.0 | 4,263 |
from fabric.api import task # , env, cd
# import the fabric tasks and templates from cotton
import cotton.fabfile as cotton
# load application-specific settings from this module
cotton.set_fabric_env('cotton_settings')
@task
def init():
"""
Initialize the app deployment
"""
cotton.install()
#cotton.create_project()
cotton.install_iojs()
cotton.upload_template_and_reload('defaults')
cotton.upload_template_and_reload('marvin.sh')
cotton.upload_template_and_reload('supervisor_marvin')
# make a directory to hold the file-brain
cotton.sudo("mkdir -p /var/hubot")
cotton.sudo("chown marvin:marvin /var/hubot")
@task
def ship():
"""
Deploy the current branch to production
"""
cotton.git_push()
cotton.install_iojs_dependencies()
cotton.upload_template_and_reload('marvin.sh')
cotton.upload_template_and_reload('supervisor_marvin')
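# Illustrative invocation from a developer machine (assumes Fabric is
# installed and cotton_settings points at the target host):
#
#   fab init   # one-time provisioning: cotton install, io.js, config templates
#   fab ship   # push the current branch and reload the supervised marvin process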
| evilchili/hugops-marvin | build/fabfile.py | Python | mit | 916 |
"""
@author: dhoomakethu
"""
from __future__ import absolute_import, unicode_literals
from apocalypse.utils.vaurien_utils import (VaurienConfigFileParser
as ConfigFileParser)
from apocalypse.utils.vaurien_utils import settings_dict
__all__ = ["ConfigFileParser", "DEFAULT_SETTINGS"]
_marker = []
DEFAULT_VALS = {
# Generic settings
"chaos.enabled ": True,
"chaos.background_run": False,
"chaos.cloud": "minicloud",
"chaos.machine": "dev",
"chaos.error_threshold": 10,
"chaos.trigger_every": 10,
# Actions
# Resource
# Burn Cpu
"actions.burn_cpu.enabled": True,
"burn_cpu.load": 0.9,
"burn_cpu.load_duration": 30,
"burn_cpu.vm_instances": None,
# Burn IO
"actions.burn_io.enabled": False,
# Burn RAM
"actions.burn_ram.enabled": False,
"burn_ram.load": 0.9,
"burn_ram.load_duration": 30,
"burn_ram.vm_instances": None,
# Burn Disk
"actions.burn_disk.enabled": False,
# Generic
# Kill process
"actions.kill_process.enabled": True,
"kill_process.id": None,
"kill_process.signal": None,
"kill_process.vm_instances": None,
"actions.shutdown.enabled": True,
"actions.reboot.enabled": True,
"actions.terminate.enabled": False,
# Network
"actions.network_blackout.enabled": False,
"actions.network_error.enabled": False,
"actions.network_delay.enabled": False,
"actions.network_hang.enabled": False,
"actions.network_transient.enabled": False,
"actions.network_abort.enabled": False,
}
DEFAULT_SETTINGS = settings_dict(DEFAULT_VALS)
| dhoomakethu/apocalypse | apocalypse/utils/config_parser.py | Python | mit | 1,634 |
from datetime import datetime
from decimal import Decimal
from urllib.error import URLError
import mechanicalsoup
from django.test import TestCase
from lxml.builder import E
from mock import MagicMock, Mock
from currency_convert import convert
from currency_convert.factory.currency_convert_factory import (
MonthlyAverageFactory
)
from currency_convert.imf_rate_parser import RateBrowser, RateParser
from currency_convert.models import MonthlyAverage
from iati_codelists.models import Currency
class RateBrowserTestCase(TestCase):
def setUp(self):
"""
"""
self.rate_browser = RateBrowser()
def test_prepare_browser(self):
"""
test if returns a browser
"""
self.assertTrue(isinstance(
self.rate_browser.browser, mechanicalsoup.Browser))
def test_retry_on_urlerror(self):
"""
should retry 2 times when receiving an URL error
"""
self.rate_browser.browser.open = Mock(
side_effect=URLError('cant connect...'))
class RateParserTestCase(TestCase):
def create_rate_value_elem(self, value, currency_name, currency_iso):
return E(
'RATE_VALUE', str(value), CURRENCY_CODE=currency_name,
ISO_CHAR_CODE=currency_iso)
def create_effective_date_elem(self, date_value, rate_values):
effective_date = E('EFFECTIVE_DATE', VALUE=date_value)
effective_date.append(
self.create_rate_value_elem(rate_values, 'Euro', 'EUR'))
effective_date.append(self.create_rate_value_elem(
rate_values, 'Dollar', 'USD'))
return effective_date
def setUp(self):
"""
create 1 root element, which contains
2 effective date elements, which contains
2 rate value elements, 1st with exchange rates 1.5, second with rate
2.0
"""
effective_date = self.create_effective_date_elem(
'02-Jan-1997', Decimal(1.5))
effective_date_2 = self.create_effective_date_elem(
'03-Jan-1997', Decimal(2.00000))
root_elem = E('EXCHANGE_RATE_REPORT')
root_elem.append(effective_date)
root_elem.append(effective_date_2)
self.rate_parser = RateParser()
self.rate_parser.now = datetime(1995, 1, 31)
self.effective_date = effective_date
self.root_elem = root_elem
def test_prepare_url(self):
self.rate_parser.min_tick = 8888
self.rate_parser.max_tick = 7777
url = self.rate_parser.prepare_url()
self.assertTrue('8888' in url,
"From not set in url")
self.assertTrue('7777' in url,
"To not set in url")
def test_parse_day_rates(self):
self.rate_parser.parse_day_rates(self.effective_date)
self.assertEqual(2, len(self.rate_parser.rates))
self.assertTrue('EUR' in self.rate_parser.rates)
self.assertTrue('USD' in self.rate_parser.rates)
self.assertTrue(self.rate_parser.rates['EUR'].get('values')[0] == 1.5)
def test_parse_data(self):
"""
"""
self.rate_parser.parse_day_rates = MagicMock()
self.rate_parser.parse_data(self.root_elem)
self.assertEqual(self.rate_parser.parse_day_rates.call_count, 2)
def test_save_averages(self):
self.rate_parser.parse_data(self.root_elem)
self.rate_parser.save_averages()
average_item = MonthlyAverage.objects.filter(
month=12, year=1993, currency='EUR')[0]
self.assertTrue(average_item.value == 1.75)
def test_ticks(self):
dt = datetime(1994, 1, 1)
ticks = self.rate_parser.ticks(dt)
self.assertEqual(ticks, 628929792000000000)
def test_set_tick_rates(self):
self.rate_parser.year = 1994
self.rate_parser.month = 1
self.rate_parser.set_tick_rates()
self.assertEqual(self.rate_parser.min_tick, 628929792000000000)
self.assertEqual(self.rate_parser.max_tick, 628955712000000000)
def test_reset_data(self):
self.rate_parser.rates = {'currencies': 'averages'}
self.rate_parser.reset_data()
self.assertEqual(self.rate_parser.rates, {})
def test_create_browser(self):
browser = self.rate_parser.create_browser()
self.assertTrue(isinstance(browser, RateBrowser))
def test_update_rates(self):
currency, created = Currency.objects.get_or_create(
code='EUR', name='Euro')
MonthlyAverageFactory.create(
year=1994, month=1, currency=currency, value=1)
self.rate_parser.create_browser = MagicMock()
self.rate_parser.parse_data = MagicMock()
self.rate_parser.save_averages = MagicMock()
self.rate_parser.update_rates(force=False)
self.assertEqual(12, self.rate_parser.parse_data.call_count)
def test_force_update_rates(self):
currency, created = Currency.objects.get_or_create(
code='EUR', name='Euro')
MonthlyAverageFactory.create(
year=1994, month=1, currency=currency, value=1)
self.rate_parser.create_browser = MagicMock()
self.rate_parser.parse_data = MagicMock()
self.rate_parser.save_averages = MagicMock()
self.rate_parser.update_rates(force=True)
self.assertEqual(13, self.rate_parser.create_browser.call_count)
self.assertEqual(13, self.rate_parser.parse_data.call_count)
self.assertEqual(13, self.rate_parser.save_averages.call_count)
class ConvertTestCase(TestCase):
def setUp(self):
currency, created = Currency.objects.get_or_create(
code='EUR', name='Euro')
MonthlyAverageFactory.create(
year=1994, month=1, currency=currency, value=1.5)
usd_currency, created = Currency.objects.get_or_create(
code='USD', name='USD')
MonthlyAverageFactory.create(
year=1994, month=1, currency=usd_currency, value=3)
def test_currency_from_to(self):
"""
"""
value_date = datetime(1994, 1, 1)
rate = convert.currency_from_to('USD', 'EUR', value_date, 200)
self.assertEqual(rate, 400)
def test_currency_from_to_xdr(self):
"""
when converted to xdr, only to_xdr should be called.
"""
value_date = datetime(1994, 1, 1)
rate = convert.currency_from_to('USD', 'XDR', value_date, 100)
self.assertEqual(rate, 300)
def test_currency_from_to_does_not_exist(self):
"""
"""
value_date = datetime(1995, 1, 1)
rate = convert.currency_from_to('USD', 'UGX', value_date, 100)
self.assertEqual(rate, 0)
def test_to_xdr(self):
"""
"""
value_date = datetime(1994, 1, 1)
rate = convert.to_xdr('EUR', value_date, 100)
self.assertEqual(rate, 150)
def test_to_xdr_does_not_exist(self):
"""
"""
value_date = datetime(1995, 1, 1)
rate = convert.to_xdr('EUR', value_date, 100)
self.assertEqual(rate, 0)
def test_from_xdr(self):
"""
"""
value_date = datetime(1994, 1, 1)
rate = convert.from_xdr('EUR', value_date, 150)
self.assertEqual(rate, 100)
def test_from_xdr_does_not_exist(self):
"""
"""
value_date = datetime(1995, 1, 1)
rate = convert.from_xdr('EUR', value_date, 100)
self.assertEqual(rate, 0)
| openaid-IATI/OIPA | OIPA/currency_convert/tests.py | Python | agpl-3.0 | 7,477 |
import argparse
import time
import subprocess
import logging
from deep_architect import search_logging as sl
from deep_architect import utils as ut
from deep_architect.contrib.communicators.mongo_communicator import MongoCommunicator
from search_space_factory import name_to_search_space_factory_fn
from searcher import name_to_searcher_fn
logging.basicConfig(format='[%(levelname)s] %(asctime)s: %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
BUCKET_NAME = 'deep_architect'
RESULTS_TOPIC = 'results'
ARCH_TOPIC = 'architectures'
KILL_SIGNAL = 'kill'
PUBLISH_SIGNAL = 'publish'
def process_config_and_args():
parser = argparse.ArgumentParser("MPI Job for architecture search")
parser.add_argument('--config',
'-c',
action='store',
dest='config_name',
default='normal')
parser.add_argument(
'--config-file',
action='store',
dest='config_file',
default=
'/deep_architect/examples/contrib/kubernetes/experiment_config.json')
parser.add_argument('--bucket',
'-b',
action='store',
dest='bucket',
default=BUCKET_NAME)
# Other arguments
parser.add_argument('--resume',
'-r',
action='store_true',
dest='resume',
default=False)
parser.add_argument('--mongo-host',
'-m',
action='store',
dest='mongo_host',
default='127.0.0.1')
parser.add_argument('--mongo-port',
'-p',
action='store',
dest='mongo_port',
default=27017)
parser.add_argument('--log',
choices=['debug', 'info', 'warning', 'error'],
default='info')
parser.add_argument('--repetition', default=0)
options = parser.parse_args()
numeric_level = getattr(logging, options.log.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % options.log)
logging.getLogger().setLevel(numeric_level)
configs = ut.read_jsonfile(options.config_file)
config = configs[options.config_name]
config['bucket'] = options.bucket
comm = MongoCommunicator(host=options.mongo_host,
port=options.mongo_port,
data_refresher=True,
refresh_period=10)
datasets = {
'cifar10': ('data/cifar10/', 10),
}
_, num_classes = datasets[config['dataset']]
search_space_factory = name_to_search_space_factory_fn[
config['search_space']](num_classes)
config['save_every'] = 1 if 'save_every' not in config else config[
'save_every']
searcher = name_to_searcher_fn[config['searcher']](
search_space_factory.get_search_space)
config['num_epochs'] = -1 if 'epochs' not in config else config['epochs']
config['num_samples'] = -1 if 'samples' not in config else config['samples']
# SET UP GOOGLE STORE FOLDER
config['search_name'] = config['search_name'] + '_' + str(
options.repetition)
search_logger = sl.SearchLogger(config['search_folder'],
config['search_name'])
search_data_folder = search_logger.get_search_data_folderpath()
config['save_filepath'] = ut.join_paths(
(search_data_folder, config['searcher_file_name']))
config['eval_path'] = sl.get_all_evaluations_folderpath(
config['search_folder'], config['search_name'])
config['full_search_folder'] = sl.get_search_folderpath(
config['search_folder'], config['search_name'])
config['eval_hparams'] = {} if 'eval_hparams' not in config else config[
'eval_hparams']
state = {
'epochs': 0,
'models_sampled': 0,
'finished': 0,
'best_accuracy': 0.0
}
if options.resume:
try:
download_folder(search_data_folder, config['full_search_folder'],
config['bucket'])
searcher.load_state(search_data_folder)
if ut.file_exists(config['save_filepath']):
old_state = ut.read_jsonfile(config['save_filepath'])
state['epochs'] = old_state['epochs']
state['models_sampled'] = old_state['models_sampled']
state['finished'] = old_state['finished']
state['best_accuracy'] = old_state['best_accuracy']
except:
pass
return comm, search_logger, searcher, state, config
def download_folder(folder, location, bucket):
logger.info('Downloading gs://%s/%s to %s/', bucket, folder, location)
subprocess.check_call([
'gsutil', '-m', 'cp', '-r', 'gs://' + bucket + '/' + folder,
location + '/'
])
def upload_folder(folder, location, bucket):
subprocess.check_call([
'gsutil', '-m', 'cp', '-r', folder,
'gs://' + bucket + '/' + location + '/'
])
def get_topic_name(topic, config):
return config['search_folder'] + '_' + config['search_name'] + '_' + topic
def update_searcher(message, comm, search_logger, searcher, state, config):
data = message['data']
if not data == PUBLISH_SIGNAL:
results = data['results']
vs = data['vs']
evaluation_id = data['evaluation_id']
searcher_eval_token = data['searcher_eval_token']
log_results(results, vs, evaluation_id, searcher_eval_token,
search_logger, config)
searcher.update(results['validation_accuracy'], searcher_eval_token)
update_searcher_state(state, config, results)
save_searcher_state(searcher, state, config, search_logger)
publish_new_arch(comm, searcher, state, config)
comm.finish_processing(get_topic_name(RESULTS_TOPIC, config), message)
def save_searcher_state(searcher, state, config, search_logger):
logger.info('Models finished: %d Best Accuracy: %f', state['finished'],
state['best_accuracy'])
searcher.save_state(search_logger.get_search_data_folderpath())
state = {
'finished': state['finished'],
'models_sampled': state['models_sampled'],
'epochs': state['epochs'],
'best_accuracy': state['best_accuracy']
}
ut.write_jsonfile(state, config['save_filepath'])
upload_folder(search_logger.get_search_data_folderpath(),
config['full_search_folder'], config['bucket'])
return state
def update_searcher_state(state, config, results):
state['best_accuracy'] = max(state['best_accuracy'],
results['validation_accuracy'])
state['finished'] += 1
state['epochs'] += config['eval_epochs']
def log_results(results, vs, evaluation_id, searcher_eval_token, search_logger,
config):
logger.info("Updating searcher with evaluation %d and results %s",
evaluation_id, str(results))
eval_logger = search_logger.get_evaluation_logger(evaluation_id)
eval_logger.log_config(vs, searcher_eval_token)
eval_logger.log_results(results)
upload_folder(eval_logger.get_evaluation_folderpath(), config['eval_path'],
config['bucket'])
def publish_new_arch(comm, searcher, state, config):
while comm.check_data_exists(get_topic_name(ARCH_TOPIC, config),
'evaluation_id', state['models_sampled']):
state['models_sampled'] += 1
if should_end_searcher(state, config):
logger.info('Search finished, sending kill signal')
comm.publish(get_topic_name(ARCH_TOPIC, config), KILL_SIGNAL)
state['search_finished'] = True
elif should_continue(state, config):
logger.info('Publishing architecture number %d',
state['models_sampled'])
_, _, vs, searcher_eval_token = searcher.sample()
arch = {
'vs': vs,
'evaluation_id': state['models_sampled'],
'searcher_eval_token': searcher_eval_token,
'eval_hparams': config['eval_hparams']
}
comm.publish(get_topic_name(ARCH_TOPIC, config), arch)
state['models_sampled'] += 1
def should_continue(state, config):
cont = config[
'num_samples'] == -1 or state['models_sampled'] < config['num_samples']
cont = cont and (config['num_epochs'] == -1 or
state['epochs'] < config['num_epochs'])
return cont
def should_end_searcher(state, config):
kill = config['num_samples'] != -1 and state['finished'] >= config[
'num_samples']
kill = kill or (config['num_epochs'] != -1 and
state['epochs'] >= config['num_epochs'])
return kill
def main():
comm, search_logger, searcher, state, config = process_config_and_args()
logger.info('Using config %s', str(config))
logger.info('Current state %s', str(state))
state['search_finished'] = False
comm.subscribe(get_topic_name(RESULTS_TOPIC, config),
callback=lambda message: update_searcher(
message, comm, search_logger, searcher, state, config))
while not state['search_finished']:
time.sleep(30)
comm.unsubscribe(get_topic_name(RESULTS_TOPIC, config))
if __name__ == "__main__":
main()
| negrinho/deep_architect | examples/contrib/kubernetes/master.py | Python | mit | 9,581 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.connection import exec_command
from ansible.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.six import iteritems
from collections import defaultdict
try:
from icontrol.exceptions import iControlUnexpectedHTTPError
HAS_F5SDK = True
except ImportError:
HAS_F5SDK = False
f5_provider_spec = {
'server': dict(
fallback=(env_fallback, ['F5_SERVER'])
),
'server_port': dict(
type='int',
default=443,
fallback=(env_fallback, ['F5_SERVER_PORT'])
),
'user': dict(
fallback=(env_fallback, ['F5_USER', 'ANSIBLE_NET_USERNAME'])
),
'password': dict(
no_log=True,
aliases=['pass', 'pwd'],
fallback=(env_fallback, ['F5_PASSWORD', 'ANSIBLE_NET_PASSWORD'])
),
'ssh_keyfile': dict(
fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']),
type='path'
),
'validate_certs': dict(
type='bool',
fallback=(env_fallback, ['F5_VALIDATE_CERTS'])
),
'transport': dict(
default='rest',
choices=['cli', 'rest']
),
'timeout': dict(type='int'),
}
f5_argument_spec = {
'provider': dict(type='dict', options=f5_provider_spec),
}
f5_top_spec = {
'server': dict(
removed_in_version=2.9,
fallback=(env_fallback, ['F5_SERVER'])
),
'user': dict(
removed_in_version=2.9,
fallback=(env_fallback, ['F5_USER', 'ANSIBLE_NET_USERNAME'])
),
'password': dict(
removed_in_version=2.9,
no_log=True,
aliases=['pass', 'pwd'],
fallback=(env_fallback, ['F5_PASSWORD', 'ANSIBLE_NET_PASSWORD'])
),
'validate_certs': dict(
removed_in_version=2.9,
type='bool',
fallback=(env_fallback, ['F5_VALIDATE_CERTS'])
),
'server_port': dict(
removed_in_version=2.9,
type='int',
default=443,
fallback=(env_fallback, ['F5_SERVER_PORT'])
),
'transport': dict(
removed_in_version=2.9,
default='rest',
choices=['cli', 'rest']
)
}
f5_argument_spec.update(f5_top_spec)
def get_provider_argspec():
return f5_provider_spec
def load_params(params):
provider = params.get('provider') or dict()
for key, value in iteritems(provider):
if key in f5_argument_spec:
if params.get(key) is None and value is not None:
params[key] = value
# Fully Qualified name (with the partition)
def fqdn_name(partition, value):
if value is not None and not value.startswith('/'):
return '/{0}/{1}'.format(partition, value)
return value
# Fully Qualified name (with partition) for a list
def fq_list_names(partition, list_names):
if list_names is None:
return None
return map(lambda x: fqdn_name(partition, x), list_names)
def to_commands(module, commands):
spec = {
'command': dict(key=True),
'prompt': dict(),
'answer': dict()
}
transform = ComplexList(spec, module)
return transform(commands)
def run_commands(module, commands, check_rc=True):
responses = list()
commands = to_commands(module, to_list(commands))
for cmd in commands:
cmd = module.jsonify(cmd)
rc, out, err = exec_command(module, cmd)
if check_rc and rc != 0:
raise F5ModuleError(to_text(err, errors='surrogate_then_replace'))
responses.append(to_text(out, errors='surrogate_then_replace'))
return responses
def cleanup_tokens(client):
try:
resource = client.api.shared.authz.tokens_s.token.load(
name=client.api.icrs.token
)
resource.delete()
except Exception:
pass
def is_cli(module):
transport = module.params['transport']
provider_transport = (module.params['provider'] or {}).get('transport')
result = 'cli' in (transport, provider_transport)
return result
class Noop(object):
"""Represent no-operation required
This class is used in the Difference engine to specify when an attribute
has not changed. Difference attributes may return an instance of this
class as a means to indicate when the attribute has not changed.
The Noop object allows attributes to be set to None when sending updates
to the API. `None` is technically a valid value in some cases (it indicates
that the attribute should be removed from the resource).
"""
pass
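# Illustrative sketch (an assumption about typical usage, not part of the
# upstream file): a Difference-style property can return Noop() to mean
# "no change", reserving None for "remove this attribute on the device".
#
#   class Difference(object):
#       @property
#       def description(self):
#           if self.want.description == self.have.description:
#               return Noop()              # nothing to update
#           return self.want.description   # may be None ("remove it")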
class F5BaseClient(object):
def __init__(self, *args, **kwargs):
self.params = kwargs
load_params(self.params)
@property
def api(self):
raise F5ModuleError("Management root must be used from the concrete product classes.")
def reconnect(self):
"""Attempts to reconnect to a device
The existing token from a ManagementRoot can become invalid if you,
for example, upgrade the device (such as is done in the *_software
        module).
This method can be used to reconnect to a remote device without
        having to re-instantiate the ArgumentSpec and AnsibleF5Client classes;
        it will use the same values that were initially provided to those
        classes.
:return:
:raises iControlUnexpectedHTTPError
"""
self.api = self.mgmt
class AnsibleF5Parameters(object):
def __init__(self, *args, **kwargs):
self._values = defaultdict(lambda: None)
self._values['__warnings'] = []
self.client = kwargs.pop('client', None)
params = kwargs.pop('params', None)
if params:
self.update(params=params)
def update(self, params=None):
if params:
for k, v in iteritems(params):
if self.api_map is not None and k in self.api_map:
map_key = self.api_map[k]
else:
map_key = k
# Handle weird API parameters like `dns.proxy.__iter__` by
# using a map provided by the module developer
class_attr = getattr(type(self), map_key, None)
if isinstance(class_attr, property):
# There is a mapped value for the api_map key
if class_attr.fset is None:
# If the mapped value does not have
# an associated setter
self._values[map_key] = v
else:
# The mapped value has a setter
setattr(self, map_key, v)
else:
# If the mapped value is not a @property
self._values[map_key] = v
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if self.api_map is not None and api_attribute in self.api_map:
result[api_attribute] = getattr(self, self.api_map[api_attribute])
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
def __getattr__(self, item):
# Ensures that properties that weren't defined, and therefore stashed
# in the `_values` dict, will be retrievable.
return self._values[item]
@property
def partition(self):
if self._values['partition'] is None:
return 'Common'
return self._values['partition'].strip('/')
@partition.setter
def partition(self, value):
self._values['partition'] = value
def _filter_params(self, params):
return dict((k, v) for k, v in iteritems(params) if v is not None)
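# Illustrative sketch (hypothetical subclass, not part of the upstream file):
# concrete modules typically subclass AnsibleF5Parameters and supply an
# api_map so REST attribute names resolve to Ansible-friendly names.
#
#   class Parameters(AnsibleF5Parameters):
#       api_map = {'connectionLimit': 'connection_limit'}
#       api_attributes = ['connectionLimit']
#
#   p = Parameters(params={'connectionLimit': 100})
#   p.connection_limit   # 100, served from _values via __getattr__
#   p.api_params()       # {'connectionLimit': 100}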
class F5ModuleError(Exception):
pass
| wilvk/ansible | lib/ansible/module_utils/network/f5/common.py | Python | gpl-3.0 | 8,051 |
# pylint: skip-file
# flake8: noqa
class RouterException(Exception):
''' Router exception'''
pass
class RouterConfig(OpenShiftCLIConfig):
''' RouterConfig is a DTO for the router. '''
def __init__(self, rname, namespace, kubeconfig, router_options):
super(RouterConfig, self).__init__(rname, namespace, kubeconfig, router_options)
class Router(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
def __init__(self,
router_config,
verbose=False):
''' Constructor for OpenshiftOC
a router consists of 3 or more parts
- dc/router
- svc/router
- sa/router
- secret/router-certs
- clusterrolebinding/router-router-role
'''
super(Router, self).__init__('default', router_config.kubeconfig, verbose)
self.config = router_config
self.verbose = verbose
self.router_parts = [{'kind': 'dc', 'name': self.config.name},
{'kind': 'svc', 'name': self.config.name},
{'kind': 'sa', 'name': self.config.config_options['service_account']['value']},
{'kind': 'secret', 'name': self.config.name + '-certs'},
{'kind': 'clusterrolebinding', 'name': 'router-' + self.config.name + '-role'},
]
self.__prepared_router = None
self.dconfig = None
self.svc = None
self._secret = None
self._serviceaccount = None
self._rolebinding = None
@property
def prepared_router(self):
''' property for the prepared router'''
if self.__prepared_router is None:
results = self._prepare_router()
if not results or 'returncode' in results and results['returncode'] != 0:
if 'stderr' in results:
raise RouterException('Could not perform router preparation: %s' % results['stderr'])
raise RouterException('Could not perform router preparation.')
self.__prepared_router = results
return self.__prepared_router
@prepared_router.setter
def prepared_router(self, obj):
'''setter for the prepared_router'''
self.__prepared_router = obj
@property
def deploymentconfig(self):
''' property deploymentconfig'''
return self.dconfig
@deploymentconfig.setter
def deploymentconfig(self, config):
''' setter for property deploymentconfig '''
self.dconfig = config
@property
def service(self):
''' property for service '''
return self.svc
@service.setter
def service(self, config):
''' setter for property service '''
self.svc = config
@property
def secret(self):
''' property secret '''
return self._secret
@secret.setter
def secret(self, config):
''' setter for property secret '''
self._secret = config
@property
def serviceaccount(self):
''' property for serviceaccount '''
return self._serviceaccount
@serviceaccount.setter
def serviceaccount(self, config):
''' setter for property serviceaccount '''
self._serviceaccount = config
@property
def rolebinding(self):
''' property rolebinding '''
return self._rolebinding
@rolebinding.setter
def rolebinding(self, config):
''' setter for property rolebinding '''
self._rolebinding = config
def get_object_by_kind(self, kind):
'''return the current object kind by name'''
if re.match("^(dc|deploymentconfig)$", kind, flags=re.IGNORECASE):
return self.deploymentconfig
elif re.match("^(svc|service)$", kind, flags=re.IGNORECASE):
return self.service
elif re.match("^(sa|serviceaccount)$", kind, flags=re.IGNORECASE):
return self.serviceaccount
elif re.match("secret", kind, flags=re.IGNORECASE):
return self.secret
elif re.match("clusterrolebinding", kind, flags=re.IGNORECASE):
return self.rolebinding
return None
def get(self):
''' return the self.router_parts '''
self.service = None
self.deploymentconfig = None
self.serviceaccount = None
self.secret = None
self.rolebinding = None
for part in self.router_parts:
result = self._get(part['kind'], rname=part['name'])
if result['returncode'] == 0 and part['kind'] == 'dc':
self.deploymentconfig = DeploymentConfig(result['results'][0])
elif result['returncode'] == 0 and part['kind'] == 'svc':
self.service = Service(content=result['results'][0])
elif result['returncode'] == 0 and part['kind'] == 'sa':
self.serviceaccount = ServiceAccount(content=result['results'][0])
elif result['returncode'] == 0 and part['kind'] == 'secret':
self.secret = Secret(content=result['results'][0])
elif result['returncode'] == 0 and part['kind'] == 'clusterrolebinding':
self.rolebinding = RoleBinding(content=result['results'][0])
return {'deploymentconfig': self.deploymentconfig,
'service': self.service,
'serviceaccount': self.serviceaccount,
'secret': self.secret,
'clusterrolebinding': self.rolebinding,
}
def exists(self):
        '''return whether the dc, svc, secret and serviceaccount exist '''
if self.deploymentconfig and self.service and self.secret and self.serviceaccount:
return True
return False
def delete(self):
        '''delete all of the router parts '''
parts = []
for part in self.router_parts:
parts.append(self._delete(part['kind'], part['name']))
rval = 0
for part in parts:
if part['returncode'] != 0 and not 'already exist' in part['stderr']:
rval = part['returncode']
return {'returncode': rval, 'results': parts}
def add_modifications(self, deploymentconfig):
'''modify the deployment config'''
# We want modifications in the form of edits coming in from the module.
# Let's apply these here
edit_results = []
for edit in self.config.config_options['edits'].get('value', []):
if edit['action'] == 'put':
edit_results.append(deploymentconfig.put(edit['key'],
edit['value']))
if edit['action'] == 'update':
edit_results.append(deploymentconfig.update(edit['key'],
edit['value'],
edit.get('index', None),
edit.get('curr_value', None)))
if edit['action'] == 'append':
edit_results.append(deploymentconfig.append(edit['key'],
edit['value']))
if edit_results and not any([res[0] for res in edit_results]):
return None
return deploymentconfig
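    # Illustrative sketch (hypothetical keys, not from the upstream role): the
    # 'edits' option consumed above is a list of dicts such as
    #   [{'action': 'put',
    #     'key': 'spec.strategy.rollingParams.intervalSeconds',
    #     'value': 120},
    #    {'action': 'append',
    #     'key': 'spec.template.spec.containers[0].env',
    #     'value': {'name': 'ROUTER_LOG_LEVEL', 'value': 'debug'}}]
    # where 'put' overwrites a key, 'update' replaces a matching entry, and
    # 'append' adds to a list-valued key.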
# pylint: disable=too-many-branches
def _prepare_router(self):
'''prepare router for instantiation'''
# if cacert, key, and cert were passed, combine them into a pem file
if (self.config.config_options['cacert_file']['value'] and
self.config.config_options['cert_file']['value'] and
self.config.config_options['key_file']['value']):
router_pem = '/tmp/router.pem'
with open(router_pem, 'w') as rfd:
rfd.write(open(self.config.config_options['cert_file']['value']).read())
rfd.write(open(self.config.config_options['key_file']['value']).read())
if self.config.config_options['cacert_file']['value'] and \
os.path.exists(self.config.config_options['cacert_file']['value']):
rfd.write(open(self.config.config_options['cacert_file']['value']).read())
atexit.register(Utils.cleanup, [router_pem])
self.config.config_options['default_cert']['value'] = router_pem
elif self.config.config_options['default_cert']['value'] is None:
            # No certificate was passed to us. Do not pass one to oc adm router.
self.config.config_options['default_cert']['include'] = False
options = self.config.to_option_list()
cmd = ['router', self.config.name, '-n', self.config.namespace]
cmd.extend(options)
cmd.extend(['--dry-run=True', '-o', 'json'])
results = self.openshift_cmd(cmd, oadm=True, output=True, output_type='json')
# pylint: disable=maybe-no-member
if results['returncode'] != 0 or 'items' not in results['results']:
return results
oc_objects = {'DeploymentConfig': {'obj': None, 'path': None, 'update': False},
'Secret': {'obj': None, 'path': None, 'update': False},
'ServiceAccount': {'obj': None, 'path': None, 'update': False},
'ClusterRoleBinding': {'obj': None, 'path': None, 'update': False},
'Service': {'obj': None, 'path': None, 'update': False},
}
# pylint: disable=invalid-sequence-index
for res in results['results']['items']:
if res['kind'] == 'DeploymentConfig':
oc_objects['DeploymentConfig']['obj'] = DeploymentConfig(res)
elif res['kind'] == 'Service':
oc_objects['Service']['obj'] = Service(res)
elif res['kind'] == 'ServiceAccount':
oc_objects['ServiceAccount']['obj'] = ServiceAccount(res)
elif res['kind'] == 'Secret':
oc_objects['Secret']['obj'] = Secret(res)
elif res['kind'] == 'ClusterRoleBinding':
oc_objects['ClusterRoleBinding']['obj'] = RoleBinding(res)
# Currently only deploymentconfig needs updating
# Verify we got a deploymentconfig
if not oc_objects['DeploymentConfig']['obj']:
return results
        # apply any requested modifications (edits) to the deployment config
oc_objects['DeploymentConfig']['obj'] = self.add_modifications(oc_objects['DeploymentConfig']['obj'])
for oc_type, oc_data in oc_objects.items():
if oc_data['obj'] is not None:
oc_data['path'] = Utils.create_tmp_file_from_contents(oc_type, oc_data['obj'].yaml_dict)
return oc_objects
def create(self):
'''Create a router
This includes the different parts:
- deploymentconfig
- service
- serviceaccount
- secrets
- clusterrolebinding
'''
results = []
        # needs_update() has the side effect of marking which parts of
        # prepared_router require a replace below
        self.needs_update()
        import time
# pylint: disable=maybe-no-member
for kind, oc_data in self.prepared_router.items():
if oc_data['obj'] is not None:
time.sleep(1)
if self.get_object_by_kind(kind) is None:
results.append(self._create(oc_data['path']))
elif oc_data['update']:
results.append(self._replace(oc_data['path']))
rval = 0
for result in results:
if result['returncode'] != 0 and not 'already exist' in result['stderr']:
rval = result['returncode']
return {'returncode': rval, 'results': results}
def update(self):
'''run update for the router. This performs a replace'''
results = []
# pylint: disable=maybe-no-member
for _, oc_data in self.prepared_router.items():
if oc_data['update']:
results.append(self._replace(oc_data['path']))
rval = 0
for result in results:
if result['returncode'] != 0:
rval = result['returncode']
return {'returncode': rval, 'results': results}
# pylint: disable=too-many-return-statements,too-many-branches
def needs_update(self):
''' check to see if we need to update '''
# ServiceAccount:
# Need to determine changes from the pregenerated ones from the original
# Since these are auto generated, we can skip
skip = ['secrets', 'imagePullSecrets']
if self.serviceaccount is None or \
not Utils.check_def_equal(self.prepared_router['ServiceAccount']['obj'].yaml_dict,
self.serviceaccount.yaml_dict,
skip_keys=skip,
debug=self.verbose):
self.prepared_router['ServiceAccount']['update'] = True
# Secret:
# See if one was generated from our dry-run and verify it if needed
if self.prepared_router['Secret']['obj']:
if not self.secret:
self.prepared_router['Secret']['update'] = True
if self.secret is None or \
not Utils.check_def_equal(self.prepared_router['Secret']['obj'].yaml_dict,
self.secret.yaml_dict,
skip_keys=skip,
debug=self.verbose):
self.prepared_router['Secret']['update'] = True
# Service:
# Fix the ports to have protocol=TCP
for port in self.prepared_router['Service']['obj'].get('spec.ports'):
port['protocol'] = 'TCP'
skip = ['portalIP', 'clusterIP', 'sessionAffinity', 'type']
if self.service is None or \
not Utils.check_def_equal(self.prepared_router['Service']['obj'].yaml_dict,
self.service.yaml_dict,
skip_keys=skip,
debug=self.verbose):
self.prepared_router['Service']['update'] = True
# DeploymentConfig:
# Router needs some exceptions.
# We do not want to check the autogenerated password for stats admin
if self.deploymentconfig is not None:
if not self.config.config_options['stats_password']['value']:
for idx, env_var in enumerate(self.prepared_router['DeploymentConfig']['obj'].get(\
'spec.template.spec.containers[0].env') or []):
if env_var['name'] == 'STATS_PASSWORD':
env_var['value'] = \
self.deploymentconfig.get('spec.template.spec.containers[0].env[%s].value' % idx)
break
# dry-run doesn't add the protocol to the ports section. We will manually do that.
for idx, port in enumerate(self.prepared_router['DeploymentConfig']['obj'].get(\
'spec.template.spec.containers[0].ports') or []):
if not 'protocol' in port:
port['protocol'] = 'TCP'
# These are different when generating
skip = ['dnsPolicy',
'terminationGracePeriodSeconds',
'restartPolicy', 'timeoutSeconds',
'livenessProbe', 'readinessProbe',
'terminationMessagePath', 'hostPort',
'defaultMode',
]
if self.deploymentconfig is None or \
not Utils.check_def_equal(self.prepared_router['DeploymentConfig']['obj'].yaml_dict,
self.deploymentconfig.yaml_dict,
skip_keys=skip,
debug=self.verbose):
self.prepared_router['DeploymentConfig']['update'] = True
# Check if any of the parts need updating, if so, return True
# else, no need to update
# pylint: disable=no-member
return any([self.prepared_router[oc_type]['update'] for oc_type in self.prepared_router.keys()])
@staticmethod
def run_ansible(params, check_mode):
'''run ansible idempotent code'''
rconfig = RouterConfig(params['name'],
params['namespace'],
params['kubeconfig'],
{'default_cert': {'value': params['default_cert'], 'include': True},
'cert_file': {'value': params['cert_file'], 'include': False},
'key_file': {'value': params['key_file'], 'include': False},
'images': {'value': params['images'], 'include': True},
'latest_images': {'value': params['latest_images'], 'include': True},
'labels': {'value': params['labels'], 'include': True},
'ports': {'value': ','.join(params['ports']), 'include': True},
'replicas': {'value': params['replicas'], 'include': True},
'selector': {'value': params['selector'], 'include': True},
'service_account': {'value': params['service_account'], 'include': True},
'router_type': {'value': params['router_type'], 'include': False},
'host_network': {'value': params['host_network'], 'include': True},
'external_host': {'value': params['external_host'], 'include': True},
'external_host_vserver': {'value': params['external_host_vserver'],
'include': True},
'external_host_insecure': {'value': params['external_host_insecure'],
'include': True},
'external_host_partition_path': {'value': params['external_host_partition_path'],
'include': True},
'external_host_username': {'value': params['external_host_username'],
'include': True},
'external_host_password': {'value': params['external_host_password'],
'include': True},
'external_host_private_key': {'value': params['external_host_private_key'],
'include': True},
'expose_metrics': {'value': params['expose_metrics'], 'include': True},
'metrics_image': {'value': params['metrics_image'], 'include': True},
'stats_user': {'value': params['stats_user'], 'include': True},
'stats_password': {'value': params['stats_password'], 'include': True},
'stats_port': {'value': params['stats_port'], 'include': True},
# extra
'cacert_file': {'value': params['cacert_file'], 'include': False},
# edits
'edits': {'value': params['edits'], 'include': False},
})
state = params['state']
ocrouter = Router(rconfig, verbose=params['debug'])
api_rval = ocrouter.get()
########
# get
########
if state == 'list':
return {'changed': False, 'results': api_rval, 'state': state}
########
# Delete
########
if state == 'absent':
if not ocrouter.exists():
return {'changed': False, 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
# In case of delete we return a list of each object
# that represents a router and its result in a list
# pylint: disable=redefined-variable-type
api_rval = ocrouter.delete()
return {'changed': True, 'results': api_rval, 'state': state}
if state == 'present':
########
# Create
########
if not ocrouter.exists():
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
api_rval = ocrouter.create()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
########
# Update
########
if not ocrouter.needs_update():
return {'changed': False, 'state': state}
if check_mode:
return {'changed': False, 'msg': 'CHECK_MODE: Would have performed an update.'}
api_rval = ocrouter.update()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
| brenton/openshift-ansible | roles/lib_openshift/src/class/oc_adm_router.py | Python | apache-2.0 | 21,561 |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_data_labels11.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'pie'})
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({
'values': '=Sheet1!$A$1:$A$5',
'data_labels': {'value': 1, 'leader_lines': 1, 'position': 'best_fit'},
})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| jvrsantacruz/XlsxWriter | xlsxwriter/test/comparison/test_chart_data_labels11.py | Python | bsd-2-clause | 1,503 |
from sqlalchemy import Column, Integer, String, ForeignKey, Table, create_engine, MetaData, Date, DateTime, Float, Boolean
from sqlalchemy.orm import relationship, backref, scoped_session, sessionmaker, relation
from sqlalchemy.ext.declarative import declarative_base
import sqlalchemy
Base = declarative_base()
##########################################################################################
#
# Lookup Tables
#
##########################################################################################
##########################################################################################
#
# Primary Tables
#
##########################################################################################
class BT_Stocks(Base):
__tablename__ = 'stocks'
id = Column(Integer, primary_key = True)
Symbol = Column(String(16)) # Have to support null to start...
Date = Column(DateTime)
Open = Column(Float)
High = Column(Float)
Low = Column(Float)
Close = Column(Float)
Volume = Column(Integer)
creation_date = Column(DateTime)
last_update = Column(DateTime)
# end of BT_Stocks
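# Illustrative usage sketch (assumption, not part of the original file): the
# SQLite URL and sample values below are placeholders.
#
#   from datetime import datetime
#   engine = create_engine('sqlite:///stocks.db')
#   Base.metadata.create_all(engine)
#   Session = sessionmaker(bind=engine)
#   session = Session()
#   session.add(BT_Stocks(Symbol='SPY', Date=datetime(2016, 1, 4),
#                         Open=200.49, High=201.03, Low=198.59,
#                         Close=201.02, Volume=222353500))
#   session.commit()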
| jay-johnson/sci-pype | src/databases/schema/db_schema_stocks.py | Python | apache-2.0 | 1,377 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2010-2013 Elico Corp. All Rights Reserved.
# Author: LIN Yu <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
import openerp.addons.decimal_precision as dp
import time
from datetime import datetime
import pytz
import os,glob
import csv,xlwt
from xlsxwriter.workbook import Workbook
import shutil
import base64
from tools.translate import _
import logging
_logger = logging.getLogger(__name__)
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
class stock_move_report_wizard(osv.osv_memory):
_name = 'stock.move.report.wizard'
_description = 'Stock Move Report Wizard'
_columns = {
'start_date': fields.datetime('Start Date'),
'end_date': fields.datetime('End Date'),
        'type': fields.selection([('in','In'),('out','Out'),('internal','Internal'),('scrap','Scrap'),('consumption','Consumption'),('production','Production'),('all','All')],string='Type',required=True),
}
_defaults = {
'start_date': lambda *a: time.strftime('%Y-%m-%d 16:00:00'),
'end_date': lambda *a: time.strftime('%Y-%m-%d 15:59:59'),
'type': 'in',
}
def generate_report(self, cr, uid, ids, context=None):
if context is None:
context = {}
data = self.browse(cr, uid, ids, context=context)[0]
context['start_date'] = data.start_date
context['end_date'] = data.end_date
context['type'] = data.type
pi_obj = self.pool.get('stock.move.report')
pi_obj.generate_report(cr, uid, context)
mod_obj = self.pool.get('ir.model.data')
res = mod_obj.get_object_reference(cr, uid, 'move_reports', 'view_move_report_tree')
        res_id = res and res[1] or False
return {
'name': _('Stock Move Report'),
'view_type': 'form',
'view_mode': 'tree',
'view_id': res_id,
'res_model': 'stock.move.report',
'context': "{}",
'type': 'ir.actions.act_window',
'target': 'current',
'res_id': False,
}
stock_move_report_wizard()
class stock_move_report(osv.osv):
_name = 'stock.move.report'
_description = 'Stock Move Report'
_rec_name = "move_id"
_order = 'date desc'
_create_sql = """
INSERT INTO stock_move_report
(
create_uid,
write_uid,
create_date,
write_date,
move_id,
date,
date_expected,
origin,
picking_id,
picking_name,
type,
pick_return,
partner_ref,
partner_id,
partner_name,
stock_type_id,
stock_type_name,
category_id,
category_name,
product_sku,
product_id,
product_name,
move_qty,
product_qty,
uom_id,
uom_name,
product_uom_name,
uom_factor,
product_price,
price_unit,
cost_total,
po_price,
amount_total,
loc_name,
loc_dest_name,
return_reason
)
SELECT %d, %d, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
m.id as move_id, m.date, m.date_expected, m.origin,
p.id as picking_id, p.name as picking_name,
p.type as type, p.return as pick_return,
rp.ref as partner_ref, rp.id as partner_id, rp.name as partner_name,
st.id as stock_type_id , st.name as stock_type_name,
c.id as category_id, cp.name || ' / ' || c.name as category_name,
m.product_code as product_sku, pp.id as product_id, pt.name as product_name,
m.product_qty as move_qty, m.product_qty * pu.factor / u.factor as product_qty,
        u.id as uom_id, u.name as uom_name, pu.name as product_uom_name, pu.factor / u.factor as uom_factor,
m.price_unit / pu.factor * u.factor as product_price, m.price_unit as price_unit, round(m.product_qty * pu.factor / u.factor*m.price_unit, 4) as cost_total,
m.po_price as po_price, m.amount_total as amount_total,
sl.complete_name as location_name,
sld.complete_name as location_dest_name,
srr.code as return_reason
from stock_move m
left join stock_picking p on p.id = m.picking_id
left join product_product pp on pp.id = m.product_id
left join product_template pt on pt.id = pp.product_tmpl_id
left join product_category c on c.id = pt.categ_id
left join product_category cp on cp.id = c.parent_id
left join product_stock_type st on st.id = pt.stock_type_id
left join product_uom u on u.id = m.product_uom
left join product_uom pu on pu.id = pt.uom_id
left join res_partner rp on rp.id = m.partner_id
left join stock_location sl on sl.id = m.location_id
left join stock_location sld on sld.id = m.location_dest_id
left join stock_return_reason srr on srr.id = m.return_reason_id
where %s
order by m.id
"""#uid,uid,domain
_reverse_sql = """
INSERT INTO stock_move_report
(
create_uid,
write_uid,
create_date,
write_date,
move_id,
date,
date_expected,
origin,
picking_id,
picking_name,
type,
pick_return,
partner_ref,
partner_id,
partner_name,
stock_type_id,
stock_type_name,
category_id,
category_name,
product_sku,
product_id,
product_name,
move_qty,
product_qty,
uom_id,
uom_name,
product_uom_name,
uom_factor,
product_price,
price_unit,
cost_total,
po_price,
amount_total,
loc_name,
loc_dest_name,
return_reason
)
SELECT %d, %d, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
m.id as move_id, m.date, m.date_expected, m.origin,
p.id as picking_id, p.name as picking_name,
p.type as type, p.return as pick_return,
rp.ref as partner_ref, rp.id as partner_id, rp.name as partner_name,
st.id as stock_type_id , st.name as stock_type_name,
c.id as category_id, cp.name || ' / ' || c.name as category_name,
m.product_code as product_sku, pp.id as product_id, pt.name as product_name,
        -m.product_qty as move_qty, -m.product_qty * pu.factor / u.factor as product_qty,
        u.id as uom_id, u.name as uom_name, pu.name as product_uom_name, pu.factor / u.factor as uom_factor,
m.price_unit / pu.factor * u.factor as product_price, m.price_unit as price_unit, round(-m.product_qty * pu.factor / u.factor *m.price_unit,4) as cost_total,
m.po_price as po_price, m.amount_total as amount_total,
sl.complete_name as location_name,
sld.complete_name as location_dest_name,
srr.code as return_reason
from stock_move m
left join stock_picking p on p.id = m.picking_id
left join product_product pp on pp.id = m.product_id
left join product_template pt on pt.id = pp.product_tmpl_id
left join product_category c on c.id = pt.categ_id
left join product_category cp on cp.id = c.parent_id
left join product_stock_type st on st.id = pt.stock_type_id
left join product_uom u on u.id = m.product_uom
LEFT JOIN product_uom pu ON (pt.uom_id=pu.id)
left join res_partner rp on rp.id = m.partner_id
left join stock_location sl on sl.id = m.location_id
left join stock_location sld on sld.id = m.location_dest_id
left join stock_return_reason srr on srr.id = m.return_reason_id
WHERE %s
ORDER BY m.id;
"""#uid,uid,domain
_in_header = " date, origin, picking_name, type, pick_return, partner_ref, partner_name, stock_type_name, category_name, product_sku, product_name, move_qty, product_qty, uom_name, product_uom_name, product_price, po_price, amount_total, loc_name, loc_dest_name, return_reason"
_out_header = " date, origin, picking_name, type, pick_return, partner_ref, partner_name, stock_type_name, category_name, product_sku, product_name, move_qty, product_qty, uom_name, product_uom_name, uom_factor, product_price, price_unit, cost_total, loc_name, loc_dest_name, return_reason"
_read_header = " date, origin, picking_name, type, pick_return, partner_ref, partner_name, stock_type_name, category_name, product_sku, product_name, move_qty, product_qty, uom_name, product_uom_name, uom_factor, product_price, price_unit, cost_total, loc_name, loc_dest_name, return_reason"
_read_sql = """
SELECT %s FROM stock_move_report;
"""
def _get_table_data(self, cr, uid, type, context=None):
#print "==============#LY %s"% self._read_sql
if type == "in":
header = self._in_header
elif type== "out":
header = self._out_header
else:
header = self._read_header
sql = self._read_sql%header
cr.execute(sql)
content = cr.fetchall()
header = header.split(',')
return header, content
def _get_warehouse_group(self, cr, uid, name="Warehouse"):
group_ids = self.pool.get('mail.group').search(cr, uid, [('name', 'ilike', name)])
return group_ids and group_ids[0] or False
def _prepare_filter(self, cr, uid, context=None):
if not context:
context={}
start_date = context.get('start_date','2013-03-31 16:00:00') #timezone 'CST' to 'UTC'
end_date = context.get('end_date','2013-04-30 15:59:59')# timezone 'CST' to 'UTC'
type = context.get('type','in')
res="""
m.date >= '%s'
and m.date <= '%s'
and m.state = 'done'
"""%(start_date, end_date)
if type == 'in':
res += """AND (p.type = 'in' and p.return='none')
"""
elif type == 'out':
res += """AND (p.type = 'out' and p.return='none')
"""
elif type == 'internal':
res += """and p.type = 'internal'
"""
elif type == 'scrap':
res += """AND m.location_id IN (SELECT id FROM stock_location WHERE usage='inventory')
AND m.location_dest_id NOT IN (SELECT id FROM stock_location WHERE usage='inventory')
"""
elif type == 'consumption':
res += """AND m.production_id IS NULL
AND m.location_id NOT IN (SELECT id FROM stock_location WHERE usage='production')
AND m.location_dest_id IN (SELECT id FROM stock_location WHERE usage='production')
"""
elif type == 'production':
res += """AND m.production_id IS NOT NULL
AND m.location_id IN (SELECT id FROM stock_location WHERE usage='production')
AND m.location_dest_id NOT IN (SELECT id FROM stock_location WHERE usage='production')
"""
return res
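    # For illustration (dates are placeholders): with type='in' the filter
    # built above expands to roughly
    #   m.date >= '2013-03-31 16:00:00'
    #   and m.date <= '2013-04-30 15:59:59'
    #   and m.state = 'done'
    #   AND (p.type = 'in' and p.return='none')
    # i.e. only finished incoming moves whose picking is not flagged as a return.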
def _reverse_filter(self, cr, uid, context=None):
if not context:
context={}
start_date = context.get('start_date','2013-03-31 16:00:00') #timezone 'CST' to 'UTC'
end_date = context.get('end_date','2013-04-30 15:59:59')# timezone 'CST' to 'UTC'
type = context.get('type','in')
res="""
m.date >= '%s'
AND m.date <= '%s'
AND m.state = 'done'
"""%(start_date, end_date)
if type == 'in':
res += """AND (p.type = 'out' AND p.return='supplier')
"""
elif type == 'out':
res += """AND (p.type = 'in' AND p.return='customer')
"""
elif type == 'scrap':
res += """AND m.location_id NOT IN (SELECT id FROM stock_location WHERE usage='inventory')
AND m.location_dest_id IN (SELECT id FROM stock_location WHERE usage='inventory')
"""
# elif type == 'consumption':
# res += """AND m.production_id IS NULL
# AND m.location_id NOT IN (SELECT id FROM stock_location WHERE usage='production')
# AND m.location_dest_id IN (SELECT id FROM stock_location WHERE usage='production')
# """
# elif type == 'production':
# res += """AND m.production_id IS NOT NULL
# AND m.location_id NOT IN (SELECT id FROM stock_location WHERE usage='production')
# AND m.location_dest_id IN (SELECT id FROM stock_location WHERE usage='production')
# """
else:
res="False"
return res
def _create_message(self, cr, uid, attachment_ids=None,context=None):
mess_pool = self.pool.get('mail.message')
partner_ids = [uid]
if uid != 1:
partner_ids.append(1)
tz = pytz.timezone(context.get('tz','Asia/Shanghai'))
tznow = pytz.utc.localize(datetime.now()).astimezone(tz).strftime('%Y-%m-%d %H:%M:%S')
message_id = mess_pool.create(cr, uid, {
'type': 'notification',
'partner_ids': partner_ids,
'subject': 'Your Move Report has been generated %s'%tznow,
'body': 'Your Move Report has been generated %s'%tznow,
'subtype_id': 1,
'res_id': self._get_warehouse_group(cr, uid),
'model': 'mail.group',
'record_name': 'Stock Move Report',
'attachment_ids': attachment_ids
})
mess_pool.set_message_starred(cr, uid, [message_id], True)
return message_id
_columns = {
'move_id': fields.many2one('stock.move', 'Stock Move', required=True),
'date_expected': fields.datetime('Date Expected'),
'date': fields.datetime('Date'),
'origin': fields.char('Origin', size=32),
'picking_id': fields.many2one('stock.picking', 'Stock Picking'),
'picking_name': fields.char('Picking Name', size=64),
'type': fields.char('Type', size=16),
'pick_return': fields.char('Return', size=16),
'return_reason': fields.char('Return Reason', size=16),
'partner_ref': fields.char('Partner Ref', size=16),
'partner_name': fields.char('Partner Name', size=128),
'partner_id': fields.many2one('res.partner', 'Partner'),
'stock_type_id': fields.many2one('product.stock_type',string='Stock Type'),
'stock_type_name': fields.char('Stock Type Name', size=128),
'category_id': fields.many2one('product.category',string='Category'),
'category_name': fields.char('Category Name', size=128),
'product_sku': fields.char('SKU', size=16),
'product_name': fields.char('Product Name', size=1024),
'product_id': fields.many2one('product.product', 'Product'),
'move_qty': fields.float("Move Quantity", digits_compute=dp.get_precision('Product Unit of Measure')),
'product_qty': fields.float("Product Quantity", digits_compute=dp.get_precision('Product Unit of Measure')),
'uom_id': fields.many2one('product.uom',string='UoM'),
'uom_factor': fields.float('Uom Ratio' ,digits=(12, 12),
help='How much bigger or smaller this unit is compared to the reference Unit of Measure for this category:\n'\
'1 * (reference unit) = ratio * (this unit)'),
'uom_name': fields.char('UoM Name', size=32),
'product_uom_name': fields.char('Product UoM', size=32),
'loc_name': fields.char('Source Location Name', size=256),
'loc_dest_name': fields.char('Dest Location Name', size=256),
'po_price': fields.float("PO Price", digits_compute=dp.get_precision('Account')),
'product_price': fields.float("Product Price", digits_compute=dp.get_precision('Account')),
'price_unit': fields.float("Price Unit", digits_compute=dp.get_precision('Account')),
'amount_total': fields.float("Purchase total", digits_compute=dp.get_precision('Account')),
'cost_total': fields.float("Cost Total", digits_compute=dp.get_precision('Account')),
}
_defaults = {
}
def generate_report(self, cr, uid, context=None):
cr.execute("""TRUNCATE TABLE stock_move_report""")
filter = self._prepare_filter(cr, uid, context)
#create sql
sql = self._create_sql%(uid, uid, filter)
cr.execute(sql)
#reverse sql
type = context.get('type','in')
if type not in ('consumption','production'):
filter = self._reverse_filter(cr, uid, context)
sql = self._reverse_sql%(uid, uid, filter)
if sql:
cr.execute(sql)
        # create folder
if not os.path.exists('/tmp/oe-report/'):
os.mkdir('/tmp/oe-report')
filelist = glob.glob("/tmp/oe-report/*.xlsx")
for f in filelist:
os.remove(f)
os.chmod('/tmp/oe-report',0777)#check rights
#TODO
header, content = self._get_table_data(cr, uid, type, context)
csv_file = '/tmp/stock.move.report.csv'
with open(csv_file, "wb") as f:
fileWriter = csv.writer(f, delimiter=',',quotechar='"', quoting=csv.QUOTE_MINIMAL)
fileWriter.writerow(header)
fileWriter.writerows(content)
#cr.execute("COPY stock_move_in_report TO '/tmp/oe-report/stock.move.report.csv' WITH CSV HEADER NULL AS '' DELIMITER ';'")
#create message
message_id = self._create_message(cr, uid,context=context)
attachment_pool = self.pool.get('ir.attachment')
def convert_time(time):
tz = pytz.timezone('Asia/Shanghai')
time = pytz.utc.localize(datetime.strptime(time,'%Y-%m-%d %H:%M:%S')).astimezone(tz).strftime('%Y-%m-%d %H:%M:%S')
return time
period = "%s~%s"%(convert_time(context.get('start_date','2013-03-31 16:00:00')),convert_time(context.get('end_date','2013-03-31 16:00:00')))
xlsfile = '/tmp/oe-report/stock.move.report.%s[%s].xlsx'%(type,period)
#print xlsfile
w = Workbook(xlsfile)
ws = w.add_worksheet('Stock Moves')
ufile = open(csv_file,'r')
spamreader = csv.reader(ufile, delimiter=',', quotechar='"')
#line = 0
for rowx, row in enumerate(spamreader):
for colx, cell in enumerate(row):
ws.write(rowx, colx, unicode(cell, 'utf-8'))
# for row in spamreader:
# print ', '.join(row)
# col=0
# for cell in row:
# ws.write(line,col,unicode(cell, 'utf-8'))
# col += 1
# line +=1
w.close()
shutil.make_archive("/tmp/stock_move_report_%s[%s]"%(type,period), "zip", "/tmp/oe-report")
zipfile = open('/tmp/stock_move_report_%s[%s].zip'%(type,period),'r')
attachment_id = attachment_pool.create(cr, uid, {
'name': "stock.move.report.%s[%s].zip"%(type,period),
'datas': base64.encodestring(zipfile.read()),
'datas_fname': "stock.move.report.%s[%s].zip"%(type,period),
'res_model': 'mail.message',
'res_id': message_id,
})
cr.execute("""
INSERT INTO message_attachment_rel(
message_id, attachment_id)
VALUES (%s, %s);
""", (message_id, attachment_id))
return True
stock_move_report()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| udayinfy/openerp-7.0 | move_reports/stock_move_report/stock_move_report.py | Python | agpl-3.0 | 23,230 |
# #
# Copyright 2009-2013 Ghent University
#
# This file is part of hanythingondemand
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/hanythingondemand
#
# hanythingondemand is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# hanythingondemand is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with hanythingondemand. If not, see <http://www.gnu.org/licenses/>.
# #
"""
@author: Ewan Higgs (Ghent University)
"""
import os
from ConfigParser import NoOptionError, NoSectionError, SafeConfigParser
from collections import Mapping, namedtuple
from copy import deepcopy
from os.path import join as mkpath, dirname, realpath
from pkg_resources import Requirement, resource_filename, resource_listdir
import hod
from hod.node.node import Node
from hod.commands.command import COMMAND_TIMEOUT
import hod.config.template as hct
from vsc.utils import fancylogger
_log = fancylogger.getLogger(fname=False)
# hod manifest config sections
_META_SECTION = 'Meta'
_CONFIG_SECTION = 'Config'
# serviceaconfig sections
_UNIT_SECTION = 'Unit'
_SERVICE_SECTION = 'Service'
_ENVIRONMENT_SECTION = 'Environment'
RUNS_ON_MASTER = 0x1
RUNS_ON_SLAVE = 0x2
RUNS_ON_ALL = RUNS_ON_MASTER | RUNS_ON_SLAVE
HOD_ETC_DIR = os.path.join('etc', 'hod')
def load_service_config(fileobj):
'''
Load a .ini style config for a service.
'''
config = SafeConfigParser()
# optionxform = Option Transform; using str stops making it lower case.
config.optionxform = str
config.readfp(fileobj)
return config
def _abspath(filepath, working_dir):
'''
Take a filepath and working_dir and return the absolute path for the
filepath. If the filepath is already absolute then just return it.
    >>> _abspath('somedir/file', '/tmp')
    '/tmp/somedir/file'
    >>> _abspath('', '/tmp')
    '/tmp'
    >>> _abspath('/not-tmp/somedir/file', '/tmp')
    '/not-tmp/somedir/file'
'''
if not len(filepath):
return realpath(working_dir)
elif filepath[0] == '/': # filepath is already absolute
return filepath
return realpath(mkpath(working_dir, filepath))
def _fileobj_dir(fileobj):
'''
Return the directory of the fileobj if it exists. If it's a file-like
object (e.g. StringIO) just return a blank string.
'''
if hasattr(fileobj, 'name'):
return dirname(fileobj.name)
return ''
def _parse_runs_on(s):
'''Returns the relevant constant depending on the string argument'''
options = dict(master=RUNS_ON_MASTER, slave=RUNS_ON_SLAVE, all=RUNS_ON_ALL)
return options[s.lower()]
def _cfgget(config, section, item, dflt=None, **kwargs):
'''
Get a value from a ConfigParser object or a default if it's not there.
Options in kwargs override the config.
'''
if kwargs.get(item, None) is not None:
return kwargs[item]
if dflt is None:
return config.get(section, item)
try:
return config.get(section, item)
except (NoSectionError, NoOptionError):
return dflt
def parse_comma_delim_list(s):
'''
Convert a string containing a comma delimited list into a list of strings
with no spaces on the end or beginning.
Blanks are also removed. e.g. 'a,,b' results in ['a', 'b']
'''
return [tok.strip() for tok in [el for el in s.split(',') if el.strip()]]
class PreServiceConfigOpts(object):
r"""
Manifest file for the group of services responsible for defining service
level configs which need to be run through the template before any services
can begin.
aka hod.conf or hodconf.
"""
__slots__ = ['version', 'workdir', 'config_writer', 'directories',
'autogen', 'modules', 'service_configs', 'service_files',
'master_env', '_hodconfdir'
]
OPTIONAL_FIELDS = ['master_env', 'modules', 'service_configs', 'directories', 'autogen']
@staticmethod
def from_file_list(filenames, **kwargs):
"""Create and merge PreServiceConfigOpts from a list of filenames."""
precfgs = [PreServiceConfigOpts(open(f, 'r'), **kwargs) for f in filenames]
precfg = reduce(merge, precfgs)
bad_fields = invalid_fields(precfg)
if bad_fields:
raise RuntimeError("A valid configuration could not be generated from the files: %s: missing fields: %s" %
(filenames, bad_fields))
return precfg
def __init__(self, fileobj, **kwargs):
_config = load_service_config(fileobj)
self.version = _cfgget(_config, _META_SECTION, 'version', '', **kwargs)
self.workdir = _cfgget(_config, _CONFIG_SECTION, 'workdir', '', **kwargs)
self._hodconfdir = _fileobj_dir(fileobj)
def _fixup_path(cfg):
return _abspath(cfg, self._hodconfdir)
def _get_list(name):
'''
With lists, we don't want to overwrite the value with kwargs.
            Merely append.
'''
lst = parse_comma_delim_list(_cfgget(_config, _CONFIG_SECTION, name, ''))
if kwargs.get(name, None) is not None:
lst.extend(parse_comma_delim_list(kwargs[name]))
return lst
self.modules = _get_list('modules')
self.master_env = _get_list('master_env')
self.service_files = _get_list('services')
self.service_files = [_fixup_path(cfg) for cfg in self.service_files]
self.directories = _get_list('directories')
self.config_writer = _cfgget(_config, _CONFIG_SECTION, 'config_writer', '')
self.service_configs = _collect_configs(_config)
self.autogen = parse_comma_delim_list(_cfgget(_config, _CONFIG_SECTION, 'autogen', ''))
@property
def localworkdir(self):
return hct.mklocalworkdir(self.workdir)
@property
def configdir(self):
return mkpath(self.localworkdir, 'conf')
@property
def hodconfdir(self):
return self._hodconfdir
def autogen_configs(self):
'''
Lazily generate the missing configurations as a convenience to
users.
This should only be run when processing the config file while the job is
being run (e.g. from hod.subcommands.local.LocalApplication). e.g. If
workdir is a directory on a file system that is not accessible from the
login node then we can't process this information from the login node.
'''
node = Node()
node_info = node.go()
_log.debug('Collected Node information: %s', node_info)
for autocfg in self.autogen:
fn = autogen_fn(autocfg)
new_configs = fn(self.workdir, node_info)
for cfgname in new_configs.keys():
if cfgname in self.service_configs:
new_configs[cfgname].update(self.service_configs[cfgname])
self.service_configs = new_configs
def __str__(self):
return 'PreServiceConfigOpts(version=%s, workdir=%s, modules=%s, ' \
'master_env=%s, service_files=%s, directories=%s, ' \
'config_writer=%s, service_configs=%s)' % (self.version,
self.workdir, self.modules, self.master_env,
self.service_files, self.directories,
self.config_writer, self.service_configs)
def merge(lhs, rhs):
"""
Merge two objects of the same type based on their __slot__ list. This
returns a fresh object and the originals should not be replaced.
Rules:
List types are concatenated.
Dict types are merged using a deep merge.
String types are overwritten.
"""
if type(lhs) != type(rhs):
raise RuntimeError('merge can only use two of the same type')
def _update(a, b):
for k, v in b.iteritems():
if isinstance(v, Mapping):
c = _update(a.get(k, dict()), v)
a[k] = c
else:
a[k] = b[k]
return a
lhs = deepcopy(lhs)
for attr in lhs.__slots__:
_lhs = getattr(lhs, attr)
_rhs = getattr(rhs, attr)
# cat lists
if isinstance(_lhs, list):
_lhs += _rhs
# update dicts
elif isinstance(_lhs, Mapping):
_lhs = _update(_lhs, _rhs)
        # replace strings (use setattr: rebinding the local name alone would
        # leave the copied object unchanged)
        elif isinstance(_lhs, basestring) and _rhs:
            setattr(lhs, attr, _rhs)
return lhs
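# Illustrative sketch (hypothetical class with __slots__, not part of the
# original module) of the merge rules documented above.
#
#   class Opts(object):
#       __slots__ = ['modules', 'service_configs', 'workdir']
#       def __init__(self, modules, service_configs, workdir):
#           self.modules = modules
#           self.service_configs = service_configs
#           self.workdir = workdir
#
#   a = Opts(['Java'], {'core-site.xml': {'a': '1'}}, '/tmp/a')
#   b = Opts(['Hadoop'], {'core-site.xml': {'b': '2'}}, '/tmp/b')
#   c = merge(a, b)
#   c.modules          # ['Java', 'Hadoop']                      (lists concatenated)
#   c.service_configs  # {'core-site.xml': {'a': '1', 'b': '2'}} (dicts deep-merged)
#   c.workdir          # '/tmp/b'                                (string overwritten)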
def invalid_fields(obj):
"""Return list of fields which are empty."""
bad_fields = []
for attr in obj.__slots__:
if not (attr in obj.OPTIONAL_FIELDS or getattr(obj, attr) or
attr.startswith('_')):
bad_fields.append(attr)
return bad_fields
def _collect_configs(config):
"""Convert sections into dicts of options"""
service_configs = dict()
for section in [s for s in config.sections() if s not in [_META_SECTION, _CONFIG_SECTION]]:
option_dict = dict()
options = config.options(section)
for option in options:
option_dict[option] = config.get(section, option)
service_configs[section] = option_dict
return service_configs
def env2str(env):
'''
Take a dict of environment variable names mapped to their values and
convert it to a string that can be used to prepend a command.
'''
envstr = ''
for k, v in sorted(env.items()):
envstr += '%s="%s" ' % (k, v)
return envstr
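# For illustration: env2str({'JAVA_HOME': '/usr/lib/jvm', 'HADOOP_HOME': '/opt/hadoop'})
# returns 'HADOOP_HOME="/opt/hadoop" JAVA_HOME="/usr/lib/jvm" ' -- keys sorted,
# trailing space kept -- ready to prepend to a shell command.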
class ConfigOpts(object):
r"""
Wrapper for the service configuration.
Each of the config values can have a $variable which will be replaces
by the value in the template strings except 'name'. Name cannot be
templated.
Some of the slots are computed on call so that they can run on the Slave
nodes as opposed to the Master nodes.
"""
@staticmethod
def from_file(fileobj, template_resolver):
"""Load a ConfigOpts from a configuration file."""
config = load_service_config(fileobj)
name = _cfgget(config, _UNIT_SECTION, 'Name')
runs_on = _parse_runs_on(_cfgget(config, _UNIT_SECTION, 'RunsOn'))
pre_start_script = _cfgget(config, _SERVICE_SECTION, 'ExecStartPre', '')
start_script = _cfgget(config, _SERVICE_SECTION, 'ExecStart')
stop_script = _cfgget(config, _SERVICE_SECTION, 'ExecStop')
env = dict(config.items(_ENVIRONMENT_SECTION))
return ConfigOpts(name, runs_on, pre_start_script, start_script, stop_script, env, template_resolver)
def to_params(self, workdir, modules, master_template_args):
"""Create a ConfigOptsParams object from the ConfigOpts instance"""
return ConfigOptsParams(self.name, self._runs_on, self._pre_start_script, self._start_script,
self._stop_script, self._env, workdir, modules, master_template_args, self.timeout)
@staticmethod
def from_params(params, template_resolver):
"""Create a ConfigOpts instance from a ConfigOptsParams instance"""
return ConfigOpts(params.name, params.runs_on, params.pre_start_script, params.start_script,
params.stop_script, params.env, template_resolver, params.timeout)
def __init__(self, name, runs_on, pre_start_script, start_script, stop_script, env, template_resolver,
timeout=COMMAND_TIMEOUT):
self.name = name
self._runs_on = runs_on
self._tr = template_resolver
self._pre_start_script = pre_start_script
self._start_script = start_script
self._stop_script = stop_script
self._env = env
self.timeout = timeout
@property
def pre_start_script(self):
return self._tr(self._pre_start_script)
@property
def start_script(self):
return self._tr(self._start_script)
@property
def stop_script(self):
return self._tr(self._stop_script)
@property
def workdir(self):
return self._tr.workdir
@property
def localworkdir(self):
return hct.mklocalworkdir(self._tr.workdir)
@property
def configdir(self):
return mkpath(self.localworkdir, 'conf')
@property
def env(self):
return dict([(k, self._tr(v)) for k, v in self._env.items()])
def __str__(self):
return 'ConfigOpts(name=%s, runs_on=%d, pre_start_script=%s, ' \
'start_script=%s, stop_script=%s, workdir=%s, localworkdir=%s)' % (self.name,
self._runs_on, self.pre_start_script, self.start_script,
self.stop_script, self.workdir, self.localworkdir)
def __repr__(self):
return 'ConfigOpts(name=%s, runs_on=%d)' % (self.name, self._runs_on)
def __getstate__(self):
return self.__dict__
def __setstate__(self, d):
self.__dict__.update(d)
def runs_on(self, masterrank, ranks):
'''
Given the master rank and all ranks, return a list of the ranks this
service will run on.
'''
if self._runs_on == RUNS_ON_MASTER:
return [masterrank]
elif self._runs_on == RUNS_ON_SLAVE:
return [x for x in ranks if x != masterrank]
elif self._runs_on == RUNS_ON_ALL:
return ranks
else:
raise ValueError('ConfigOpts.runs_on has invalid value: %s' % self._runs_on)
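# For illustration: with masterrank=0 and ranks=[0, 1, 2, 3], a service with
# RunsOn=master runs on [0], RunsOn=slave on [1, 2, 3], and RunsOn=all on all four.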
# Parameters to send over the network to allow slaves to construct hod.config.ConfigOpts
# objects
ConfigOptsParams = namedtuple('ConfigOptsParams', [
'name',
'runs_on',
'pre_start_script',
'start_script',
'stop_script',
'env',
'workdir',
'modules',
'master_template_kwargs',
'timeout',
])
def autogen_fn(name):
"""
Given a product name (hadoop, hdfs, etc), generate default configuration
parameters for things that haven't been defined yet.
Params
------
name : `str`
Product name.
Returns
-------
Function taking a working directory (for detecting block sizes and so on)
and a dict of config settings.
"""
module = __import__('hod.config.autogen.%s' % name, fromlist=['hod.config.autogen'])
return getattr(module, 'autogen_config')
def service_config_fn(policy_path):
"""
Given a module string ending in a function name, return the relevant
function.
Params
------
policy_path : `str`
Dotted string path of module. e.g. 'hod.config.write_policy.hadoop_xml'
Returns
-------
function taking dict and TemplateResolver
"""
policy_path_list = policy_path.split('.')
module_name = '.'.join(policy_path_list[:-1])
parent_pkg = '.'.join(policy_path_list[:-2])
fn = policy_path_list[-1]
module = __import__(module_name, fromlist=[parent_pkg])
return getattr(module, fn)
def write_service_config(outfile, data_dict, config_writer, template_resolver):
"""Write service config files to disk."""
with open(outfile, 'w') as f:
f.write(config_writer(outfile, data_dict, template_resolver))
def resolve_dists_dir():
"""Resolve path to distributions."""
pkg = Requirement.parse(hod.NAME)
return resource_filename(pkg, HOD_ETC_DIR)
def resolve_dist_path(dist):
"""
Given a distribution name like Hadoop-2.3.0-cdh5.0.0, return the path to the
relevant hod.conf
"""
distpath = resolve_dists_dir()
distpath = mkpath(distpath, dist, 'hod.conf')
return distpath
def avail_dists():
"""Return a list of available distributions"""
pkg = Requirement.parse(hod.NAME)
return resource_listdir(pkg, HOD_ETC_DIR)
def resolve_config_paths(config, dist):
"""
Take two strings and return:
1. config if it's defined.
2. The expanded dist path if config is not defined.
"""
if config:
if os.path.exists(config):
path = config
else:
raise ValueError("Specified config file '%s' does not exist." % config)
elif dist:
path = resolve_dist_path(dist)
if not os.path.exists(path):
raise ValueError("Config file for specified dist '%s' does not exist: %s" % (dist, path))
else:
raise RuntimeError('A config or a dist must be provided')
return path
| molden/hanythingondemand | hod/config/config.py | Python | gpl-2.0 | 16,895 |
# -*- coding: utf-8 -*-
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from time import *
import sys
class Windows(QDialog):
def __init__(self, parent=None):
super(Windows, self).__init__(parent)
self.startButton = QPushButton("Start")
self.stopButton = QPushButton("Stop")
self.stopButton.setEnabled(False)
self.statusLable = QLabel("Please click \"start\"")
self.statusLable.setFrameStyle(QFrame.StyledPanel|
QFrame.Plain)
topLayout = QHBoxLayout()
topLayout.addWidget(self.startButton)
topLayout.addWidget(self.stopButton)
layout = QVBoxLayout()
layout.addLayout(topLayout)
layout.addWidget(self.statusLable)
self.timer = Timer()
self.connect(self.startButton, SIGNAL("clicked()")
, self.start)
self.connect(self.stopButton, SIGNAL("clicked()")
, self.stop)
self.connect(self.timer, SIGNAL("updateTime()")
, self.updateTime)
self.setLayout(layout)
self.setWindowTitle("Timer")
self.setWindowFlags(Qt.WindowMinimizeButtonHint)
def updateTime(self):
self.statusLable.setText("Time: %s s" % QString.number(self.sec))
self.sec += 1
def start(self):
self.sec = 0
self.startButton.setEnabled(False)
self.stopButton.setEnabled(True)
self.timer.start()
def stop(self):
self.timer.stop()
self.stopButton.setEnabled(False)
self.startButton.setEnabled(True)
self.statusLable.setText("Timer stoped.")
class Timer(QThread):
def __init__(self, parent=None):
super(Timer, self).__init__(parent)
self.stoped = False
self.mutex = QMutex()
def run(self):
with QMutexLocker(self.mutex):
self.stoped = False
while True:
if self.stoped:
return
self.emit(SIGNAL("updateTime()"))
sleep(1)
def stop(self):
with QMutexLocker(self.mutex):
self.stoped = True
def isStoped(self):
        with QMutexLocker(self.mutex):
return self.stoped
app = QApplication(sys.argv)
windows = Windows()
windows.show()
app.exec_() | ptphp/PyLib | src/dev/case/timethread.py | Python | apache-2.0 | 2,356 |
__author__ = 'andrew'
from timeutil import *
class ModemLog(dict):
fields = ('finished', 'message_number', 'datetime',
'logged_message')
# This automagically retrieves values from the dictionary when they are referenced as properties.
# Whether this is awesome or sucks is open to debate.
def __getattr__(self, item):
"""Maps values to attributes.
Only called if there *isn't* an attribute with this name
"""
try:
return self.__getitem__(item)
except KeyError:
raise AttributeError(item)
def __setattr__(self, item, value):
"""Maps attributes to values.
Only if we are initialised
"""
        if '_ModemLog__initialized' not in self.__dict__:  # this test allows attributes to be set in the __init__ method
            return dict.__setattr__(self, item, value)
        elif item in self.__dict__:  # any normal attributes are handled normally
dict.__setattr__(self, item, value)
else:
self.__setitem__(item, value)
#For backward compatibility with classes that directly access the values dict.
@property
def values(self):
return self
def __str__(self):
''' Default human-readable version of ModemLog.
Doesn't show all parameters, just the most useful ones.'''
hrstr = "{dt}\tMessage: {msg}\t".format(
dt=self['datetime'], msg=self['logged_message'])
return hrstr
@classmethod
def from_nmea_msg(cls, msg):
values = dict.fromkeys(ModemLog.fields)
values['finished'] = int(msg["params"][0])
values['message_number'] = int(msg["params"][1])
values['datetime']= convert_to_datetime(msg["params"][2])
values['logged_message'] = (msg["params"][3:])
log = cls(values)
return log
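# A minimal usage sketch (not part of the original code): from_nmea_msg expects
# a parsed NMEA message dict whose "params" list starts with the finished flag,
# message number and timestamp, followed by the logged-message fields. The
# literal values and timestamp format below are assumptions for illustration:
#
#   msg = {"params": ["1", "7", "2013-01-01 12:00:00.0", "Modem", "rebooted"]}
#   log = ModemLog.from_nmea_msg(msg)
#   print(log.logged_message)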
| whoi-acomms/pyacomms | acomms/modemlog.py | Python | lgpl-3.0 | 2,056 |
from datetime import date
import string
from . import base
from . import mixins
class TransformedRecord(
mixins.GenericCompensationMixin,
mixins.GenericIdentifierMixin,
mixins.GenericPersonMixin,
mixins.MembershipMixin, mixins.OrganizationMixin, mixins.PostMixin,
mixins.RaceMixin, mixins.LinkMixin, base.BaseTransformedRecord):
MAP = {
'last_name': 'Last name',
'first_name': 'First name',
'department_name': 'Personnel Area',
'job_title': 'Position Title',
'hire_date': 'Most Recent Hire Dt.',
'status': 'Emp Sub-Group',
'gender_type': 'Gender',
'given_race': 'Ethnicity',
'compensation': 'Annual Salary',
}
NAME_FIELDS = ('first_name', 'last_name', )
ORGANIZATION_NAME = 'Travis County'
ORGANIZATION_CLASSIFICATION = 'County'
DATE_PROVIDED = date(2017, 4, 21)
URL = ('http://raw.texastribune.org.s3.amazonaws.com/'
'travis_county/salaries/2017-05/'
'PIR.xlsx')
@property
def is_valid(self):
# Adjust to return False on invalid fields. For example:
return self.last_name.strip() != ''
@property
def person(self):
name = self.get_name()
r = {
'family_name': name.last,
'given_name': name.first,
'name': unicode(name),
'gender': self.get_mapped_value('gender_type')
}
return r
@property
def gender(self):
gender = self.get_mapped_value('gender_type')
if gender.strip() == '':
return 'Not Given'
else:
return gender
@property
def compensation_type(self):
emptype = self.get_mapped_value('status')
if 'Full' in emptype:
return 'FT'
elif 'Part' in emptype:
return 'PT'
@property
def description(self):
status = self.status
if 'Full' in status:
return 'Annual salary'
elif 'Part' in status:
return 'Part-time, annual salary'
@property
def race(self):
race = self.given_race.strip()
if race == '':
race = 'Not given'
return {'name': race}
@property
def department(self):
dept = self.department_name.strip()
if dept == 'Health and Human Sv and Vet Sv':
dept = 'Health & Human Services and Veterans Services'
elif dept == 'Counseling And Education Sv':
dept = 'Counseling & Education Services'
elif dept == 'Transportation And Nat Rsrc':
dept = 'Transportation & Natural Resources'
elif dept == 'Rcd Mgmt And Comm Rsrc':
dept = 'Records Management Communications Resources'
return dept
transform = base.transform_factory(TransformedRecord)
| texastribune/tx_salaries | tx_salaries/utils/transformers/travis_county.py | Python | apache-2.0 | 2,847 |
from .util import split_package
from osc import conf
DEFAULT_SEARCH_STATES = ("new", "review", "declined")
def search(api, source=None, target=None, user=None, req_type=None, state=DEFAULT_SEARCH_STATES):
if not source and not target and not user:
raise ValueError("You must specify at least one of source, target, user.")
xpath = []
_xval = lambda attr, value: "{}='{}'".format(attr, value)
_xif = lambda attr, value: value and [_xval(attr, value)] or []
# query by state
if not state == "all":
if isinstance(state, str): state = [state]
state_query = " or ".join([_xval("state/@name", s) for s in state])
xpath.append(state_query)
# query by user
if user:
xpath.append(_xval("state/@who", user) + " or " + _xval("history/@who", user))
# query by source and target
if source:
pkg = split_package(source)
xpath += _xif("action/source/@project", pkg.project)
xpath += _xif("action/source/@package", pkg.package)
if target:
pkg = split_package(target)
xpath += _xif("action/target/@project", pkg.project)
xpath += _xif("action/target/@package", pkg.package)
# query by type
xpath += _xif("action/@type", req_type)
if not xpath:
raise ValueError("Something went wrong, the query string is empty.")
xpathstr = "(" + ") and (".join(xpath) + ")"
if conf.config['verbose'] > 1:
print("[ {} ]".format(xpath))
return api.get_xml("/search/request", query={"match": xpathstr})
| matejcik/osc | osclib/request.py | Python | gpl-2.0 | 1,550 |
#!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@contact: [email protected]
@copyright: License according to the project license.
'''
NAME='renzi23'
SPELL='rénzǐ'
CN='壬子'
SEQ='49'
if __name__=='__main__':
pass
| sinotradition/sinoera | sinoera/ganzhi/renzi23.py | Python | apache-2.0 | 229 |
"""All Flask blueprints for the entire application.
All blueprints for all views go here. They shall be imported by the views themselves and by application.py. Blueprint
URL paths are defined here as well.
"""
from flask import Blueprint
def _factory(partial_module_string, url_prefix):
"""Generates blueprint objects for view modules.
Positional arguments:
partial_module_string -- string representing a view module without the absolute path.
url_prefix -- URL prefix passed to the blueprint.
Returns:
Blueprint instance for a view module.
"""
name = partial_module_string
import_name = 'mercado.views.{}'.format(partial_module_string)
template_folder = 'templates'
blueprint = Blueprint(name, import_name, template_folder=template_folder, url_prefix=url_prefix)
return blueprint
mercado_api = _factory('api.core', '/api/core')
all_blueprints = (mercado_api,)
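# A minimal sketch (not part of this module) of how application.py might
# consume all_blueprints; the exact wiring there is an assumption:
#
#   from importlib import import_module
#   from flask import Flask
#
#   app = Flask(__name__)
#   for bp in all_blueprints:
#       import_module(bp.import_name)  # load the view module so routes register
#       app.register_blueprint(bp)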
| furritos/mercado-api | mercado/blueprints.py | Python | mit | 917 |
from numbers import Integral, Real
from itertools import chain
import string
import numpy as np
import openmc.checkvalue as cv
import openmc.data
# Supported keywords for continuous-energy cross section plotting
PLOT_TYPES = ['total', 'scatter', 'elastic', 'inelastic', 'fission',
'absorption', 'capture', 'nu-fission', 'nu-scatter', 'unity',
'slowing-down power', 'damage']
# Supported keywords for multi-group cross section plotting
PLOT_TYPES_MGXS = ['total', 'absorption', 'scatter', 'fission',
'kappa-fission', 'nu-fission', 'prompt-nu-fission',
                   'delayed-nu-fission', 'chi', 'chi-prompt', 'chi-delayed',
'inverse-velocity', 'beta', 'decay rate', 'unity']
# Create a dictionary which can be used to convert PLOT_TYPES_MGXS to the
# openmc.XSdata attribute name needed to access the data
_PLOT_MGXS_ATTR = {line: line.replace(' ', '_').replace('-', '_')
for line in PLOT_TYPES_MGXS}
_PLOT_MGXS_ATTR['scatter'] = 'scatter_matrix'
# Special MT values
UNITY_MT = -1
XI_MT = -2
# MTs to combine to generate associated plot_types
_INELASTIC = [mt for mt in openmc.data.SUM_RULES[3] if mt != 27]
PLOT_TYPES_MT = {'total': openmc.data.SUM_RULES[1],
'scatter': [2] + _INELASTIC,
'elastic': [2],
'inelastic': _INELASTIC,
'fission': [18],
'absorption': [27], 'capture': [101],
'nu-fission': [18],
'nu-scatter': [2] + _INELASTIC,
'unity': [UNITY_MT],
'slowing-down power': [2] + _INELASTIC + [XI_MT],
'damage': [444]}
# Operations to use when combining MTs; the first np.add is applied starting
# from zero
PLOT_TYPES_OP = {'total': (np.add,),
'scatter': (np.add,) * (len(PLOT_TYPES_MT['scatter']) - 1),
'elastic': (),
'inelastic': (np.add,) * (len(PLOT_TYPES_MT['inelastic']) - 1),
'fission': (), 'absorption': (),
'capture': (), 'nu-fission': (),
'nu-scatter': (np.add,) * (len(PLOT_TYPES_MT['nu-scatter']) - 1),
'unity': (),
'slowing-down power':
(np.add,) * (len(PLOT_TYPES_MT['slowing-down power']) - 2) + (np.multiply,),
'damage': ()}
# Types of plots to plot linearly in y
PLOT_TYPES_LINEAR = {'nu-fission / fission', 'nu-scatter / scatter',
'nu-fission / absorption', 'fission / absorption'}
# Minimum and maximum energies for plotting (units of eV)
_MIN_E = 1.e-5
_MAX_E = 20.e6
def plot_xs(this, types, divisor_types=None, temperature=294., data_type=None,
axis=None, sab_name=None, ce_cross_sections=None,
mg_cross_sections=None, enrichment=None, plot_CE=True, orders=None,
divisor_orders=None, **kwargs):
"""Creates a figure of continuous-energy cross sections for this item.
Parameters
----------
this : str or openmc.Material
Object to source data from
types : Iterable of values of PLOT_TYPES
The type of cross sections to include in the plot.
divisor_types : Iterable of values of PLOT_TYPES, optional
Cross section types which will divide those produced by types
before plotting. A type of 'unity' can be used to effectively not
divide some types.
temperature : float, optional
Temperature in Kelvin to plot. If not specified, a default
temperature of 294K will be plotted. Note that the nearest
temperature in the library for each nuclide will be used as opposed
to using any interpolation.
data_type : {'nuclide', 'element', 'material', 'macroscopic'}, optional
Type of object to plot. If not specified, a guess is made based on the
`this` argument.
axis : matplotlib.axes, optional
A previously generated axis to use for plotting. If not specified,
a new axis and figure will be generated.
sab_name : str, optional
Name of S(a,b) library to apply to MT=2 data when applicable; only used
for items which are instances of openmc.Element or openmc.Nuclide
ce_cross_sections : str, optional
Location of cross_sections.xml file. Default is None.
mg_cross_sections : str, optional
Location of MGXS HDF5 Library file. Default is None.
enrichment : float, optional
Enrichment for U235 in weight percent. For example, input 4.95 for
4.95 weight percent enriched U. Default is None. This is only used for
items which are instances of openmc.Element
plot_CE : bool, optional
Denotes whether or not continuous-energy will be plotted. Defaults to
plotting the continuous-energy data.
orders : Iterable of Integral, optional
The scattering order or delayed group index to use for the
corresponding entry in types. Defaults to the 0th order for scattering
and the total delayed neutron data. This only applies to plots of
multi-group data.
divisor_orders : Iterable of Integral, optional
Same as orders, but for divisor_types
**kwargs
All keyword arguments are passed to
:func:`matplotlib.pyplot.figure`.
Returns
-------
fig : matplotlib.figure.Figure
If axis is None, then a Matplotlib Figure of the generated
cross section will be returned. Otherwise, a value of
None will be returned as the figure and axes have already been
generated.
"""
import matplotlib.pyplot as plt
cv.check_type("plot_CE", plot_CE, bool)
if data_type is None:
if isinstance(this, openmc.Nuclide):
data_type = 'nuclide'
elif isinstance(this, openmc.Element):
data_type = 'element'
elif isinstance(this, openmc.Material):
data_type = 'material'
elif isinstance(this, openmc.Macroscopic):
data_type = 'macroscopic'
elif isinstance(this, str):
if this[-1] in string.digits:
data_type = 'nuclide'
else:
data_type = 'element'
else:
raise TypeError("Invalid type for plotting")
if plot_CE:
# Calculate for the CE cross sections
E, data = calculate_cexs(this, data_type, types, temperature, sab_name,
ce_cross_sections, enrichment)
if divisor_types:
cv.check_length('divisor types', divisor_types, len(types))
            Ediv, data_div = calculate_cexs(this, data_type, divisor_types,
                                            temperature, sab_name,
                                            ce_cross_sections, enrichment)
# Create a new union grid, interpolate data and data_div on to that
# grid, and then do the actual division
Enum = E[:]
E = np.union1d(Enum, Ediv)
data_new = np.zeros((len(types), len(E)))
for line in range(len(types)):
data_new[line, :] = \
np.divide(np.interp(E, Enum, data[line, :]),
np.interp(E, Ediv, data_div[line, :]))
if divisor_types[line] != 'unity':
types[line] = types[line] + ' / ' + divisor_types[line]
data = data_new
else:
# Calculate for MG cross sections
E, data = calculate_mgxs(this, data_type, types, orders, temperature,
mg_cross_sections, ce_cross_sections,
enrichment)
if divisor_types:
cv.check_length('divisor types', divisor_types, len(types))
Ediv, data_div = calculate_mgxs(this, data_type, divisor_types,
divisor_orders, temperature,
mg_cross_sections,
ce_cross_sections, enrichment)
# Perform the division
for line in range(len(types)):
data[line, :] /= data_div[line, :]
if divisor_types[line] != 'unity':
types[line] += ' / ' + divisor_types[line]
# Generate the plot
if axis is None:
fig = plt.figure(**kwargs)
ax = fig.add_subplot(111)
else:
fig = None
ax = axis
# Set to loglog or semilogx depending on if we are plotting a data
# type which we expect to vary linearly
if set(types).issubset(PLOT_TYPES_LINEAR):
plot_func = ax.semilogx
else:
plot_func = ax.loglog
# Plot the data
for i in range(len(data)):
data[i, :] = np.nan_to_num(data[i, :])
if np.sum(data[i, :]) > 0.:
plot_func(E, data[i, :], label=types[i])
ax.set_xlabel('Energy [eV]')
if plot_CE:
ax.set_xlim(_MIN_E, _MAX_E)
else:
ax.set_xlim(E[-1], E[0])
if divisor_types:
if data_type == 'nuclide':
ylabel = 'Nuclidic Microscopic Data'
elif data_type == 'element':
ylabel = 'Elemental Microscopic Data'
elif data_type == 'material' or data_type == 'macroscopic':
ylabel = 'Macroscopic Data'
else:
if data_type == 'nuclide':
ylabel = 'Microscopic Cross Section [b]'
elif data_type == 'element':
ylabel = 'Elemental Cross Section [b]'
elif data_type == 'material' or data_type == 'macroscopic':
ylabel = 'Macroscopic Cross Section [1/cm]'
ax.set_ylabel(ylabel)
ax.legend(loc='best')
name = this.name if data_type == 'material' else this
if len(types) > 1:
ax.set_title('Cross Sections for ' + name)
else:
ax.set_title('Cross Section for ' + name)
return fig
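# A minimal usage sketch (not part of the original module); the nuclide, types
# and output file name are example values, and a cross_sections.xml reachable
# through the OPENMC_CROSS_SECTIONS environment variable is assumed:
#
#   fig = plot_xs('U235', ['total', 'elastic', 'fission'], temperature=600.)
#   fig.savefig('u235_xs.png')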
def calculate_cexs(this, data_type, types, temperature=294., sab_name=None,
cross_sections=None, enrichment=None):
"""Calculates continuous-energy cross sections of a requested type.
Parameters
----------
this : {str, openmc.Nuclide, openmc.Element, openmc.Material}
Object to source data from
    data_type : {'nuclide', 'element', 'material'}
Type of object to plot
types : Iterable of values of PLOT_TYPES
The type of cross sections to calculate
temperature : float, optional
Temperature in Kelvin to plot. If not specified, a default
temperature of 294K will be plotted. Note that the nearest
temperature in the library for each nuclide will be used as opposed
to using any interpolation.
sab_name : str, optional
Name of S(a,b) library to apply to MT=2 data when applicable.
cross_sections : str, optional
Location of cross_sections.xml file. Default is None.
enrichment : float, optional
Enrichment for U235 in weight percent. For example, input 4.95 for
4.95 weight percent enriched U. Default is None
(natural composition).
Returns
-------
energy_grid : numpy.ndarray
Energies at which cross sections are calculated, in units of eV
data : numpy.ndarray
Cross sections calculated at the energy grid described by energy_grid
"""
# Check types
cv.check_type('temperature', temperature, Real)
if sab_name:
cv.check_type('sab_name', sab_name, str)
if enrichment:
cv.check_type('enrichment', enrichment, Real)
if data_type == 'nuclide':
if isinstance(this, str):
nuc = openmc.Nuclide(this)
else:
nuc = this
energy_grid, xs = _calculate_cexs_nuclide(nuc, types, temperature,
sab_name, cross_sections)
# Convert xs (Iterable of Callable) to a grid of cross section values
# calculated on @ the points in energy_grid for consistency with the
# element and material functions.
data = np.zeros((len(types), len(energy_grid)))
for line in range(len(types)):
data[line, :] = xs[line](energy_grid)
elif data_type == 'element':
if isinstance(this, str):
elem = openmc.Element(this)
else:
elem = this
energy_grid, data = _calculate_cexs_elem_mat(elem, types, temperature,
cross_sections, sab_name,
enrichment)
elif data_type == 'material':
cv.check_type('this', this, openmc.Material)
energy_grid, data = _calculate_cexs_elem_mat(this, types, temperature,
cross_sections)
else:
raise TypeError("Invalid type")
return energy_grid, data
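# A minimal usage sketch (not part of the original module); 'H1' is an example
# nuclide and assumes it is present in the configured data library:
#
#   E, xs = calculate_cexs('H1', 'nuclide', ['elastic'])
#   print(xs[0][E < 1.0])   # elastic cross section values below 1 eV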
def _calculate_cexs_nuclide(this, types, temperature=294., sab_name=None,
cross_sections=None):
"""Calculates continuous-energy cross sections of a requested type.
Parameters
----------
this : openmc.Nuclide
Nuclide object to source data from
types : Iterable of str or Integral
The type of cross sections to calculate; values can either be those
in openmc.PLOT_TYPES or integers which correspond to reaction
channel (MT) numbers.
temperature : float, optional
Temperature in Kelvin to plot. If not specified, a default
temperature of 294K will be plotted. Note that the nearest
temperature in the library for each nuclide will be used as opposed
to using any interpolation.
sab_name : str, optional
Name of S(a,b) library to apply to MT=2 data when applicable.
cross_sections : str, optional
Location of cross_sections.xml file. Default is None.
Returns
-------
energy_grid : numpy.ndarray
Energies at which cross sections are calculated, in units of eV
data : Iterable of Callable
Requested cross section functions
"""
# Parse the types
mts = []
ops = []
yields = []
for line in types:
if line in PLOT_TYPES:
mts.append(PLOT_TYPES_MT[line])
if line.startswith('nu'):
yields.append(True)
else:
yields.append(False)
ops.append(PLOT_TYPES_OP[line])
else:
# Not a built-in type, we have to parse it ourselves
cv.check_type('MT in types', line, Integral)
cv.check_greater_than('MT in types', line, 0)
mts.append((line,))
ops.append(())
yields.append(False)
# Load the library
library = openmc.data.DataLibrary.from_xml(cross_sections)
# Convert temperature to format needed for access in the library
strT = "{}K".format(int(round(temperature)))
T = temperature
# Now we can create the data sets to be plotted
energy_grid = []
xs = []
lib = library.get_by_material(this)
if lib is not None:
nuc = openmc.data.IncidentNeutron.from_hdf5(lib['path'])
# Obtain the nearest temperature
if strT in nuc.temperatures:
nucT = strT
else:
delta_T = np.array(nuc.kTs) - T * openmc.data.K_BOLTZMANN
closest_index = np.argmin(np.abs(delta_T))
nucT = nuc.temperatures[closest_index]
# Prep S(a,b) data if needed
if sab_name:
sab = openmc.data.ThermalScattering.from_hdf5(sab_name)
# Obtain the nearest temperature
if strT in sab.temperatures:
sabT = strT
else:
delta_T = np.array(sab.kTs) - T * openmc.data.K_BOLTZMANN
closest_index = np.argmin(np.abs(delta_T))
sabT = sab.temperatures[closest_index]
# Create an energy grid composed the S(a,b) and the nuclide's grid
grid = nuc.energy[nucT]
sab_Emax = 0.
sab_funcs = []
if sab.elastic_xs:
elastic = sab.elastic_xs[sabT]
if isinstance(elastic, openmc.data.CoherentElastic):
grid = np.union1d(grid, elastic.bragg_edges)
if elastic.bragg_edges[-1] > sab_Emax:
sab_Emax = elastic.bragg_edges[-1]
elif isinstance(elastic, openmc.data.Tabulated1D):
grid = np.union1d(grid, elastic.x)
if elastic.x[-1] > sab_Emax:
sab_Emax = elastic.x[-1]
sab_funcs.append(elastic)
if sab.inelastic_xs:
inelastic = sab.inelastic_xs[sabT]
grid = np.union1d(grid, inelastic.x)
if inelastic.x[-1] > sab_Emax:
sab_Emax = inelastic.x[-1]
sab_funcs.append(inelastic)
energy_grid = grid
else:
energy_grid = nuc.energy[nucT]
for i, mt_set in enumerate(mts):
# Get the reaction xs data from the nuclide
funcs = []
op = ops[i]
for mt in mt_set:
if mt == 2:
if sab_name:
# Then we need to do a piece-wise function of
# The S(a,b) and non-thermal data
sab_sum = openmc.data.Sum(sab_funcs)
pw_funcs = openmc.data.Regions1D(
[sab_sum, nuc[mt].xs[nucT]],
[sab_Emax])
funcs.append(pw_funcs)
else:
funcs.append(nuc[mt].xs[nucT])
elif mt in nuc:
if yields[i]:
# Get the total yield first if available. This will be
# used primarily for fission.
for prod in chain(nuc[mt].products,
nuc[mt].derived_products):
if prod.particle == 'neutron' and \
prod.emission_mode == 'total':
func = openmc.data.Combination(
[nuc[mt].xs[nucT], prod.yield_],
[np.multiply])
funcs.append(func)
break
else:
# Total doesn't exist so we have to create from
# prompt and delayed. This is used for scatter
# multiplication.
func = None
for prod in chain(nuc[mt].products,
nuc[mt].derived_products):
if prod.particle == 'neutron' and \
prod.emission_mode != 'total':
if func:
func = openmc.data.Combination(
[prod.yield_, func], [np.add])
else:
func = prod.yield_
if func:
funcs.append(openmc.data.Combination(
[func, nuc[mt].xs[nucT]], [np.multiply]))
else:
# If func is still None, then there were no
# products. In that case, assume the yield is
                                # one as it's not provided for some summed
# reactions like MT=4
funcs.append(nuc[mt].xs[nucT])
else:
funcs.append(nuc[mt].xs[nucT])
elif mt == UNITY_MT:
funcs.append(lambda x: 1.)
elif mt == XI_MT:
awr = nuc.atomic_weight_ratio
alpha = ((awr - 1.) / (awr + 1.))**2
xi = 1. + alpha * np.log(alpha) / (1. - alpha)
funcs.append(lambda x: xi)
else:
funcs.append(lambda x: 0.)
xs.append(openmc.data.Combination(funcs, op))
else:
raise ValueError(this + " not in library")
return energy_grid, xs
def _calculate_cexs_elem_mat(this, types, temperature=294.,
cross_sections=None, sab_name=None,
enrichment=None):
"""Calculates continuous-energy cross sections of a requested type.
Parameters
----------
this : openmc.Material or openmc.Element
Object to source data from
types : Iterable of values of PLOT_TYPES
The type of cross sections to calculate
temperature : float, optional
Temperature in Kelvin to plot. If not specified, a default
temperature of 294K will be plotted. Note that the nearest
temperature in the library for each nuclide will be used as opposed
to using any interpolation.
cross_sections : str, optional
Location of cross_sections.xml file. Default is None.
sab_name : str, optional
Name of S(a,b) library to apply to MT=2 data when applicable.
enrichment : float, optional
Enrichment for U235 in weight percent. For example, input 4.95 for
4.95 weight percent enriched U. Default is None
(natural composition).
Returns
-------
energy_grid : numpy.ndarray
Energies at which cross sections are calculated, in units of eV
data : numpy.ndarray
Cross sections calculated at the energy grid described by energy_grid
"""
if isinstance(this, openmc.Material):
if this.temperature is not None:
T = this.temperature
else:
T = temperature
else:
T = temperature
# Load the library
library = openmc.data.DataLibrary.from_xml(cross_sections)
if isinstance(this, openmc.Material):
# Expand elements in to nuclides with atomic densities
nuclides = this.get_nuclide_atom_densities()
# For ease of processing split out the nuclide and its fraction
nuc_fractions = {nuclide[1][0]: nuclide[1][1]
for nuclide in nuclides.items()}
# Create a dict of [nuclide name] = nuclide object to carry forward
# with a common nuclides format between openmc.Material and
# openmc.Element objects
nuclides = {nuclide[1][0]: nuclide[1][0]
for nuclide in nuclides.items()}
else:
# Expand elements in to nuclides with atomic densities
nuclides = this.expand(1., 'ao', enrichment=enrichment,
cross_sections=cross_sections)
# For ease of processing split out the nuclide and its fraction
nuc_fractions = {nuclide[0]: nuclide[1] for nuclide in nuclides}
# Create a dict of [nuclide name] = nuclide object to carry forward
# with a common nuclides format between openmc.Material and
# openmc.Element objects
nuclides = {nuclide[0]: nuclide[0] for nuclide in nuclides}
# Identify the nuclides which have S(a,b) data
sabs = {}
for nuclide in nuclides.items():
sabs[nuclide[0]] = None
if isinstance(this, openmc.Material):
for sab_name in this._sab:
sab = openmc.data.ThermalScattering.from_hdf5(
library.get_by_material(sab_name)['path'])
for nuc in sab.nuclides:
sabs[nuc] = library.get_by_material(sab_name)['path']
else:
if sab_name:
sab = openmc.data.ThermalScattering.from_hdf5(sab_name)
for nuc in sab.nuclides:
sabs[nuc] = library.get_by_material(sab_name)['path']
# Now we can create the data sets to be plotted
xs = {}
E = []
for nuclide in nuclides.items():
name = nuclide[0]
nuc = nuclide[1]
sab_tab = sabs[name]
temp_E, temp_xs = calculate_cexs(nuc, 'nuclide', types, T, sab_tab,
cross_sections)
E.append(temp_E)
# Since the energy grids are different, store the cross sections as
# a tabulated function so they can be calculated on any grid needed.
xs[name] = [openmc.data.Tabulated1D(temp_E, temp_xs[line])
for line in range(len(types))]
# Condense the data for every nuclide
# First create a union energy grid
energy_grid = E[0]
for grid in E[1:]:
energy_grid = np.union1d(energy_grid, grid)
# Now we can combine all the nuclidic data
data = np.zeros((len(types), len(energy_grid)))
for line in range(len(types)):
if types[line] == 'unity':
data[line, :] = 1.
else:
for nuclide in nuclides.items():
name = nuclide[0]
data[line, :] += (nuc_fractions[name] *
xs[name][line](energy_grid))
return energy_grid, data
def calculate_mgxs(this, data_type, types, orders=None, temperature=294.,
cross_sections=None, ce_cross_sections=None,
enrichment=None):
"""Calculates multi-group cross sections of a requested type.
If the data for the nuclide or macroscopic object in the library is
represented as angle-dependent data then this method will return the
geometric average cross section over all angles.
Parameters
----------
this : str or openmc.Material
Object to source data from
    data_type : {'nuclide', 'element', 'material', 'macroscopic'}
Type of object to plot
types : Iterable of values of PLOT_TYPES_MGXS
The type of cross sections to calculate
orders : Iterable of Integral, optional
The scattering order or delayed group index to use for the
corresponding entry in types. Defaults to the 0th order for scattering
and the total delayed neutron data.
temperature : float, optional
Temperature in Kelvin to plot. If not specified, a default
temperature of 294K will be plotted. Note that the nearest
temperature in the library for each nuclide will be used as opposed
to using any interpolation.
cross_sections : str, optional
Location of MGXS HDF5 Library file. Default is None.
ce_cross_sections : str, optional
Location of continuous-energy cross_sections.xml file. Default is None.
This is used only for expanding an openmc.Element object passed as this
enrichment : float, optional
Enrichment for U235 in weight percent. For example, input 4.95 for
4.95 weight percent enriched U. Default is None
(natural composition).
Returns
-------
energy_grid : numpy.ndarray
Energies at which cross sections are calculated, in units of eV
data : numpy.ndarray
Cross sections calculated at the energy grid described by energy_grid
"""
# Check types
cv.check_type('temperature', temperature, Real)
if enrichment:
cv.check_type('enrichment', enrichment, Real)
cv.check_iterable_type('types', types, str)
cv.check_type("cross_sections", cross_sections, str)
library = openmc.MGXSLibrary.from_hdf5(cross_sections)
if data_type in ('nuclide', 'macroscopic'):
mgxs = _calculate_mgxs_nuc_macro(this, types, library, orders,
temperature)
elif data_type in ('element', 'material'):
mgxs = _calculate_mgxs_elem_mat(this, types, library, orders,
temperature, ce_cross_sections,
enrichment)
else:
raise TypeError("Invalid type")
# Convert the data to the format needed
data = np.zeros((len(types), 2 * library.energy_groups.num_groups))
energy_grid = np.zeros(2 * library.energy_groups.num_groups)
for g in range(library.energy_groups.num_groups):
energy_grid[g * 2: g * 2 + 2] = \
library.energy_groups.group_edges[g: g + 2]
# Ensure the energy will show on a log-axis by replacing 0s with a
# sufficiently small number
energy_grid[0] = max(energy_grid[0], _MIN_E)
for line in range(len(types)):
for g in range(library.energy_groups.num_groups):
data[line, g * 2: g * 2 + 2] = mgxs[line, g]
return energy_grid[::-1], data
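# A minimal usage sketch (not part of the original module); 'fuel' and
# 'mgxs.h5' are placeholder names for a macroscopic data set and an MGXS
# library file:
#
#   import matplotlib.pyplot as plt
#   E, xs = calculate_mgxs('fuel', 'macroscopic', ['total'],
#                          cross_sections='mgxs.h5')
#   plt.loglog(E, xs[0])    # step-like curve over the group structure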
def _calculate_mgxs_nuc_macro(this, types, library, orders=None,
temperature=294.):
"""Determines the multi-group cross sections of a nuclide or macroscopic
object.
If the data for the nuclide or macroscopic object in the library is
represented as angle-dependent data then this method will return the
geometric average cross section over all angles.
Parameters
----------
this : openmc.Nuclide or openmc.Macroscopic
Object to source data from
types : Iterable of str
The type of cross sections to calculate; values can either be those
in openmc.PLOT_TYPES_MGXS
library : openmc.MGXSLibrary
MGXS Library containing the data of interest
orders : Iterable of Integral, optional
The scattering order or delayed group index to use for the
corresponding entry in types. Defaults to the 0th order for scattering
and the total delayed neutron data.
temperature : float, optional
Temperature in Kelvin to plot. If not specified, a default
temperature of 294K will be plotted. Note that the nearest
temperature in the library for each nuclide will be used as opposed
to using any interpolation.
Returns
-------
data : numpy.ndarray
Cross sections calculated at the energy grid described by energy_grid
"""
# Check the parameters and grab order/delayed groups
if orders:
cv.check_iterable_type('orders', orders, Integral,
min_depth=len(types), max_depth=len(types))
else:
orders = [None] * len(types)
for i, line in enumerate(types):
cv.check_type("line", line, str)
cv.check_value("line", line, PLOT_TYPES_MGXS)
if orders[i]:
cv.check_greater_than("order value", orders[i], 0, equality=True)
xsdata = library.get_by_name(this)
if xsdata is not None:
# Obtain the nearest temperature
t = np.abs(xsdata.temperatures - temperature).argmin()
# Get the data
data = np.zeros((len(types), library.energy_groups.num_groups))
for i, line in enumerate(types):
if 'fission' in line and not xsdata.fissionable:
continue
elif line == 'unity':
data[i, :] = 1.
else:
# Now we have to get the cross section data and properly
# treat it depending on the requested type.
# First get the data in a generic fashion
temp_data = getattr(xsdata, _PLOT_MGXS_ATTR[line])[t]
shape = temp_data.shape[:]
# If we have angular data, then want the geometric
# average over all provided angles. Since the angles are
# equi-distant, un-weighted averaging will suffice
if xsdata.representation == 'angle':
temp_data = np.mean(temp_data, axis=(0, 1))
# Now we can look at the shape of the data to identify how
# it should be modified to produce an array of values
# with groups.
if shape in (xsdata.xs_shapes["[G']"],
xsdata.xs_shapes["[G]"]):
# Then the data is already an array vs groups so copy
# and move along
data[i, :] = temp_data
elif shape == xsdata.xs_shapes["[G][G']"]:
# Sum the data over outgoing groups to create our array vs
# groups
data[i, :] = np.sum(temp_data, axis=1)
elif shape == xsdata.xs_shapes["[DG]"]:
# Then we have a constant vs groups with a value for each
# delayed group. The user-provided value of orders tells us
# which delayed group we want. If none are provided, then
# we sum all the delayed groups together.
if orders[i]:
                        if orders[i] < shape[0]:
data[i, :] = temp_data[orders[i]]
else:
data[i, :] = np.sum(temp_data[:])
elif shape in (xsdata.xs_shapes["[DG][G']"],
xsdata.xs_shapes["[DG][G]"]):
# Then we have an array vs groups with values for each
# delayed group. The user-provided value of orders tells us
# which delayed group we want. If none are provided, then
# we sum all the delayed groups together.
if orders[i]:
                        if orders[i] < shape[0]:
data[i, :] = temp_data[orders[i], :]
else:
data[i, :] = np.sum(temp_data[:, :], axis=0)
elif shape == xsdata.xs_shapes["[DG][G][G']"]:
# Then we have a delayed group matrix. We will first
# remove the outgoing group dependency
temp_data = np.sum(temp_data, axis=-1)
# And then proceed in exactly the same manner as the
# "[DG][G']" or "[DG][G]" shapes in the previous block.
if orders[i]:
                        if orders[i] < shape[0]:
data[i, :] = temp_data[orders[i], :]
else:
data[i, :] = np.sum(temp_data[:, :], axis=0)
elif shape == xsdata.xs_shapes["[G][G'][Order]"]:
# This is a scattering matrix with angular data
# First remove the outgoing group dependence
temp_data = np.sum(temp_data, axis=1)
# The user either provided a specific order or we resort
# to the default 0th order
if orders[i]:
order = orders[i]
else:
order = 0
# If the order is available, store the data for that order
# if it is not available, then the expansion coefficient
# is zero and thus we already have the correct value.
if order < shape[1]:
data[i, :] = temp_data[:, order]
else:
raise ValueError("{} not present in provided MGXS "
"library".format(this))
return data
def _calculate_mgxs_elem_mat(this, types, library, orders=None,
temperature=294., ce_cross_sections=None,
enrichment=None):
"""Determines the multi-group cross sections of an element or material
object.
If the data for the nuclide or macroscopic object in the library is
represented as angle-dependent data then this method will return the
geometric average cross section over all angles.
Parameters
----------
this : openmc.Element or openmc.Material
Object to source data from
types : Iterable of str
The type of cross sections to calculate; values can either be those
in openmc.PLOT_TYPES_MGXS
library : openmc.MGXSLibrary
MGXS Library containing the data of interest
orders : Iterable of Integral, optional
The scattering order or delayed group index to use for the
corresponding entry in types. Defaults to the 0th order for scattering
and the total delayed neutron data.
temperature : float, optional
Temperature in Kelvin to plot. If not specified, a default
temperature of 294K will be plotted. Note that the nearest
temperature in the library for each nuclide will be used as opposed
to using any interpolation.
ce_cross_sections : str, optional
Location of continuous-energy cross_sections.xml file. Default is None.
This is used only for expanding the elements
enrichment : float, optional
Enrichment for U235 in weight percent. For example, input 4.95 for
4.95 weight percent enriched U. Default is None
(natural composition).
Returns
-------
data : numpy.ndarray
Cross sections calculated at the energy grid described by energy_grid
"""
if isinstance(this, openmc.Material):
if this.temperature is not None:
T = this.temperature
else:
T = temperature
        # Check to see if we have nuclides/elements or a macroscopic object
if this._macroscopic is not None:
# We have macroscopics
nuclides = {this._macroscopic: (this._macroscopic, this.density)}
else:
# Expand elements in to nuclides with atomic densities
nuclides = this.get_nuclide_atom_densities()
# For ease of processing split out nuc and nuc_density
nuc_fraction = [nuclide[1][1] for nuclide in nuclides.items()]
else:
T = temperature
# Expand elements in to nuclides with atomic densities
nuclides = this.expand(100., 'ao', enrichment=enrichment,
cross_sections=ce_cross_sections)
# For ease of processing split out nuc and nuc_fractions
nuc_fraction = [nuclide[1] for nuclide in nuclides]
nuc_data = []
for nuclide in nuclides.items():
nuc_data.append(_calculate_mgxs_nuc_macro(nuclide[0], types, library,
orders, T))
# Combine across the nuclides
data = np.zeros((len(types), library.energy_groups.num_groups))
for line in range(len(types)):
if types[line] == 'unity':
data[line, :] = 1.
else:
for n in range(len(nuclides)):
data[line, :] += nuc_fraction[n] * nuc_data[n][line, :]
return data
| johnnyliu27/openmc | openmc/plotter.py | Python | mit | 38,526 |
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
"""
Simple data processing pipeline plugin for Ginga.
**Plugin Type: Local**
``Pipeline`` is a local plugin, which means it is associated with a channel.
An instance can be opened for each channel.
**Usage**
"""
import os.path
import tempfile
from ginga import GingaPlugin
from ginga.util import pipeline
from ginga.gw import Widgets
from ginga.util.stages.stage_info import get_stage_catalog
__all__ = ['Pipeline']
class Pipeline(GingaPlugin.LocalPlugin):
def __init__(self, fv, fitsimage):
# superclass defines some variables for us, like logger
super(Pipeline, self).__init__(fv, fitsimage)
# TEMP: make user selectable
self.save_file = "pipeline.yml"
# Load preferences
prefs = self.fv.get_preferences()
self.settings = prefs.create_category('plugin_Pipeline')
self.settings.set_defaults(output_suffix='-pipe')
self.settings.load(onError='silent')
self.stage_dict = get_stage_catalog(self.logger)
self.stage_classes = list(self.stage_dict.values())
self.stage_names = list(self.stage_dict.keys())
self.stage_names.sort()
stages = [self.stage_dict['input'](), self.stage_dict['output']()]
self.pipeline = pipeline.Pipeline(self.logger, stages)
self.pipeline.add_callback('stage-executing', self.stage_status, 'X')
self.pipeline.add_callback('stage-done', self.stage_status, 'D')
self.pipeline.add_callback('stage-errored', self.stage_status, 'E')
self.pipeline.add_callback('pipeline-start', self.clear_status)
self.pipeline.set(fv=self.fv, viewer=self.fitsimage)
self.gui_up = False
def build_gui(self, container):
top = Widgets.VBox()
top.set_border_width(4)
top.set_spacing(2)
tbar = Widgets.Toolbar(orientation='horizontal')
menu = tbar.add_menu('Pipe', mtype='menu')
menu.set_tooltip("Operation on pipeline")
item = menu.add_name('Load')
item.set_tooltip("Load a new pipeline")
item.add_callback('activated', self.load_pipeline_cb)
item = menu.add_name('Save')
item.set_tooltip("Save this pipeline")
item.add_callback('activated', self.save_pipeline_cb)
menu = tbar.add_menu('Edit', mtype='menu')
menu.set_tooltip("Edit on pipeline")
item = menu.add_name('Undo')
item.set_tooltip("Undo last action")
item.add_callback('activated', self.undo_pipeline_cb)
item = menu.add_name('Redo')
item.set_tooltip("Redo last action")
item.add_callback('activated', self.redo_pipeline_cb)
name = Widgets.TextEntry(editable=True)
name.add_callback('activated', self.set_pipeline_name_cb)
name.set_text(self.pipeline.name)
self.w.pipeline_name = name
tbar.add_widget(name)
top.add_widget(tbar, stretch=0)
vbox = Widgets.VBox()
vbox.set_border_width(2)
vbox.set_spacing(2)
self.pipelist = vbox
for stage in self.pipeline:
stage_gui = self.make_stage_gui(stage)
vbox.add_widget(stage_gui, stretch=0)
# add stretch
vbox.add_widget(Widgets.Label(''), stretch=1)
# wrap it in a scrollbox
scr = Widgets.ScrollArea()
scr.set_widget(vbox)
name = self.pipeline.name
if len(name) > 20:
name = name[:20] + '...'
fr = Widgets.Frame("Pipeline: {}".format(name))
self.w.gui_fr = fr
fr.set_widget(scr)
#top.add_widget(scr, stretch=1)
top.add_widget(fr, stretch=1)
tbar = Widgets.Toolbar(orientation='horizontal')
btn = tbar.add_action('Del')
btn.add_callback('activated', self.delete_stage_cb)
btn.set_tooltip("Delete selected stages")
self.w.delete = btn
btn = tbar.add_action('Ins')
btn.set_tooltip("Insert above selected stage")
btn.add_callback('activated', self.insert_stage_cb)
self.w.insert = btn
btn = tbar.add_action('Up')
btn.set_tooltip("Move selected stage up")
btn.add_callback('activated', self.move_stage_cb, 'up')
self.w.move_up = btn
btn = tbar.add_action('Dn')
btn.set_tooltip("Move selected stage down")
btn.add_callback('activated', self.move_stage_cb, 'down')
self.w.move_dn = btn
btn = tbar.add_action('Clr')
btn.set_tooltip("Clear selection")
btn.add_callback('activated', lambda w: self.clear_selected())
self.w.clear = btn
self.insert_menu = Widgets.Menu()
for name in self.stage_names:
item = self.insert_menu.add_name(name)
item.add_callback('activated', self._insert_stage_cb, name)
btn = tbar.add_action('Run')
btn.set_tooltip("Run entire pipeline")
btn.add_callback('activated', self.run_pipeline_cb)
self.w.run = btn
btn = tbar.add_action('En', toggle=True)
btn.set_tooltip("Enable pipeline")
btn.set_state(self.pipeline.enabled)
btn.add_callback('activated', self.enable_pipeline_cb)
self.w.enable = btn
top.add_widget(tbar, stretch=0)
status = Widgets.Label('')
self.w.pipestatus = status
top.add_widget(status, stretch=0)
btns = Widgets.HBox()
btns.set_spacing(3)
btn = Widgets.Button("Close")
btn.add_callback('activated', lambda w: self.close())
btns.add_widget(btn, stretch=0)
btn = Widgets.Button("Help")
btn.add_callback('activated', lambda w: self.help())
btns.add_widget(btn, stretch=0)
btns.add_widget(Widgets.Label(''), stretch=1)
top.add_widget(btns, stretch=0)
self._update_toolbar()
container.add_widget(top, stretch=1)
self.gui_up = True
def make_stage_gui(self, stage):
_vbox = Widgets.VBox()
hbox = Widgets.HBox()
xpd = Widgets.Expander(title=str(stage), notoggle=True)
tbar = Widgets.Toolbar(orientation='horizontal')
chk = tbar.add_action('B', toggle=True)
chk.add_callback('activated', self.bypass_stage_cb, stage)
chk.set_tooltip("Bypass this stage")
chk = tbar.add_action('S', toggle=True)
chk.add_callback('activated', self.select_stage_cb)
chk.set_tooltip("Select this stage")
stage.w.select = chk
chk = tbar.add_action('C', toggle=True)
chk.add_callback('activated', self.configure_stage_cb, xpd)
chk.set_tooltip("Configure this stage")
hbox.add_widget(tbar, stretch=0)
ent = Widgets.TextEntry(str(stage))
ent.add_callback('activated', self.rename_stage_cb, stage)
ent.set_tooltip("Rename this stage")
hbox.add_widget(ent, stretch=1)
_vbox.add_widget(hbox, stretch=0)
stage.build_gui(xpd)
xpd.add_callback('opened', lambda w: stage.resume())
xpd.add_callback('closed', lambda w: stage.pause())
stage.gui_up = True
_vbox.add_widget(xpd, stretch=0)
stage.w.gui = _vbox
return _vbox
def close(self):
chname = self.fv.get_channel_name(self.fitsimage)
self.fv.stop_local_plugin(chname, str(self))
self.gui_up = False
return True
def start(self):
for stage in self.pipeline:
stage.start()
# load any image in the channel into the start of the pipeline
image = self.fitsimage.get_image()
if image is not None:
self.pipeline[0].set_image(image)
def pause(self):
pass
def resume(self):
pass
def stop(self):
self.gui_up = False
for stage in self.pipeline:
stage.stop()
def set_pipeline_name_cb(self, widget):
name = widget.get_text().strip()
self.pipeline.name = name
if len(name) > 20:
name = name[:20] + '...'
self.w.gui_fr.set_text(name)
def bypass_stage_cb(self, widget, tf, stage):
idx = self.pipeline.index(stage)
stage.bypass(tf)
self.pipeline.run_from(stage)
def _update_toolbar(self):
stages = self.get_selected_stages()
self.w.insert.set_enabled(len(stages) <= 1)
self.w.delete.set_enabled(len(stages) >= 1)
self.w.move_up.set_enabled(len(stages) == 1)
self.w.move_dn.set_enabled(len(stages) == 1)
self.w.clear.set_enabled(len(stages) >= 1)
self.w.run.set_enabled(len(stages) <= 1)
self.w.enable.set_enabled(len(self.pipeline) > 0)
def select_stage_cb(self, widget, tf):
self._update_toolbar()
def configure_stage_cb(self, widget, tf, xpd):
xpd.expand(tf)
def rename_stage_cb(self, widget, stage):
name = widget.get_text().strip()
stage.name = name
def get_selected_stages(self):
res = [stage for stage in self.pipeline
if stage.w.select.get_state()]
return res
def clear_selected(self):
for stage in self.pipeline:
stage.w.select.set_state(False)
self._update_toolbar()
def _remove_stage(self, stage, destroy=False):
self.pipeline.remove(stage)
self.pipelist.remove(stage.w.gui, delete=destroy)
if destroy:
# destroy stage gui
stage.gui_up = False
stage.stop()
stage.w = None
def delete_stage_cb(self, widget):
stages = self.get_selected_stages()
self.clear_selected()
for stage in stages:
self._remove_stage(stage, destroy=True)
def insert_stage_cb(self, widget):
stages = self.get_selected_stages()
if len(stages) > 1:
self.fv.show_error("Please select at most only one stage",
raisetab=True)
return
self.insert_menu.popup()
def _insert_stage_cb(self, widget, name):
stages = self.get_selected_stages()
if len(stages) == 1:
stage = stages[0]
idx = self.pipeline.index(stage)
else:
idx = len(stages)
# realize this stage
stage = self.stage_dict[name]()
self.pipeline._init_stage(stage)
self.pipeline.insert(idx, stage)
stage_gui = self.make_stage_gui(stage)
self.pipelist.insert_widget(idx, stage_gui, stretch=0)
def _relocate_stage(self, idx, stage):
self._remove_stage(stage, destroy=False)
self.pipeline.insert(idx, stage)
self.pipelist.insert_widget(idx, stage.w.gui, stretch=0)
def move_up(self, stage):
idx = self.pipeline.index(stage)
if idx == 0:
# stage is already at the top
return
self._relocate_stage(idx - 1, stage)
def move_down(self, stage):
idx = self.pipeline.index(stage)
if idx == len(self.pipeline) - 1:
# stage is already at the bottom
return
self._relocate_stage(idx + 1, stage)
def move_stage_cb(self, widget, direction):
stages = self.get_selected_stages()
        if len(stages) != 1:
            self.fv.show_error("Please select only a single stage",
                               raisetab=True)
            return
        stage = stages[0]
if direction == 'up':
self.move_up(stage)
else:
self.move_down(stage)
def run_pipeline_cb(self, widget):
self.pipeline.run_all()
def run_pipeline_partial_cb(self, widget):
stages = self.get_selected_stages()
if len(stages) == 0:
self.pipeline.run_all()
return
if len(stages) != 1:
self.fv.show_error("Please select only a single stage",
raisetab=True)
return
stage = stages[0]
self.pipeline.run_from(stage)
def enable_pipeline_cb(self, widget, tf):
self.pipeline.enable(tf)
def undo_pipeline_cb(self, widget):
self.pipeline.undo()
def redo_pipeline_cb(self, widget):
self.pipeline.redo()
def save_pipeline(self, path):
import yaml
d = self.pipeline.save()
with open(path, 'w') as out_f:
out_f.write(yaml.dump(d))
def load_pipeline(self, path):
import yaml
self.pipelist.remove_all(delete=True)
self.pipelist.add_widget(Widgets.Label(''), stretch=1)
with open(path, 'r') as in_f:
s = in_f.read()
d = yaml.safe_load(s)
self.pipeline.load(d, self.stage_dict)
self.pipeline.set(fv=self.fv, viewer=self.fitsimage)
for i, stage in enumerate(self.pipeline):
stage_gui = self.make_stage_gui(stage)
self.pipelist.insert_widget(i, stage_gui, stretch=0)
name = self.pipeline.name
self.w.pipeline_name.set_text(name)
if len(name) > 20:
name = name[:20] + '...'
self.w.gui_fr.set_text(name)
def save_pipeline_cb(self, widget):
save_file = os.path.join(tempfile.gettempdir(), self.save_file)
self.save_pipeline(save_file)
def load_pipeline_cb(self, widget):
save_file = os.path.join(tempfile.gettempdir(), self.save_file)
self.load_pipeline(save_file)
def redo(self):
image = self.fitsimage.get_image()
if image is not None:
stage0 = self.pipeline[0]
stage0.set_image(image)
def stage_status(self, pipeline, stage, txt):
if stage.gui_up:
self.w.pipestatus.set_text(txt + ': ' + stage.name)
self.fv.update_pending()
def clear_status(self, pipeline, stage):
for stage in pipeline:
if stage.gui_up:
self.w.pipestatus.set_text(stage.name)
self.fv.update_pending()
def __str__(self):
return 'pipeline'
# Append module docstring with config doc for auto insert by Sphinx.
from ginga.util.toolbox import generate_cfg_example # noqa
if __doc__ is not None:
__doc__ += generate_cfg_example('plugin_Pipeline', package='ginga')
# END
| pllim/ginga | ginga/rv/plugins/Pipeline.py | Python | bsd-3-clause | 14,214 |
# Generated by Django 2.2.16 on 2020-10-15 17:48
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounting', '0050_app_user_profiles'),
]
operations = [
migrations.AddField(
model_name='billingaccount',
name='block_email_domains_from_hubspot',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(blank=True, max_length=253, null=True), default=list, blank=True, null=True, size=None),
),
migrations.AddField(
model_name='billingaccount',
name='block_hubspot_data_for_all_users',
field=models.BooleanField(default=False),
),
]
| dimagi/commcare-hq | corehq/apps/accounting/migrations/0051_hubspot_restrictions.py | Python | bsd-3-clause | 773 |
from devilry.apps.core import models
from devilry.simplified import FieldSpec, FilterSpec, FilterSpecs
class SimplifiedExaminerMetaMixin(object):
""" Defines the django model to be used, resultfields returned by
    search and which fields can be used to search for an Examiner object
using the Simplified API """
model = models.Examiner
resultfields = FieldSpec('id',
'assignmentgroup')
searchfields = FieldSpec()
filters = FilterSpecs(FilterSpec('id'),
FilterSpec('assignmentgroup'),
FilterSpec('assignmentgroup__parentnode'), # Assignment
FilterSpec('assignmentgroup__parentnode__parentnode'), # Period
FilterSpec('assignmentgroup__parentnode__parentnode__parentnode') # Subject
)
| vegarang/devilry-django | devilry/coreutils/simplified/metabases/examiner.py | Python | bsd-3-clause | 867 |
#!/usr/bin/python2.6
# This file is a part of Metagam project.
#
# Metagam is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# Metagam is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Metagam. If not, see <http://www.gnu.org/licenses/>.
import unittest
from concurrence import dispatch, Tasklet
from mg.constructor import *
from mg.core.cass import CassandraPool
from mg.core.memcached import MemcachedPool
from mg.mmorpg.inventory_classes import DBItemType, DBItemTypeParams
from uuid import uuid4
class TestItems(unittest.TestCase, ConstructorModule):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
self.inst = Instance("test", "test")
self.inst._dbpool = CassandraPool((("localhost", 9160),))
self.inst._mcpool = MemcachedPool()
self.app_obj = Application(self.inst, "mgtest")
self.app_obj.modules.load(["mg.mmorpg.inventory.Inventory", "mg.core.l10n.L10n",
"mg.constructor.script.ScriptEngine"])
ConstructorModule.__init__(self, self.app_obj, "mg.test.testitems.TestItems")
mc = Memcached(prefix="mgtest-")
mc.delete("Cassandra-CF-mgtest-Data")
def test(self):
# creating parameters metadata
config = self.app().config_updater()
params = []
for i in xrange(1, 6):
params.append({
"code": "param%d" % i,
"description": "Parameter %d" % i,
"grp": "",
"default": i,
"visual_mode": 0,
"order": 0.0,
"type": 0,
"name": "Parameter %d" % i,
})
config.set("item-types.params", params)
config.store()
# creating test item type
db_item_type = self.obj(DBItemType)
db_item_type.set("name", "Test Item")
db_item_type.set("name_lower", "test item")
db_item_type.store()
# creating item type parameters
db_params = self.obj(DBItemTypeParams, db_item_type.uuid, data={})
db_params.set("param1", 10)
db_params.set("param2", 20)
db_params.store()
# testing ItemType class
item_type = self.item_type(db_item_type.uuid)
self.assertTrue(item_type)
self.assertEqual(item_type.name, "Test Item")
self.assertEqual(item_type.param("param1"), 10)
self.assertEqual(item_type.param("param2"), 20)
self.assertEqual(item_type.param("param3"), 3)
self.assertEqual(item_type.param("param4"), 4)
self.assertEqual(item_type.param("param5"), 5)
# creating member inventory
inv = self.call("inventory.get", "char", uuid4().hex)
# testing MemberInventory class
self.assertEqual(len(inv.items()), 0)
inv.give(item_type.uuid, 5)
self.assertEqual(len(inv.items()), 1)
# reloading inventory contents
inv = self.call("inventory.get", "char", inv.uuid)
self.assertEqual(len(inv.items()), 1)
inv_item_type, inv_quantity = inv.items()[0]
self.assertEqual(inv_item_type.uuid, item_type.uuid)
self.assertEqual(inv_quantity, 5)
# giving items
inv.give(item_type.uuid, 3)
self.assertEqual(len(inv.items()), 1)
inv_item_type, inv_quantity = inv.items()[0]
self.assertEqual(inv_item_type.uuid, item_type.uuid)
self.assertEqual(inv_quantity, 8)
# taking items
inv.take_type(item_type.uuid, 7)
self.assertEqual(len(inv.items()), 1)
inv_item_type, inv_quantity = inv.items()[0]
self.assertEqual(inv_item_type.uuid, item_type.uuid)
self.assertEqual(inv_quantity, 1)
inv.take_type(item_type.uuid, 1)
self.assertEqual(len(inv.items()), 0)
# giving some items back
inv.give(item_type.uuid, 2)
# creating item object
item = self.item(inv, db_item_type.uuid)
self.assertTrue(item)
# testing translation of calls to the underlying item type
self.assertEqual(item.name, "Test Item")
self.assertEqual(item.param("param1"), 10)
self.assertEqual(item.param("param2"), 20)
self.assertEqual(item.param("param3"), 3)
self.assertEqual(item.param("param4"), 4)
self.assertEqual(item.param("param5"), 5)
# modifying item
item.set_param("param3", 30)
items = inv.items()
self.assertEqual(len(items), 2)
items.sort(cmp=lambda x, y: cmp(x[0].dna, y[0].dna))
self.assertEqual(items[0][0].dna, item.uuid)
self.assertEqual(items[0][1], 1)
self.assertEqual(items[0][0].param("param1"), 10)
self.assertEqual(items[0][0].param("param2"), 20)
self.assertEqual(items[0][0].param("param3"), 3)
self.assertEqual(items[0][0].param("param4"), 4)
self.assertEqual(items[0][0].param("param5"), 5)
self.assertEqual(items[1][0].dna, item.dna)
self.assertEqual(items[1][1], 1)
self.assertEqual(items[1][0].param("param1"), 10)
self.assertEqual(items[1][0].param("param2"), 20)
self.assertEqual(items[1][0].param("param3"), 30)
self.assertEqual(items[1][0].param("param4"), 4)
self.assertEqual(items[1][0].param("param5"), 5)
# modifying this item again
item.set_param("param4", 40)
items = inv.items()
self.assertEqual(len(items), 2)
items.sort(cmp=lambda x, y: cmp(x[0].dna, y[0].dna))
self.assertEqual(items[0][0].dna, item.uuid)
self.assertEqual(items[0][1], 1)
self.assertEqual(items[0][0].param("param1"), 10)
self.assertEqual(items[0][0].param("param2"), 20)
self.assertEqual(items[0][0].param("param3"), 3)
self.assertEqual(items[0][0].param("param4"), 4)
self.assertEqual(items[0][0].param("param5"), 5)
self.assertEqual(items[1][0].dna, item.dna)
self.assertEqual(items[1][1], 1)
self.assertEqual(items[1][0].param("param1"), 10)
self.assertEqual(items[1][0].param("param2"), 20)
self.assertEqual(items[1][0].param("param3"), 30)
self.assertEqual(items[1][0].param("param4"), 40)
self.assertEqual(items[1][0].param("param5"), 5)
# modifying another item to the same value
item = self.item(inv, db_item_type.uuid)
item.set_param("param3", 30)
item.set_param("param4", 40)
items = inv.items()
self.assertEqual(len(items), 1)
self.assertEqual(items[0][0].dna, item.dna)
self.assertEqual(items[0][1], 2)
self.assertEqual(items[0][0].param("param1"), 10)
self.assertEqual(items[0][0].param("param2"), 20)
self.assertEqual(items[0][0].param("param3"), 30)
self.assertEqual(items[0][0].param("param4"), 40)
self.assertEqual(items[0][0].param("param5"), 5)
if __name__ == "__main__":
dispatch(unittest.main)
| JoyTeam/metagam | mg/test/testitems.py | Python | gpl-3.0 | 7,360 |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
import gc
import os
import pickle
import time
from helpers import unittest
import luigi
import mock
import psutil
from luigi.worker import Worker
def running_children():
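    """Return the pids of this process's child processes that are still running."""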
children = set()
process = psutil.Process(os.getpid())
for child in process.children():
if child.is_running():
children.add(child.pid)
return children
@contextlib.contextmanager
def pause_gc():
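    """Disable the garbage collector for the duration of the block (no-op if gc is already off)."""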
    if not gc.isenabled():
        yield
        return
    try:
gc.disable()
yield
finally:
gc.enable()
class SlowCompleteWrapper(luigi.WrapperTask):
def requires(self):
return [SlowCompleteTask(i) for i in range(4)]
class SlowCompleteTask(luigi.Task):
n = luigi.IntParameter()
def complete(self):
time.sleep(0.1)
return True
class OverlappingSelfDependenciesTask(luigi.Task):
n = luigi.IntParameter()
k = luigi.IntParameter()
def complete(self):
return self.n < self.k or self.k == 0
def requires(self):
return [OverlappingSelfDependenciesTask(self.n - 1, k) for k in range(self.k + 1)]
class ExceptionCompleteTask(luigi.Task):
def complete(self):
assert False
class ExceptionRequiresTask(luigi.Task):
def requires(self):
assert False
class UnpicklableExceptionTask(luigi.Task):
def complete(self):
class UnpicklableException(Exception):
pass
raise UnpicklableException()
class ParallelSchedulingTest(unittest.TestCase):
def setUp(self):
self.sch = mock.Mock()
self.w = Worker(scheduler=self.sch, worker_id='x')
def added_tasks(self, status):
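        """Return the ids of tasks that were sent to the mock scheduler with the given status."""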
return [kw['task_id'] for args, kw in self.sch.add_task.call_args_list if kw['status'] == status]
def test_children_terminated(self):
before_children = running_children()
with pause_gc():
self.w.add(
OverlappingSelfDependenciesTask(5, 2),
multiprocess=True,
)
self.assertLessEqual(running_children(), before_children)
def test_multiprocess_scheduling_with_overlapping_dependencies(self):
self.w.add(OverlappingSelfDependenciesTask(5, 2), True)
self.assertEqual(15, self.sch.add_task.call_count)
self.assertEqual(set((
OverlappingSelfDependenciesTask(n=1, k=1).task_id,
OverlappingSelfDependenciesTask(n=2, k=1).task_id,
OverlappingSelfDependenciesTask(n=2, k=2).task_id,
OverlappingSelfDependenciesTask(n=3, k=1).task_id,
OverlappingSelfDependenciesTask(n=3, k=2).task_id,
OverlappingSelfDependenciesTask(n=4, k=1).task_id,
OverlappingSelfDependenciesTask(n=4, k=2).task_id,
OverlappingSelfDependenciesTask(n=5, k=2).task_id,
)), set(self.added_tasks('PENDING')))
self.assertEqual(set((
OverlappingSelfDependenciesTask(n=0, k=0).task_id,
OverlappingSelfDependenciesTask(n=0, k=1).task_id,
OverlappingSelfDependenciesTask(n=1, k=0).task_id,
OverlappingSelfDependenciesTask(n=1, k=2).task_id,
OverlappingSelfDependenciesTask(n=2, k=0).task_id,
OverlappingSelfDependenciesTask(n=3, k=0).task_id,
OverlappingSelfDependenciesTask(n=4, k=0).task_id,
)), set(self.added_tasks('DONE')))
@mock.patch('luigi.notifications.send_error_email')
def test_raise_exception_in_complete(self, send):
self.w.add(ExceptionCompleteTask(), multiprocess=True)
        self.assertEqual(1, send.call_count)
self.assertEqual(0, self.sch.add_task.call_count)
self.assertTrue('assert False' in send.call_args[0][1])
@mock.patch('luigi.notifications.send_error_email')
def test_raise_unpicklable_exception_in_complete(self, send):
# verify exception can't be pickled
self.assertRaises(Exception, UnpicklableExceptionTask().complete)
try:
UnpicklableExceptionTask().complete()
except Exception as e:
ex = e
self.assertRaises(pickle.PicklingError, pickle.dumps, ex)
# verify this can run async
self.w.add(UnpicklableExceptionTask(), multiprocess=True)
        self.assertEqual(1, send.call_count)
self.assertEqual(0, self.sch.add_task.call_count)
self.assertTrue('raise UnpicklableException()' in send.call_args[0][1])
@mock.patch('luigi.notifications.send_error_email')
def test_raise_exception_in_requires(self, send):
self.w.add(ExceptionRequiresTask(), multiprocess=True)
        self.assertEqual(1, send.call_count)
self.assertEqual(0, self.sch.add_task.call_count)
if __name__ == '__main__':
unittest.main()
| bmaggard/luigi | test/worker_parallel_scheduling_test.py | Python | apache-2.0 | 5,302 |
#!/usr/bin/env python
"""
Various signal-related context managers
"""
from contextlib import contextmanager
import signal
@contextmanager
def ExceptionOnSignal(s=signal.SIGUSR1, e=Exception, i=None):
"""
Raise a specific exception when the specified signal is detected.
"""
def handler(signum, frame):
if i is not None:
raise e('signal %i detected in %s' % (s, i))
else:
raise e('signal %i detected' % s)
signal.signal(s, handler)
yield
@contextmanager
def TryExceptionOnSignal(s=signal.SIGUSR1, e=Exception, i=None):
"""
Check for exception raised in response to specific signal.
"""
with ExceptionOnSignal(s, e, i):
try:
yield
except e:
pass
@contextmanager
def IgnoreSignal(s=signal.SIGUSR1):
"""
Ignore the specified signal.
"""
signal.signal(s, signal.SIG_IGN)
yield
@contextmanager
def IgnoreKeyboardInterrupt():
"""
Ignore keyboard interrupts.
"""
signal.signal(signal.SIGINT, signal.SIG_IGN)
yield
@contextmanager
def OnKeyboardInterrupt(handler):
"""
Respond to keyboard interrupt with specified handler.
"""
signal.signal(signal.SIGINT, handler)
yield
if __name__ == '__main__':
# This example should quit when Ctrl-C is pressed:
import time
def handler(signum, frame):
print 'caught'
handler.done = True
handler.done = False
with OnKeyboardInterrupt(handler):
while True:
print 'waiting'
time.sleep(1)
if handler.done:
break
| cerrno/neurokernel | neurokernel/ctx_managers.py | Python | bsd-3-clause | 1,627 |
#!/usr/bin/env python
# coding: utf-8
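"""Label-protection demo on Fashion-MNIST.

Labels are reduced to a rare binary positive class when --num_outputs is 2 (the
default). A small MLP is trained while AUC metrics measure how much the labels
leak through per-example gradient norms at the cut layer, optionally applying
max-norm gradient masking (--max_norm) or a sumKL-constrained Gaussian
perturbation (--sumKL) to the gradients of the second hidden layer.
"""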
import sys
import os
import argparse
import datetime
import time
import random
# import logging
import numpy as np
import tensorflow as tf
from tensorflow.keras.datasets import fashion_mnist
from solver import solve_isotropic_covariance, symKL_objective
import shared_var
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", type=int, default=300)
parser.add_argument('--gpu_option', action='store_true')
parser.add_argument('--gpu_id', type=int, default=0)
parser.add_argument("--num_epochs", type=int, default=1)
parser.add_argument("--num_outputs", type=int, default=2)
parser.add_argument('--max_norm', action='store_true')
parser.add_argument('--sumKL', action='store_true')
parser.add_argument('--debug', action='store_true')
args = parser.parse_args()
gpu_option = args.gpu_option
if gpu_option:
gpus = tf.config.experimental.list_physical_devices('GPU')
print("Num GPUs Available: ", len(gpus))
if gpus:
# Restrict TensorFlow to only use the first GPU
try:
tf.config.experimental.set_visible_devices(gpus[0], 'GPU')
tf.config.experimental.set_memory_growth(gpus[0], True)
except RuntimeError as e:
print(e)
else:
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
print("current available GPUs: {}".format(
len(tf.config.experimental.list_physical_devices('GPU'))))
# batch_size = args.batch_size
batch_size_test = 200
is_shuffle = True
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
x_train = tf.cast(x_train, tf.float32) / 255
x_test = tf.cast(x_test, tf.float32) / 255
total_training_instances = len(x_train)
total_test_instances = len(x_test)
num_batchs = total_training_instances / args.batch_size
print(
"# training: {}, # test: {}, # batchs: {}".format(
total_training_instances,
total_test_instances,
num_batchs))
def change_label(y, ratio=10):
def condition(x):
if x == 1:
if random.randint(0, ratio) <= 1:
return 1
return 0
l = [1 if i == 1 else 0 for i in y]
res = np.array(list(map(condition, l)))
# res = np.array(list(map(lambda x: condition(x), l)))
print("positive ratio: {}".format(sum(res) / len(l)))
return res
if args.num_outputs == 2:
y_train = change_label(y_train)
y_test = change_label(y_test)
# is_shuffle = False
if is_shuffle:
train_ds_iter = tf.data.Dataset.from_tensor_slices((x_train, y_train)).\
shuffle(
total_training_instances + 1, reshuffle_each_iteration=True).\
batch(args.batch_size)
test_ds_iter = tf.data.Dataset.from_tensor_slices((x_test, y_test)).\
shuffle(
total_test_instances + 1, reshuffle_each_iteration=True).\
batch(batch_size_test)
else:
train_ds_iter = tf.data.Dataset.from_tensor_slices(
(x_train, y_train)).batch(args.batch_size)
test_ds_iter = tf.data.Dataset.from_tensor_slices(
(x_test, y_test)).batch(batch_size_test)
def get_fashion_mnist_labels(labels):
text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
return [text_labels[int(i)] for i in labels]
if sys.platform.startswith('win'):
num_workers = 0
else:
num_workers = 4
num_inputs = 784
if args.debug:
hidden_outputs_1 = 6
hidden_outputs_2 = 4
else:
hidden_outputs_1 = 128
hidden_outputs_2 = 32
num_outputs = 1
W = tf.Variable(
tf.random.normal(
shape=(
num_inputs,
hidden_outputs_1),
mean=0,
stddev=0.01,
dtype=tf.float32))
W1 = tf.Variable(
tf.random.normal(
shape=(
hidden_outputs_1,
hidden_outputs_2),
mean=0,
stddev=0.01,
dtype=tf.float32))
W2 = tf.Variable(
tf.random.normal(
shape=(
hidden_outputs_2,
num_outputs),
mean=0,
stddev=0.01,
dtype=tf.float32))
b = tf.Variable(tf.zeros(hidden_outputs_1, dtype=tf.float32))
b1 = tf.Variable(tf.zeros(hidden_outputs_2, dtype=tf.float32))
b2 = tf.Variable(tf.zeros(num_outputs, dtype=tf.float32))
def softmax(logits, axis=-1):
return tf.nn.softmax(logits, axis=axis)
def cross_entropy(y_hat, y):
# return tf.nn.softmax_cross_entropy_with_logits(y, y_hat)
y = tf.cast(tf.reshape(y, shape=[-1, 1]), dtype=tf.int32)
y = tf.one_hot(y, depth=y_hat.shape[-1])
y = tf.cast(tf.reshape(y, shape=[-1, y_hat.shape[-1]]), dtype=tf.int32)
return -tf.math.log(tf.boolean_mask(y_hat, y) + 1e-8)
def sigmoid_cross_entropy(y_hat, y):
y = tf.cast(tf.reshape(y, shape=[-1, 1]), dtype=tf.float32)
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=y_hat)
return loss
train_auc = tf.keras.metrics.AUC()
leakage_auc_baseline = tf.keras.metrics.AUC(num_thresholds=1000)
leakage_auc_baseline_all = tf.keras.metrics.AUC(num_thresholds=1000)
leakage_auc_masked_hiddenlayer_1 = tf.keras.metrics.AUC(num_thresholds=1000)
leakage_auc_masked_hiddenlayer_1_all = tf.keras.metrics.AUC(
num_thresholds=1000)
leakage_auc_masked_hiddenlayer_2 = tf.keras.metrics.AUC(num_thresholds=1000)
leakage_auc_not_masked_hiddenlayer_2 = tf.keras.metrics.AUC(
num_thresholds=1000)
def normalized_norm(vector):
norm_g = tf.norm(vector, axis=-1, keepdims=False)
norm_g = norm_g / tf.math.reduce_max(norm_g)
return norm_g
def label_leakge_auc(y, predicted_value, m_auc):
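    """Min-max normalize |predicted_value| (e.g. per-example gradient norms) and
    accumulate it into the AUC metric m_auc against the true labels y, i.e. how
    well the magnitudes alone separate positives from negatives."""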
predicted_value = tf.math.abs(predicted_value)
val_max = tf.math.reduce_max(predicted_value)
val_min = tf.math.reduce_min(predicted_value)
pred = (predicted_value - val_min) / (val_max - val_min + 1e-12)
m_auc.update_state(tf.reshape(y, [-1, 1]), tf.reshape(pred, [-1, 1]))
# @tf.custom_gradient
# def gradient_masking_2(x):
# # add scalar noise with aligning the maximum norm (positive instance)
# global _Batch_Labels
# _Batch_Labels = tf.reshape(
# tf.cast(_Batch_Labels, dtype=tf.float32), [-1, 1])
#
# def grad_fn(g):
# g_norm = tf.reshape(tf.norm(g, axis=1, keepdims=True), [-1, 1])
# max_norm = tf.reduce_max(g_norm * _Batch_Labels)
# stds = tf.sqrt(tf.maximum(max_norm ** 2 /
# (g_norm ** 2 + 1e-32) - 1.0, 0.0))
# standard_gaussian_noise = tf.random.normal(
# tf.shape(_Batch_Labels), mean=0.0, stddev=1.0)
# gaussian_noise = standard_gaussian_noise * stds
# res = g * (1 + gaussian_noise)
# return res
# return x, grad_fn
@tf.custom_gradient
def gradient_masking(x):
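    """Identity on the forward pass; on the backward pass each per-example
    gradient is multiplied by (1 + Gaussian noise) whose std is chosen so the
    expected squared norm of every perturbed gradient matches the largest
    gradient norm in the batch."""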
# add scalar noise to align with the maximum norm in the batch
# (expectation norm alignment)
def grad_fn(g):
g_norm = tf.reshape(tf.norm(g, axis=1, keepdims=True), [-1, 1])
max_norm = tf.reduce_max(g_norm)
stds = tf.sqrt(tf.maximum(max_norm ** 2 /
(g_norm ** 2 + 1e-32) - 1.0, 0.0))
standard_gaussian_noise = tf.random.normal(
shape=(tf.shape(g)[0], 1), mean=0.0, stddev=1.0)
gaussian_noise = standard_gaussian_noise * stds
res = g * (1 + gaussian_noise)
return res
return x, grad_fn
@tf.custom_gradient
def KL_gradient_perturb(x):
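    """Identity on the forward pass. On the backward pass, zero-mean Gaussian
    noise with a class-conditional covariance (isotropic plus a rank-one term
    along the difference of the positive/negative mean gradients) is added to
    each per-example gradient. The covariance parameters come from
    solve_isotropic_covariance, and the power budget is loosened (scale *= 1.5)
    until the summed symmetrized KL between the perturbed positive and negative
    gradient distributions falls below sumKL_threshold."""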
uv_choice = "uv" # "uv"
init_scale = 1.0
p_frac = 'pos_frac'
dynamic = True
error_prob_lower_bound = None
sumKL_threshold = 0.16 # 0.25 #0.81 #0.64#0.16 #0.64
if dynamic and (error_prob_lower_bound is not None):
sumKL_threshold = (2 - 4 * error_prob_lower_bound)**2
# print('error_prob_lower_bound', error_prob_lower_bound)
# print('implied sumKL_threshold', sumKL_threshold)
# elif dynamic:
# print('using sumKL_threshold', sumKL_threshold)
# global _Batch_Labels
batch_y = tf.reshape(tf.cast(shared_var.G_Batch_Labels, \
dtype=tf.float32), [-1, 1])
def grad_fn(g):
# logging.info("gradient shape_g: {}".format(tf.shape(g)))
# print('start')
# start = time.time()
y = batch_y
# pos_g = g[y==1]
pos_g = tf.boolean_mask(
g, tf.tile(
tf.cast(
y, dtype=tf.int32), [
1, tf.shape(g)[1]]))
pos_g = tf.reshape(pos_g, [-1, tf.shape(g)[1]])
pos_g_mean = tf.math.reduce_mean(
pos_g, axis=0, keepdims=True) # shape [1, d]
pos_coordinate_var = tf.reduce_mean(
tf.math.square(
pos_g - pos_g_mean),
axis=0) # use broadcast
# neg_g = g[y==0]
neg_g = tf.boolean_mask(g, tf.tile(
1 - tf.cast(y, dtype=tf.int32), [1, tf.shape(g)[1]]))
neg_g = tf.reshape(neg_g, [-1, tf.shape(g)[1]])
neg_g_mean = tf.math.reduce_mean(
neg_g, axis=0, keepdims=True) # shape [1, d]
neg_coordinate_var = tf.reduce_mean(
tf.math.square(neg_g - neg_g_mean), axis=0)
avg_pos_coordinate_var = tf.reduce_mean(pos_coordinate_var)
avg_neg_coordinate_var = tf.reduce_mean(neg_coordinate_var)
if tf.math.is_nan(avg_pos_coordinate_var) or tf.math.is_nan(
avg_neg_coordinate_var):
if args.debug:
print("no negative/positive instances in this batch")
return g
g_diff = pos_g_mean - neg_g_mean
# g_diff_norm = float(tf.norm(tensor=g_diff).numpy())
g_diff_norm = tf.norm(tensor=g_diff)
if uv_choice == 'uv':
u = avg_neg_coordinate_var
v = avg_pos_coordinate_var
elif uv_choice == 'same':
u = (avg_neg_coordinate_var + avg_pos_coordinate_var) / 2.0
v = (avg_neg_coordinate_var + avg_pos_coordinate_var) / 2.0
elif uv_choice == 'zero':
# logging.info("uv_choice: zero")
u, v = 0.0, 0.0
# d = float(g.shape[1])
# d = float(tf.shape(g)[1])
d = tf.cast(tf.shape(g)[1], dtype=tf.float32)
if p_frac == 'pos_frac':
# p = float(tf.math.reduce_mean(y))
# p = float(tf.reshape(tf.math.reduce_mean(y), []))
p = tf.math.reduce_mean(y)
# p = float(tf.reduce_sum(y) / len(y)) # p is set as the fraction
# of positive in the batch
else:
p = float(p_frac)
scale = init_scale
g_norm_square = g_diff_norm ** 2
# def print_tensor(pos_g_mean, neg_g_mean, g_diff):
# logging.info(
# "gradient pos_g_mean: {}, neg_g_mean: {}".format(
# np.mean(pos_g_mean),
# np.mean(neg_g_mean)))
# logging.info(
# "gradient pos_g_max: {}, neg_g_max: {}".format(
# np.amax(pos_g_mean),
# np.amax(neg_g_mean)))
# logging.info(
# "gradient pos_g_min: {}, neg_g_min: {}".format(
# np.amin(pos_g_mean),
# np.amin(neg_g_mean)))
# logging.info("gradient pos_g_norm: {}, neg_g_norm: {}".format(
# np.linalg.norm(pos_g_mean), np.linalg.norm(neg_g_mean)))
# logging.info(
# "gradient g_diff_mean: {}, g_diff_min: {}, g_diff_max: {}, \
# g_diff_norm: {}".format(
# np.mean(g_diff),
# np.amin(g_diff),
# np.amax(g_diff),
# np.linalg.norm(g_diff)))
def compute_lambdas_tf2(
u,
v,
scale,
d,
g_norm_square,
p,
sumKL_threshold,
pos_g_mean,
neg_g_mean,
g_diff):
if args.debug:
print(
"u: {}, v:{}, scale:{}, d:{}, g_diff_norm_square:{}, p:{}, \
sumKL_threshold:{}".format(
u,
v,
scale,
d,
g_norm_square,
p,
sumKL_threshold))
# kl_obj = symKL_objective(
# 0.0, 0.0, 0.0, 0.0, u, v, d, g_norm_square)
# if args.debug:
# print(
# "u: {}, v:{}, scale:{}, d:{}, g_diff_norm_square:{}, \
# p:{}, sumKL_threshold:{}, current_kl: {}".format(
# u,
# v,
# scale,
# d,
# g_norm_square,
# p,
# sumKL_threshold,
# kl_obj))
# if kl_obj < sumKL_threshold:
# if args.debug:
# print(
# "lam10: {}, lam20: {}, lam11:{}, lam21:{}, \
# sumKL:{}".format(
# 0.0, 0.0, 0.0, 0.0, kl_obj))
# return np.float32(0.0), np.float32(
# 0.0), np.float32(0.0), np.float32(0.0), kl_obj
lam10, lam20, lam11, lam21 = None, None, None, None
start = time.time()
while True:
P = scale * g_norm_square
lam10, lam20, lam11, lam21, sumKL = \
solve_isotropic_covariance(u=u,
v=v,
d=d,
g_norm_square=g_norm_square,
p=p,
P=P,
lam10_init=lam10,
lam20_init=lam20,
lam11_init=lam11,
lam21_init=lam21)
if args.debug:
print('scale: {}, sumKL: {}, P:{}'.format(scale, sumKL, P))
if not dynamic or sumKL <= sumKL_threshold:
break
scale *= 1.5 # loosen the power constraint
if args.debug:
print(
"lam10: {}, lam20: {}, lam11:{}, lam21:{}, sumKL:{}".format(
lam10, lam20, lam11, lam21, sumKL))
print(
'solve_isotropic_covariance solving time: {}'.format(
time.time() - start))
return lam10, lam20, lam11, lam21, sumKL
def compute_lambdas_tf1(
u,
v,
scale,
d,
g_norm_square,
p,
sumKL_threshold,
pos_g_mean,
neg_g_mean,
g_diff):
# print_tensor(pos_g_mean, neg_g_mean, g_diff)
u = np.float32(np.asscalar(u))
v = np.float32(np.asscalar(v))
scale = np.float32(np.asscalar(scale))
d = np.float32(np.asscalar(d))
g_norm_square = np.float32(np.asscalar(g_norm_square))
p = np.float32(np.asscalar(p))
sumKL_threshold = np.float32(np.asscalar(sumKL_threshold))
kl_obj = symKL_objective(
np.float32(0.0),
np.float32(0.0),
np.float32(0.0),
np.float32(0.0),
u,
v,
d,
g_norm_square)
# logging.info(
# "u: {}, v:{}, scale:{}, d:{}, g_diff_norm_square:{}, p:{},\
# sumKL_threshold:{}, current_kl: {}".format(
# u,
# v,
# scale,
# d,
# g_norm_square,
# p,
# sumKL_threshold,
# kl_obj))
if kl_obj < sumKL_threshold:
# logging.info(
# "lam10: {}, lam20: {}, lam11:{}, lam21:{}, sumKL:{} \
# ".format(0.0, 0.0, 0.0, 0.0, kl_obj))
return np.float32(0.0), np.float32(
0.0), np.float32(0.0), np.float32(0.0), kl_obj
lam10, lam20, lam11, lam21 = None, None, None, None
start = time.time()
while True:
P = scale * g_norm_square
lam10, lam20, lam11, lam21, sumKL = \
solve_isotropic_covariance(u=u,
v=v,
d=d,
g_norm_square=g_norm_square,
p=p,
P=P,
lam10_init=lam10,
lam20_init=lam20,
lam11_init=lam11,
lam21_init=lam21)
# logging.info(
# 'scale: {}, sumKL: {}, P:{}, type_scale: {}, type_sumKL: \
# {}, type_P:{}'.format(
# scale, sumKL, P, type(scale), type(sumKL), type(P)))
if not dynamic or sumKL <= sumKL_threshold:
break
scale *= np.float32(1.5) # loosen the power constraint
# logging.info(
# "lam10: {}, lam20: {}, lam11:{}, lam21:{}, sumKL:{}".format(
# lam10, lam20, lam11, lam21, sumKL))
# logging.info(
# "math.sqrt(lam10-lam20): {}, math.sqrt(lam11 - lam21): \
# {}".format(
# np.sqrt(
# (lam10 - lam20)),
# np.sqrt(
# (lam11 - lam21))))
# logging.info(
# "math.sqrt(lam10-lam20)/g_diff_norm: {}, math.sqrt(lam11 - \
# lam21)/g_diff_norm: {}".format(
# np.sqrt(
# (lam10 - lam20) / g_norm_square),
# np.sqrt(
# (lam11 - lam21) / g_norm_square)))
# logging.info(
# 'solve_isotropic_covariance solving time: {}'.format(
# time.time() - start))
return lam10, lam20, lam11, lam21, sumKL
# tensorflow 1.x
# lam10, lam20, lam11, lam21, sumKL =
# tf.py_func(compute_lambdas_tf1, [u, v, scale, d, g_norm_square, p, \
# sumKL_threshold,
# pos_g_mean, neg_g_mean, g_diff], [tf.float32, tf.float32, tf.float32,
# tf.float32, tf.float32])
lam10, lam20, lam11, lam21, sumKL = compute_lambdas_tf2(
u, v, scale, d, g_norm_square, p, sumKL_threshold, pos_g_mean, \
neg_g_mean, g_diff)
lam10, lam20, lam11, lam21, sumKL = tf.reshape(
lam10, shape=[1]), tf.reshape(
lam20, shape=[1]), tf.reshape(
lam11, shape=[1]), tf.reshape(
lam21, shape=[1]), tf.reshape(
sumKL, shape=[1])
perturbed_g = g
y_float = tf.cast(y, dtype=tf.float32)
noise_1 = tf.reshape(tf.multiply(x=tf.random.normal(shape= \
tf.shape(y)), y=y_float), \
shape=(-1, 1)) * g_diff * \
(tf.math.sqrt(tf.math.abs(lam11 - lam21)) / g_diff_norm)
noise_1 = tf.debugging.check_numerics(
noise_1, "noise_1 ERROR", name="noise_1_debugging")
noise_2 = tf.random.normal(shape=tf.shape(
g)) * tf.reshape(y_float, shape=(-1, 1)) * \
tf.math.sqrt(tf.math.maximum(lam21, 0.0))
noise_2 = tf.debugging.check_numerics(
noise_2, "noise_2 ERROR", name="noise_2_debugging")
noise_3 = tf.reshape(tf.multiply(x=tf.random.normal(shape=\
tf.shape(y)), y=1 - y_float),
shape=(-1, 1)) * g_diff * \
(tf.math.sqrt(tf.math.abs(lam10 - lam20)) / g_diff_norm)
noise_3 = tf.debugging.check_numerics(
noise_3, "noise_3 ERROR", name="noise_3_debugging")
noise_4 = tf.random.normal(shape=tf.shape(
g)) * tf.reshape(1 - y_float, shape=(-1, 1)) * \
tf.math.sqrt(tf.math.maximum(lam20, 0.0))
noise_4 = tf.debugging.check_numerics(
noise_4, "noise_3 ERROR", name="noise_4_debugging")
perturbed_g += (noise_1 + noise_2 + noise_3 + noise_4)
perturbed_g = tf.debugging.check_numerics(
perturbed_g, "perturbed_g ERROR", name="perturbed_g_debugging")
return perturbed_g
return x, grad_fn
def compute_gradient_norm(gradient, label):
g_norm = tf.reshape(tf.norm(gradient, axis=1, keepdims=True), [-1])
label = tf.cast(tf.reshape(label, [-1]), dtype=tf.float32)
pos_g_norm = tf.boolean_mask(g_norm, label)
neg_label = tf.cast(label < 1.0, dtype=tf.float32)
neg_g_norm = tf.boolean_mask(g_norm, neg_label)
print("g_norm: {}".format(g_norm))
print("pos_norm: {}".format(pos_g_norm))
print("neg_norm: {}".format(neg_g_norm))
return g_norm, pos_g_norm, neg_g_norm
def middle_attack(gradient, label, select_positive=True):
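    """Sort examples by per-example gradient norm (ascending) and return the
    rank positions occupied by the selected class (positives by default), to
    check whether one class clusters at the large-norm end."""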
g_norm = tf.reshape(tf.norm(gradient, axis=1, keepdims=True), [-1])
label = tf.cast(tf.reshape(label, [-1]), dtype=tf.float32)
indices = tf.argsort(
g_norm,
axis=-1,
direction="ASCENDING",
stable=True,
name="middle_attack_arg_sort")
if not select_positive:
# select negative instances
label = tf.cast(label < 1.0, dtype=tf.float32)
g_norm_label = g_norm * label
norm_label = tf.gather(g_norm_label, indices)
zero = tf.constant(0, dtype=tf.float32)
mask = tf.not_equal(norm_label, zero)
res = tf.where(mask)
# res = tf.sort(res, axis = -1, direction = "DESCENDING")
# print("positive_instances: {}, # instances: {}, norm rank indices:
# mean: {}, min: {}, max: {}".format(select_positive, res.shape,
# tf.reduce_mean(res), tf.reduce_min(res), tf.reduce_max(res)))
return res
def train(
train_iter,
test_iter,
loss,
num_epochs,
params=None,
trainer=None,
regularization_weight=0.1):
best_test_auc = 0
best_epoch = 0
for epoch in range(num_epochs):
train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
leakage_auc_baseline.reset_states()
leakage_auc_baseline_all.reset_states()
leakage_auc_masked_hiddenlayer_1.reset_states()
leakage_auc_masked_hiddenlayer_2.reset_states()
leakage_auc_masked_hiddenlayer_1_all.reset_states()
leakage_auc_not_masked_hiddenlayer_2.reset_states()
train_auc.reset_states()
e_s = datetime.datetime.now()
gradient_list = []
gradient_list_1 = []
gradient_list_2 = []
gradient_list_3 = []
label_list = []
for (idx, (X, y)) in enumerate(train_iter):
# global _Batch_Labels, _Batch_Positive_Predicted_Probabilities
batch_size = X.shape[0]
b_s = datetime.datetime.now()
shared_var.G_Batch_Labels = y
shared_var.G_Batch_Positive_Predicted_Probabilities = \
tf.math.sigmoid(predict(X))
with tf.GradientTape(persistent=False) as tape:
hidden_logits = tf.nn.relu(
tf.matmul(tf.reshape(X, shape=(-1, W.shape[0])), W) + b)
hidden_logits_2 = tf.nn.relu(tf.matmul(tf.reshape(
hidden_logits, shape=(-1, W1.shape[0])), W1) + b1)
hidden_logits_2_masking = hidden_logits_2
# using different perturbation methods
if args.max_norm:
hidden_logits_2_masking = gradient_masking(hidden_logits_2)
if args.sumKL:
hidden_logits_2_masking = KL_gradient_perturb(
hidden_logits_2)
logits = tf.matmul(tf.reshape(
hidden_logits_2_masking, shape=(-1, W2.shape[0])), W2) + b2
l = tf.reduce_sum(loss(logits, y)) + regularization_weight * \
(tf.norm(W) ** 2 + tf.norm(W2) ** 2 + tf.norm(W1) ** 2)
grads = tape.gradient(l,
params + [hidden_logits,
hidden_logits_2,
hidden_logits_2_masking,
logits])
y = tf.cast(y, dtype=tf.float32)
for i, param in enumerate(params):
trainer.apply_gradients([(grads[i] / batch_size, param)])
if args.num_outputs == 2:
label_leakge_auc(y, tf.norm(
grads[-1], axis=-1, keepdims=False), leakage_auc_baseline)
label_leakge_auc(y,
tf.norm(grads[-2],
axis=-1,
keepdims=False),
leakage_auc_not_masked_hiddenlayer_2)
label_leakge_auc(y,
tf.norm(grads[-3],
axis=-1,
keepdims=False),
leakage_auc_masked_hiddenlayer_2)
label_leakge_auc(y,
tf.norm(grads[-4],
axis=-1,
keepdims=False),
leakage_auc_masked_hiddenlayer_1)
gradient_list.append(grads[-1])
gradient_list_1.append(grads[-2])
gradient_list_2.append(grads[-3])
gradient_list_3.append(grads[-4])
label_list.append(tf.reshape(y, [-1, 1]))
train_l_sum += l.numpy()
train_auc.update_state(tf.reshape(
y, [-1, 1]), tf.reshape(tf.math.sigmoid(logits), [-1, 1]))
n += y.shape[0]
b_e = datetime.datetime.now()
gradients_stack = tf.concat(gradient_list, axis=0)
gradients_stack_1 = tf.concat(gradient_list_1, axis=0)
gradients_stack_2 = tf.concat(gradient_list_2, axis=0)
gradients_stack_3 = tf.concat(gradient_list_3, axis=0)
labels_stack = tf.concat(label_list, axis=0)
pos_norm_ranking_order_baseline = middle_attack(
gradients_stack, labels_stack)
neg_norm_ranking_order_baseline = middle_attack(
gradients_stack, labels_stack, select_positive=False)
label_leakge_auc(
labels_stack,
tf.norm(
gradients_stack,
axis=-1,
keepdims=False),
leakage_auc_baseline_all)
label_leakge_auc(
labels_stack,
tf.norm(
gradients_stack_2,
axis=-1,
keepdims=False),
leakage_auc_masked_hiddenlayer_1_all)
pos_norm_ranking_order_non_masking = middle_attack(
gradients_stack_1, labels_stack)
pos_norm_ranking_order_masking_1 = middle_attack(
gradients_stack_2, labels_stack)
neg_norm_ranking_order_masking_1 = middle_attack(
gradients_stack_2, labels_stack, select_positive=False)
pos_norm_ranking_order_masking_2 = middle_attack(
gradients_stack_3, labels_stack)
        (gradients_stack_2_n, gradients_stack_2_pos_n,
         gradients_stack_2_neg_n) = compute_gradient_norm(
            gradients_stack_2, labels_stack)
        (gradients_stack_baseline_n, gradients_stack_baseline_pos_n,
         gradients_stack_baseline_neg_n) = compute_gradient_norm(
            gradients_stack_1, labels_stack)
e_e = datetime.datetime.now()
print(
"epoch: {}, loss: {}, train auc: {}, time used: {}".
format(
epoch,
train_l_sum / n,
train_auc.result(),
e_e - e_s))
if args.num_outputs == 2:
print(
"epoch: {}, leak_auc baseline_all: {}, masked_HL_1_all: {}".
format(
epoch, leakage_auc_baseline_all.result(),
leakage_auc_masked_hiddenlayer_1_all.result()))
print(
"baseline leak_auc:{}, non_masking: {}".
format(
leakage_auc_baseline.result(),
leakage_auc_not_masked_hiddenlayer_2.result()))
print("masking L1:{}, masking L2: {}".
format(
leakage_auc_masked_hiddenlayer_1.result(),
leakage_auc_masked_hiddenlayer_2.result()))
test_loss, test_auc = test(test_iter, loss)
with writer.as_default():
tf.summary.scalar('train_loss', train_l_sum / n, step=epoch)
tf.summary.scalar('train_auc', train_auc.result(), step=epoch)
# tf.summary.scalar('train_auc', train_acc_sum / n, step=epoch)
if args.num_outputs == 2:
tf.summary.scalar(
'leakage_auc_baseline',
leakage_auc_baseline.result(),
step=epoch)
tf.summary.scalar(
'leakage_auc_not_masked_hiddenlayer_2',
leakage_auc_not_masked_hiddenlayer_2.result(),
step=epoch)
tf.summary.scalar(
'leakage_auc_masked_hiddenlayer_1',
leakage_auc_masked_hiddenlayer_1.result(),
step=epoch)
tf.summary.scalar(
'leakage_auc_masked_hiddenlayer_2',
leakage_auc_masked_hiddenlayer_2.result(),
step=epoch)
tf.summary.histogram(
"pos_norm_ranking_order_non_masking",
pos_norm_ranking_order_non_masking / n,
step=epoch)
tf.summary.histogram(
"pos_norm_ranking_order_baseline",
pos_norm_ranking_order_baseline / n,
step=epoch)
tf.summary.histogram(
"pos_norm_ranking_order_masking_1",
pos_norm_ranking_order_masking_1 / n,
step=epoch)
tf.summary.histogram(
"pos_norm_ranking_order_masking_2",
pos_norm_ranking_order_masking_2 / n,
step=epoch)
tf.summary.histogram(
"neg_norm_ranking_order_baseline",
neg_norm_ranking_order_baseline / n,
step=epoch)
tf.summary.histogram(
"neg_norm_ranking_order_masking_1",
neg_norm_ranking_order_masking_1 / n,
step=epoch)
tf.summary.histogram(
"gradients_layer_2_norm",
gradients_stack_2_n,
step=epoch)
tf.summary.histogram(
"gradients_layer_2_pos_norm",
gradients_stack_2_pos_n,
step=epoch)
tf.summary.histogram(
"gradients_layer_2_neg_norm",
gradients_stack_2_neg_n,
step=epoch)
tf.summary.histogram(
"gradients_norm_baseline_n",
gradients_stack_baseline_n,
step=epoch)
tf.summary.histogram(
"gradients_norm_baseline_pos_n",
gradients_stack_baseline_pos_n,
step=epoch)
tf.summary.histogram(
"gradients_norm_baseline_neg_n",
gradients_stack_baseline_neg_n,
step=epoch)
tf.summary.scalar('test_loss', test_loss, step=epoch)
tf.summary.scalar('test_auc', test_auc, step=epoch)
if test_auc > best_test_auc:
best_test_auc = max(test_auc, best_test_auc)
best_epoch = epoch
print("current best test auc: {}".format(best_test_auc))
print("current best epoch: {}".format(best_epoch))
print("best test auc: {}".format(best_test_auc))
print("best epoch: {}".format(best_epoch))
def predict(X):
hidden_logits = tf.nn.relu(
tf.matmul(tf.reshape(X, shape=(-1, W.shape[0])), W) + b)
hidden_logits_2 = tf.nn.relu(tf.matmul(tf.reshape(
hidden_logits, shape=(-1, W1.shape[0])), W1) + b1)
logits = tf.matmul(tf.reshape(
hidden_logits_2, shape=(-1, W2.shape[0])), W2) + b2
return logits
def test(test_iter, loss):
test_l_sum, test_acc_sum, n = 0.0, 0.0, 0
test_auc = tf.keras.metrics.AUC()
test_auc.reset_states()
for (idx, (X, y)) in enumerate(test_iter):
logits = predict(X)
l = tf.reduce_sum(loss(logits, y))
y = tf.cast(y, dtype=tf.float32)
test_l_sum += l.numpy()
test_auc.update_state(tf.reshape(
y, [-1, 1]), tf.reshape(tf.math.sigmoid(logits), [-1, 1]))
n += y.shape[0]
print("test loss: {}, test auc: {}".format(
test_l_sum / n, test_auc.result()))
return test_l_sum / n, test_auc.result()
# Set up logging.
stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
logdir = 'logs/%s' % stamp
writer = tf.summary.create_file_writer(logdir)
regularization_weight_l2 = 0.5
activation = "relu"
ada_gra_lr = 0.01
trainer_opt = tf.keras.optimizers.Adagrad(learning_rate=ada_gra_lr)
t_s = datetime.datetime.now()
print(
"gpu: {}, batch_size: {}, regularization: {}, ada_gra_lr: {}"
.format(
gpu_option,
args.batch_size,
regularization_weight_l2,
ada_gra_lr))
# global _Batch_Labels, _Batch_Positive_Predicted_Probabilities
# _Batch_Labels, _Batch_Positive_Predicted_Probabilities = None, None
train(
train_ds_iter,
test_ds_iter,
sigmoid_cross_entropy,
args.num_epochs,
params=[
W,
b,
W1,
b1,
W2,
b2],
trainer=trainer_opt,
regularization_weight=regularization_weight_l2)
print(
"gpu: {}, batch_size: {}, regularization: {}, ada_gra_lr: {}".
format(
gpu_option,
args.batch_size,
regularization_weight_l2,
ada_gra_lr))
t_e = datetime.datetime.now()
print(
"# training: {}, # test: {}, #_batchs: {}, training used: {}".
format(
total_training_instances,
total_test_instances,
num_batchs,
t_e -
t_s))
| bytedance/fedlearner | example/privacy/label_protection/FL_Label_Protection_FMNIST_Demo.py | Python | apache-2.0 | 35,016 |
from __future__ import absolute_import
from sentry.models import ProjectKey, ProjectKeyStatus
from sentry.testutils import TestCase
class ProjectKeyTest(TestCase):
model = ProjectKey
def test_generate_api_key(self):
assert len(self.model.generate_api_key()) == 32
def test_from_dsn(self):
key = self.model.objects.create(project_id=1, public_key="abc", secret_key="xyz")
assert self.model.from_dsn("http://abc@testserver/1") == key
assert self.model.from_dsn("http://[email protected]/1") == key
with self.assertRaises(self.model.DoesNotExist):
self.model.from_dsn("http://xxx@testserver/1")
with self.assertRaises(self.model.DoesNotExist):
self.model.from_dsn("abc")
def test_get_default(self):
key = self.projectkey
self.model.objects.create(project=self.project, status=ProjectKeyStatus.INACTIVE)
assert (
self.model.objects.filter(project=self.project).count() == 2
), self.model.objects.all()
assert self.model.get_default(self.project) == key
def test_is_active(self):
assert self.model(project=self.project, status=ProjectKeyStatus.INACTIVE).is_active is False
assert self.model(project=self.project, status=ProjectKeyStatus.ACTIVE).is_active is True
def test_get_dsn(self):
key = self.model(project_id=1, public_key="abc", secret_key="xyz")
assert key.dsn_private == "http://abc:xyz@testserver/1"
assert key.dsn_public == "http://abc@testserver/1"
assert key.csp_endpoint == "http://testserver/api/1/csp-report/?sentry_key=abc"
assert key.minidump_endpoint == "http://testserver/api/1/minidump/?sentry_key=abc"
assert key.unreal_endpoint == "http://testserver/api/1/unreal/abc/"
def test_get_dsn_org_subdomain(self):
with self.feature("organizations:org-subdomains"):
key = self.model(project_id=1, public_key="abc", secret_key="xyz")
host = "o{}.ingest.testserver".format(key.project.organization_id)
assert key.dsn_private == "http://abc:xyz@{}/1".format(host)
assert key.dsn_public == "http://abc@{}/1".format(host)
assert key.csp_endpoint == "http://{}/api/1/csp-report/?sentry_key=abc".format(host)
assert key.minidump_endpoint == "http://{}/api/1/minidump/?sentry_key=abc".format(host)
assert key.unreal_endpoint == "http://{}/api/1/unreal/abc/".format(host)
| beeftornado/sentry | tests/sentry/models/test_projectkey.py | Python | bsd-3-clause | 2,502 |
#
# Copyright 2018 Telefonica Investigacion y Desarrollo, S.A.U
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from keystone import exception
try: from oslo_log import log
except ImportError: from keystone.openstack.common import log
try: from oslo_config import cfg
except ImportError: from oslo.config import cfg
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class SendMail(object):
def send_email(self, to, subject, text):
dest = [to] # must be a list
#
# Prepare actual message
#
mimemsg = MIMEMultipart()
mimemsg['From'] = CONF.spassword.smtp_from
mimemsg['To'] = to
mimemsg['Subject'] = subject
body = text
mimemsg.attach(MIMEText(body, 'plain'))
msg = mimemsg.as_string()
#
# Send the mail
#
try:
# TODO: server must be initialized by current object
server = smtplib.SMTP(CONF.spassword.smtp_server,
CONF.spassword.smtp_port)
except smtplib.socket.gaierror:
LOG.error('SMTP socket error %s %s' % (
CONF.spassword.smtp_server, CONF.spassword.smtp_port))
return False
# Use tls for smtp if CONF.spassword.smtp_tls is True
if CONF.spassword.smtp_tls:
server.ehlo()
server.starttls()
# Use auth only if smtp_user and smtp_password not empty
if CONF.spassword.smtp_user and CONF.spassword.smtp_password:
try:
server.login(CONF.spassword.smtp_user,
CONF.spassword.smtp_password)
except smtplib.SMTPAuthenticationError:
LOG.error('SMTP authentication error %s' % CONF.spassword.smtp_user)
return False
try:
server.sendmail(CONF.spassword.smtp_from, dest, msg)
        except Exception, ex:  # try to avoid catching Exception unless you have to
LOG.error('SMTP sendmail error %s' % ex)
return False
finally:
server.quit()
LOG.info('email was sent to %s' % dest)
return True
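# Minimal usage sketch: the SMTP host, port, TLS flag, credentials and sender
# address are taken from the [spassword] section of the keystone configuration,
# so they must already be set; the recipient, subject and body below are
# illustrative only.
#
#   mailer = SendMail()
#   if not mailer.send_email('[email protected]',
#                            'Password expiration notice',
#                            'Your password is about to expire.'):
#       LOG.warn('email notification could not be delivered')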
| telefonicaid/fiware-keystone-spassword | keystone_spassword/contrib/spassword/mailer.py | Python | apache-2.0 | 2,991 |
template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "AWS CloudFormation Sample Template to create a KMS Key. The Fn::GetAtt is used to retrieve the ARN",
"Resources": {
"myKey": {
"Type": "AWS::KMS::Key",
"Properties": {
"Description": "Sample KmsKey",
"EnableKeyRotation": False,
"Enabled": True,
"KeyPolicy": {
"Version": "2012-10-17",
"Id": "key-default-1",
"Statement": [
{
"Sid": "Enable IAM User Permissions",
"Effect": "Allow",
"Principal": {
"AWS": {
"Fn::Join": [
"",
[
"arn:aws:iam::",
{"Ref": "AWS::AccountId"},
":root",
],
]
}
},
"Action": "kms:*",
"Resource": "*",
}
],
},
},
}
},
"Outputs": {
"KeyArn": {
"Description": "Generated Key Arn",
"Value": {"Fn::GetAtt": ["myKey", "Arn"]},
}
},
}
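# Usage sketch (assumes moto's CloudFormation mock and boto3 are available; the
# stack name and region are arbitrary): create a stack from this template and
# read the generated key ARN back from the stack outputs.
#
#   import json
#   import boto3
#   from moto import mock_cloudformation
#
#   @mock_cloudformation
#   def key_arn_from_fixture():
#       cf = boto3.client("cloudformation", region_name="us-east-1")
#       cf.create_stack(StackName="kms-key-test", TemplateBody=json.dumps(template))
#       stack = cf.describe_stacks(StackName="kms-key-test")["Stacks"][0]
#       return stack["Outputs"][0]["OutputValue"]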
| spulec/moto | tests/test_cloudformation/fixtures/kms_key.py | Python | apache-2.0 | 1,604 |
"""Placeholder models for Teams"""
from teams.base_models import PersonBase
from teams.base_models import SquadBase
class PersonPlaceholder(PersonBase):
"""Person with a Placeholder to edit content"""
@property
def html_content(self):
"""No additional formatting is necessary"""
return self.content
class Meta:
"""PersonPlaceholder's Meta"""
abstract = True
class SquadPlaceholder(SquadBase):
"""Squad with a Placeholder to edit content"""
@property
def html_content(self):
"""No additional formatting is necessary"""
return self.content
class Meta:
"""SquadPlaceholder's Meta"""
abstract = True
| cwilhelm/django-teams | teams_demo/demo/placeholder.py | Python | bsd-3-clause | 788 |
from datetime import datetime
import logging
import pdb
from django import db
from django.core.cache import cache
from django.test import TestCase
from models import TestModelA, TestModelB, TestModelC
logger = logging.getLogger(__name__)
class SimpleTest(TestCase):
"There should be one DB query to get the data, then any further attempts to get the same data should bypass the DB, unless invalidated."
def setUp(self):
with self.assertNumQueries(2):
self.ta1 = TestModelA(name="foo")
self.ta1.save()
self.ta2 = TestModelA(name="bar")
self.ta2.save()
def test_basic_select_caching(self):
"Test that a whole queryset is retrieved from the cache the second time it's needed."
with self.assertNumQueries(1):
tas1 = TestModelA.objects.all()
self.assertEqual(len(tas1), 2)
with self.assertNumQueries(0):
tas2 = TestModelA.objects.all()
self.assertEqual(len(tas2), 2)
self.assertEqual(len(tas1), len(tas2))
for i in xrange(0, len(tas1)):
self.assertEqual(tas1[i], tas2[i])
def test_caches_with_filter(self):
"Test that a filtered queryset is retrieved from the cache the second time it's needed."
with self.assertNumQueries(1):
tas1 = TestModelA.objects.filter(name__contains='f')
self.assertEqual(len(tas1), 1)
with self.assertNumQueries(0):
tas2 = TestModelA.objects.filter(name__contains='f')
self.assertEqual(len(tas2), 1)
self.assertEqual(tas1[0], tas2[0])
def test_filtering_caches_two_seperate_entries(self):
"Test that a filtered queryset is retrieved from the cache the second time it's needed, and that the cached values are different."
with self.assertNumQueries(1):
len(TestModelA.objects.filter(name__contains='f'))
with self.assertNumQueries(1):
len(TestModelA.objects.filter(name__contains='b'))
with self.assertNumQueries(0):
tas1 = TestModelA.objects.filter(name__contains='f')
self.assertEqual(len(tas1), 1)
with self.assertNumQueries(0):
tas2 = TestModelA.objects.filter(name__contains='b')
self.assertEqual(len(tas2), 1)
self.assertNotEqual(tas1[0], tas2[0])
def test_cache_invalidation_on_single_table_when_saving_after_get(self):
"Test that a call to save() invalidates the cache and results in a new DB query."
with self.assertNumQueries(1):
tas1a = TestModelA.objects.get(id=self.ta1.id)
with self.assertNumQueries(0):
tas1a = TestModelA.objects.get(id=self.ta1.id)
with self.assertNumQueries(2):
#the save() method triggers a SELECT using the ID. Even though we already did a get(), the query generated by save() is slightly different.
tas1a.save()
with self.assertNumQueries(1):
tas1a = TestModelA.objects.get(id=self.ta1.id)
with self.assertNumQueries(0):
tas1a = TestModelA.objects.get(id=self.ta1.id)
def test_cache_invalidation_on_single_table_when_saving_after_filter(self):
"Test that a call to save() invalidates the cache and results in a new DB query."
with self.assertNumQueries(1):
tas1 = TestModelA.objects.filter(name__contains='f')
self.assertEqual(len(tas1), 1)
with self.assertNumQueries(0):
tas1 = TestModelA.objects.filter(name__contains='f')
self.assertEqual(len(tas1), 1)
with self.assertNumQueries(2):
#the save() method triggers a SELECT using the ID, and that specific query hasn't been stored yet
tas1[0].save()
with self.assertNumQueries(1):
tas1 = TestModelA.objects.filter(name__contains='f')
self.assertEqual(len(tas1), 1)
with self.assertNumQueries(0):
tas1 = TestModelA.objects.filter(name__contains='f')
self.assertEqual(len(tas1), 1)
def test_cache_invalidation_on_single_table_when_updating(self):
"Test that a call to update() invalidates the cache and results in a new DB query."
with self.assertNumQueries(1):
tas1 = TestModelA.objects.filter(name__contains='f')
self.assertEqual(len(tas1), 1)
with self.assertNumQueries(0):
tas1 = TestModelA.objects.filter(name__contains='f')
self.assertEqual(len(tas1), 1)
with self.assertNumQueries(1):
TestModelA.objects.update(last_edited=datetime.now())
with self.assertNumQueries(1):
tas1 = TestModelA.objects.filter(name__contains='f')
self.assertEqual(len(tas1), 1)
with self.assertNumQueries(0):
tas1 = TestModelA.objects.filter(name__contains='f')
self.assertEqual(len(tas1), 1)
#TODO: test select_related(), invalidation from related model
#TODO: test one type of query on a model doesn't screw up other queries
#TODO: test that updating one entry will invalidate all cached entries for that table
#TODO: more testing for utility functions
#TODO: test extra(), annotate(), aggregate(), and raw SQL
#TODO: use proper func wrapping when patching
#TODO: test size limiting and table exclusion features
#TODO: test transaction rollback
def tearDown(self):
#TODO: add code to automatically clear cache between tests when installed
cache.clear()
| SeanHayes/django-query-caching | django_query_caching_test_project/apps/test_app/tests.py | Python | bsd-3-clause | 4,946 |
import argparse
import os
import subprocess
import time
def parse_command_line():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='A utility used to backup tank data')
parser.add_argument('--hadoop_home', default=os.getcwd(),
help='The local hadoop home directory')
parser.add_argument('--cluster', default='lgprc-xiaomi',
help='The hadoop cluster name')
parser.add_argument('--backup_root', default='/user/h_tank',
help='The backup root directory')
parser.add_argument('--tank_home', default=os.getcwd(),
help='The tank home directory')
args = parser.parse_args()
return args
def backup_sqlite(args):
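    """Copy the local tank sqlite database to HDFS under <backup_root>/sqlite,
    suffixed with the current unix timestamp."""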
cmd = ['%s/bin/hdfs' % args.hadoop_home, 'dfs', '-mkdir',
'-p', '%s/sqlite/' % args.backup_root]
print cmd
subprocess.check_call(cmd)
cmd = ['%s/bin/hdfs' % args.hadoop_home, 'dfs', '-copyFromLocal',
'%s/sqlite/tank.db' % args.tank_home,
'%s/sqlite/tank.db.%d' % (args.backup_root, int(time.time()))]
print cmd
subprocess.check_call(cmd)
def backup_data(args):
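    """Incrementally copy package directories under <tank_home>/data to HDFS.
    A per-directory 'tags' file records what has already been backed up, and
    packages modified within the last 30 minutes are skipped."""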
for dir in os.listdir('%s/data' % args.tank_home):
if dir.startswith('.'):
continue
cmd = ['%s/bin/hdfs' % args.hadoop_home, 'dfs', '-mkdir',
'-p', '%s/data/%s' % (args.backup_root, dir)]
print cmd
subprocess.check_call(cmd)
tag_file = '%s/data/%s/tags' % (args.tank_home, dir)
fp = open(tag_file, 'a+')
print tag_file
backed_dirs = [d.strip() for d in fp.readlines()]
total_dirs = [d for d in os.listdir(
'%s/data/%s' % (args.tank_home, dir)) if not d.startswith('.')]
diff_dirs = list(set(total_dirs) - set(backed_dirs) - set(['tags']))
for d in diff_dirs:
# only backup package whose modification time is older than 30min
mod_time = os.path.getmtime('%s/data/%s/%s' % (
args.tank_home, dir, d))
if time.time() - mod_time < 1800:
continue
cmd = ['%s/bin/hdfs' % args.hadoop_home, 'dfs', '-copyFromLocal',
'%s/data/%s/%s' % (args.tank_home, dir, d),
'%s/data/%s/' % (args.backup_root, dir)]
print cmd
subprocess.check_call(cmd)
fp.write('%s\n' % d)
def main():
args = parse_command_line()
backup_sqlite(args)
backup_data(args)
if __name__ == '__main__':
main()
| zxl200406/minos | tank/backup.py | Python | apache-2.0 | 2,320 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Debbuild(AutotoolsPackage):
"""Build deb packages from rpm specifications."""
homepage = "https://github.com/debbuild/debbuild"
url = "https://github.com/debbuild/debbuild/archive/20.04.0.tar.gz"
version('20.04.0', sha256='e17c4f5b37e8c16592ebd99281884cabc053fb890af26531e9825417047d1430')
depends_on('gettext')
| LLNL/spack | var/spack/repos/builtin/packages/debbuild/package.py | Python | lgpl-2.1 | 566 |
#!/usr/bin/env python
"""models.py
Udacity conference server-side Python App Engine data & ProtoRPC models
$Id: models.py,v 1.1 2014/05/24 22:01:10 wesc Exp $
created/forked from conferences.py by wesc on 2014 may 24
"""
__author__ = '[email protected] (Wesley Chun)'
import httplib
import endpoints
from protorpc import messages
from google.appengine.ext import ndb
class ConflictException(endpoints.ServiceException):
"""ConflictException -- exception mapped to HTTP 409 response"""
http_status = httplib.CONFLICT
class Profile(ndb.Model):
"""Profile -- User profile object"""
displayName = ndb.StringProperty()
mainEmail = ndb.StringProperty()
teeShirtSize = ndb.StringProperty(default='NOT_SPECIFIED')
conferenceKeysToAttend = ndb.StringProperty(repeated=True)
sessionsInWishlist = ndb.StringProperty(repeated=True)
class ProfileMiniForm(messages.Message):
"""ProfileMiniForm -- update Profile form message"""
displayName = messages.StringField(1)
teeShirtSize = messages.EnumField('TeeShirtSize', 2)
class ProfileForm(messages.Message):
"""ProfileForm -- Profile outbound form message"""
displayName = messages.StringField(1)
mainEmail = messages.StringField(2)
teeShirtSize = messages.EnumField('TeeShirtSize', 3)
conferenceKeysToAttend = messages.StringField(4, repeated=True)
sessionsInWishlist = messages.StringField(5, repeated=True)
class StringMessage(messages.Message):
"""StringMessage-- outbound (single) string message"""
data = messages.StringField(1, required=True)
class BooleanMessage(messages.Message):
"""BooleanMessage-- outbound Boolean value message"""
data = messages.BooleanField(1)
class Conference(ndb.Model):
"""Conference -- Conference object"""
name = ndb.StringProperty(required=True)
description = ndb.StringProperty()
organizerUserId = ndb.StringProperty()
topics = ndb.StringProperty(repeated=True)
city = ndb.StringProperty()
startDate = ndb.DateProperty()
# TODO: do we need for indexing like Java?
month = ndb.IntegerProperty()
endDate = ndb.DateProperty()
maxAttendees = ndb.IntegerProperty()
seatsAvailable = ndb.IntegerProperty()
class ConferenceForm(messages.Message):
"""ConferenceForm -- Conference outbound form message"""
name = messages.StringField(1)
description = messages.StringField(2)
organizerUserId = messages.StringField(3)
topics = messages.StringField(4, repeated=True)
city = messages.StringField(5)
startDate = messages.StringField(6) # DateTimeField()
month = messages.IntegerField(7)
maxAttendees = messages.IntegerField(8)
seatsAvailable = messages.IntegerField(9)
endDate = messages.StringField(10) # DateTimeField()
websafeKey = messages.StringField(11)
organizerDisplayName = messages.StringField(12)
class ConferenceForms(messages.Message):
"""ConferenceForms -- multiple Conference outbound form message"""
items = messages.MessageField(ConferenceForm, 1, repeated=True)
class Session(ndb.Model):
"""Session -- Session object"""
name = ndb.StringProperty(required=True)
highlights = ndb.StringProperty()
speaker = ndb.StringProperty()
duration = ndb.IntegerProperty()
organizerUserId = ndb.StringProperty()
typeOfSession = ndb.StringProperty(repeated=True)
date = ndb.DateProperty()
startTime = ndb.TimeProperty()
class SessionForm(messages.Message):
"""SessionForm -- Session outbound form message"""
name = messages.StringField(1)
highlights = messages.StringField(2)
speaker = messages.StringField(3)
duration = messages.IntegerField(4)
typeOfSession = messages.StringField(5, repeated=True)
date = messages.StringField(6) # DateTimeField()
startTime = messages.StringField(7)
organizerUserId = messages.StringField(8)
websafeKey = messages.StringField(9)
class SessionForms(messages.Message):
"""SessionForms -- multiple Sessions outbound form message"""
items = messages.MessageField(SessionForm, 1, repeated=True)
class TeeShirtSize(messages.Enum):
"""TeeShirtSize -- t-shirt size enumeration value"""
NOT_SPECIFIED = 1
XS_M = 2
XS_W = 3
S_M = 4
S_W = 5
M_M = 6
M_W = 7
L_M = 8
L_W = 9
XL_M = 10
XL_W = 11
XXL_M = 12
XXL_W = 13
XXXL_M = 14
XXXL_W = 15
class ConferenceQueryForm(messages.Message):
"""ConferenceQueryForm -- Conference query inbound form message"""
field = messages.StringField(1)
operator = messages.StringField(2)
value = messages.StringField(3)
class ConferenceQueryForms(messages.Message):
"""ConferenceQueryForms -- multiple ConferenceQueryForm
inbound form message"""
filters = messages.MessageField(ConferenceQueryForm, 1, repeated=True)
| anudhagat/ConferenceCentral | models.py | Python | apache-2.0 | 4,844 |
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div, Submit, HTML, Button, Row, Field
from crispy_forms.bootstrap import AppendedText, PrependedText, FormActions
from crispy_forms.bootstrap import StrictButton, TabHolder, Tab
class WelcomeForm(forms.Form):
login = forms.MultipleChoiceField(
label = "Welcome User : ",
required = False,
choices = (
('option_one', "Enter as a Guest User"),
),
widget = forms.CheckboxSelectMultiple,
)
username = forms.CharField(
label="Enter username",
required= False,
)
password = forms.CharField(
label="Enter password",
required = False,
widget= forms.PasswordInput,
)
# Bootstrap
helper = FormHelper()
helper.field_template = 'bootstrap3/layout/inline_field.html'
helper.form_class = 'form-vertical'
helper.form_action = "."
helper.layout = Layout(
Field('login' ),
Field('username'),
Field('password' ),
FormActions(
Submit('save_changes', 'GO', css_class="btn-primary"),
HTML('<a class="btn btn-danger" href="../">Cancel</a>'),
#Submit('cancel', 'CANCEL', css_class="btn-danger"),
)
)
| niks3089/nixia-console | receiver/welcome_forms.py | Python | gpl-3.0 | 1,209 |
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sample application that demonstrates how to use the App Engine Memcache API.
For more information, see README.md.
"""
# [START all]
import cgi
import cStringIO
import logging
import urllib
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import ndb
import webapp2
class Greeting(ndb.Model):
"""Models an individual Guestbook entry with author, content, and date."""
author = ndb.StringProperty()
content = ndb.StringProperty()
date = ndb.DateTimeProperty(auto_now_add=True)
def guestbook_key(guestbook_name=None):
"""Constructs a Datastore key for a Guestbook entity with guestbook_name"""
return ndb.Key('Guestbook', guestbook_name or 'default_guestbook')
class MainPage(webapp2.RequestHandler):
def get(self):
self.response.out.write('<html><body>')
guestbook_name = self.request.get('guestbook_name')
greetings = self.get_greetings(guestbook_name)
stats = memcache.get_stats()
self.response.write('<b>Cache Hits:{}</b><br>'.format(stats['hits']))
self.response.write('<b>Cache Misses:{}</b><br><br>'.format(
stats['misses']))
self.response.write(greetings)
self.response.write("""
<form action="/sign?{}" method="post">
<div><textarea name="content" rows="3" cols="60"></textarea></div>
<div><input type="submit" value="Sign Guestbook"></div>
</form>
<hr>
<form>Guestbook name: <input value="{}" name="guestbook_name">
<input type="submit" value="switch"></form>
</body>
</html>""".format(urllib.urlencode({'guestbook_name': guestbook_name}),
cgi.escape(guestbook_name)))
# [START check_memcache]
def get_greetings(self, guestbook_name):
"""
get_greetings()
Checks the cache to see if there are cached greetings.
If not, call render_greetings and set the cache
Args:
guestbook_name: Guestbook entity group key (string).
Returns:
A string of HTML containing greetings.
"""
greetings = memcache.get('{}:greetings'.format(guestbook_name))
if greetings is None:
greetings = self.render_greetings(guestbook_name)
try:
added = memcache.add(
'{}:greetings'.format(guestbook_name), greetings, 10)
if not added:
logging.error('Memcache set failed.')
except ValueError:
logging.error('Memcache set failed - data larger than 1MB')
return greetings
# [END check_memcache]
# [START query_datastore]
def render_greetings(self, guestbook_name):
"""
render_greetings()
Queries the database for greetings, iterate through the
results and create the HTML.
Args:
guestbook_name: Guestbook entity group key (string).
Returns:
A string of HTML containing greetings
"""
greetings = ndb.gql('SELECT * '
'FROM Greeting '
'WHERE ANCESTOR IS :1 '
'ORDER BY date DESC LIMIT 10',
guestbook_key(guestbook_name))
output = cStringIO.StringIO()
for greeting in greetings:
if greeting.author:
output.write('<b>{}</b> wrote:'.format(greeting.author))
else:
output.write('An anonymous person wrote:')
output.write('<blockquote>{}</blockquote>'.format(
cgi.escape(greeting.content)))
return output.getvalue()
# [END query_datastore]
class Guestbook(webapp2.RequestHandler):
def post(self):
# We set the same parent key on the 'Greeting' to ensure each greeting
# is in the same entity group. Queries across the single entity group
# are strongly consistent. However, the write rate to a single entity
# group is limited to ~1/second.
guestbook_name = self.request.get('guestbook_name')
greeting = Greeting(parent=guestbook_key(guestbook_name))
if users.get_current_user():
greeting.author = users.get_current_user().nickname()
greeting.content = self.request.get('content')
greeting.put()
memcache.delete('{}:greetings'.format(guestbook_name))
self.redirect('/?' +
urllib.urlencode({'guestbook_name': guestbook_name}))
app = webapp2.WSGIApplication([('/', MainPage),
('/sign', Guestbook)],
debug=True)
# [END all]
| clarko1/Cramd | appengine/standard/memcache/guestbook/main.py | Python | apache-2.0 | 5,338 |
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2009,2010,2011,2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Network environment formatter."""
from aquilon.aqdb.model import NetworkEnvironment
from aquilon.worker.formats.formatters import ObjectFormatter
class NetworkEnvironmentFormatter(ObjectFormatter):
def format_raw(self, netenv, indent=""):
details = [indent + "{0:c}: {0.name}".format(netenv)]
details.append(self.redirect_raw(netenv.dns_environment, indent + " "))
if netenv.location:
details.append(self.redirect_raw(netenv.location, indent + " "))
if netenv.comments:
details.append(indent + " Comments: %s" % netenv.comments)
return "\n".join(details)
ObjectFormatter.handlers[NetworkEnvironment] = NetworkEnvironmentFormatter()
| stdweird/aquilon | lib/python2.6/aquilon/worker/formats/network_environment.py | Python | apache-2.0 | 1,413 |
# -*- coding: utf-8 -*-
##########################################################################
# #
# Eddy: a graphical editor for the specification of Graphol ontologies #
# Copyright (C) 2015 Daniele Pantaleone <[email protected]> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##################### ##################### #
# #
# Graphol is developed by members of the DASI-lab group of the #
# Dipartimento di Ingegneria Informatica, Automatica e Gestionale #
# A.Ruberti at Sapienza University of Rome: http://www.dis.uniroma1.it #
# #
# - Domenico Lembo <[email protected]> #
# - Valerio Santarelli <[email protected]> #
# - Domenico Fabio Savo <[email protected]> #
# - Daniele Pantaleone <[email protected]> #
# - Marco Console <[email protected]> #
# #
########################################################################## | danielepantaleone/eddy | eddy/ui/__init__.py | Python | gpl-3.0 | 2,349 |
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from oslo.config import types
class TypeTestHelper(object):
def setUp(self):
super(TypeTestHelper, self).setUp()
self.type_instance = self.type
def assertConvertedValue(self, s, expected):
self.assertEqual(expected, self.type_instance(s))
def assertInvalid(self, value):
self.assertRaises(ValueError, self.type_instance, value)
class StringTypeTests(TypeTestHelper, unittest.TestCase):
type = types.String()
def test_empty_string_passes(self):
self.assertConvertedValue('', '')
def test_should_return_same_string_if_valid(self):
self.assertConvertedValue('foo bar', 'foo bar')
def test_listed_value(self):
self.type_instance = types.String(choices=['foo', 'bar'])
self.assertConvertedValue('foo', 'foo')
def test_unlisted_value(self):
self.type_instance = types.String(choices=['foo', 'bar'])
self.assertInvalid('baz')
def test_with_no_values_returns_error(self):
self.type_instance = types.String(choices=[])
self.assertInvalid('foo')
def test_string_with_non_closed_quote_is_invalid(self):
self.type_instance = types.String(quotes=True)
self.assertInvalid('"foo bar')
self.assertInvalid("'bar baz")
def test_quotes_are_stripped(self):
self.type_instance = types.String(quotes=True)
self.assertConvertedValue('"foo bar"', 'foo bar')
def test_trailing_quote_is_ok(self):
self.type_instance = types.String(quotes=True)
self.assertConvertedValue('foo bar"', 'foo bar"')
def test_repr(self):
t = types.String()
self.assertEqual('String', repr(t))
def test_repr_with_choices(self):
t = types.String(choices=['foo', 'bar'])
self.assertEqual('String(choices=[\'foo\', \'bar\'])', repr(t))
def test_equal(self):
self.assertTrue(types.String() == types.String())
def test_equal_with_same_choices(self):
t1 = types.String(choices=['foo', 'bar'])
t2 = types.String(choices=['foo', 'bar'])
self.assertTrue(t1 == t2)
def test_not_equal_with_different_choices(self):
t1 = types.String(choices=['foo', 'bar'])
t2 = types.String(choices=['foo', 'baz'])
self.assertFalse(t1 == t2)
    def test_equal_with_equal_quote_flags(self):
t1 = types.String(quotes=True)
t2 = types.String(quotes=True)
self.assertTrue(t1 == t2)
    def test_not_equal_with_different_quote_flags(self):
t1 = types.String(quotes=False)
t2 = types.String(quotes=True)
self.assertFalse(t1 == t2)
def test_not_equal_to_other_class(self):
self.assertFalse(types.String() == types.Integer())
class BooleanTypeTests(TypeTestHelper, unittest.TestCase):
type = types.Boolean()
def test_True(self):
self.assertConvertedValue('True', True)
def test_yes(self):
self.assertConvertedValue('yes', True)
def test_on(self):
self.assertConvertedValue('on', True)
def test_1(self):
self.assertConvertedValue('1', True)
def test_False(self):
self.assertConvertedValue('False', False)
def test_no(self):
self.assertConvertedValue('no', False)
def test_off(self):
self.assertConvertedValue('off', False)
def test_0(self):
self.assertConvertedValue('0', False)
def test_other_values_produce_error(self):
self.assertInvalid('foo')
def test_repr(self):
self.assertEqual('Boolean', repr(types.Boolean()))
def test_equal(self):
self.assertEqual(types.Boolean(), types.Boolean())
def test_not_equal_to_other_class(self):
self.assertFalse(types.Boolean() == types.String())
class IntegerTypeTests(TypeTestHelper, unittest.TestCase):
type = types.Integer()
def test_empty_string(self):
self.assertConvertedValue('', None)
def test_whitespace_string(self):
self.assertConvertedValue(" \t\t\t\t", None)
def test_positive_values_are_valid(self):
self.assertConvertedValue('123', 123)
def test_zero_is_valid(self):
self.assertConvertedValue('0', 0)
def test_negative_values_are_valid(self):
self.assertConvertedValue('-123', -123)
def test_leading_whitespace_is_ignored(self):
self.assertConvertedValue(' 5', 5)
def test_trailing_whitespace_is_ignored(self):
self.assertConvertedValue('7 ', 7)
def test_non_digits_are_invalid(self):
self.assertInvalid('12a45')
def test_repr(self):
t = types.Integer()
self.assertEqual('Integer', repr(t))
def test_repr_with_min(self):
t = types.Integer(min=123)
self.assertEqual('Integer(min=123)', repr(t))
def test_repr_with_max(self):
t = types.Integer(max=456)
self.assertEqual('Integer(max=456)', repr(t))
def test_repr_with_min_and_max(self):
t = types.Integer(min=123, max=456)
self.assertEqual('Integer(min=123, max=456)', repr(t))
def test_equal(self):
self.assertTrue(types.Integer() == types.Integer())
def test_equal_with_same_min_and_no_max(self):
self.assertTrue(types.Integer(min=123) == types.Integer(min=123))
def test_equal_with_same_max_and_no_min(self):
self.assertTrue(types.Integer(max=123) == types.Integer(max=123))
def test_equal_with_same_min_and_max(self):
t1 = types.Integer(min=1, max=123)
t2 = types.Integer(min=1, max=123)
self.assertTrue(t1 == t2)
def test_not_equal(self):
self.assertFalse(types.Integer(min=123) == types.Integer(min=456))
def test_not_equal_to_other_class(self):
self.assertFalse(types.Integer() == types.String())
def test_with_max_and_min(self):
t = types.Integer(min=123, max=456)
self.assertRaises(ValueError, t, 122)
t(123)
t(300)
t(456)
self.assertRaises(ValueError, t, 457)
class FloatTypeTests(TypeTestHelper, unittest.TestCase):
type = types.Float()
def test_decimal_format(self):
v = self.type_instance('123.456')
self.assertAlmostEqual(v, 123.456)
def test_decimal_format_negative_float(self):
v = self.type_instance('-123.456')
self.assertAlmostEqual(v, -123.456)
def test_exponential_format(self):
v = self.type_instance('123e-2')
self.assertAlmostEqual(v, 1.23)
def test_non_float_is_invalid(self):
self.assertInvalid('123,345')
self.assertInvalid('foo')
def test_repr(self):
self.assertEqual('Float', repr(types.Float()))
def test_equal(self):
self.assertTrue(types.Float() == types.Float())
def test_not_equal_to_other_class(self):
self.assertFalse(types.Float() == types.Integer())
class ListTypeTests(TypeTestHelper, unittest.TestCase):
type = types.List()
def test_empty_value(self):
self.assertConvertedValue('', [])
def test_single_value(self):
self.assertConvertedValue(' foo bar ',
['foo bar'])
def test_list_of_values(self):
self.assertConvertedValue(' foo bar, baz ',
['foo bar',
'baz'])
def test_list_of_values_containing_commas(self):
self.type_instance = types.List(types.String(quotes=True))
self.assertConvertedValue('foo,"bar, baz",bam',
['foo',
'bar, baz',
'bam'])
def test_list_of_lists(self):
self.type_instance = types.List(
types.List(types.String(), bounds=True)
)
self.assertConvertedValue('[foo],[bar, baz],[bam]',
[['foo'], ['bar', 'baz'], ['bam']])
def test_list_of_custom_type(self):
self.type_instance = types.List(types.Integer())
self.assertConvertedValue('1,2,3,5',
[1, 2, 3, 5])
def test_bounds_parsing(self):
self.type_instance = types.List(types.Integer(), bounds=True)
self.assertConvertedValue('[1,2,3]', [1, 2, 3])
def test_bounds_required(self):
self.type_instance = types.List(types.Integer(), bounds=True)
self.assertInvalid('1,2,3')
self.assertInvalid('[1,2,3')
self.assertInvalid('1,2,3]')
def test_repr(self):
t = types.List(types.Integer())
self.assertEqual('List of Integer', repr(t))
def test_equal(self):
self.assertTrue(types.List() == types.List())
def test_equal_with_equal_custom_item_types(self):
it1 = types.Integer()
it2 = types.Integer()
self.assertTrue(types.List(it1) == types.List(it2))
def test_not_equal_with_non_equal_custom_item_types(self):
it1 = types.Integer()
it2 = types.String()
self.assertFalse(it1 == it2)
self.assertFalse(types.List(it1) == types.List(it2))
def test_not_equal_to_other_class(self):
self.assertFalse(types.List() == types.Integer())
class DictTypeTests(TypeTestHelper, unittest.TestCase):
type = types.Dict()
def test_empty_value(self):
self.assertConvertedValue('', {})
def test_single_value(self):
self.assertConvertedValue(' foo: bar ',
{'foo': 'bar'})
def test_dict_of_values(self):
self.assertConvertedValue(' foo: bar, baz: 123 ',
{'foo': 'bar',
'baz': '123'})
def test_custom_value_type(self):
self.type_instance = types.Dict(types.Integer())
self.assertConvertedValue('foo:123, bar: 456',
{'foo': 123,
'bar': 456})
def test_dict_of_values_containing_commas(self):
self.type_instance = types.Dict(types.String(quotes=True))
self.assertConvertedValue('foo:"bar, baz",bam:quux',
{'foo': 'bar, baz',
'bam': 'quux'})
def test_dict_of_dicts(self):
self.type_instance = types.Dict(
types.Dict(types.String(), bounds=True)
)
self.assertConvertedValue('k1:{k1:v1,k2:v2},k2:{k3:v3}',
{'k1': {'k1': 'v1', 'k2': 'v2'},
'k2': {'k3': 'v3'}})
def test_bounds_parsing(self):
self.type_instance = types.Dict(types.String(), bounds=True)
self.assertConvertedValue('{foo:bar,baz:123}',
{'foo': 'bar',
'baz': '123'})
def test_bounds_required(self):
self.type_instance = types.Dict(types.String(), bounds=True)
self.assertInvalid('foo:bar,baz:123')
self.assertInvalid('{foo:bar,baz:123')
self.assertInvalid('foo:bar,baz:123}')
def test_no_mapping_produces_error(self):
self.assertInvalid('foo,bar')
def test_repr(self):
t = types.Dict(types.Integer())
self.assertEqual('Dict of Integer', repr(t))
def test_equal(self):
self.assertTrue(types.Dict() == types.Dict())
def test_equal_with_equal_custom_item_types(self):
it1 = types.Integer()
it2 = types.Integer()
self.assertTrue(types.Dict(it1) == types.Dict(it2))
def test_not_equal_with_non_equal_custom_item_types(self):
it1 = types.Integer()
it2 = types.String()
self.assertFalse(it1 == it2)
self.assertFalse(types.Dict(it1) == types.Dict(it2))
def test_not_equal_to_other_class(self):
self.assertFalse(types.Dict() == types.Integer())
class IPAddressTypeTests(TypeTestHelper, unittest.TestCase):
type = types.IPAddress()
def test_ipv4_address(self):
self.assertConvertedValue('192.168.0.1', '192.168.0.1')
def test_ipv6_address(self):
self.assertConvertedValue('abcd:ef::1', 'abcd:ef::1')
def test_strings(self):
self.assertInvalid('')
self.assertInvalid('foo')
def test_numbers(self):
self.assertInvalid(1)
self.assertInvalid(-1)
self.assertInvalid(3.14)
class IPv4AddressTypeTests(IPAddressTypeTests):
type = types.IPAddress(4)
def test_ipv6_address(self):
self.assertInvalid('abcd:ef::1')
class IPv6AddressTypeTests(IPAddressTypeTests):
type = types.IPAddress(6)
def test_ipv4_address(self):
self.assertInvalid('192.168.0.1')
| JioCloud/oslo.config | tests/test_types.py | Python | apache-2.0 | 13,215 |
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet backup features.
Test case is:
4 nodes. Nodes 1, 2 and 3 send transactions between each other;
the fourth node is a miner.
Nodes 1, 2 and 3 each mine a block to start, then the
miner creates 100 blocks so nodes 1, 2 and 3 each have 50 mature
coins to spend.
Then 5 iterations of 1/2/3 sending coins amongst
themselves to get transactions in the wallets,
and the miner mining one block.
Wallets are backed up using dumpwallet/backupwallet.
Then 5 more iterations of transactions and mining a block.
Miner then generates 101 more blocks, so any
transaction fees paid mature.
Sanity check:
Sum(1,2,3,4 balances) == 114*50
1/2/3 are shut down, and their wallets erased.
Then restore using wallet.dat backup. And
confirm 1/2/3/4 balances are same as before.
Shut down again, restore using importwallet,
and confirm again balances are correct.
"""
from decimal import Decimal
import os
from random import randint
import shutil
from test_framework.test_framework import GuldenTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, connect_nodes, sync_blocks, sync_mempools
class WalletBackupTest(GuldenTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
        # nodes 1, 2 and 3 are spenders, let's give them a keypool=100
self.extra_args = [["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []]
self.rpc_timeout = 180
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[2], 3)
connect_nodes(self.nodes[2], 0)
self.sync_all()
def one_send(self, from_node, to_address):
if (randint(1,2) == 1):
amount = Decimal(randint(1,10)) / Decimal(10)
self.nodes[from_node].sendtoaddress(to_address, amount)
def do_one_round(self):
a0 = self.nodes[0].getnewaddress()
a1 = self.nodes[1].getnewaddress()
a2 = self.nodes[2].getnewaddress()
self.one_send(0, a1)
self.one_send(0, a2)
self.one_send(1, a0)
self.one_send(1, a2)
self.one_send(2, a0)
self.one_send(2, a1)
# Have the miner (node3) mine a block.
# Must sync mempools before mining.
sync_mempools(self.nodes)
self.nodes[3].generate(1)
sync_blocks(self.nodes)
# As above, this mirrors the original bash test.
def start_three(self):
self.start_node(0)
self.start_node(1)
self.start_node(2)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[2], 3)
connect_nodes(self.nodes[2], 0)
def stop_three(self):
self.stop_node(0)
self.stop_node(1)
self.stop_node(2)
def erase_three(self):
os.remove(os.path.join(self.nodes[0].datadir, 'regtest', 'wallet.dat'))
os.remove(os.path.join(self.nodes[1].datadir, 'regtest', 'wallet.dat'))
os.remove(os.path.join(self.nodes[2].datadir, 'regtest', 'wallet.dat'))
def run_test(self):
self.log.info("Generating initial blockchain")
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.nodes[1].generate(1)
sync_blocks(self.nodes)
self.nodes[2].generate(1)
sync_blocks(self.nodes)
self.nodes[3].generate(100)
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
assert_equal(self.nodes[2].getbalance(), 50)
assert_equal(self.nodes[3].getbalance(), 0)
self.log.info("Creating transactions")
# Five rounds of sending each other transactions.
for i in range(5):
self.do_one_round()
self.log.info("Backing up")
self.nodes[0].backupwallet(os.path.join(self.nodes[0].datadir, 'wallet.bak'))
self.nodes[0].dumpwallet(os.path.join(self.nodes[0].datadir, 'wallet.dump'), 'I_UNDERSTAND_AND_ACCEPT_THE_RISK_OF_DUMPING_AN_HD_PRIVKEY')
self.nodes[1].backupwallet(os.path.join(self.nodes[1].datadir, 'wallet.bak'))
self.nodes[1].dumpwallet(os.path.join(self.nodes[1].datadir, 'wallet.dump'), 'I_UNDERSTAND_AND_ACCEPT_THE_RISK_OF_DUMPING_AN_HD_PRIVKEY')
self.nodes[2].backupwallet(os.path.join(self.nodes[2].datadir, 'wallet.bak'))
self.nodes[2].dumpwallet(os.path.join(self.nodes[2].datadir, 'wallet.dump'), 'I_UNDERSTAND_AND_ACCEPT_THE_RISK_OF_DUMPING_AN_HD_PRIVKEY')
self.log.info("More transactions")
for i in range(5):
self.do_one_round()
# Generate 101 more blocks, so any fees paid mature
self.nodes[3].generate(101)
self.sync_all()
balance0 = self.nodes[0].getbalance()
balance1 = self.nodes[1].getbalance()
balance2 = self.nodes[2].getbalance()
balance3 = self.nodes[3].getbalance()
total = balance0 + balance1 + balance2 + balance3
# At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.)
# 114 are mature, so the sum of all wallets should be 114 * 50 = 5700.
assert_equal(total, 5700)
##
# Test restoring spender wallets from backups
##
self.log.info("Restoring using wallet.dat")
self.stop_three()
self.erase_three()
# Start node2 with no chain
shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'blocks'))
shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'chainstate'))
shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'witstate'))
shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'autocheckpoints'))
# Restore wallets from backup
shutil.copyfile(os.path.join(self.nodes[0].datadir, 'wallet.bak'), os.path.join(self.nodes[0].datadir, 'regtest', 'wallet.dat'))
shutil.copyfile(os.path.join(self.nodes[1].datadir, 'wallet.bak'), os.path.join(self.nodes[1].datadir, 'regtest', 'wallet.dat'))
shutil.copyfile(os.path.join(self.nodes[2].datadir, 'wallet.bak'), os.path.join(self.nodes[2].datadir, 'regtest', 'wallet.dat'))
self.log.info("Re-starting nodes")
self.start_three()
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
self.log.info("Restoring using dumped wallet")
self.stop_three()
self.erase_three()
        # Start node2 with no chain
shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'blocks'))
shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'chainstate'))
shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'witstate'))
shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'autocheckpoints'))
self.start_three()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
self.nodes[0].createaccount(name="Legacy", type="Legacy")
self.nodes[0].setactiveaccount(account="Legacy")
self.nodes[0].importwallet(os.path.join(self.nodes[0].datadir, 'wallet.dump'), 'Legacy')
self.nodes[1].createaccount(name="Legacy", type="Legacy")
self.nodes[1].setactiveaccount(account="Legacy")
self.nodes[1].importwallet(os.path.join(self.nodes[1].datadir, 'wallet.dump'), 'Legacy')
self.nodes[2].createaccount(name="Legacy", type="Legacy")
self.nodes[2].setactiveaccount(account="Legacy")
self.nodes[2].importwallet(os.path.join(self.nodes[2].datadir, 'wallet.dump'), 'Legacy')
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
# Backup to source wallet file must fail
sourcePaths = [
os.path.join(self.nodes[0].datadir, 'regtest', 'wallet.dat')
]
for sourcePath in sourcePaths:
assert_raises_rpc_error(-4, "backup failed", self.nodes[0].backupwallet, sourcePath)
if __name__ == '__main__':
WalletBackupTest().main()
| nlgcoin/guldencoin-official | test/functional/wallet_backup.py | Python | mit | 8,674 |
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
try:
from ccmlib import common
except ImportError as e:
raise unittest.SkipTest('ccm is a dependency for integration tests:', e)
| datastax/python-driver | tests/integration/long/__init__.py | Python | apache-2.0 | 723 |
# coding: utf-8
import os
import sys
sys.path.append(os.pardir) # make files in the parent directory importable
import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from common.util import smooth_curve
from common.multi_layer_net import MultiLayerNet
from common.optimizer import SGD
# 0: Load the MNIST data ==========
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True)
train_size = x_train.shape[0]
batch_size = 128
max_iterations = 2000
# 1: Experiment settings ==========
weight_init_types = {'std=0.01': 0.01, 'Xavier': 'sigmoid', 'He': 'relu'}
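# 'sigmoid' is assumed to make MultiLayerNet use Xavier initialization
# (std = 1/sqrt(n_prev)) and 'relu' He initialization (std = sqrt(2/n_prev));
# 'std=0.01' uses a fixed standard deviation of 0.01.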
optimizer = SGD(lr=0.01)
networks = {}
train_loss = {}
for key, weight_type in weight_init_types.items():
networks[key] = MultiLayerNet(input_size=784, hidden_size_list=[100, 100, 100, 100],
output_size=10, weight_init_std=weight_type)
train_loss[key] = []
# 2: Start training ==========
for i in range(max_iterations):
batch_mask = np.random.choice(train_size, batch_size)
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]
for key in weight_init_types.keys():
grads = networks[key].gradient(x_batch, t_batch)
optimizer.update(networks[key].params, grads)
loss = networks[key].loss(x_batch, t_batch)
train_loss[key].append(loss)
if i % 100 == 0:
print("===========" + "iteration:" + str(i) + "===========")
for key in weight_init_types.keys():
loss = networks[key].loss(x_batch, t_batch)
print(key + ":" + str(loss))
# 3: Plot the graph ==========
markers = {'std=0.01': 'o', 'Xavier': 's', 'He': 'D'}
x = np.arange(max_iterations)
for key in weight_init_types.keys():
plt.plot(x, smooth_curve(train_loss[key]), marker=markers[key], markevery=100, label=key)
plt.xlabel("iterations")
plt.ylabel("loss")
plt.ylim(0, 2.5)
plt.legend()
plt.show() | Takasudo/studyPython | deep/ch06/weight_init_compare.py | Python | gpl-3.0 | 1,951 |
import functools
import logging
from c2corg_api import DBSession, caching
from c2corg_api.caching import cache_sitemap_xml
from c2corg_api.models.cache_version import CacheVersion
from c2corg_api.models.document import Document, DocumentLocale
from c2corg_api.models.route import ROUTE_TYPE, RouteLocale
from c2corg_api.models.user_profile import USERPROFILE_TYPE
from c2corg_api.views import cors_policy, etag_cache
from c2corg_api.views.validation import create_int_validator, \
validate_document_type
from c2corg_api.caching import get_or_create
from cornice.resource import resource, view
from pyramid.httpexceptions import HTTPNotFound
from sqlalchemy.sql.functions import func
from math import ceil
from datetime import date, datetime, timezone
from slugify import slugify
log = logging.getLogger(__name__)
# Search engines accept no more than 50000 URLs per sitemap,
# and the sitemap files may not exceed 10 MB. With 50000 URLs the sitemaps
# are not bigger than 9 MB, but to be safe we are using 45000 URLs per sitemap.
# see http://www.sitemaps.org/protocol.html
PAGES_PER_SITEMAP = 45000
UI_ENTRY_POINTS = {
'a': 'areas',
'b': 'books',
'c': 'articles',
'i': 'images',
'm': 'maps',
'o': 'outings',
'r': 'routes',
'w': 'waypoints',
'x': 'xreports'
}
validate_page = create_int_validator('i')
@resource(
collection_path='/sitemaps.xml', path='/sitemaps.xml/{doc_type}/{i}.xml',
cors_policy=cors_policy, renderer='string')
class SitemapXml(object):
def __init__(self, request):
self.request = request
@view()
def collection_get(self):
""" Returns a sitemap index file.
See: http://www.sitemaps.org/protocol.html
The response consists of a list of URLs of sitemaps.
<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap>
<loc>https://api.camptocamp.org/sitemaps.xml/w/0.xml</loc>
<lastmod>2019-02-11T18:01:49.193770+00:00</lastmod>
</sitemap>
<sitemap>
<loc>https://api.camptocamp.org/sitemaps.xml/a/0.xml</loc>
<lastmod>2019-02-11T18:01:49.193770+00:00</lastmod>
</sitemap>
<sitemap>
<loc>https://api.camptocamp.org/sitemaps.xml/i/0.xml</loc>
<lastmod>2019-02-11T18:01:49.193770+00:00</lastmod>
</sitemap>
<sitemap>
<loc>https://api.camptocamp.org/sitemaps.xml/i/1.xml</loc>
<lastmod>2019-02-11T18:01:49.193770+00:00</lastmod>
</sitemap>
        </sitemapindex>
"""
cache_key = _get_cache_key()
etag_cache(self.request, cache_key)
self.request.response.content_type = "text/xml"
return get_or_create(
cache_sitemap_xml,
cache_key,
_get_sitemap_index
)
@view(validators=[validate_page, validate_document_type])
def get(self):
""" Returns a sitemap file for a given
type and sitemap page number.
"""
doc_type = self.request.validated['doc_type']
i = self.request.validated['i']
self.request.response.content_type = "text/xml"
cache_key = _get_cache_key(doc_type, i)
etag_cache(self.request, cache_key)
return get_or_create(
cache_sitemap_xml,
cache_key,
functools.partial(_get_sitemap, doc_type, i))
def _get_cache_key(doc_type=None, i=None):
if doc_type:
return '{}-{}-{}-{}'.format(
doc_type, i, date.today().isoformat(), caching.CACHE_VERSION)
else:
return '{}-{}'.format(
date.today().isoformat(), caching.CACHE_VERSION)
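# For example, _get_cache_key('w', 0) yields something like
# 'w-0-2019-02-11-<CACHE_VERSION>', while _get_cache_key() yields
# '2019-02-11-<CACHE_VERSION>' for the sitemap index (date and version vary).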
def _get_sitemap_index():
document_locales_per_type = DBSession. \
query(Document.type, func.count().label('count')). \
join(
DocumentLocale,
Document.document_id == DocumentLocale.document_id). \
filter(Document.type != USERPROFILE_TYPE). \
group_by(Document.type). \
all()
sitemaps = []
now = datetime.utcnow().replace(tzinfo=timezone.utc)
lastmod = now.isoformat()
template = """<sitemap>
<loc>https://api.camptocamp.org/sitemaps.xml/{doc_type}/{i}.xml</loc>
<lastmod>{lastmod}</lastmod>
</sitemap>"""
for doc_type, count in document_locales_per_type:
num_sitemaps = ceil(count / PAGES_PER_SITEMAP)
sitemaps_for_type = [
template.format(
doc_type=doc_type,
i=i,
lastmod=lastmod
)
for i in range(0, num_sitemaps)
]
sitemaps.extend(sitemaps_for_type)
return """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
{}
</sitemapindex>""".format("\n".join(sitemaps))
def _get_sitemap(doc_type, i):
fields = [
Document.document_id, DocumentLocale.lang, DocumentLocale.title,
CacheVersion.last_updated
]
# include `title_prefix` for routes
is_route = doc_type == ROUTE_TYPE
if is_route:
fields.append(RouteLocale.title_prefix)
base_query = DBSession. \
query(*fields). \
select_from(Document). \
join(DocumentLocale,
Document.document_id == DocumentLocale.document_id)
if is_route:
        # joining on `RouteLocale.__table__` instead of `RouteLocale` to
        # avoid having SQLAlchemy create an additional join on DocumentLocale
base_query = base_query. \
join(RouteLocale.__table__,
DocumentLocale.id == RouteLocale.id)
base_query = base_query. \
join(CacheVersion,
Document.document_id == CacheVersion.document_id). \
filter(Document.redirects_to.is_(None)). \
filter(Document.type == doc_type). \
order_by(Document.document_id, DocumentLocale.lang). \
limit(PAGES_PER_SITEMAP). \
offset(PAGES_PER_SITEMAP * i)
document_locales = base_query.all()
if not document_locales:
raise HTTPNotFound()
ui_entry_point = UI_ENTRY_POINTS[doc_type]
return """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
{}
</urlset>""".format("\n".join([
_format_page(ui_entry_point, *locale)
for locale in document_locales
]))
def _format_page(
ui_entry_point, doc_id, lang, title, last_updated, title_prefix=None):
page = {
'document_id': doc_id,
'lang': lang,
'lastmod': last_updated.isoformat(),
'ui_entry_point': ui_entry_point
}
if title_prefix:
page['title'] = slugify("{} {}".format(title_prefix, title))
else:
page['title'] = slugify(title)
return """<url>
<loc>https://www.camptocamp.org/{ui_entry_point}/{document_id}/{lang}/{title}</loc>
<lastmod>{lastmod}</lastmod>
<changefreq>weekly</changefreq>
</url>""".format(**page)
| c2corg/v6_api | c2corg_api/views/sitemap_xml.py | Python | agpl-3.0 | 7,153 |
#!/usr/bin/env python3
"""
backend.py - ffmap-backend runner
https://github.com/ffnord/ffmap-backend
"""
import argparse
import json
import os
import sys
from datetime import datetime
import networkx as nx
from networkx.readwrite import json_graph
from lib import graph, nodes
from lib.alfred import Alfred
from lib.batman import Batman
from lib.rrddb import RRD
from lib.nodelist import export_nodelist
NODES_VERSION = 1
GRAPH_VERSION = 1
def main(params):
os.makedirs(params['dest_dir'], exist_ok=True)
nodes_fn = os.path.join(params['dest_dir'], 'nodes.json')
graph_fn = os.path.join(params['dest_dir'], 'graph.json')
nodelist_fn = os.path.join(params['dest_dir'], 'nodelist.json')
now = datetime.utcnow().replace(microsecond=0)
# parse mesh param and instantiate Alfred/Batman instances
alfred_instances = []
batman_instances = []
for value in params['mesh']:
        # (1) only a batman-adv interface, no alfred socket
if ':' not in value:
if len(params['mesh']) > 1:
raise ValueError(
'Multiple mesh interfaces require the use of '
'alfred socket paths.')
alfred_instances.append(Alfred(unix_sockpath=None))
batman_instances.append(Batman(mesh_interface=value))
else:
            # (2) batman-adv interface + alfred socket
try:
batif, alfredsock = value.split(':')
alfred_instances.append(Alfred(unix_sockpath=alfredsock))
batman_instances.append(Batman(mesh_interface=batif,
alfred_sockpath=alfredsock))
except ValueError:
raise ValueError(
'Unparseable value "{0}" in --mesh parameter.'.
format(value))
    # read nodedb state from nodes.json
try:
with open(nodes_fn, 'r') as nodedb_handle:
nodedb = json.load(nodedb_handle)
except IOError:
nodedb = {'nodes': dict()}
# flush nodedb if it uses the old format
if 'links' in nodedb:
nodedb = {'nodes': dict()}
# set version we're going to output
nodedb['version'] = NODES_VERSION
# update timestamp and assume all nodes are offline
nodedb['timestamp'] = now.isoformat()
for node_id, node in nodedb['nodes'].items():
node['flags']['online'] = False
# integrate alfred nodeinfo
for alfred in alfred_instances:
nodes.import_nodeinfo(nodedb['nodes'], alfred.nodeinfo(),
now, assume_online=True,
hide_ownership=params['hide_ownership'])
# integrate static aliases data
for aliases in params['aliases']:
with open(aliases, 'r') as f:
nodes.import_nodeinfo(nodedb['nodes'], json.load(f),
now, assume_online=False)
nodes.reset_statistics(nodedb['nodes'])
for alfred in alfred_instances:
nodes.import_statistics(nodedb['nodes'], alfred.statistics())
# acquire gwl and visdata for each batman instance
mesh_info = []
for batman in batman_instances:
vd = batman.vis_data()
gwl = batman.gateway_list()
mesh_info.append((vd, gwl))
# update nodedb from batman-adv data
for vd, gwl in mesh_info:
nodes.import_mesh_ifs_vis_data(nodedb['nodes'], vd)
nodes.import_vis_clientcount(nodedb['nodes'], vd)
nodes.mark_vis_data_online(nodedb['nodes'], vd, now)
nodes.mark_gateways(nodedb['nodes'], gwl)
# clear the nodedb from nodes that have not been online in $prune days
if params['prune']:
nodes.prune_nodes(nodedb['nodes'], now, params['prune'])
    # build networkx graph from nodedb and visdata
batadv_graph = nx.DiGraph()
for vd, gwl in mesh_info:
graph.import_vis_data(batadv_graph, nodedb['nodes'], vd)
# force mac addresses to be vpn-link only (like gateways for example)
if params['vpn']:
graph.mark_vpn(batadv_graph, frozenset(params['vpn']))
batadv_graph = graph.merge_nodes(batadv_graph)
batadv_graph = graph.to_undirected(batadv_graph)
# write processed data to dest dir
with open(nodes_fn, 'w') as f:
json.dump(nodedb, f)
graph_out = {'batadv': json_graph.node_link_data(batadv_graph),
'version': GRAPH_VERSION}
with open(graph_fn, 'w') as f:
json.dump(graph_out, f)
with open(nodelist_fn, 'w') as f:
json.dump(export_nodelist(now, nodedb), f)
# optional rrd graphs (trigger with --rrd)
if params['rrd']:
script_directory = os.path.dirname(os.path.realpath(__file__))
rrd = RRD(os.path.join(script_directory, 'nodedb'),
os.path.join(params['dest_dir'], 'nodes'))
rrd.update_database(nodedb['nodes'])
rrd.update_images()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--aliases',
help='Read aliases from FILE',
nargs='+', default=[], metavar='FILE')
parser.add_argument('-m', '--mesh',
default=['bat0'], nargs='+',
                        help='Use given batman-adv mesh interface(s) (defaults '
'to bat0); specify alfred unix socket like '
'bat0:/run/alfred0.sock.')
parser.add_argument('-d', '--dest-dir', action='store',
help='Write output to destination directory',
required=True)
parser.add_argument('-V', '--vpn', nargs='+', metavar='MAC',
help='Assume MAC addresses are part of vpn')
parser.add_argument('-p', '--prune', metavar='DAYS', type=int,
help='forget nodes offline for at least DAYS')
parser.add_argument('--with-rrd', dest='rrd', action='store_true',
default=False,
help='Enable the rendering of RRD graphs '
'(cpu intensive)')
parser.add_argument('--with-hidden-ownership', dest='hide_ownership',
action='store_true', default=False,
                        help='Remove owner/contact information from '
'alfred nodeinfo')
options = vars(parser.parse_args())
main(options)
| freifunk-fulda/ffmap-backend | backend.py | Python | bsd-3-clause | 6,391 |
# Copyright 2019 Aerospike, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def source_hierarchy(source_path):
return source_path.split(".")
def source_root(source_path):
return source_hierarchy(source_path)[0]
def source_lookup(sources, source_path):
cur_node = sources
for node in source_hierarchy(source_path):
cur_node = cur_node[node]
return cur_node
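# For example (with an illustrative source path):
#   source_hierarchy('node.stats.latency') -> ['node', 'stats', 'latency']
#   source_root('node.stats.latency') -> 'node'
#   source_lookup({'node': {'stats': {'latency': 5}}}, 'node.stats.latency') -> 5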
| aerospike/aerospike-admin | lib/view/sheet/source.py | Python | apache-2.0 | 891 |
# Copyright 2016 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Some useful fakes."""
from manila.tests.db import fakes as db_fakes
def fake_share(**kwargs):
share = {
'id': 'fakeid',
'share_id': 'fakeshareid',
'name': 'fakename',
'size': 1,
'share_proto': 'NFS',
'export_location': '127.0.0.1:/mnt/nfs/volume-00002',
}
share.update(kwargs)
return db_fakes.FakeModel(share)
def fake_access(**kwargs):
access = {
'id': 'fakeaccid',
'access_type': 'ip',
'access_to': '10.0.0.2',
'access_level': 'rw',
'state': 'active',
}
access.update(kwargs)
return db_fakes.FakeModel(access)
def fake_network(**kwargs):
allocations = db_fakes.FakeModel({'id': 'fake_allocation_id',
                                      'ip_address': '127.0.0.1',
'mac_address': 'fe:16:3e:61:e0:58'})
network = {
'id': 'fake_network_id',
'server_id': 'fake_server_id',
'network_allocations': [allocations],
'neutron_net_id': 'fake_net',
'neutron_subnet_id': 'fake_subnet',
}
network.update(kwargs)
return db_fakes.FakeModel(network)
| bswartz/manila | manila/tests/share/drivers/container/fakes.py | Python | apache-2.0 | 1,795 |
from hubcheck.pageobjects.basepageobject import BasePageObject
from hubcheck.pageobjects.basepageelement import Link
from selenium.common.exceptions import NoSuchElementException
class GenericPage(BasePageObject):
"""Generic Page with just a header and footer"""
def __init__(self,browser,catalog):
super(GenericPage,self).__init__(browser,catalog)
self.path = '/'
# load hub's classes
GenericPage_Locators = self.load_class('GenericPage_Locators')
NeedHelpForm = self.load_class('NeedHelpForm')
Header = self.load_class('Header')
Footer = self.load_class('Footer')
# update this object's locator
self.locators = GenericPage_Locators.locators
# setup page object's components
self.needhelpform = NeedHelpForm(self,{},self.__refreshCaptchaCB)
self.needhelplink = Link(self,{'base':'needhelplink'})
self.header = Header(self)
# self.footer = Footer(self)
def __refreshCaptchaCB(self):
self._browser.refresh()
self.needhelplink.click()
def goto_login(self):
return self.header.goto_login()
def goto_register(self):
return self.header.goto_register()
def goto_logout(self):
return self.header.goto_logout()
def goto_myaccount(self):
return self.header.goto_myaccount()
def goto_profile(self):
return self.header.goto_profile()
def toggle_needhelp(self):
return self.needhelplink.click()
def is_logged_in(self):
"""check if user is logged in, returns True or False"""
return self.header.is_logged_in()
def get_account_number(self):
"""return the account number of a logged in user based on urls"""
if not self.is_logged_in():
raise RuntimeError("user is not logged in")
return self.header.get_account_number()
def get_debug_info(self):
rtxt = []
for e in self.find_elements(self.locators['debug']):
if e.is_displayed():
rtxt.append(e.text)
return rtxt
def get_notification_info(self):
rtxt = []
for e in self.find_elements(self.locators['notification']):
if e.is_displayed():
rtxt.append(e.text)
return rtxt
def get_success_info(self):
rtxt = []
for e in self.find_elements(self.locators['success']):
if e.is_displayed():
rtxt.append(e.text)
return rtxt
def get_error_info(self):
rtxt = []
for e in self.find_elements(self.locators['error']):
if e.is_displayed():
rtxt.append(e.text)
return rtxt
def get_errorbox_info(self):
rtxt = []
for e in self.find_elements(self.locators['errorbox1']):
if e.is_displayed():
rtxt.append(e.text)
for e in self.find_elements(self.locators['errorbox2']):
if e.is_displayed():
rtxt.append(e.text)
return rtxt
class GenericPage_Locators_Base_1(object):
"""
locators for GenericPage object
"""
locators = {
'needhelplink' : "css=#tab",
'debug' : "css=#system-debug",
'error' : "css=.error",
'success' : "css=.passed",
'notification' : "css=#page_notifications",
'errorbox1' : "css=#errorbox",
'errorbox2' : "css=#error-box",
}
| codedsk/hubcheck | hubcheck/pageobjects/po_generic_page.py | Python | mit | 3,505 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'CategoryGrid'
db.delete_table('smartgrid_categorygrid')
# Deleting model 'Category'
db.delete_table('smartgrid_category')
# Adding model 'ColumnGrid'
db.create_table('smartgrid_columngrid', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('level', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['smartgrid.Level'])),
('column', self.gf('django.db.models.fields.IntegerField')(default=1)),
('name', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['smartgrid.ColumnName'])),
))
db.send_create_signal('smartgrid', ['ColumnGrid'])
# Adding model 'ColumnName'
db.create_table('smartgrid_columnname', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, null=True, db_index=True)),
))
db.send_create_signal('smartgrid', ['ColumnName'])
def backwards(self, orm):
# Adding model 'CategoryGrid'
db.create_table('smartgrid_categorygrid', (
('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['smartgrid.Category'])),
('column', self.gf('django.db.models.fields.IntegerField')(default=1)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('level', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['smartgrid.Level'])),
))
db.send_create_signal('smartgrid', ['CategoryGrid'])
# Adding model 'Category'
db.create_table('smartgrid_category', (
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, null=True, db_index=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('smartgrid', ['Category'])
# Deleting model 'ColumnGrid'
db.delete_table('smartgrid_columngrid')
# Deleting model 'ColumnName'
db.delete_table('smartgrid_columnname')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 18, 11, 1, 51, 455653)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 18, 11, 1, 51, 455569)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'notifications.usernotification': {
'Meta': {'object_name': 'UserNotification'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'contents': ('django.db.models.fields.TextField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'display_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.IntegerField', [], {'default': '20'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'unread': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'score_mgr.pointstransaction': {
'Meta': {'ordering': "('-transaction_date',)", 'unique_together': "(('user', 'transaction_date', 'message'),)", 'object_name': 'PointsTransaction'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {}),
'transaction_date': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'smartgrid.action': {
'Meta': {'object_name': 'Action'},
'description': ('django.db.models.fields.TextField', [], {}),
'embedded_widget': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'expire_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'point_value': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'pub_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.date(2013, 4, 18)'}),
'related_resource': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'social_bonus': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'unlock_condition': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),
'unlock_condition_text': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'through': "orm['smartgrid.ActionMember']", 'symmetrical': 'False'}),
'video_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'video_source': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
},
'smartgrid.actionmember': {
'Meta': {'unique_together': "(('user', 'action', 'submission_date'),)", 'object_name': 'ActionMember'},
'action': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['smartgrid.Action']"}),
'admin_comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'admin_link': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'approval_status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '20'}),
'award_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'completion_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '1024', 'blank': 'True'}),
'points_awarded': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['smartgrid.TextPromptQuestion']", 'null': 'True', 'blank': 'True'}),
'response': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'social_bonus_awarded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'social_email': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'submission_date': ('django.db.models.fields.DateTimeField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'smartgrid.activity': {
'Meta': {'object_name': 'Activity', '_ormbases': ['smartgrid.Action']},
'action_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['smartgrid.Action']", 'unique': 'True', 'primary_key': 'True'}),
'admin_note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'confirm_prompt': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'confirm_type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'}),
'expected_duration': ('django.db.models.fields.IntegerField', [], {}),
'point_range_end': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'point_range_start': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'smartgrid.columngrid': {
'Meta': {'object_name': 'ColumnGrid'},
'column': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['smartgrid.Level']"}),
'name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['smartgrid.ColumnName']"})
},
'smartgrid.columnname': {
'Meta': {'object_name': 'ColumnName'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'db_index': 'True'})
},
'smartgrid.commitment': {
'Meta': {'object_name': 'Commitment', '_ormbases': ['smartgrid.Action']},
'action_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['smartgrid.Action']", 'unique': 'True', 'primary_key': 'True'}),
'commitment_length': ('django.db.models.fields.IntegerField', [], {'default': '5'})
},
'smartgrid.confirmationcode': {
'Meta': {'object_name': 'ConfirmationCode'},
'action': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['smartgrid.Action']"}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 18, 11, 1, 50, 961109)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'printed_or_distributed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'smartgrid.emailreminder': {
'Meta': {'unique_together': "(('user', 'action'),)", 'object_name': 'EmailReminder'},
'action': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['smartgrid.Action']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email_address': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'send_at': ('django.db.models.fields.DateTimeField', [], {}),
'sent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'smartgrid.event': {
'Meta': {'object_name': 'Event', '_ormbases': ['smartgrid.Action']},
'action_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['smartgrid.Action']", 'unique': 'True', 'primary_key': 'True'}),
'event_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'event_location': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'event_max_seat': ('django.db.models.fields.IntegerField', [], {'default': '1000'}),
'expected_duration': ('django.db.models.fields.IntegerField', [], {})
},
'smartgrid.filler': {
'Meta': {'object_name': 'Filler', '_ormbases': ['smartgrid.Action']},
'action_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['smartgrid.Action']", 'unique': 'True', 'primary_key': 'True'})
},
'smartgrid.grid': {
'Meta': {'ordering': "('level', 'column', 'row')", 'object_name': 'Grid'},
'action': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['smartgrid.Action']"}),
'column': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['smartgrid.Level']"}),
'row': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
'smartgrid.level': {
'Meta': {'ordering': "('priority',)", 'object_name': 'Level'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'db_index': 'True'}),
'unlock_condition': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),
'unlock_condition_text': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'})
},
'smartgrid.questionchoice': {
'Meta': {'object_name': 'QuestionChoice'},
'action': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['smartgrid.Action']"}),
'choice': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['smartgrid.TextPromptQuestion']"})
},
'smartgrid.textpromptquestion': {
'Meta': {'object_name': 'TextPromptQuestion'},
'action': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['smartgrid.Action']"}),
'answer': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.TextField', [], {})
},
'smartgrid.textreminder': {
'Meta': {'unique_together': "(('user', 'action'),)", 'object_name': 'TextReminder'},
'action': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['smartgrid.Action']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'send_at': ('django.db.models.fields.DateTimeField', [], {}),
'sent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'text_carrier': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'text_number': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['smartgrid']
| jtakayama/makahiki-draft | makahiki/apps/widgets/smartgrid/migrations/0017_auto__del_categorygrid__del_category__add_columngrid__add_columnname.py | Python | mit | 20,369 |
"""
ND discretization
=================
The space discretization module groups functions to discretize n-dimensional
spaces into regions, facilitate retrieval by region, and define neighbourhoods
with fixed regions.
"""
| tgquintela/pySpatialTools | pySpatialTools/Discretization/Discretization_nd/__init__.py | Python | mit | 220 |
from dataclasses import dataclass
from typing import List
from monkey_island.cc.database import mongo
from monkey_island.cc.services.node import NodeService
@dataclass
class ManualExploitation:
hostname: str
ip_addresses: List[str]
start_time: str
def get_manual_exploitations() -> List[ManualExploitation]:
monkeys = get_manual_monkeys()
return [monkey_to_manual_exploitation(monkey) for monkey in monkeys]
def get_manual_monkeys():
return [
monkey for monkey in mongo.db.monkey.find({}) if NodeService.get_monkey_manual_run(monkey)
]
def monkey_to_manual_exploitation(monkey: dict) -> ManualExploitation:
return ManualExploitation(
hostname=monkey["hostname"],
ip_addresses=monkey["ip_addresses"],
start_time=monkey["launch_time"],
)
| guardicore/monkey | monkey/monkey_island/cc/services/reporting/exploitations/manual_exploitation.py | Python | gpl-3.0 | 813 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Kevin Breit (@kbreit) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: meraki_nat
short_description: Manage NAT rules in Meraki cloud
version_added: "2.9"
description:
- Allows for creation, management, and visibility of NAT rules (1:1, 1:many, port forwarding) within Meraki.
options:
state:
description:
        - Specifies whether NAT rules should be created/modified or queried.
choices: [present, query]
default: present
type: str
net_name:
description:
- Name of a network.
aliases: [name, network]
type: str
net_id:
description:
- ID number of a network.
type: str
org_id:
description:
- ID of organization associated to a network.
type: str
subset:
description:
- Specifies which NAT components to query.
choices: ['1:1', '1:many', all, port_forwarding]
default: all
type: list
one_to_one:
description:
- List of 1:1 NAT rules.
type: list
suboptions:
name:
description:
- A descriptive name for the rule.
type: str
public_ip:
description:
- The IP address that will be used to access the internal resource from the WAN.
type: str
lan_ip:
description:
- The IP address of the server or device that hosts the internal resource that you wish to make available on the WAN.
type: str
uplink:
description:
- The physical WAN interface on which the traffic will arrive.
choices: [both, internet1, internet2]
type: str
allowed_inbound:
description:
- The ports this mapping will provide access on, and the remote IPs that will be allowed access to the resource.
type: list
suboptions:
protocol:
description:
- Protocol to apply NAT rule to.
choices: [any, icmp-ping, tcp, udp]
type: str
default: any
destination_ports:
description:
- List of ports or port ranges that will be forwarded to the host on the LAN.
type: list
allowed_ips:
description:
- ranges of WAN IP addresses that are allowed to make inbound connections on the specified ports or port ranges, or 'any'.
type: list
one_to_many:
description:
- List of 1:many NAT rules.
type: list
suboptions:
public_ip:
description:
- The IP address that will be used to access the internal resource from the WAN.
type: str
uplink:
description:
- The physical WAN interface on which the traffic will arrive.
choices: [both, internet1, internet2]
type: str
port_rules:
description:
- List of associated port rules.
type: list
suboptions:
name:
description:
- A description of the rule.
type: str
protocol:
description:
- Protocol to apply NAT rule to.
choices: [tcp, udp]
type: str
public_port:
description:
- Destination port of the traffic that is arriving on the WAN.
type: str
local_ip:
description:
- Local IP address to which traffic will be forwarded.
type: str
local_port:
description:
- Destination port of the forwarded traffic that will be sent from the MX to the specified host on the LAN.
- If you simply wish to forward the traffic without translating the port, this should be the same as the Public port.
type: str
allowed_ips:
description:
- Remote IP addresses or ranges that are permitted to access the internal resource via this port forwarding rule, or 'any'.
type: list
port_forwarding:
description:
- List of port forwarding rules.
type: list
suboptions:
name:
description:
- A descriptive name for the rule.
type: str
lan_ip:
description:
- The IP address of the server or device that hosts the internal resource that you wish to make available on the WAN.
type: str
uplink:
description:
- The physical WAN interface on which the traffic will arrive.
choices: [both, internet1, internet2]
type: str
public_port:
description:
- A port or port ranges that will be forwarded to the host on the LAN.
type: int
local_port:
description:
- A port or port ranges that will receive the forwarded traffic from the WAN.
type: int
allowed_ips:
description:
- List of ranges of WAN IP addresses that are allowed to make inbound connections on the specified ports or port ranges (or any).
type: list
protocol:
description:
- Protocol to forward traffic for.
choices: [tcp, udp]
type: str
author:
- Kevin Breit (@kbreit)
extends_documentation_fragment: meraki
'''
EXAMPLES = r'''
- name: Query all NAT rules
meraki_nat:
auth_key: abc123
org_name: YourOrg
net_name: YourNet
state: query
subset: all
delegate_to: localhost
- name: Query 1:1 NAT rules
meraki_nat:
auth_key: abc123
org_name: YourOrg
net_name: YourNet
state: query
subset: '1:1'
delegate_to: localhost
- name: Create 1:1 rule
meraki_nat:
auth_key: abc123
org_name: YourOrg
net_name: YourNet
state: present
one_to_one:
- name: Service behind NAT
public_ip: 1.2.1.2
lan_ip: 192.168.128.1
uplink: internet1
allowed_inbound:
- protocol: tcp
destination_ports:
- 80
allowed_ips:
- 10.10.10.10
delegate_to: localhost
- name: Create 1:many rule
meraki_nat:
auth_key: abc123
org_name: YourOrg
net_name: YourNet
state: present
one_to_many:
- public_ip: 1.1.1.1
uplink: internet1
port_rules:
- name: Test rule
protocol: tcp
public_port: 10
local_ip: 192.168.128.1
local_port: 11
allowed_ips:
- any
delegate_to: localhost
- name: Create port forwarding rule
meraki_nat:
auth_key: abc123
org_name: YourOrg
net_name: YourNet
state: present
port_forwarding:
- name: Test map
lan_ip: 192.168.128.1
uplink: both
protocol: tcp
allowed_ips:
- 1.1.1.1
public_port: 10
local_port: 11
delegate_to: localhost
'''
RETURN = r'''
data:
description: Information about the created or manipulated object.
returned: success
type: complex
contains:
one_to_one:
description: Information about 1:1 NAT object.
returned: success, when 1:1 NAT object is in task
type: complex
contains:
rules:
description: List of 1:1 NAT rules.
returned: success, when 1:1 NAT object is in task
type: complex
contains:
name:
description: Name of NAT object.
returned: success, when 1:1 NAT object is in task
type: str
example: Web server behind NAT
lanIp:
description: Local IP address to be mapped.
returned: success, when 1:1 NAT object is in task
type: str
example: 192.168.128.22
publicIp:
description: Public IP address to be mapped.
returned: success, when 1:1 NAT object is in task
type: str
example: 148.2.5.100
uplink:
description: Internet port where rule is applied.
returned: success, when 1:1 NAT object is in task
type: str
example: internet1
allowedInbound:
description: List of inbound forwarding rules.
returned: success, when 1:1 NAT object is in task
type: complex
contains:
protocol:
description: Protocol to apply NAT rule to.
returned: success, when 1:1 NAT object is in task
type: str
example: tcp
destinationPorts:
description: Ports to apply NAT rule to.
returned: success, when 1:1 NAT object is in task
type: str
example: 80
allowedIps:
description: List of IP addresses to be forwarded.
returned: success, when 1:1 NAT object is in task
type: list
example: 10.80.100.0/24
one_to_many:
description: Information about 1:many NAT object.
returned: success, when 1:many NAT object is in task
type: complex
contains:
rules:
description: List of 1:many NAT rules.
returned: success, when 1:many NAT object is in task
type: complex
contains:
publicIp:
description: Public IP address to be mapped.
returned: success, when 1:many NAT object is in task
type: str
example: 148.2.5.100
uplink:
description: Internet port where rule is applied.
returned: success, when 1:many NAT object is in task
type: str
example: internet1
portRules:
description: List of NAT port rules.
returned: success, when 1:many NAT object is in task
type: complex
contains:
name:
description: Name of NAT object.
returned: success, when 1:many NAT object is in task
type: str
example: Web server behind NAT
protocol:
description: Protocol to apply NAT rule to.
returned: success, when 1:1 NAT object is in task
type: str
example: tcp
publicPort:
description: Destination port of the traffic that is arriving on WAN.
returned: success, when 1:1 NAT object is in task
type: int
example: 9443
localIp:
description: Local IP address traffic will be forwarded.
returned: success, when 1:1 NAT object is in task
type: str
example: 192.0.2.10
localPort:
description: Destination port to be forwarded to.
returned: success, when 1:1 NAT object is in task
type: int
example: 443
allowedIps:
description: List of IP addresses to be forwarded.
returned: success, when 1:1 NAT object is in task
type: list
example: 10.80.100.0/24
port_forwarding:
description: Information about port forwarding rules.
returned: success, when port forwarding is in task
type: complex
contains:
rules:
description: List of port forwarding rules.
returned: success, when port forwarding is in task
type: complex
contains:
lanIp:
description: Local IP address to be mapped.
returned: success, when port forwarding is in task
type: str
example: 192.168.128.22
allowedIps:
description: List of IP addresses to be forwarded.
returned: success, when port forwarding is in task
type: list
example: 10.80.100.0/24
name:
description: Name of NAT object.
returned: success, when port forwarding is in task
type: str
example: Web server behind NAT
protocol:
description: Protocol to apply NAT rule to.
returned: success, when port forwarding is in task
type: str
example: tcp
publicPort:
description: Destination port of the traffic that is arriving on WAN.
returned: success, when port forwarding is in task
type: int
example: 9443
localPort:
description: Destination port to be forwarded to.
returned: success, when port forwarding is in task
type: int
example: 443
uplink:
description: Internet port where rule is applied.
returned: success, when port forwarding is in task
type: str
example: internet1
'''
import os
from ansible.module_utils.basic import AnsibleModule, json, env_fallback
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_native
from ansible.module_utils.common.dict_transformations import recursive_diff
from ansible.module_utils.network.meraki.meraki import MerakiModule, meraki_argument_spec
key_map = {'name': 'name',
'public_ip': 'publicIp',
'lan_ip': 'lanIp',
'uplink': 'uplink',
'allowed_inbound': 'allowedInbound',
'protocol': 'protocol',
'destination_ports': 'destinationPorts',
'allowed_ips': 'allowedIps',
'port_rules': 'portRules',
'public_port': 'publicPort',
'local_ip': 'localIp',
'local_port': 'localPort',
}
def construct_payload(params):
if isinstance(params, list):
items = []
for item in params:
items.append(construct_payload(item))
return items
elif isinstance(params, dict):
info = {}
for param in params:
info[key_map[param]] = construct_payload(params[param])
return info
elif isinstance(params, str) or isinstance(params, int):
return params
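# Illustrative note (not executed anywhere in this module): construct_payload
# recursively renames the snake_case option keys listed in key_map into the
# camelCase names the Meraki API expects, e.g.
#   construct_payload({'public_ip': '1.2.3.4', 'allowed_ips': ['any']})
#   returns {'publicIp': '1.2.3.4', 'allowedIps': ['any']}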
def list_int_to_str(data):
return [str(item) for item in data]
def main():
# define the available arguments/parameters that a user can pass to
# the module
one_to_one_allowed_inbound_spec = dict(protocol=dict(type='str', choices=['tcp', 'udp', 'icmp-ping', 'any'], default='any'),
                                           destination_ports=dict(type='list', elements='str'),
allowed_ips=dict(type='list'),
)
one_to_many_port_inbound_spec = dict(protocol=dict(type='str', choices=['tcp', 'udp']),
name=dict(type='str'),
local_ip=dict(type='str'),
local_port=dict(type='str'),
allowed_ips=dict(type='list'),
public_port=dict(type='str'),
)
one_to_one_spec = dict(name=dict(type='str'),
public_ip=dict(type='str'),
lan_ip=dict(type='str'),
uplink=dict(type='str', choices=['internet1', 'internet2', 'both']),
                           allowed_inbound=dict(type='list', elements='dict', options=one_to_one_allowed_inbound_spec),
)
one_to_many_spec = dict(public_ip=dict(type='str'),
uplink=dict(type='str', choices=['internet1', 'internet2', 'both']),
                            port_rules=dict(type='list', elements='dict', options=one_to_many_port_inbound_spec),
)
port_forwarding_spec = dict(name=dict(type='str'),
lan_ip=dict(type='str'),
uplink=dict(type='str', choices=['internet1', 'internet2', 'both']),
protocol=dict(type='str', choices=['tcp', 'udp']),
public_port=dict(type='int'),
local_port=dict(type='int'),
allowed_ips=dict(type='list'),
)
argument_spec = meraki_argument_spec()
argument_spec.update(
net_id=dict(type='str'),
net_name=dict(type='str', aliases=['name', 'network']),
state=dict(type='str', choices=['present', 'query'], default='present'),
subset=dict(type='list', choices=['1:1', '1:many', 'all', 'port_forwarding'], default='all'),
one_to_one=dict(type='list', elements='dict', options=one_to_one_spec),
one_to_many=dict(type='list', elements='dict', options=one_to_many_spec),
port_forwarding=dict(type='list', elements='dict', options=port_forwarding_spec),
)
# the AnsibleModule object will be our abstraction working with Ansible
# this includes instantiation, a couple of common attr would be the
# args/params passed to the execution, as well as if the module
# supports check mode
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
)
meraki = MerakiModule(module, function='nat')
module.params['follow_redirects'] = 'all'
one_to_one_payload = None
one_to_many_payload = None
port_forwarding_payload = None
if meraki.params['state'] == 'present':
if meraki.params['one_to_one'] is not None:
rules = []
for i in meraki.params['one_to_one']:
data = {'name': i['name'],
'publicIp': i['public_ip'],
'uplink': i['uplink'],
'lanIp': i['lan_ip'],
'allowedInbound': construct_payload(i['allowed_inbound'])
}
for inbound in data['allowedInbound']:
inbound['destinationPorts'] = list_int_to_str(inbound['destinationPorts'])
rules.append(data)
one_to_one_payload = {'rules': rules}
if meraki.params['one_to_many'] is not None:
rules = []
for i in meraki.params['one_to_many']:
data = {'publicIp': i['public_ip'],
'uplink': i['uplink'],
}
port_rules = []
for port_rule in i['port_rules']:
rule = {'name': port_rule['name'],
'protocol': port_rule['protocol'],
'publicPort': str(port_rule['public_port']),
'localIp': port_rule['local_ip'],
'localPort': str(port_rule['local_port']),
'allowedIps': port_rule['allowed_ips'],
}
port_rules.append(rule)
data['portRules'] = port_rules
rules.append(data)
one_to_many_payload = {'rules': rules}
if meraki.params['port_forwarding'] is not None:
port_forwarding_payload = {'rules': construct_payload(meraki.params['port_forwarding'])}
for rule in port_forwarding_payload['rules']:
rule['localPort'] = str(rule['localPort'])
rule['publicPort'] = str(rule['publicPort'])
onetomany_urls = {'nat': '/networks/{net_id}/oneToManyNatRules'}
onetoone_urls = {'nat': '/networks/{net_id}/oneToOneNatRules'}
port_forwarding_urls = {'nat': '/networks/{net_id}/portForwardingRules'}
meraki.url_catalog['1:many'] = onetomany_urls
meraki.url_catalog['1:1'] = onetoone_urls
meraki.url_catalog['port_forwarding'] = port_forwarding_urls
if meraki.params['net_name'] and meraki.params['net_id']:
meraki.fail_json(msg='net_name and net_id are mutually exclusive')
org_id = meraki.params['org_id']
if not org_id:
org_id = meraki.get_org_id(meraki.params['org_name'])
net_id = meraki.params['net_id']
if net_id is None:
nets = meraki.get_nets(org_id=org_id)
net_id = meraki.get_net_id(org_id, meraki.params['net_name'], data=nets)
if meraki.params['state'] == 'query':
if meraki.params['subset'][0] == 'all':
path = meraki.construct_path('1:many', net_id=net_id)
data = {'1:many': meraki.request(path, method='GET')}
path = meraki.construct_path('1:1', net_id=net_id)
data['1:1'] = meraki.request(path, method='GET')
path = meraki.construct_path('port_forwarding', net_id=net_id)
data['port_forwarding'] = meraki.request(path, method='GET')
meraki.result['data'] = data
else:
for subset in meraki.params['subset']:
path = meraki.construct_path(subset, net_id=net_id)
data = {subset: meraki.request(path, method='GET')}
try:
meraki.result['data'][subset] = data
except KeyError:
meraki.result['data'] = {subset: data}
elif meraki.params['state'] == 'present':
meraki.result['data'] = dict()
if one_to_one_payload is not None:
path = meraki.construct_path('1:1', net_id=net_id)
current = meraki.request(path, method='GET')
if meraki.is_update_required(current, one_to_one_payload):
if meraki.module.check_mode is True:
diff = recursive_diff(current, one_to_one_payload)
current.update(one_to_one_payload)
if 'diff' not in meraki.result:
meraki.result['diff'] = {'before': {}, 'after': {}}
meraki.result['diff']['before'].update({'one_to_one': diff[0]})
meraki.result['diff']['after'].update({'one_to_one': diff[1]})
meraki.result['data'] = {'one_to_one': current}
meraki.result['changed'] = True
else:
r = meraki.request(path, method='PUT', payload=json.dumps(one_to_one_payload))
if meraki.status == 200:
diff = recursive_diff(current, one_to_one_payload)
if 'diff' not in meraki.result:
meraki.result['diff'] = {'before': {}, 'after': {}}
meraki.result['diff']['before'].update({'one_to_one': diff[0]})
meraki.result['diff']['after'].update({'one_to_one': diff[1]})
meraki.result['data'] = {'one_to_one': r}
meraki.result['changed'] = True
else:
meraki.result['data']['one_to_one'] = current
if one_to_many_payload is not None:
path = meraki.construct_path('1:many', net_id=net_id)
current = meraki.request(path, method='GET')
if meraki.is_update_required(current, one_to_many_payload):
if meraki.module.check_mode is True:
diff = recursive_diff(current, one_to_many_payload)
current.update(one_to_many_payload)
if 'diff' not in meraki.result:
meraki.result['diff'] = {'before': {}, 'after': {}}
meraki.result['diff']['before'].update({'one_to_many': diff[0]})
meraki.result['diff']['after'].update({'one_to_many': diff[1]})
meraki.result['data']['one_to_many'] = current
meraki.result['changed'] = True
else:
r = meraki.request(path, method='PUT', payload=json.dumps(one_to_many_payload))
if meraki.status == 200:
diff = recursive_diff(current, one_to_many_payload)
if 'diff' not in meraki.result:
meraki.result['diff'] = {'before': {}, 'after': {}}
meraki.result['diff']['before'].update({'one_to_many': diff[0]})
meraki.result['diff']['after'].update({'one_to_many': diff[1]})
meraki.result['data'].update({'one_to_many': r})
meraki.result['changed'] = True
else:
meraki.result['data']['one_to_many'] = current
if port_forwarding_payload is not None:
path = meraki.construct_path('port_forwarding', net_id=net_id)
current = meraki.request(path, method='GET')
if meraki.is_update_required(current, port_forwarding_payload):
if meraki.module.check_mode is True:
diff = recursive_diff(current, port_forwarding_payload)
current.update(port_forwarding_payload)
if 'diff' not in meraki.result:
meraki.result['diff'] = {'before': {}, 'after': {}}
meraki.result['diff']['before'].update({'port_forwarding': diff[0]})
meraki.result['diff']['after'].update({'port_forwarding': diff[1]})
meraki.result['data']['port_forwarding'] = current
meraki.result['changed'] = True
else:
r = meraki.request(path, method='PUT', payload=json.dumps(port_forwarding_payload))
if meraki.status == 200:
if 'diff' not in meraki.result:
meraki.result['diff'] = {'before': {}, 'after': {}}
diff = recursive_diff(current, port_forwarding_payload)
meraki.result['diff']['before'].update({'port_forwarding': diff[0]})
meraki.result['diff']['after'].update({'port_forwarding': diff[1]})
meraki.result['data'].update({'port_forwarding': r})
meraki.result['changed'] = True
else:
meraki.result['data']['port_forwarding'] = current
# in the event of a successful module execution, you will want to
# simple AnsibleModule.exit_json(), passing the key/value results
meraki.exit_json(**meraki.result)
if __name__ == '__main__':
main()
| anryko/ansible | lib/ansible/modules/network/meraki/meraki_nat.py | Python | gpl-3.0 | 29,958 |
#This program counts how many multiples of two numbers fall within the limit you set
print("This program calculates the amount of multiples within a certain range")
name = input("Who is using this program?\n")
choice = input("Would you like to begin? Y/N\n").lower()
#function for fizzing or buzzing depending on which multiple is being counted
def fizzbuzz(n):
result = []
#creates a list called 'result' and adds to it through an if/else statement
for x in range(1, n+1):
if x % multiple1 == 0 and x % multiple2 == 0:
result.append("fizz buzz")
elif x % multiple1 == 0:
result.append('fizz')
elif x % multiple2 == 0:
result.append('buzz')
else:
result.append(str(x))
return result
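# For example, with multiple1=3 and multiple2=5, fizzbuzz(15) ends with
# ['11', 'fizz', '13', '14', 'fizz buzz']; the 'fizz buzz' entries mark numbers
# divisible by both multiples.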
while choice not in ["y", "n", "yes", "no"]:
print("Incorrect entry, please try again.")
choice = input("Would you like to begin? Y/N\n").lower()
#Reads the limit and both multiples from input and converts them to ints
while choice in ["y", "yes"]:
limit = int(input("What limit would you like to set the count to? "))
multiple1 = int(input("What is the first multiple? "))
multiple2 = int(input("What is the second multiple? "))
fizz_or_buzz = input("Would you like to count the number of multiples of %d, of %d or both? " % (multiple1, multiple2)).lower()
    #if statement for deciding which multiple needs to be counted
if fizz_or_buzz in ['both']:
counter = str(fizzbuzz(limit).count("fizz buzz"))
print("The total number of multiples of both (within the set limit) are " + counter)
elif fizz_or_buzz in ["%d" % (multiple1)]:
counter = str(fizzbuzz(limit).count("fizz") + fizzbuzz(limit).count("fizz buzz"))
print("The total number of multiples of " + str(multiple1) + " (within the set limit) are " + counter)
elif fizz_or_buzz in ["%d" % (multiple2)]:
counter = str(fizzbuzz(limit).count("fizz buzz") + fizzbuzz(limit).count("fizz buzz"))
print("The total number of multiples of " + str(multiple2) + " (within the set limit) are " + counter)
else:
print("Sorry, there has been an error, please try again")
choice = input("Continue with another set of numbers? Y/N\n").lower()
while choice not in ["y", "n", "yes", "no"]:
print("Incorrect entry, please try again.")
choice = input("Continue with another set of numbers? Y/N\n").lower()
print("Thank you for using this program " + name[0].upper() + name[1:].lower(). + "Goodbye!")
| Vite94/pythondepo | FizzBuzz.py | Python | mit | 2,663 |
from behave import *
import re
import os
from rdopkg.utils import distgitmagic
from rdopkg.utils import specfile
from rdopkg.utils.distgitmagic import git
from rdopkg.utils.testing import exdiff
def _create_distgit(context, version, release, magic_comments=None):
name = 'foo-bar'
context.execute_steps(u'Given a temporary directory')
context.distgitdir = distgitmagic.create_sample_distgit(
name, version=version, release=release, magic_comments=magic_comments)
os.chdir(context.distgitdir)
# collect .spec state to compare against after actions
spec = specfile.Spec()
context.old_changelog_entry = spec.get_last_changelog_entry()
context.old_commit = git.current_commit()
@given('a distgit at Version {version} and Release {release} with magic comments') # noqa
def step_impl(context, version, release):
_create_distgit(context, version, release, magic_comments=context.text)
@given('a distgit at Version {version} and Release {release}')
def step_impl(context, version, release):
_create_distgit(context, version, release)
@given('a distgit at Version {version}')
def step_impl(context, version):
step = u'Given a distgit at Version %s and Release 2' % version
context.execute_steps(step)
@given('a distgit')
def step_impl(context):
context.execute_steps(u'Given a distgit at Version 1.2.3 and Release 2')
@given('a distgit with Change-Id {changeid}')
def step_impl(context, changeid):
context.execute_steps(u'Given a distgit at Version 1.2.3 and Release 2')
git('commit', '--amend', '-m',
context.old_commit + '\n\nChange-Id: %s' % changeid)
context.old_commit = git.current_commit()
@given('a patches branch with {n:n} patches')
def step_impl(context, n):
distgitmagic.create_sample_patches_branch(n)
@given('a patches branch with following patches')
def step_impl(context):
patches = context.text.splitlines()
distgitmagic.create_sample_patches_branch(patches=patches)
@given('a patches branch with {n:n} patches without version git tag')
def step_impl(context, n):
distgitmagic.create_sample_patches_branch(n, version_tag=False)
@given('a new version {version} with {n:n} patches from patches branch')
def step_impl(context, version, n):
distgitmagic.create_sample_upstream_new_version(version, 9, n)
@given('a new version {version}')
def step_impl(context, version):
context.execute_steps(
u'Given a new version %s with 0 patches from patches branch' %
version)
@given(u'a local file {fn} containing "{text}"')
def step_impl(context, fn, text):
with open(os.path.join(context.distgitdir, fn), 'w') as f:
f.write(text)
@given(u'a local file {fn}')
def step_impl(context, fn):
with open(os.path.join(context.distgitdir, fn), 'w') as f:
f.write(context.text)
@when(u'I set .spec file patches_base={base}')
def step_impl(context, base):
spec = specfile.Spec()
spec.set_patches_base(base)
spec.save()
@when(u'I set .spec file patches_base to existing commit +{n:n}')
def step_impl(context, n):
pb = git('show-ref', 'master-patches')[:8]
if n:
pb = '%s+%s' % (pb, n)
context.execute_steps(u'When i set .spec file patches_base=%s' % pb)
@when('I change .spec file tag {tag} to {value}')
def step_impl(context, tag, value):
spec = specfile.Spec()
spec.set_tag(tag, value)
spec.save()
@when(u'I prepend .spec file with')
def step_impl(context):
spec = specfile.Spec()
spec._txt = context.text + spec.txt
spec.save()
@when(u'I undo all changes')
def step_impl(context):
git("stash")
assert git.is_clean()
@when('I add description to .spec changelog')
def step_impl(context):
spec = specfile.Spec()
    spec._txt, n = re.subn(r'(\n%changelog\n\*[^\n]+\n)\n',
                           r'\g<1>- Description of a change\n',
                           spec.txt)
assert n == 1
spec.save()
@then('.spec file tag {tag} is {value}')
def step_impl(context, tag, value):
spec = specfile.Spec()
assert spec.get_tag(tag) == value, \
"{0} != {1}".format(spec.get_tag(tag), value)
@then('.spec file contains new changelog entry with {n:n} lines')
def step_impl(context, n):
spec = specfile.Spec()
entry = spec.get_last_changelog_entry()
assert len(entry[1]) == n
assert entry != context.old_changelog_entry
@then('.spec file doesn\'t contain new changelog entries')
def step_impl(context):
entry = specfile.Spec().get_last_changelog_entry()
assert entry == context.old_changelog_entry
@then('.spec file contains new changelog entry with {text}')
def step_impl(context, text):
spec = specfile.Spec()
entry = spec.get_last_changelog_entry()
changelog_block = '\n'.join(entry[1])
assert text in changelog_block, \
"[{0}] not found in [{1}]".format(text, changelog_block)
assert entry != context.old_changelog_entry
@then('.spec file has {n:n} patches defined')
def step_impl(context, n):
spec = specfile.Spec()
assert spec.get_n_patches() == n
@then('.spec file doesn\'t contain patches_base')
def step_impl(context):
spec = specfile.Spec()
assert spec.get_patches_base() == (None, 0)
@then('.spec file doesn\'t contain patches_ignore')
def step_impl(context):
spec = specfile.Spec()
assert spec.get_patches_ignore_regex() is None
@then('.spec file contains /{rex}/')
def step_impl(context, rex):
spec = specfile.Spec()
assert re.search(rex, spec.txt), "/%s/ not found in .spec" % rex
@then('.spec file contains {text}')
def step_impl(context, text):
spec = specfile.Spec()
assert text in spec.txt, "%s not found in .spec" % text
@then('new commit was created')
def step_impl(context):
new_commit = git.current_commit()
assert new_commit != context.old_commit
@then(u'no new commit was created')
def step_impl(context):
new_commit = git.current_commit()
assert new_commit == context.old_commit
@then(u'last commit message contains {simple_string}')
def step_impl(context, simple_string):
msg = git.current_commit_message()
assert simple_string in msg, \
(u"'{0}' not found in:\n{1}".format(simple_string, msg)
).encode('ascii', 'replace')
@then(u'last commit message is')
def step_impl(context):
msg = git.current_commit_message()
assert context.text == msg, exdiff(
context.text, msg,
header="Commit message differs from expected format:")
@then(u'git is clean')
def step_impl(context):
assert git.is_clean(), git('status').encode('ascii', 'replace')
| redhat-openstack/rdopkg | features/steps/distgit.py | Python | apache-2.0 | 6,597 |
"""THIRD PARTY LIBRARY IMPORTS"""
import win32api
"""LOCAL LIBRARY IMPORTS"""
# Both helper modules expose a function named create_dict; alias them so the
# second import does not silently shadow the first.
from moduleForFindingTuplesTime import create_dict as create_tuples_time_dict
from moduleForFindingPressTimes import create_dict as create_press_times_dict
from moduleForRecordingTimelines import start_recording
__version__ = '1.0'
__author__ = 'Matthew Nowak and Zachary Nowak' | zacandcheese/Keyboard-Biometric-Project | Project_Tuples_Alpha/__init__.py | Python | mit | 305 |
# Copyright (c) 2014-2016 Siphon Contributors.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Handle binary stream returns in NCStream format."""
from collections import OrderedDict
import itertools
import logging
import zlib
import numpy as np
from . import cdmrfeature_pb2 as cdmrf
from . import ncStream_pb2 as stream # noqa
MAGIC_HEADER = b'\xad\xec\xce\xda'
MAGIC_DATA = b'\xab\xec\xce\xba'
MAGIC_DATA2 = b'\xab\xeb\xbe\xba'
MAGIC_VDATA = b'\xab\xef\xfe\xba'
MAGIC_VEND = b'\xed\xef\xfe\xda'
MAGIC_ERR = b'\xab\xad\xba\xda'
MAGIC_HEADERCOV = b'\xad\xed\xde\xda'
MAGIC_DATACOV = b'\xab\xed\xde\xba'
log = logging.getLogger(__name__)
log.setLevel(logging.WARNING)
#
# NCStream handling
#
def read_ncstream_data(fobj):
"""Handle reading an NcStream v1 data block from a file-like object."""
data = read_proto_object(fobj, stream.Data)
if data.dataType in (stream.STRING, stream.OPAQUE) or data.vdata:
log.debug('Reading string/opaque/vlen')
num_obj = read_var_int(fobj)
log.debug('Num objects: %d', num_obj)
blocks = [read_block(fobj) for _ in range(num_obj)]
if data.dataType == stream.STRING:
blocks = [b.decode('utf-8', errors='ignore') for b in blocks]
# Again endian isn't coded properly
dt = data_type_to_numpy(data.dataType).newbyteorder('>')
if data.vdata:
return np.array([np.frombuffer(b, dtype=dt) for b in blocks], dtype=object)
else:
return np.array(blocks, dtype=dt)
elif data.dataType in _dtype_lookup:
log.debug('Reading array data')
bin_data = read_block(fobj)
log.debug('Binary data: %s', bin_data)
# Hard code to big endian for now since it's not encoded correctly
dt = data_type_to_numpy(data.dataType).newbyteorder('>')
# Handle decompressing the bytes
if data.compress == stream.DEFLATE:
bin_data = zlib.decompress(bin_data)
assert len(bin_data) == data.uncompressedSize
elif data.compress != stream.NONE:
raise NotImplementedError(f'Compression type {data.compress} not implemented!')
# Turn bytes into an array
return reshape_array(data, np.frombuffer(bin_data, dtype=dt))
elif data.dataType == stream.STRUCTURE:
sd = read_proto_object(fobj, stream.StructureData)
# Make a datatype appropriate to the rows of struct
endian = '>' if data.bigend else '<'
dt = np.dtype([(endian, np.void, sd.rowLength)])
# Turn bytes into an array
return reshape_array(data, np.frombuffer(sd.data, dtype=dt))
elif data.dataType == stream.SEQUENCE:
log.debug('Reading sequence')
blocks = []
magic = read_magic(fobj)
while magic != MAGIC_VEND:
if magic == MAGIC_VDATA:
log.error('Bad magic for struct/seq data!')
blocks.append(read_proto_object(fobj, stream.StructureData))
magic = read_magic(fobj)
return data, blocks
else:
raise NotImplementedError(f"Don't know how to handle data type: {data.dataType}")
def read_ncstream_data2(fobj):
"""Handle reading an NcStream v2 data block from a file-like object."""
data = read_proto_object(fobj, stream.DataCol)
return datacol_to_array(data)
def read_ncstream_err(fobj):
"""Handle reading an NcStream error from a file-like object and raise as error."""
err = read_proto_object(fobj, stream.Error)
raise RuntimeError(err.message)
ncstream_table = {MAGIC_HEADER: lambda f: read_proto_object(f, stream.Header),
MAGIC_DATA: read_ncstream_data,
MAGIC_DATA2: read_ncstream_data2,
MAGIC_ERR: read_ncstream_err}
def read_ncstream_messages(fobj):
"""Read a collection of NcStream messages from a file-like object."""
return read_messages(fobj, ncstream_table)
#
# CDMRemoteFeature handling
#
cdmrf_table = {MAGIC_HEADERCOV: lambda f: read_proto_object(f, cdmrf.CoverageDataset),
MAGIC_DATACOV: lambda f: read_proto_object(f, cdmrf.CoverageDataResponse),
MAGIC_DATA2: read_ncstream_data2, # For coordinates
MAGIC_ERR: read_ncstream_err}
def read_cdmrf_messages(fobj):
"""Read a collection of CDMRemoteFeature messages from a file-like object."""
return read_messages(fobj, cdmrf_table)
#
# General Utilities
#
def read_messages(fobj, magic_table):
"""Read messages from a file-like object until stream is exhausted."""
messages = []
while True:
magic = read_magic(fobj)
if not magic:
break
func = magic_table.get(magic)
if func is not None:
messages.append(func(fobj))
else:
log.error('Unknown magic: ' + str(' '.join(f'{b:02x}'
for b in bytearray(magic))))
return messages
def read_proto_object(fobj, klass):
"""Read a block of data and parse using the given protobuf object."""
log.debug('%s chunk', klass.__name__)
obj = klass()
obj.ParseFromString(read_block(fobj))
log.debug('Header: %s', str(obj))
return obj
def read_magic(fobj):
"""Read magic bytes.
Parameters
----------
fobj : file-like object
The file to read from.
Returns
-------
bytes
magic byte sequence read
"""
return fobj.read(4)
def read_block(fobj):
"""Read a block.
Reads a block from a file object by first reading the number of bytes to read, which must
be encoded as a variable-byte length integer.
Parameters
----------
fobj : file-like object
The file to read from.
Returns
-------
bytes
block of bytes read
"""
num = read_var_int(fobj)
log.debug('Next block: %d bytes', num)
return fobj.read(num)
def process_vlen(data_header, array):
"""Process vlen coming back from NCStream v2.
This takes the array of values and slices into an object array, with entries containing
the appropriate pieces of the original array. Sizes are controlled by the passed in
`data_header`.
Parameters
----------
data_header : Header
array : :class:`numpy.ndarray`
Returns
-------
ndarray
object array containing sub-sequences from the original primitive array
"""
source = iter(array)
return np.array([np.fromiter(itertools.islice(source, size), dtype=array.dtype)
for size in data_header.vlens], dtype=object)
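# For instance, if data_header.vlens is [2, 3] and array is [1, 2, 3, 4, 5], the
# result is an object array holding the two sub-arrays [1, 2] and [3, 4, 5].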
def datacol_to_array(datacol):
"""Convert DataCol from NCStream v2 into an array with appropriate type.
Depending on the data type specified, this extracts data from the appropriate members
and packs into a :class:`numpy.ndarray`, recursing as necessary for compound data types.
Parameters
----------
datacol : DataCol
Returns
-------
ndarray
array containing extracted data
"""
if datacol.dataType == stream.STRING:
arr = np.array(datacol.stringdata, dtype=object)
elif datacol.dataType == stream.OPAQUE:
arr = np.array(datacol.opaquedata, dtype=object)
elif datacol.dataType == stream.STRUCTURE:
members = OrderedDict((mem.name, datacol_to_array(mem))
for mem in datacol.structdata.memberData)
log.debug('Struct members:\n%s', str(members))
# str() around name necessary because protobuf gives unicode names, but dtype doesn't
# support them on Python 2
dt = np.dtype([(str(name), arr.dtype) for name, arr in members.items()])
log.debug('Struct dtype: %s', str(dt))
arr = np.empty((datacol.nelems,), dtype=dt)
for name, arr_data in members.items():
arr[name] = arr_data
else:
# Make an appropriate datatype
endian = '>' if datacol.bigend else '<'
dt = data_type_to_numpy(datacol.dataType).newbyteorder(endian)
# Turn bytes into an array
arr = np.frombuffer(datacol.primdata, dtype=dt)
if arr.size != datacol.nelems:
log.warning('Array size %d does not agree with nelems %d',
arr.size, datacol.nelems)
if datacol.isVlen:
arr = process_vlen(datacol, arr)
try:
arr = reshape_array(datacol, arr)
except ValueError:
# In this case, the array collapsed, need different resize that
# correctly sizes from elements
shape = tuple(r.size for r in datacol.section.range) + (datacol.vlens[0],)
arr = arr.reshape(*shape)
else:
arr = reshape_array(datacol, arr)
return arr
def reshape_array(data_header, array):
"""Extract the appropriate array shape from the header.
Can handle taking a data header and either bytes containing data or a StructureData
instance, which will have binary data as well as some additional information.
Parameters
----------
array : :class:`numpy.ndarray`
data_header : Data
"""
shape = tuple(r.size for r in data_header.section.range)
if shape:
return array.reshape(*shape)
else:
return array
# STRUCTURE = 8;
# SEQUENCE = 9;
_dtype_lookup = {stream.CHAR: 'S1', stream.BYTE: 'b', stream.SHORT: 'i2',
stream.INT: 'i4', stream.LONG: 'i8', stream.FLOAT: 'f4',
stream.DOUBLE: 'f8', stream.STRING: 'O',
stream.ENUM1: 'B', stream.ENUM2: 'u2', stream.ENUM4: 'u4',
stream.OPAQUE: 'O', stream.UBYTE: 'B', stream.USHORT: 'u2',
stream.UINT: 'u4', stream.ULONG: 'u8'}
def data_type_to_numpy(datatype, unsigned=False):
"""Convert an ncstream datatype to a numpy one."""
basic_type = _dtype_lookup[datatype]
if datatype in (stream.STRING, stream.OPAQUE):
return np.dtype(basic_type)
if unsigned:
basic_type = basic_type.replace('i', 'u')
return np.dtype('=' + basic_type)
def struct_to_dtype(struct):
"""Convert a Structure specification to a numpy structured dtype."""
# str() around name necessary because protobuf gives unicode names, but dtype doesn't
# support them on Python 2
fields = [(str(var.name), data_type_to_numpy(var.dataType, var.unsigned))
for var in struct.vars]
for s in struct.structs:
fields.append((str(s.name), struct_to_dtype(s)))
log.debug('Structure fields: %s', fields)
dt = np.dtype(fields)
return dt
def unpack_variable(var):
"""Unpack an NCStream Variable into information we can use."""
# If we actually get a structure instance, handle turning that into a variable
if var.dataType == stream.STRUCTURE:
return None, struct_to_dtype(var), 'Structure'
elif var.dataType == stream.SEQUENCE:
log.warning('Sequence support not implemented!')
dt = data_type_to_numpy(var.dataType, var.unsigned)
if var.dataType == stream.OPAQUE:
type_name = 'opaque'
elif var.dataType == stream.STRING:
type_name = 'string'
else:
type_name = dt.name
if var.data:
log.debug('Storing variable data: %s %s', dt, var.data)
if var.dataType == stream.STRING:
data = var.data
else:
# Always sent big endian
data = np.frombuffer(var.data, dtype=dt.newbyteorder('>'))
else:
data = None
return data, dt, type_name
_attr_converters = {stream.Attribute.BYTE: np.dtype('>b'),
stream.Attribute.SHORT: np.dtype('>i2'),
stream.Attribute.INT: np.dtype('>i4'),
stream.Attribute.LONG: np.dtype('>i8'),
stream.Attribute.FLOAT: np.dtype('>f4'),
stream.Attribute.DOUBLE: np.dtype('>f8')}
def unpack_attribute(att):
"""Unpack an embedded attribute into a python or numpy object."""
if att.unsigned:
log.warning('Unsupported unsigned attribute!')
# TDS 5.0 now has a dataType attribute that takes precedence
if att.len == 0: # Empty
val = None
elif att.dataType == stream.STRING: # Then look for new datatype string
val = att.sdata
elif att.dataType: # Then a non-zero new data type
val = np.frombuffer(att.data,
dtype='>' + _dtype_lookup[att.dataType], count=att.len)
    elif att.type:  # Then a non-zero old-style data type
val = np.frombuffer(att.data,
dtype=_attr_converters[att.type], count=att.len)
elif att.sdata: # This leaves both 0, try old string
val = att.sdata
else: # Assume new datatype is Char (0)
val = np.array(att.data, dtype=_dtype_lookup[att.dataType])
if att.len == 1:
val = val[0]
return att.name, val
def read_var_int(file_obj):
"""Read a variable-length integer.
Parameters
----------
file_obj : file-like object
The file to read from.
Returns
-------
int
the variable-length value read
"""
# Read all bytes from here, stopping with the first one that does not have
# the MSB set. Save the lower 7 bits, and keep stacking to the *left*.
val = 0
shift = 0
while True:
# Read next byte
next_val = ord(file_obj.read(1))
val |= ((next_val & 0x7F) << shift)
shift += 7
if not next_val & 0x80:
break
return val
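# As a worked example, the byte sequence 0xAC 0x02 decodes to 300: 0xAC contributes
# its lower 7 bits (0x2C = 44) and sets the continuation bit, then 0x02 contributes
# 2 << 7 = 256, and 44 + 256 = 300.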
| Unidata/siphon | src/siphon/cdmr/ncstream.py | Python | bsd-3-clause | 13,574 |
#!/usr/bin/python
import sys
import time
import apt_pkg
import apt
import apt.progress.base
class TextProgress(apt.progress.base.OpProgress):
def __init__(self):
self.last = 0.0
def update(self, percent):
if (self.last + 1.0) <= percent:
sys.stdout.write("\rProgress: %i.2 " % (percent))
self.last = percent
if percent >= 100:
self.last = 0.0
def done(self):
self.last = 0.0
print "\rDone "
class TextFetchProgress(apt.progress.base.AcquireProgress):
def __init__(self):
pass
def start(self):
pass
def stop(self):
pass
def fail(self, item):
print 'fail', item
def fetch(self, item):
print 'fetch', item
def ims_hit(self, item):
print 'ims_hit', item
def pulse(self, owner):
print "pulse: CPS: %s/s; Bytes: %s/%s; Item: %s/%s" % (
apt_pkg.size_to_str(self.current_cps),
apt_pkg.size_to_str(self.current_bytes),
apt_pkg.size_to_str(self.total_bytes),
self.current_items,
self.total_items)
return True
def media_change(self, medium, drive):
print "Please insert medium %s in drive %s" % (medium, drive)
sys.stdin.readline()
#return False
class TextInstallProgress(apt.progress.base.InstallProgress):
def __init__(self):
apt.progress.base.InstallProgress.__init__(self)
pass
def start_update(self):
print "start_update"
def finish_update(self):
print "finish_update"
def status_change(self, pkg, percent, status):
print "[%s] %s: %s" % (percent, pkg, status)
def update_interface(self):
apt.progress.base.InstallProgress.update_interface(self)
# usefull to e.g. redraw a GUI
time.sleep(0.1)
class TextCdromProgress(apt.progress.base.CdromProgress):
def __init__(self):
pass
# update is called regularly so that the gui can be redrawn
def update(self, text, step):
# check if we actually have some text to display
if text != "":
print "Update: %s %s" % (text.strip(), step)
def ask_cdrom_name(self):
print "Please enter cd-name: ",
cd_name = sys.stdin.readline()
return (True, cd_name.strip())
def change_cdrom(self):
print "Please insert cdrom and press <ENTER>"
answer = sys.stdin.readline()
print answer
return True
if __name__ == "__main__":
c = apt.Cache()
pkg = c["3dchess"]
if pkg.is_installed:
pkg.mark_delete()
else:
pkg.mark_install()
res = c.commit(TextFetchProgress(), TextInstallProgress())
print res
| HuayraLinux/python-apt | doc/examples/progress.py | Python | gpl-2.0 | 2,778 |
from tensorflow.python.ops import init_ops
from tensorflow.python.util import nest
import tensorflow as tf
def stack_bidirectional_dynamic_rnn(cells_fw, cells_bw, inputs, initial_states_fw=None, initial_states_bw=None,
dtype=None, sequence_length=None, parallel_iterations=None, scope=None,
time_pooling=None, pooling_avg=None, initializer=None, inter_layers=None,
inter_layer_activation=None, batch_norm=None, inter_layer_keep_prob=None,
pervasive_dropout=None, training=True):
states_fw = []
states_bw = []
prev_layer = inputs
with tf.variable_scope(scope or "stack_bidirectional_rnn", initializer=initializer):
for i, (cell_fw, cell_bw) in enumerate(zip(cells_fw, cells_bw)):
initial_state_fw = None
initial_state_bw = None
if initial_states_fw:
initial_state_fw = initial_states_fw[i]
if initial_states_bw:
initial_state_bw = initial_states_bw[i]
with tf.variable_scope('cell_{}'.format(i)):
outputs, (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
cell_fw,
cell_bw,
prev_layer,
initial_state_fw=initial_state_fw,
initial_state_bw=initial_state_bw,
sequence_length=sequence_length,
parallel_iterations=parallel_iterations,
dtype=dtype)
# Concat the outputs to create the new input.
prev_layer = tf.concat(outputs, axis=2)
if time_pooling and i < len(cells_fw) - 1:
prev_layer, sequence_length = apply_time_pooling(prev_layer, sequence_length, time_pooling[i],
pooling_avg)
if inter_layers and len(inter_layers) > i and inter_layers[i]:
layer_size = inter_layers[i]
prev_layer = tf.layers.dense(prev_layer, layer_size, use_bias=not batch_norm)
if batch_norm:
prev_layer = tf.layers.batch_normalization(prev_layer, training=training)
if inter_layer_activation.lower() == 'relu':
prev_layer = tf.nn.relu(prev_layer)
if inter_layer_keep_prob is not None:
noise_shape = [1, 1, tf.shape(prev_layer)[2]] if pervasive_dropout else None
prev_layer = tf.nn.dropout(prev_layer, keep_prob=inter_layer_keep_prob,
noise_shape=noise_shape)
states_fw.append(state_fw)
states_bw.append(state_bw)
return prev_layer, tuple(states_fw), tuple(states_bw)
def apply_time_pooling(inputs, sequence_length, stride, pooling_avg=False):
shape = [tf.shape(inputs)[0], tf.shape(inputs)[1], inputs.get_shape()[2].value]
if pooling_avg:
inputs_ = [inputs[:, i::stride, :] for i in range(stride)]
max_len = tf.shape(inputs_[0])[1]
for k in range(1, stride):
len_ = tf.shape(inputs_[k])[1]
paddings = tf.stack([[0, 0], [0, max_len - len_], [0, 0]])
inputs_[k] = tf.pad(inputs_[k], paddings=paddings)
inputs = tf.reduce_sum(inputs_, axis=0) / len(inputs_)
else:
inputs = inputs[:, ::stride, :]
inputs = tf.reshape(inputs, tf.stack([shape[0], tf.shape(inputs)[1], shape[2]]))
sequence_length = (sequence_length + stride - 1) // stride # rounding up
return inputs, sequence_length
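# For example, with stride 2 a sequence of length 7 keeps timesteps 0, 2, 4 and 6
# (or their per-pair averages, zero-padding the last incomplete pair, when
# pooling_avg is set), and sequence_length is rounded up to ceil(7 / 2) = 4.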
class CellInitializer(init_ops.Initializer):
"""
Orthogonal initialization of recurrent connections, like in Bahdanau et al. 2015
"""
def __init__(self, cell_size):
self.cell_size = cell_size
self.default_initializer = tf.get_variable_scope().initializer or init_ops.glorot_uniform_initializer()
self.initializer = tf.orthogonal_initializer()
def __call__(self, shape, dtype=None, partition_info=None, verify_shape=None):
if len(shape) == 1 or shape[1] % self.cell_size != 0:
return self.default_initializer(shape, dtype=dtype, partition_info=partition_info)
input_size = shape[0] - self.cell_size
W, U = [], []
for _ in range(shape[1] // self.cell_size):
W.append(self.default_initializer(shape=[input_size, self.cell_size]))
U.append(self.initializer(shape=[self.cell_size, self.cell_size]))
return tf.concat([tf.concat(W, axis=1), tf.concat(U, axis=1)], axis=0)
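# In other words, for a recurrent weight matrix of shape
# [input_size + cell_size, k * cell_size], the input-to-hidden blocks W keep the
# surrounding (glorot) initializer while each hidden-to-hidden block U is drawn
# from an orthogonal initializer, one block per gate.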
class DropoutGRUCell(tf.nn.rnn_cell.RNNCell):
def __init__(self, num_units, activation=None, reuse=None, kernel_initializer=None, bias_initializer=None,
layer_norm=False, state_keep_prob=None, input_keep_prob=None, input_size=None, final=False):
super(DropoutGRUCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._activation = activation or tf.nn.tanh
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._layer_norm = layer_norm
self._state_keep_prob = state_keep_prob
self._input_keep_prob = input_keep_prob
self._final = final
def batch_noise(s):
s = tf.concat(([1], tf.TensorShape(s).as_list()), 0)
return tf.random_uniform(s)
if input_keep_prob is not None:
self._input_noise = DropoutGRUCell._enumerated_map_structure(lambda i, s: batch_noise(s), input_size)
if state_keep_prob is not None:
self._state_noise = DropoutGRUCell._enumerated_map_structure(lambda i, s: batch_noise(s), num_units)
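    # Note: the dropout noise tensors above are sampled once when the cell is built
    # and reused at every timestep (shape [1, ...], so they broadcast over the batch),
    # i.e. the same mask is applied across time, in the spirit of variational
    # (recurrent) dropout rather than resampling a mask per step.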
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
@staticmethod
def _enumerated_map_structure(map_fn, *args, **kwargs):
ix = [0]
def enumerated_fn(*inner_args, **inner_kwargs):
r = map_fn(ix[0], *inner_args, **inner_kwargs)
ix[0] += 1
return r
return nest.map_structure(enumerated_fn, *args, **kwargs)
@staticmethod
def _dropout(values, recurrent_noise, keep_prob):
def dropout(index, value, noise):
random_tensor = keep_prob + noise
binary_tensor = tf.floor(random_tensor)
ret = tf.div(value, keep_prob) * binary_tensor
ret.set_shape(value.get_shape())
return ret
return DropoutGRUCell._enumerated_map_structure(dropout, values, recurrent_noise)
def call(self, inputs, state):
inputs = tf.concat(inputs, axis=1)
input_size = inputs.shape[1]
state_size = state.shape[1]
dtype = inputs.dtype
if self._state_keep_prob:
dropped_state = DropoutGRUCell._dropout(state, self._state_noise, self._state_keep_prob)
else:
dropped_state = state
if self._input_keep_prob:
dropped_inputs = DropoutGRUCell._dropout(inputs, self._input_noise, self._input_keep_prob)
else:
dropped_inputs = inputs
with tf.variable_scope('state'):
state_weights = tf.get_variable('kernel', [state_size, 3 * self._num_units], dtype=dtype, initializer=self._kernel_initializer)
with tf.variable_scope('input'):
input_weights = tf.get_variable('kernel', [input_size, 3 * self._num_units], dtype=dtype, initializer=self._kernel_initializer)
bias = tf.get_variable('bias', [3 * self._num_units], dtype=dtype, initializer=self._bias_initializer)
inputs_ = tf.matmul(dropped_inputs, input_weights)
state_ = tf.matmul(dropped_state, state_weights)
if self._layer_norm:
state_ = tf.contrib.layers.layer_norm(state_)
inputs_ = tf.contrib.layers.layer_norm(inputs_)
size = 2 * self._num_units
value = tf.nn.sigmoid(state_[:,:size] + inputs_[:,:size] + bias[:size])
r, u = tf.split(value=value, num_or_size_splits=2, axis=1)
c = self._activation(inputs_[:,size:] + state_[:,size:] * r + bias[size:])
new_h = u * state + (1 - u) * c
return new_h, new_h
class GRUCell(tf.nn.rnn_cell.RNNCell):
def __init__(self, num_units, activation=None, reuse=None, kernel_initializer=None, bias_initializer=None,
layer_norm=False):
super(GRUCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._activation = activation or tf.nn.tanh
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._layer_norm = layer_norm
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def call(self, inputs, state):
inputs = tf.concat(inputs, axis=1)
input_size = inputs.shape[1]
state_size = state.shape[1]
dtype = inputs.dtype
with tf.variable_scope("gates"):
bias_initializer = self._bias_initializer
if self._bias_initializer is None and not self._layer_norm: # bias of 1 for layer norm?
bias_initializer = init_ops.constant_initializer(1.0, dtype=dtype)
bias = tf.get_variable('bias', [2 * self._num_units], dtype=dtype, initializer=bias_initializer)
weights = tf.get_variable('kernel', [input_size + state_size, 2 * self._num_units], dtype=dtype,
initializer=self._kernel_initializer)
inputs_ = tf.matmul(inputs, weights[:input_size])
state_ = tf.matmul(state, weights[input_size:])
if self._layer_norm:
inputs_ = tf.contrib.layers.layer_norm(inputs_, scope='inputs')
state_ = tf.contrib.layers.layer_norm(state_, scope='state')
value = tf.nn.sigmoid(inputs_ + state_ + bias)
r, u = tf.split(value=value, num_or_size_splits=2, axis=1)
with tf.variable_scope("candidate"):
bias = tf.get_variable('bias', [self._num_units], dtype=dtype, initializer=self._bias_initializer)
weights = tf.get_variable('kernel', [input_size + state_size, self._num_units], dtype=dtype,
initializer=self._kernel_initializer)
c = tf.matmul(tf.concat([inputs, r * state], axis=1), weights)
if self._layer_norm:
c = tf.contrib.layers.layer_norm(c)
c = self._activation(c + bias)
new_h = u * state + (1 - u) * c
return new_h, new_h
class PLSTM(tf.nn.rnn_cell.RNNCell):
"""
Implementation of Projection-LSTM and Factorized-LSTM (https://arxiv.org/abs/1703.10722)
"""
def __init__(self, num_units, forget_bias=1.0, activation=None, reuse=None, fact_size=None, proj_size=None):
super(PLSTM, self).__init__(_reuse=reuse)
self._num_units = num_units
self._forget_bias = forget_bias
self._activation = activation or tf.tanh
self._fact_size = fact_size
self._proj_size = proj_size
@property
def state_size(self):
if self._proj_size is not None:
return self._num_units + self._proj_size
else:
return 2 * self._num_units
@property
def output_size(self):
if self._proj_size is not None:
return self._proj_size
else:
return self._num_units
def call(self, inputs, state):
sigmoid = tf.sigmoid
size = [self.state_size - self.output_size, self.output_size]
c, h = tf.split(value=state, num_or_size_splits=size, axis=1)
T = tf.concat([inputs, h], axis=1)
if self._fact_size is not None:
T = tf.layers.dense(T, self._fact_size, use_bias=False, name='factorization')
T = tf.layers.dense(T, 4 * self._num_units, use_bias=True)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
i, j, f, o = tf.split(T, num_or_size_splits=4, axis=1)
new_c = c * sigmoid(f + self._forget_bias) + sigmoid(i) * self._activation(j)
new_h = self._activation(new_c) * sigmoid(o)
if self._proj_size is not None:
new_h = tf.layers.dense(new_h, self._proj_size, use_bias=False, name='projection')
new_state = tf.concat([new_c, new_h], 1)
return new_h, new_state
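# Reading the cell above: fact_size, when set, factorizes the large
# [input + hidden, 4 * num_units] projection through a smaller bias-free bottleneck
# ("Factorized LSTM"), while proj_size, when set, projects the new hidden state down
# to proj_size ("Projection LSTM"), which is also why state_size and output_size
# depend on proj_size.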
def get_state_size(cell_type, cell_size, proj_size=None, layers=1):
if cell_type.lower() == 'plstm' and proj_size is not None:
return proj_size, (proj_size + cell_size) * layers
elif cell_type.lower() in ('lstm', 'plstm'):
return cell_size, cell_size * 2 * layers
else:
return cell_size, cell_size * layers
| eske/seq2seq | translate/rnn.py | Python | apache-2.0 | 12,958 |
import os, glob, platform
#find out if we're running on mac or linux and set the dynamic library extension
dylib_ext = ""
if platform.system().lower() == "darwin":
dylib_ext = ".dylib"
else:
dylib_ext = ".so"
print("Running on " + platform.system())
#make sure the release folder exists, and clean out any .o/.so files if there are any
if not os.path.exists( "release" ):
os.makedirs( "release" )
os.chdir( "release" )
o_files = glob.glob( "*.o" )
o_files.extend( glob.glob( "*" + dylib_ext ) )
for o_file in o_files:
os.remove( o_file )
os.chdir( ".." )
#make sure the debug folder exists, and clean out any .o/.so files if there are any
if not os.path.exists( "debug" ):
os.makedirs( "debug" )
os.chdir( "debug" )
o_files = glob.glob( "*.o" );
o_files.extend( glob.glob( "*" + dylib_ext ) )
for o_file in o_files:
os.remove( o_file )
os.chdir( ".." )
#find all the cpp files in /source. We'll compile all of them
os.chdir( "source" )
cpp_files = glob.glob( "*.cpp" );
os.chdir( ".." )
#specify the search paths/dependencies/options for gcc
include_paths = [ "../include" ]
link_paths = [ "../lib" ]
link_dependencies = [ "-lAnalyzer" ] #refers to libAnalyzer.dylib or libAnalyzer.so
debug_compile_flags = "-O0 -w -c -fpic -g -std=c++11"
release_compile_flags = "-O3 -w -c -fpic -std=c++11"
#loop through all the cpp files, build up the gcc command line, and attempt to compile each cpp file
for cpp_file in cpp_files:
#g++
command = "g++ "
#include paths
for path in include_paths:
command += "-I\"" + path + "\" "
release_command = command
release_command += release_compile_flags
release_command += " -o\"release/" + cpp_file.replace( ".cpp", ".o" ) + "\" " #the output file
release_command += "\"" + "source/" + cpp_file + "\"" #the cpp file to compile
debug_command = command
debug_command += debug_compile_flags
debug_command += " -o\"debug/" + cpp_file.replace( ".cpp", ".o" ) + "\" " #the output file
debug_command += "\"" + "source/" + cpp_file + "\"" #the cpp file to compile
#run the commands from the command line
print(release_command)
os.system( release_command )
print(debug_command)
os.system( debug_command )
#lastly, link
#g++
command = "g++ "
#add the library search paths
for link_path in link_paths:
command += "-L\"" + link_path + "\" "
#add libraries to link against
for link_dependency in link_dependencies:
command += link_dependency + " "
#make a dynamic (shared) library (.so/.dylib)
if dylib_ext == ".dylib":
command += "-dynamiclib "
else:
command += "-shared "
#figure out what the name of this analyzer is
analyzer_name = ""
for cpp_file in cpp_files:
if cpp_file.endswith( "Analyzer.cpp" ):
analyzer_name = cpp_file.replace( "Analyzer.cpp", "" )
break
#the files to create (.so/.dylib files)
if dylib_ext == ".dylib":
release_command = command + "-o release/lib" + analyzer_name + "Analyzer.dylib "
debug_command = command + "-o debug/lib" + analyzer_name + "Analyzer.dylib "
else:
release_command = command + "-o\"release/lib" + analyzer_name + "Analyzer.so\" "
debug_command = command + "-o\"debug/lib" + analyzer_name + "Analyzer.so\" "
#add all the object files to link
for cpp_file in cpp_files:
release_command += "release/" + cpp_file.replace( ".cpp", ".o" ) + " "
debug_command += "debug/" + cpp_file.replace( ".cpp", ".o" ) + " "
#run the commands from the command line
print(release_command)
os.system( release_command )
print(debug_command)
os.system( debug_command )
| ewfuentes/SaleaeSDIOAnalyzer | build_analyzer.py | Python | mit | 3,742 |
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Share Resource formatter."""
from aquilon.worker.formats.formatters import ObjectFormatter
from aquilon.worker.formats.resource import ResourceFormatter
from aquilon.aqdb.model import Share
class ShareFormatter(ResourceFormatter):
def extra_details(self, share, indent=""):
details = []
details.append(indent + " Server: %s" % share.server)
details.append(indent + " Mountpoint: %s" % share.mount)
details.append(indent + " Disk Count: %d" % share.disk_count)
details.append(indent + " Machine Count: %d" % share.machine_count)
return details
def format_proto(self, share, skeleton=None):
container = skeleton
if not container:
container = self.loaded_protocols[self.protocol].ResourceList()
skeleton = container.resources.add()
skeleton.share.server = share.server
skeleton.share.mount = share.mount
skeleton.share.disk_count = share.disk_count
skeleton.share.machine_count = share.machine_count
return super(ShareFormatter, self).format_proto(share, skeleton)
ObjectFormatter.handlers[Share] = ShareFormatter()
| stdweird/aquilon | lib/python2.6/aquilon/worker/formats/share.py | Python | apache-2.0 | 1,874 |
"""VMware vCenter plugin for integration tests."""
from __future__ import absolute_import, print_function
import os
from lib.cloud import (
CloudProvider,
CloudEnvironment,
)
from lib.util import (
find_executable,
display,
)
from lib.docker_util import (
docker_run,
docker_rm,
docker_inspect,
docker_pull,
get_docker_container_id,
)
try:
# noinspection PyPep8Naming
import ConfigParser as configparser
except ImportError:
# noinspection PyUnresolvedReferences
import configparser
class VcenterProvider(CloudProvider):
"""VMware vcenter/esx plugin. Sets up cloud resources for tests."""
DOCKER_SIMULATOR_NAME = 'vcenter-simulator'
def __init__(self, args):
"""
:type args: TestConfig
"""
super(VcenterProvider, self).__init__(args, config_extension='.ini')
self.image = 'ansible/ansible:vcenter-simulator'
self.container_name = ''
def filter(self, targets, exclude):
"""Filter out the cloud tests when the necessary config and resources are not available.
:type targets: tuple[TestTarget]
:type exclude: list[str]
"""
docker = find_executable('docker', required=False)
if docker:
return
skip = 'cloud/%s/' % self.platform
skipped = [target.name for target in targets if skip in target.aliases]
if skipped:
exclude.append(skip)
display.warning('Excluding tests marked "%s" which require the "docker" command: %s'
% (skip.rstrip('/'), ', '.join(skipped)))
def setup(self):
"""Setup the cloud resource before delegation and register a cleanup callback."""
super(VcenterProvider, self).setup()
if self._use_static_config():
self._setup_static()
else:
self._setup_dynamic()
def get_docker_run_options(self):
"""Get any additional options needed when delegating tests to a docker container.
:rtype: list[str]
"""
if self.managed:
return ['--link', self.DOCKER_SIMULATOR_NAME]
return []
def cleanup(self):
"""Clean up the cloud resource and any temporary configuration files after tests complete."""
if self.container_name:
docker_rm(self.args, self.container_name)
super(VcenterProvider, self).cleanup()
def _setup_dynamic(self):
"""Create a vcenter simulator using docker."""
container_id = get_docker_container_id()
if container_id:
display.info('Running in docker container: %s' % container_id, verbosity=1)
self.container_name = self.DOCKER_SIMULATOR_NAME
results = docker_inspect(self.args, self.container_name)
if results and not results[0].get('State', {}).get('Running'):
docker_rm(self.args, self.container_name)
results = []
if results:
display.info('Using the existing vCenter simulator docker container.', verbosity=1)
else:
display.info('Starting a new vCenter simulator docker container.', verbosity=1)
if not self.args.docker and not container_id:
# publish the simulator ports when not running inside docker
publish_ports = [
'-p', '80:80',
'-p', '443:443',
'-p', '8080:8080',
'-p', '8989:8989',
'-p', '5000:5000', # control port for flask app in simulator
]
else:
publish_ports = []
docker_pull(self.args, self.image)
docker_run(
self.args,
self.image,
['-d', '--name', self.container_name] + publish_ports,
)
if self.args.docker:
vcenter_host = self.DOCKER_SIMULATOR_NAME
elif container_id:
vcenter_host = self._get_simulator_address()
display.info('Found vCenter simulator container address: %s' % vcenter_host, verbosity=1)
else:
vcenter_host = 'localhost'
self._set_cloud_config('vcenter_host', vcenter_host)
def _get_simulator_address(self):
results = docker_inspect(self.args, self.container_name)
ipaddress = results[0]['NetworkSettings']['IPAddress']
return ipaddress
def _setup_static(self):
raise NotImplementedError()
class VcenterEnvironment(CloudEnvironment):
"""VMware vcenter/esx environment plugin. Updates integration test environment after delegation."""
def configure_environment(self, env, cmd):
"""
:type env: dict[str, str]
:type cmd: list[str]
"""
# Send the container IP down to the integration test(s)
env['vcenter_host'] = self._get_cloud_config('vcenter_host')
| Deepakkothandan/ansible | test/runner/lib/cloud/vcenter.py | Python | gpl-3.0 | 4,919 |
#!/usr/bin/env python
import unittest
from erlastic import decode, encode
from erlastic.types import *
erlang_term_binaries = [
# nil
([], list, "\x83j"),
# binary
("foo", str, '\x83m\x00\x00\x00\x03foo'),
# atom
(Atom("foo"), Atom, '\x83d\x00\x03foo'),
# atom true
(True, bool, '\x83d\x00\x04true'),
# atom false
(False, bool, '\x83d\x00\x05false'),
# atom none
(None, type(None), '\x83d\x00\x04none'),
# # byte list
# ([102, 111, 111], list, '\x83k\x00\x03foo'),
# small integer
(123, int, '\x83a{'),
# integer
(12345, int, '\x83b\x00\x0009'),
# float
(1.2345, float, '\x83c1.23449999999999993072e+00\x00\x00\x00\x00\x00'),
# tuple
((Atom("foo"), "test", 123), tuple, '\x83h\x03d\x00\x03foom\x00\x00\x00\x04testa{'),
# list
([1024, "test", 4.096], list, '\x83l\x00\x00\x00\x03b\x00\x00\x04\x00m\x00\x00\x00\x04testc4.09600000000000008527e+00\x00\x00\x00\x00\x00j'),
# small big
(12345678901234567890, long, '\x83n\x08\x00\xd2\n\x1f\xeb\x8c\xa9T\xab'),
# large big
(123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890,
long, '\x83o\x00\x00\x01D\x00\xd2\n?\xce\x96\xf1\xcf\xacK\xf1{\xefa\x11=$^\x93\xa9\x88\x17\xa0\xc2\x01\xa5%\xb7\xe3Q\x1b\x00\xeb\xe7\xe5\xd5Po\x98\xbd\x90\xf1\xc3\xddR\x83\xd1)\xfc&\xeaH\xc31w\xf1\x07\xf3\xf33\x8f\xb7\x96\x83\x05t\xeci\x9cY"\x98\x98i\xca\x11bY=\xcc\xa1\xb4R\x1bl\x01\x86\x18\xe9\xa23\xaa\x14\xef\x11[}O\x14RU\x18$\xfe\x7f\x96\x94\xcer?\xd7\x8b\x9a\xa7v\xbd\xbb+\x07X\x94x\x7fI\x024.\xa0\xcc\xde\xef:\xa7\x89~\xa4\xafb\xe4\xc1\x07\x1d\xf3cl|0\xc9P`\xbf\xab\x95z\xa2DQf\xf7\xca\xef\xb0\xc4=\x11\x06*:Y\xf58\xaf\x18\xa7\x81\x13\xdf\xbdTl4\xe0\x00\xee\x93\xd6\x83V\xc9<\xe7I\xdf\xa8.\xf5\xfc\xa4$R\x95\xef\xd1\xa7\xd2\x89\xceu!\xf8\x08\xb1Zv\xa6\xd9z\xdb0\x88\x10\xf3\x7f\xd3sc\x98[\x1a\xac6V\x1f\xad0)\xd0\x978\xd1\x02\xe6\xfbH\x149\xdc).\xb5\x92\xf6\x91A\x1b\xcd\xb8`B\xc6\x04\x83L\xc0\xb8\xafN+\x81\xed\xec?;\x1f\xab1\xc1^J\xffO\x1e\x01\x87H\x0f.ZD\x06\xf0\xbak\xaagVH]\x17\xe6I.B\x14a2\xc1;\xd1+\xea.\xe4\x92\x15\x93\xe9\'E\xd0(\xcd\x90\xfb\x10'),
# reference
(Reference('nonode@nohost', [33, 0, 0], 0), Reference, '\x83r\x00\x03d\x00\rnonode@nohost\x00\x00\x00\x00!\x00\x00\x00\x00\x00\x00\x00\x00'),
# port
(Port('nonode@nohost', 455, 0), Port, '\x83fd\x00\rnonode@nohost\x00\x00\x01\xc7\x00'),
# pid
(PID('nonode@nohost', 31, 0, 0), PID, '\x83gd\x00\rnonode@nohost\x00\x00\x00\x1f\x00\x00\x00\x00\x00'),
# function export
(Export('jobqueue', 'stats', 0), Export, '\x83qd\x00\x08jobqueued\x00\x05statsa\x00'),
]
erlang_term_decode = [
([102, 111, 111], list, '\x83k\x00\x03foo'),
]
erlang_term_encode = [
([102, 111, 111], list, '\x83l\x00\x00\x00\x03afaoaoj'),
]
class ErlangTestCase(unittest.TestCase):
def testDecode(self):
for python, expected_type, erlang in erlang_term_binaries + erlang_term_decode:
decoded = decode(erlang)
self.failUnlessEqual(python, decoded)
self.failUnless(isinstance(decoded, expected_type))
def testEncode(self):
for python, expected_type, erlang in erlang_term_binaries + erlang_term_encode:
encoded = encode(python)
self.failUnlessEqual(erlang, encoded)
if __name__ == '__main__':
unittest.main()
| pombredanne/unuk | src/unuk/libs/erlastic/tests.py | Python | bsd-3-clause | 4,053 |
import os
import sys
from tempfile import mkdtemp, mkstemp, NamedTemporaryFile
from shutil import rmtree
from urlparse import urlparse
from urllib2 import URLError
import urllib2
from numpy.testing import *
from numpy.compat import asbytes
import numpy.lib._datasource as datasource
def urlopen_stub(url, data=None):
'''Stub to replace urlopen for testing.'''
if url == valid_httpurl():
tmpfile = NamedTemporaryFile(prefix='urltmp_')
return tmpfile
else:
raise URLError('Name or service not known')
old_urlopen = None
def setup():
global old_urlopen
old_urlopen = urllib2.urlopen
urllib2.urlopen = urlopen_stub
def teardown():
urllib2.urlopen = old_urlopen
# A valid website for more robust testing
http_path = 'http://www.google.com/'
http_file = 'index.html'
http_fakepath = 'http://fake.abc.web/site/'
http_fakefile = 'fake.txt'
malicious_files = ['/etc/shadow', '../../shadow',
'..\\system.dat', 'c:\\windows\\system.dat']
magic_line = asbytes('three is the magic number')
# Utility functions used by many TestCases
def valid_textfile(filedir):
# Generate and return a valid temporary file.
fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir, text=True)
os.close(fd)
return path
def invalid_textfile(filedir):
# Generate and return an invalid filename.
fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir)
os.close(fd)
os.remove(path)
return path
def valid_httpurl():
return http_path+http_file
def invalid_httpurl():
return http_fakepath+http_fakefile
def valid_baseurl():
return http_path
def invalid_baseurl():
return http_fakepath
def valid_httpfile():
return http_file
def invalid_httpfile():
return http_fakefile
class TestDataSourceOpen(TestCase):
def setUp(self):
self.tmpdir = mkdtemp()
self.ds = datasource.DataSource(self.tmpdir)
def tearDown(self):
rmtree(self.tmpdir)
del self.ds
def test_ValidHTTP(self):
assert self.ds.open(valid_httpurl())
def test_InvalidHTTP(self):
url = invalid_httpurl()
self.assertRaises(IOError, self.ds.open, url)
try:
self.ds.open(url)
except IOError, e:
# Regression test for bug fixed in r4342.
assert e.errno is None
def test_InvalidHTTPCacheURLError(self):
self.assertRaises(URLError, self.ds._cache, invalid_httpurl())
def test_ValidFile(self):
local_file = valid_textfile(self.tmpdir)
assert self.ds.open(local_file)
def test_InvalidFile(self):
invalid_file = invalid_textfile(self.tmpdir)
self.assertRaises(IOError, self.ds.open, invalid_file)
def test_ValidGzipFile(self):
try:
import gzip
except ImportError:
# We don't have the gzip capabilities to test.
import nose
raise nose.SkipTest
# Test datasource's internal file_opener for Gzip files.
filepath = os.path.join(self.tmpdir, 'foobar.txt.gz')
fp = gzip.open(filepath, 'w')
fp.write(magic_line)
fp.close()
fp = self.ds.open(filepath)
result = fp.readline()
fp.close()
self.assertEqual(magic_line, result)
def test_ValidBz2File(self):
try:
import bz2
except ImportError:
# We don't have the bz2 capabilities to test.
import nose
raise nose.SkipTest
# Test datasource's internal file_opener for BZip2 files.
filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2')
fp = bz2.BZ2File(filepath, 'w')
fp.write(magic_line)
fp.close()
fp = self.ds.open(filepath)
result = fp.readline()
fp.close()
self.assertEqual(magic_line, result)
class TestDataSourceExists(TestCase):
def setUp(self):
self.tmpdir = mkdtemp()
self.ds = datasource.DataSource(self.tmpdir)
def tearDown(self):
rmtree(self.tmpdir)
del self.ds
def test_ValidHTTP(self):
assert self.ds.exists(valid_httpurl())
def test_InvalidHTTP(self):
self.assertEqual(self.ds.exists(invalid_httpurl()), False)
def test_ValidFile(self):
# Test valid file in destpath
tmpfile = valid_textfile(self.tmpdir)
assert self.ds.exists(tmpfile)
# Test valid local file not in destpath
localdir = mkdtemp()
tmpfile = valid_textfile(localdir)
assert self.ds.exists(tmpfile)
rmtree(localdir)
def test_InvalidFile(self):
tmpfile = invalid_textfile(self.tmpdir)
self.assertEqual(self.ds.exists(tmpfile), False)
class TestDataSourceAbspath(TestCase):
def setUp(self):
self.tmpdir = os.path.abspath(mkdtemp())
self.ds = datasource.DataSource(self.tmpdir)
def tearDown(self):
rmtree(self.tmpdir)
del self.ds
def test_ValidHTTP(self):
scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl())
local_path = os.path.join(self.tmpdir, netloc,
upath.strip(os.sep).strip('/'))
self.assertEqual(local_path, self.ds.abspath(valid_httpurl()))
def test_ValidFile(self):
tmpfile = valid_textfile(self.tmpdir)
tmpfilename = os.path.split(tmpfile)[-1]
# Test with filename only
self.assertEqual(tmpfile, self.ds.abspath(os.path.split(tmpfile)[-1]))
# Test filename with complete path
self.assertEqual(tmpfile, self.ds.abspath(tmpfile))
def test_InvalidHTTP(self):
scheme, netloc, upath, pms, qry, frg = urlparse(invalid_httpurl())
invalidhttp = os.path.join(self.tmpdir, netloc,
upath.strip(os.sep).strip('/'))
self.assertNotEqual(invalidhttp, self.ds.abspath(valid_httpurl()))
def test_InvalidFile(self):
invalidfile = valid_textfile(self.tmpdir)
tmpfile = valid_textfile(self.tmpdir)
tmpfilename = os.path.split(tmpfile)[-1]
# Test with filename only
self.assertNotEqual(invalidfile, self.ds.abspath(tmpfilename))
# Test filename with complete path
self.assertNotEqual(invalidfile, self.ds.abspath(tmpfile))
def test_sandboxing(self):
tmpfile = valid_textfile(self.tmpdir)
tmpfilename = os.path.split(tmpfile)[-1]
tmp_path = lambda x: os.path.abspath(self.ds.abspath(x))
assert tmp_path(valid_httpurl()).startswith(self.tmpdir)
assert tmp_path(invalid_httpurl()).startswith(self.tmpdir)
assert tmp_path(tmpfile).startswith(self.tmpdir)
assert tmp_path(tmpfilename).startswith(self.tmpdir)
for fn in malicious_files:
assert tmp_path(http_path+fn).startswith(self.tmpdir)
assert tmp_path(fn).startswith(self.tmpdir)
def test_windows_os_sep(self):
orig_os_sep = os.sep
try:
os.sep = '\\'
self.test_ValidHTTP()
self.test_ValidFile()
self.test_InvalidHTTP()
self.test_InvalidFile()
self.test_sandboxing()
finally:
os.sep = orig_os_sep
class TestRepositoryAbspath(TestCase):
def setUp(self):
self.tmpdir = os.path.abspath(mkdtemp())
self.repos = datasource.Repository(valid_baseurl(), self.tmpdir)
def tearDown(self):
rmtree(self.tmpdir)
del self.repos
def test_ValidHTTP(self):
scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl())
local_path = os.path.join(self.repos._destpath, netloc, \
upath.strip(os.sep).strip('/'))
filepath = self.repos.abspath(valid_httpfile())
self.assertEqual(local_path, filepath)
def test_sandboxing(self):
tmp_path = lambda x: os.path.abspath(self.repos.abspath(x))
assert tmp_path(valid_httpfile()).startswith(self.tmpdir)
for fn in malicious_files:
assert tmp_path(http_path+fn).startswith(self.tmpdir)
assert tmp_path(fn).startswith(self.tmpdir)
def test_windows_os_sep(self):
orig_os_sep = os.sep
try:
os.sep = '\\'
self.test_ValidHTTP()
self.test_sandboxing()
finally:
os.sep = orig_os_sep
class TestRepositoryExists(TestCase):
def setUp(self):
self.tmpdir = mkdtemp()
self.repos = datasource.Repository(valid_baseurl(), self.tmpdir)
def tearDown(self):
rmtree(self.tmpdir)
del self.repos
def test_ValidFile(self):
# Create local temp file
tmpfile = valid_textfile(self.tmpdir)
assert self.repos.exists(tmpfile)
def test_InvalidFile(self):
tmpfile = invalid_textfile(self.tmpdir)
self.assertEqual(self.repos.exists(tmpfile), False)
def test_RemoveHTTPFile(self):
assert self.repos.exists(valid_httpurl())
def test_CachedHTTPFile(self):
localfile = valid_httpurl()
        # Create a locally cached temp file with a URL-based
        # directory structure. This is similar to what Repository.open
# would do.
scheme, netloc, upath, pms, qry, frg = urlparse(localfile)
local_path = os.path.join(self.repos._destpath, netloc)
os.mkdir(local_path, 0700)
tmpfile = valid_textfile(local_path)
assert self.repos.exists(tmpfile)
class TestOpenFunc(TestCase):
def setUp(self):
self.tmpdir = mkdtemp()
def tearDown(self):
rmtree(self.tmpdir)
def test_DataSourceOpen(self):
local_file = valid_textfile(self.tmpdir)
# Test case where destpath is passed in
assert datasource.open(local_file, destpath=self.tmpdir)
# Test case where default destpath is used
assert datasource.open(local_file)
if hasattr(sys, 'gettotalrefcount'):
# skip these, when Python was compiled using the --with-pydebug option
del TestDataSourceOpen
del TestDataSourceExists
del TestDataSourceAbspath
del TestRepositoryExists
del TestOpenFunc
if __name__ == "__main__":
run_module_suite()
| teoliphant/numpy-refactor | numpy/lib/tests/test__datasource.py | Python | bsd-3-clause | 10,225 |
from ..Module import Module
from ..TagData import TagData
### https://tools.ietf.org/html/rfc7848
class smd(Module):
opmap = {
'encodedSignedMark': 'set',
}
def __init__(self, xmlns):
Module.__init__(self, xmlns)
self.name = 'smd'
### RESPONSE parsing
### REQUEST rendering
def render_encoded_signed_mark(self, parent, request, mark):
return self.render_header(request, parent, 'encodedSignedMark', text=mark)
def render_signed_mark(self, parent, request, mark):
return self.render_header(request, parent, 'signedMark', text=mark)
| hiqdev/reppy | heppy/modules/smd.py | Python | bsd-3-clause | 597 |
"""
NGP VAN's `ActionID` Provider
http://developers.ngpvan.com/action-id
"""
from openid.extensions import ax
from .open_id import OpenIdAuth
class ActionIDOpenID(OpenIdAuth):
"""
NGP VAN's ActionID OpenID 1.1 authentication backend
"""
name = 'actionid-openid'
URL = 'https://accounts.ngpvan.com/Home/Xrds'
USERNAME_KEY = 'email'
def get_ax_attributes(self):
"""
Return the AX attributes that ActionID responds with, as well as the
user data result that it must map to.
"""
return [
('http://openid.net/schema/contact/internet/email', 'email'),
('http://openid.net/schema/contact/phone/business', 'phone'),
('http://openid.net/schema/namePerson/first', 'first_name'),
('http://openid.net/schema/namePerson/last', 'last_name'),
('http://openid.net/schema/namePerson', 'fullname'),
]
def setup_request(self, params=None):
"""
Setup the OpenID request
Because ActionID does not advertise the availiability of AX attributes
nor use standard attribute aliases, we need to setup the attributes
manually instead of rely on the parent OpenIdAuth.setup_request()
"""
request = self.openid_request(params)
fetch_request = ax.FetchRequest()
fetch_request.add(ax.AttrInfo(
'http://openid.net/schema/contact/internet/email',
alias='ngpvanemail',
required=True
))
fetch_request.add(ax.AttrInfo(
'http://openid.net/schema/contact/phone/business',
alias='ngpvanphone',
required=False
))
fetch_request.add(ax.AttrInfo(
'http://openid.net/schema/namePerson/first',
alias='ngpvanfirstname',
required=False
))
fetch_request.add(ax.AttrInfo(
'http://openid.net/schema/namePerson/last',
alias='ngpvanlastname',
required=False
))
request.addExtension(fetch_request)
return request
| tobias47n9e/social-core | social_core/backends/ngpvan.py | Python | bsd-3-clause | 2,097 |
"""
All logic concerning ES-HyperNEAT resides here.
"""
import copy
import neat
import numpy as np
from pureples.hyperneat.hyperneat import query_cppn
from pureples.shared.visualize import draw_es
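# Illustrative only: the `params` dict consumed by ESNetwork below is expected to
# provide the keys read in __init__; the values shown are placeholders, not tuned settings.
# params = {
#     "initial_depth": 1, "max_depth": 2, "variance_threshold": 0.03,
#     "band_threshold": 0.3, "iteration_level": 1, "division_threshold": 0.5,
#     "max_weight": 5.0, "activation": "sigmoid",
# }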
class ESNetwork:
"""
The evolvable substrate network.
"""
def __init__(self, substrate, cppn, params):
self.substrate = substrate
self.cppn = cppn
self.initial_depth = params["initial_depth"]
self.max_depth = params["max_depth"]
self.variance_threshold = params["variance_threshold"]
self.band_threshold = params["band_threshold"]
self.iteration_level = params["iteration_level"]
self.division_threshold = params["division_threshold"]
self.max_weight = params["max_weight"]
self.connections = set()
# Number of layers in the network.
self.activations = 2 ** params["max_depth"] + 1
activation_functions = neat.activations.ActivationFunctionSet()
self.activation = activation_functions.get(params["activation"])
def create_phenotype_network(self, filename=None):
"""
Create a RecurrentNetwork using the ES-HyperNEAT approach.
"""
input_coordinates = self.substrate.input_coordinates
output_coordinates = self.substrate.output_coordinates
input_nodes = list(range(len(input_coordinates)))
output_nodes = list(range(len(input_nodes), len(
input_nodes)+len(output_coordinates)))
hidden_idx = len(input_coordinates)+len(output_coordinates)
coordinates, indices, draw_connections, node_evals = [], [], [], []
nodes = {}
coordinates.extend(input_coordinates)
coordinates.extend(output_coordinates)
indices.extend(input_nodes)
indices.extend(output_nodes)
# Map input and output coordinates to their IDs.
coords_to_id = dict(zip(coordinates, indices))
# Where the magic happens.
hidden_nodes, connections = self.es_hyperneat()
# Map hidden coordinates to their IDs.
for x, y in hidden_nodes:
coords_to_id[x, y] = hidden_idx
hidden_idx += 1
# For every coordinate:
# Check the connections and create a node with corresponding connections if appropriate.
for (x, y), idx in coords_to_id.items():
for c in connections:
if c.x2 == x and c.y2 == y:
draw_connections.append(c)
if idx in nodes:
initial = nodes[idx]
initial.append((coords_to_id[c.x1, c.y1], c.weight))
nodes[idx] = initial
else:
nodes[idx] = [(coords_to_id[c.x1, c.y1], c.weight)]
# Combine the indices with the connections/links;
# forming node_evals used by the RecurrentNetwork.
for idx, links in nodes.items():
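            # Each node_evals entry follows neat-python's RecurrentNetwork format:
            # (node id, activation, aggregation, bias, response, [(input node id, weight), ...]).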
node_evals.append((idx, self.activation, sum, 0.0, 1.0, links))
# Visualize the network?
if filename is not None:
draw_es(coords_to_id, draw_connections, filename)
# This is actually a feedforward network.
return neat.nn.RecurrentNetwork(input_nodes, output_nodes, node_evals)
@staticmethod
def get_weights(p):
"""
Recursively collect all weights for a given QuadPoint.
"""
temp = []
def loop(pp):
if pp is not None and all(child is not None for child in pp.cs):
for i in range(0, 4):
loop(pp.cs[i])
else:
if pp is not None:
temp.append(pp.w)
loop(p)
return temp
def variance(self, p):
"""
Find the variance of a given QuadPoint.
"""
if not p:
return 0.0
return np.var(self.get_weights(p))
def division_initialization(self, coord, outgoing):
"""
        Initialize the quadtree by dividing it into appropriate quads.
"""
root = QuadPoint(0.0, 0.0, 1.0, 1)
q = [root]
while q:
p = q.pop(0)
p.cs[0] = QuadPoint(p.x - p.width/2.0, p.y -
p.width/2.0, p.width/2.0, p.lvl + 1)
p.cs[1] = QuadPoint(p.x - p.width/2.0, p.y +
p.width/2.0, p.width/2.0, p.lvl + 1)
p.cs[2] = QuadPoint(p.x + p.width/2.0, p.y +
p.width/2.0, p.width/2.0, p.lvl + 1)
p.cs[3] = QuadPoint(p.x + p.width/2.0, p.y -
p.width/2.0, p.width/2.0, p.lvl + 1)
for c in p.cs:
c.w = query_cppn(coord, (c.x, c.y), outgoing,
self.cppn, self.max_weight)
if (p.lvl < self.initial_depth) or (p.lvl < self.max_depth and self.variance(p)
> self.division_threshold):
for child in p.cs:
q.append(child)
return root
def pruning_extraction(self, coord, p, outgoing):
"""
        Determines which connections to express: higher variance means more connections.
"""
for c in p.cs:
d_left, d_right, d_top, d_bottom = None, None, None, None
if self.variance(c) > self.variance_threshold:
self.pruning_extraction(coord, c, outgoing)
else:
d_left = abs(c.w - query_cppn(coord, (c.x - p.width,
c.y), outgoing, self.cppn, self.max_weight))
d_right = abs(c.w - query_cppn(coord, (c.x + p.width,
c.y), outgoing, self.cppn, self.max_weight))
d_top = abs(c.w - query_cppn(coord, (c.x, c.y - p.width),
outgoing, self.cppn, self.max_weight))
d_bottom = abs(c.w - query_cppn(coord, (c.x, c.y +
p.width), outgoing, self.cppn, self.max_weight))
con = None
if max(min(d_top, d_bottom), min(d_left, d_right)) > self.band_threshold:
if outgoing:
con = Connection(coord[0], coord[1], c.x, c.y, c.w)
else:
con = Connection(c.x, c.y, coord[0], coord[1], c.w)
if con is not None:
                    # Nodes will only connect upwards.
                    # If connections within the same layer are wanted, change to con.y1 <= con.y2.
if not c.w == 0.0 and con.y1 < con.y2 and not (con.x1 == con.x2 and con.y1 == con.y2):
self.connections.add(con)
def es_hyperneat(self):
"""
Explores the hidden nodes and their connections.
"""
inputs = self.substrate.input_coordinates
outputs = self.substrate.output_coordinates
hidden_nodes, unexplored_hidden_nodes = set(), set()
connections1, connections2, connections3 = set(), set(), set()
for x, y in inputs: # Explore from inputs.
root = self.division_initialization((x, y), True)
self.pruning_extraction((x, y), root, True)
connections1 = connections1.union(self.connections)
for c in connections1:
hidden_nodes.add((c.x2, c.y2))
self.connections = set()
unexplored_hidden_nodes = copy.deepcopy(hidden_nodes)
for _ in range(self.iteration_level): # Explore from hidden.
for x, y in unexplored_hidden_nodes:
root = self.division_initialization((x, y), True)
self.pruning_extraction((x, y), root, True)
connections2 = connections2.union(self.connections)
for c in connections2:
hidden_nodes.add((c.x2, c.y2))
self.connections = set()
unexplored_hidden_nodes = hidden_nodes - unexplored_hidden_nodes
for x, y in outputs: # Explore to outputs.
root = self.division_initialization((x, y), False)
self.pruning_extraction((x, y), root, False)
connections3 = connections3.union(self.connections)
self.connections = set()
connections = connections1.union(connections2.union(connections3))
return self.clean_net(connections)
def clean_net(self, connections):
"""
Clean a net for dangling connections:
Intersects paths from input nodes with paths to output.
"""
connected_to_inputs = set(tuple(i)
for i in self.substrate.input_coordinates)
connected_to_outputs = set(tuple(i)
for i in self.substrate.output_coordinates)
true_connections = set()
initial_input_connections = copy.deepcopy(connections)
initial_output_connections = copy.deepcopy(connections)
add_happened = True
while add_happened: # The path from inputs.
add_happened = False
temp_input_connections = copy.deepcopy(initial_input_connections)
for c in temp_input_connections:
if (c.x1, c.y1) in connected_to_inputs:
connected_to_inputs.add((c.x2, c.y2))
initial_input_connections.remove(c)
add_happened = True
add_happened = True
while add_happened: # The path to outputs.
add_happened = False
temp_output_connections = copy.deepcopy(initial_output_connections)
for c in temp_output_connections:
if (c.x2, c.y2) in connected_to_outputs:
connected_to_outputs.add((c.x1, c.y1))
initial_output_connections.remove(c)
add_happened = True
true_nodes = connected_to_inputs.intersection(connected_to_outputs)
for c in connections:
            # Only include the connection if both source and target nodes reside on the real path from input to output.
if (c.x1, c.y1) in true_nodes and (c.x2, c.y2) in true_nodes:
true_connections.add(c)
true_nodes -= (set(self.substrate.input_coordinates)
.union(set(self.substrate.output_coordinates)))
return true_nodes, true_connections
class QuadPoint:
"""
Class representing an area in the quadtree.
Defined by a center coordinate and the distance to the edges of the area.
"""
def __init__(self, x, y, width, lvl):
self.x = x
self.y = y
self.w = 0.0
self.width = width
self.cs = [None] * 4
self.lvl = lvl
class Connection:
"""
Class representing a connection from one point to another with a certain weight.
"""
def __init__(self, x1, y1, x2, y2, weight):
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.weight = weight
# Below is needed for use in set.
def __eq__(self, other):
        if not isinstance(other, Connection):
return NotImplemented
return (self.x1, self.y1, self.x2, self.y2) == (other.x1, other.y1, other.x2, other.y2)
    def __hash__(self):
        # Must stay consistent with __eq__, which ignores the weight.
        return hash((self.x1, self.y1, self.x2, self.y2))
def find_pattern(cppn, coord, res=60, max_weight=5.0):
"""
From a given point, query the cppn for weights to all other points.
This can be visualized as a connectivity pattern.
"""
im = np.zeros((res, res))
for x2 in range(res):
for y2 in range(res):
x2_scaled = -1.0 + (x2/float(res))*2.0
y2_scaled = -1.0 + (y2/float(res))*2.0
i = [coord[0], coord[1], x2_scaled, y2_scaled, 1.0]
n = cppn.activate(i)[0]
im[x2][y2] = n * max_weight
return im
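# Illustrative only (assumes a trained `cppn` and `import matplotlib.pyplot as plt`):
#   im = find_pattern(cppn, (0.0, -1.0))
#   plt.imshow(im, cmap="Greys", interpolation="nearest")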
| ukuleleplayer/pureples | pureples/es_hyperneat/es_hyperneat.py | Python | mit | 11,986 |
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from urbansim.abstract_variables.abstract_access_within_threshold_variable import abstract_access_within_threshold_variable
class employment_within_DDD_minutes_travel_time_hbw_am_walk(abstract_access_within_threshold_variable):
"""total number of jobs for zones within DDD minutes travel time
"""
def __init__(self, number):
self.threshold = number
self.travel_data_attribute = "travel_data.am_walk_time_in_minutes"
self.zone_attribute_to_access = "urbansim_parcel.zone.number_of_jobs"
abstract_access_within_threshold_variable.__init__(self)
from numpy import array, ma
from opus_core.tests import opus_unittest
from opus_core.datasets.dataset_pool import DatasetPool
from opus_core.storage_factory import StorageFactory
class Tests(opus_unittest.OpusTestCase):
def get_values(self, number):
variable_name = "urbansim_parcel.zone.employment_within_%s_minutes_travel_time_hbw_am_walk" % number
storage = StorageFactory().get_storage('dict_storage')
storage.write_table(
table_name='zones',
table_data={
"zone_id":array([1,3]),
"number_of_jobs":array([10, 1]),
}
)
storage.write_table(
table_name='travel_data',
table_data={
"from_zone_id": array([3,3,1,1]),
"to_zone_id": array([1,3,1,3]),
"am_walk_time_in_minutes": array([1.1, 2.2, 3.3, 4.4]),
}
)
dataset_pool = DatasetPool(package_order=['urbansim'],
storage=storage)
zone = dataset_pool.get_dataset('zone')
zone.compute_variables(variable_name,
dataset_pool=dataset_pool)
values = zone.get_attribute(variable_name)
return values
def test_to_2(self):
values = self.get_values(2)
should_be = array([0, 10])
self.assert_(ma.allequal(values, should_be), "Error in employment_within_2_minutes_travel_time_hbw_am_walk")
def test_to_4(self):
values = self.get_values(4)
should_be = array([10, 11])
self.assert_(ma.allequal(values, should_be), "Error in employment_within_4_minutes_travel_time_hbw_am_walk")
if __name__=='__main__':
opus_unittest.main()
| christianurich/VIBe2UrbanSim | 3rdparty/opus/src/urbansim_parcel/zone/employment_within_DDD_minutes_travel_time_hbw_am_walk.py | Python | gpl-2.0 | 2,530 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls.defaults import patterns, url
from .views import (IndexView, CreateView, DetailView)
from .ports.views import (AddInterfaceView, SetGatewayView)
urlpatterns = patterns('horizon.dashboards.project.routers.views',
url(r'^$', IndexView.as_view(), name='index'),
url(r'^create/$', CreateView.as_view(), name='create'),
url(r'^(?P<router_id>[^/]+)/$',
DetailView.as_view(),
name='detail'),
url(r'^(?P<router_id>[^/]+)/addinterface', AddInterfaceView.as_view(),
name='addinterface'),
url(r'^(?P<router_id>[^/]+)/setgateway',
SetGatewayView.as_view(),
name='setgateway'),
)
| Frostman/eho-horizon | openstack_dashboard/dashboards/project/routers/urls.py | Python | apache-2.0 | 1,319 |
# coding: utf-8
from lib.models.userbase import UserBase
class Admin(UserBase):
def to_string(self):
return {
"id": self.id,
"username": self.username,
"nickname": self.nickname,
}
def to_detail_string(self):
return self.to_string()
| BillBillBillBill/Take-out | backend/takeout/admin/models/admin.py | Python | mit | 305 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dot_app.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| akiokio/dot | src/dot_app/manage.py | Python | bsd-2-clause | 250 |
# coding: utf-8
from django.http import HttpResponseRedirect as redir
from django.shortcuts import render
from django.shortcuts import get_object_or_404
from eventex.subscriptions.forms import SubscriptionForm
from eventex.subscriptions.models import Subscription
def subscribe(request):
if request.method == 'POST':
return create(request)
return new(request)
def new(request):
return render(request,
'subscriptions/subscription_form.html',
{'form': SubscriptionForm()})
def create(request):
form = SubscriptionForm(request.POST)
if not form.is_valid():
return render(request,
'subscriptions/subscription_form.html',
{'form': form})
obj = form.save()
return redir('/inscricao/%d/' % obj.pk)
def detail(request, pk):
subscription = get_object_or_404(Subscription, pk=pk)
return render(request, 'subscriptions/subscription_detail.html',
{'subscription': subscription})
| xiru/xiru.wttd | eventex/subscriptions/views.py | Python | gpl-2.0 | 1,022 |
#!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import copy
from test.helper import FakeYDL, assertRegexpMatches
from youtube_dl import YoutubeDL
from youtube_dl.extractor import YoutubeIE
class YDL(FakeYDL):
def __init__(self, *args, **kwargs):
super(YDL, self).__init__(*args, **kwargs)
self.downloaded_info_dicts = []
self.msgs = []
def process_info(self, info_dict):
self.downloaded_info_dicts.append(info_dict)
def to_screen(self, msg):
self.msgs.append(msg)
def _make_result(formats, **kwargs):
res = {
'formats': formats,
'id': 'testid',
'title': 'testttitle',
'extractor': 'testex',
}
res.update(**kwargs)
return res
class TestFormatSelection(unittest.TestCase):
def test_prefer_free_formats(self):
# Same resolution => download webm
ydl = YDL()
ydl.params['prefer_free_formats'] = True
formats = [
{'ext': 'webm', 'height': 460, 'url': 'x'},
{'ext': 'mp4', 'height': 460, 'url': 'y'},
]
info_dict = _make_result(formats)
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['ext'], 'webm')
# Different resolution => download best quality (mp4)
ydl = YDL()
ydl.params['prefer_free_formats'] = True
formats = [
{'ext': 'webm', 'height': 720, 'url': 'a'},
{'ext': 'mp4', 'height': 1080, 'url': 'b'},
]
info_dict['formats'] = formats
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['ext'], 'mp4')
# No prefer_free_formats => prefer mp4 and flv for greater compatibility
ydl = YDL()
ydl.params['prefer_free_formats'] = False
formats = [
{'ext': 'webm', 'height': 720, 'url': '_'},
{'ext': 'mp4', 'height': 720, 'url': '_'},
{'ext': 'flv', 'height': 720, 'url': '_'},
]
info_dict['formats'] = formats
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['ext'], 'mp4')
ydl = YDL()
ydl.params['prefer_free_formats'] = False
formats = [
{'ext': 'flv', 'height': 720, 'url': '_'},
{'ext': 'webm', 'height': 720, 'url': '_'},
]
info_dict['formats'] = formats
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['ext'], 'flv')
def test_format_limit(self):
formats = [
{'format_id': 'meh', 'url': 'http://example.com/meh', 'preference': 1},
{'format_id': 'good', 'url': 'http://example.com/good', 'preference': 2},
{'format_id': 'great', 'url': 'http://example.com/great', 'preference': 3},
{'format_id': 'excellent', 'url': 'http://example.com/exc', 'preference': 4},
]
info_dict = _make_result(formats)
ydl = YDL()
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'excellent')
ydl = YDL({'format_limit': 'good'})
assert ydl.params['format_limit'] == 'good'
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'good')
ydl = YDL({'format_limit': 'great', 'format': 'all'})
ydl.process_ie_result(info_dict.copy())
self.assertEqual(ydl.downloaded_info_dicts[0]['format_id'], 'meh')
self.assertEqual(ydl.downloaded_info_dicts[1]['format_id'], 'good')
self.assertEqual(ydl.downloaded_info_dicts[2]['format_id'], 'great')
self.assertTrue('3' in ydl.msgs[0])
ydl = YDL()
ydl.params['format_limit'] = 'excellent'
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'excellent')
def test_format_selection(self):
formats = [
{'format_id': '35', 'ext': 'mp4', 'preference': 1, 'url': '_'},
{'format_id': '45', 'ext': 'webm', 'preference': 2, 'url': '_'},
{'format_id': '47', 'ext': 'webm', 'preference': 3, 'url': '_'},
{'format_id': '2', 'ext': 'flv', 'preference': 4, 'url': '_'},
]
info_dict = _make_result(formats)
ydl = YDL({'format': '20/47'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], '47')
ydl = YDL({'format': '20/71/worst'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], '35')
ydl = YDL()
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], '2')
ydl = YDL({'format': 'webm/mp4'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], '47')
ydl = YDL({'format': '3gp/40/mp4'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], '35')
def test_format_selection_audio(self):
formats = [
{'format_id': 'audio-low', 'ext': 'webm', 'preference': 1, 'vcodec': 'none', 'url': '_'},
{'format_id': 'audio-mid', 'ext': 'webm', 'preference': 2, 'vcodec': 'none', 'url': '_'},
{'format_id': 'audio-high', 'ext': 'flv', 'preference': 3, 'vcodec': 'none', 'url': '_'},
{'format_id': 'vid', 'ext': 'mp4', 'preference': 4, 'url': '_'},
]
info_dict = _make_result(formats)
ydl = YDL({'format': 'bestaudio'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'audio-high')
ydl = YDL({'format': 'worstaudio'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'audio-low')
formats = [
{'format_id': 'vid-low', 'ext': 'mp4', 'preference': 1, 'url': '_'},
{'format_id': 'vid-high', 'ext': 'mp4', 'preference': 2, 'url': '_'},
]
info_dict = _make_result(formats)
ydl = YDL({'format': 'bestaudio/worstaudio/best'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'vid-high')
def test_format_selection_audio_exts(self):
formats = [
{'format_id': 'mp3-64', 'ext': 'mp3', 'abr': 64, 'url': 'http://_', 'vcodec': 'none'},
{'format_id': 'ogg-64', 'ext': 'ogg', 'abr': 64, 'url': 'http://_', 'vcodec': 'none'},
{'format_id': 'aac-64', 'ext': 'aac', 'abr': 64, 'url': 'http://_', 'vcodec': 'none'},
{'format_id': 'mp3-32', 'ext': 'mp3', 'abr': 32, 'url': 'http://_', 'vcodec': 'none'},
{'format_id': 'aac-32', 'ext': 'aac', 'abr': 32, 'url': 'http://_', 'vcodec': 'none'},
]
info_dict = _make_result(formats)
ydl = YDL({'format': 'best'})
ie = YoutubeIE(ydl)
ie._sort_formats(info_dict['formats'])
ydl.process_ie_result(copy.deepcopy(info_dict))
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'aac-64')
ydl = YDL({'format': 'mp3'})
ie = YoutubeIE(ydl)
ie._sort_formats(info_dict['formats'])
ydl.process_ie_result(copy.deepcopy(info_dict))
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'mp3-64')
ydl = YDL({'prefer_free_formats': True})
ie = YoutubeIE(ydl)
ie._sort_formats(info_dict['formats'])
ydl.process_ie_result(copy.deepcopy(info_dict))
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'ogg-64')
def test_format_selection_video(self):
formats = [
{'format_id': 'dash-video-low', 'ext': 'mp4', 'preference': 1, 'acodec': 'none', 'url': '_'},
{'format_id': 'dash-video-high', 'ext': 'mp4', 'preference': 2, 'acodec': 'none', 'url': '_'},
{'format_id': 'vid', 'ext': 'mp4', 'preference': 3, 'url': '_'},
]
info_dict = _make_result(formats)
ydl = YDL({'format': 'bestvideo'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'dash-video-high')
ydl = YDL({'format': 'worstvideo'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'dash-video-low')
def test_youtube_format_selection(self):
order = [
'38', '37', '46', '22', '45', '35', '44', '18', '34', '43', '6', '5', '36', '17', '13',
# Apple HTTP Live Streaming
'96', '95', '94', '93', '92', '132', '151',
# 3D
'85', '84', '102', '83', '101', '82', '100',
# Dash video
'137', '248', '136', '247', '135', '246',
'245', '244', '134', '243', '133', '242', '160',
# Dash audio
'141', '172', '140', '171', '139',
]
for f1id, f2id in zip(order, order[1:]):
f1 = YoutubeIE._formats[f1id].copy()
f1['format_id'] = f1id
f1['url'] = 'url:' + f1id
f2 = YoutubeIE._formats[f2id].copy()
f2['format_id'] = f2id
f2['url'] = 'url:' + f2id
info_dict = _make_result([f1, f2], extractor='youtube')
ydl = YDL()
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], f1id)
info_dict = _make_result([f2, f1], extractor='youtube')
ydl = YDL()
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], f1id)
def test_format_filtering(self):
formats = [
{'format_id': 'A', 'filesize': 500, 'width': 1000},
{'format_id': 'B', 'filesize': 1000, 'width': 500},
{'format_id': 'C', 'filesize': 1000, 'width': 400},
{'format_id': 'D', 'filesize': 2000, 'width': 600},
{'format_id': 'E', 'filesize': 3000},
{'format_id': 'F'},
{'format_id': 'G', 'filesize': 1000000},
]
for f in formats:
f['url'] = 'http://_/'
f['ext'] = 'unknown'
info_dict = _make_result(formats)
ydl = YDL({'format': 'best[filesize<3000]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'D')
ydl = YDL({'format': 'best[filesize<=3000]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'E')
ydl = YDL({'format': 'best[filesize <= ? 3000]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'F')
ydl = YDL({'format': 'best [filesize = 1000] [width>450]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'B')
ydl = YDL({'format': 'best [filesize = 1000] [width!=450]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'C')
ydl = YDL({'format': '[filesize>?1]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'G')
ydl = YDL({'format': '[filesize<1M]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'E')
ydl = YDL({'format': '[filesize<1MiB]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'G')
def test_add_extra_info(self):
test_dict = {
'extractor': 'Foo',
}
extra_info = {
'extractor': 'Bar',
'playlist': 'funny videos',
}
YDL.add_extra_info(test_dict, extra_info)
self.assertEqual(test_dict['extractor'], 'Foo')
self.assertEqual(test_dict['playlist'], 'funny videos')
def test_prepare_filename(self):
info = {
'id': '1234',
'ext': 'mp4',
'width': None,
}
def fname(templ):
ydl = YoutubeDL({'outtmpl': templ})
return ydl.prepare_filename(info)
self.assertEqual(fname('%(id)s.%(ext)s'), '1234.mp4')
self.assertEqual(fname('%(id)s-%(width)s.%(ext)s'), '1234-NA.mp4')
# Replace missing fields with 'NA'
self.assertEqual(fname('%(uploader_date)s-%(id)s.%(ext)s'), 'NA-1234.mp4')
def test_format_note(self):
ydl = YoutubeDL()
self.assertEqual(ydl._format_note({}), '')
assertRegexpMatches(self, ydl._format_note({
'vbr': 10,
}), '^\s*10k$')
if __name__ == '__main__':
unittest.main()
| rzhxeo/youtube-dl | test/test_YoutubeDL.py | Python | unlicense | 14,539 |