repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---|
ericchuhong/WellSoonWeb | www/models.py | 1 | 1460 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Models for users, blogs and comments
"""
__author__ = 'Chuhong Ma'
import time, uuid
from orm import Model, StringField, BoolenField, FloatField, TextField
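# next_id() builds a 50-character primary key: a zero-padded millisecond
# timestamp, a uuid4 hex string and a '000' suffix, which fits the
# varchar(50) id columns declared below.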
def next_id():
return '%015d%s000' % (int(time.time() * 1000), uuid.uuid4().hex)
class User(Model):
__table__ = 'users'
id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
email = StringField(ddl='varchar(50)')
passwd = StringField(ddl='varchar(50)')
admin = BoolenField()
name = StringField(ddl='varchar(50)')
image = StringField(ddl='varchar(500)')
created_at = FloatField(default=time.time)
class Blog(Model):
__table__ = 'blogs'
id = StringField(primary_key=True, default=next_id,ddl='varchar(50)')
user_id = StringField(ddl='varchar(50)')
user_name = StringField(ddl='varchar(50)')
user_image = StringField(ddl='varchar(50)')
name = StringField(ddl='varchar(50)')
summary = StringField(ddl='varchar(50)')
content = TextField()
created_at = FloatField(default=time.time)
class Comment(Model):
__table__ = 'comments'
id = StringField(primary_key=True, default=next_id,ddl='varchar(50)')
blog_id = StringField(ddl='varchar(50)')
user_id = StringField(ddl='varchar(50)')
user_name = StringField(ddl='varchar(50)')
user_image = StringField(ddl='varchar(50)')
content = TextField()
created_at = FloatField(default=time.time)
| mit | -4,328,421,956,334,911,500 | 27.076923 | 74 | 0.65411 | false |
sschmeier/genomics-tutorial | conf.py | 1 | 13833 | # -*- coding: utf-8 -*-
#
# Genomics Tutorial documentation build configuration file, created by
# sphinx-quickstart on Sat Nov 19 11:28:35 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# for enabling markdown
#from recommonmark.parser import CommonMarkParser
#source_parsers = {
# '.md': CommonMarkParser,
#}
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#source_suffix = ['.rst', '.md']
source_suffix = ['.rst']
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Genomics Tutorial'
copyright = u'2016-2019, Sebastian Schmeier (https://sschmeier.com)'
author = u'Sebastian Schmeier'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'2019.03'
# The full version, including alpha/beta/rc tags.
release = u'2019.03'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# if you want to exclude certain sections based on a tag, e.g. "sphinx-build -t restrictivemode ..."
#if tags.has('restrictivemode'):
# exclude_patterns = ['**/*bla*']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# Use automatic figure numbering
numfig=True
# you need to specify all three formats in this section, otherwise the latex build throws an error
#numfig_format={'figure': 'Figure %s', 'table': 'Table %s', 'code-block': 'Listing %s'}
#numfig_secnum_depth = 1
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#from better import better_theme_path
#html_theme_path = [better_theme_path]
#html_theme = 'better'
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
#html_theme = 'alabaster'
#html_theme = "classic"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the documentation.
#html_theme_options = {
# 'collapse_navigation': False,
# 'display_version': True
#}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# rtd
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
html_title = u''
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
html_logo = 'images/icon.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = 'images/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
html_last_updated_fmt = ''
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Genomicsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
#
'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
'preamble': r'''
\usepackage{charter}
\usepackage[defaultsans]{lato}
\usepackage{inconsolata}
''',
# Latex figure (float) alignment
#
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Genomics.tex', u'Computational Genomics Tutorial',
u'Sebastian Schmeier (https://sschmeier.com)', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
latex_logo = 'images/icon-latex.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
latex_use_parts = False
# If true, show page references after internal links.
#
latex_show_pagerefs = True
# If true, show URL addresses after external links.
# one of:
# 'no' – do not display URLs (default)
# 'footnote' – display URLs in footnotes
# 'inline' – display URLs inline in parentheses
latex_show_urls = 'footnote'
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'Genomics Tutorial', u'Genomics Tutorial Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Genomics', u'Computational Genomics Tutorial',
author, 'Computational Genomics Tutorial', 'Computational Genomics Tutorial Content.',
'teaching'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
#-----------------------------------------------------
# SEB:
# global substitutions
# epilog will be added to the end of each rst-file
# we define some shortcuts here
rst_epilog = """
.. |fileanc| replace:: ancestor
.. |fileanc1| replace:: ancestor-R1
.. |fileanc2| replace:: ancestor-R2
.. |fileevol| replace:: evolved-6
.. |fileevol1| replace:: evolved-6-R1
.. |fileevol2| replace:: evolved-6-R2
.. |conda| replace:: `conda <http://conda.pydata.org/miniconda.html>`__
.. |kraken| replace:: `Kraken2 <https://www.ccb.jhu.edu/software/kraken2/>`__
.. |bracken| replace:: `Bracken <https://ccb.jhu.edu/software/bracken/index.shtml>`__
.. |centrifuge| replace:: `Centrifuge <http://www.ccb.jhu.edu/software/centrifuge/index.shtml>`__
.. |ncbitax| replace:: `NCBI Taxonomy <https://www.ncbi.nlm.nih.gov/taxonomy>`__
.. |spades| replace:: `SPAdes <http://bioinf.spbau.ru/spades>`__
.. |krona| replace:: `Krona <https://github.com/marbl/Krona/wiki>`__
.. |solexaqa| replace:: `SolexaQA++ <http://solexaqa.sourceforge.net>`__
.. |fastqc| replace:: `FastQC <http://www.bioinformatics.babraham.ac.uk/projects/fastqc/>`__
.. |sickle| replace:: `Sickle <https://github.com/najoshi/sickle>`__
.. |quast| replace:: `Quast <http://quast.bioinf.spbau.ru/>`__
.. |freebayes| replace:: `freebayes <https://github.com/ekg/freebayes>`__
.. |samtools| replace:: `SAMtools <http://samtools.sourceforge.net/>`__
.. |bwa| replace:: `BWA <http://bio-bwa.sourceforge.net/>`__
.. |bowtie| replace:: `Bowtie2 <http://bowtie-bio.sourceforge.net/bowtie2/index.shtml>`__
.. |qualimap| replace:: `QualiMap <http://qualimap.bioinfo.cipf.es/>`__
.. |R| replace:: `R <https://www.r-project.org/>`__
.. |bcftools| replace:: `BCFtools <http://www.htslib.org/doc/bcftools.html>`__
.. |vcflib| replace:: `vcflib <https://github.com/vcflib/vcflib#vcflib>`__
.. |illumina| replace:: `Illumina <http://illumina.com>`__
.. |augustus| replace:: `Augustus <http://augustus.gobics.de>`__
.. |busco| replace:: `BUSCO <http://busco.ezlab.org>`__
.. |blastn| replace:: `blastn <https://blast.ncbi.nlm.nih.gov/Blast.cgi?PAGE_TYPE=BlastSearch>`__
.. |blast| replace:: `BLAST <https://blast.ncbi.nlm.nih.gov/Blast.cgi>`__
.. |igv| replace:: `IGV <http://software.broadinstitute.org/software/igv/>`__
.. |muscle| replace:: `MUSCLE <http://www.ebi.ac.uk/Tools/msa/muscle/>`__
.. |raxml| replace:: `RAxML-NG <https://github.com/amkozlov/raxml-ng>`__
.. |snpeff| replace:: `SnpEff <http://snpeff.sourceforge.net/index.html>`__
"""
# prolog will be added to the beginning of each file
# rst_prolog=""
# to be able to use two dashes in my own blocks I turn off smartypants
#html_use_smartypants=False
smart_quotes = False
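# setup() is invoked by Sphinx at build time; here it only registers the
# custom stylesheet css/seb.css on top of the theme's defaults.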
def setup(app):
app.add_stylesheet('css/seb.css')
| mit | -4,872,524,209,302,796,000 | 31.081206 | 97 | 0.688725 | false |
kreeger/etcetera | checkout/admin.py | 1 | 1647 | from etcetera.checkout.models import Checkout
from django.contrib import admin
# This file determines what's shown in the admin interface
class CheckoutAdmin(admin.ModelAdmin):
fieldsets = (
('Basic information', {
'fields': (
'first_name',
'last_name',
'department',
'department_text',
'course',
'phone',
'email',
'building',
'room',
'creation_date',
'handling_user',
),
}),
('Checkout information', {
'classes': (
'collapse',
),
'fields': (
'checkout_type',
'return_type',
'equipment_needed',
'out_date',
'return_date',
'delivering_user',
'action_date',
'returning_person',
'equipment_list',
'other_equipment',
'confirmation_sent',
'completion_date',
),
}),
)
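# Changelist configuration: columns shown in the overview, filtering by
# checkout date, and the fields covered by the admin search box.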
list_display = (
'first_name',
'last_name',
'department',
'equipment_needed',
'checkout_type',
'out_date',
)
list_filter = (
'out_date',
)
search_fields = (
'first_name',
'last_name',
'department__name',
'department_text',
'building__name',
'room',
'equipment_needed',
)
# Register the appropriate models
admin.site.register(Checkout, CheckoutAdmin)
| bsd-3-clause | 692,675,820,649,193,000 | 24.734375 | 58 | 0.428051 | false |
kaiw/meld | meld/filediff.py | 1 | 85802 | ### Copyright (C) 2002-2006 Stephen Kennedy <[email protected]>
### Copyright (C) 2009-2012 Kai Willadsen <[email protected]>
### This program is free software; you can redistribute it and/or modify
### it under the terms of the GNU General Public License as published by
### the Free Software Foundation; either version 2 of the License, or
### (at your option) any later version.
### This program is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
### You should have received a copy of the GNU General Public License
### along with this program; if not, write to the Free Software
### Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
### USA.
import copy
import functools
import io
import os
from gettext import gettext as _
import time
from multiprocessing import Pool
from multiprocessing.pool import ThreadPool
import pango
import glib
import gobject
import gtk
import gtk.keysyms
from . import diffutil
from . import matchers
from . import meldbuffer
from . import melddoc
from . import merge
from . import misc
from . import patchdialog
from . import paths
from . import recent
from . import undo
from .ui import findbar
from .ui import gnomeglade
from .meldapp import app
from .util.compat import text_type
from .util.sourceviewer import srcviewer
class CachedSequenceMatcher(object):
"""Simple class for caching diff results, with LRU-based eviction
Results from the SequenceMatcher are cached and timestamped, and
subsequently evicted based on least-recent generation/usage. The LRU-based
eviction is overly simplistic, but is okay for our usage pattern.
"""
process_pool = None
def __init__(self):
if self.process_pool is None:
if os.name == "nt":
CachedSequenceMatcher.process_pool = ThreadPool(None)
else:
# maxtasksperchild is new in Python 2.7; this is for 2.6 compat
try:
CachedSequenceMatcher.process_pool = Pool(
None, matchers.init_worker, maxtasksperchild=1)
except TypeError:
CachedSequenceMatcher.process_pool = Pool(
None, matchers.init_worker)
self.cache = {}
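# match() serves cached opcodes synchronously, refreshing the entry's
# timestamp; on a cache miss it runs matchers.matcher_worker in the worker
# pool and delivers the result to cb on the GTK main loop via idle_add.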
def match(self, text1, textn, cb):
try:
self.cache[(text1, textn)][1] = time.time()
cb(self.cache[(text1, textn)][0])
except KeyError:
def inline_cb(opcodes):
self.cache[(text1, textn)] = [opcodes, time.time()]
gobject.idle_add(lambda: cb(opcodes))
self.process_pool.apply_async(matchers.matcher_worker,
(text1, textn),
callback=inline_cb)
def clean(self, size_hint):
"""Clean the cache if necessary
@param size_hint: the recommended minimum number of cache entries
"""
if len(self.cache) < size_hint * 3:
return
items = self.cache.items()
items.sort(key=lambda it: it[1][1])
for item in items[:-size_hint * 2]:
del self.cache[item[0]]
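# Bit flags for the Shift/Ctrl modifier state and the chunk-action modes they
# select: Shift turns the default replace action into delete, Ctrl into insert.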
MASK_SHIFT, MASK_CTRL = 1, 2
MODE_REPLACE, MODE_DELETE, MODE_INSERT = 0, 1, 2
class CursorDetails(object):
__slots__ = ("pane", "pos", "line", "offset", "chunk", "prev", "next",
"prev_conflict", "next_conflict")
def __init__(self):
for var in self.__slots__:
setattr(self, var, None)
class TaskEntry(object):
__slots__ = ("filename", "file", "buf", "codec", "pane", "was_cr")
def __init__(self, *args):
for var, val in zip(self.__slots__, args):
setattr(self, var, val)
class TextviewLineAnimation(object):
__slots__ = ("start_mark", "end_mark", "start_rgba", "end_rgba",
"start_time", "duration")
def __init__(self, mark0, mark1, rgba0, rgba1, duration):
self.start_mark = mark0
self.end_mark = mark1
self.start_rgba = rgba0
self.end_rgba = rgba1
self.start_time = glib.get_current_time()
self.duration = duration
class FileDiff(melddoc.MeldDoc, gnomeglade.Component):
"""Two or three way diff of text files.
"""
differ = diffutil.Differ
keylookup = {gtk.keysyms.Shift_L : MASK_SHIFT,
gtk.keysyms.Control_L : MASK_CTRL,
gtk.keysyms.Shift_R : MASK_SHIFT,
gtk.keysyms.Control_R : MASK_CTRL}
# Identifiers for MsgArea messages
(MSG_SAME, MSG_SLOW_HIGHLIGHT) = list(range(2))
__gsignals__ = {
'next-conflict-changed': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, (bool, bool)),
'action-mode-changed': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, (int,)),
}
def __init__(self, prefs, num_panes):
"""Start up an filediff with num_panes empty contents.
"""
melddoc.MeldDoc.__init__(self, prefs)
gnomeglade.Component.__init__(self, paths.ui_dir("filediff.ui"), "filediff")
self.map_widgets_into_lists(["textview", "fileentry", "diffmap",
"scrolledwindow", "linkmap",
"statusimage", "msgarea_mgr", "vbox",
"selector_hbox", "readonlytoggle"])
# This SizeGroup isn't actually necessary for FileDiff; it's for
# handling non-homogeneous selectors in FileComp. It's also fragile.
column_sizes = gtk.SizeGroup(gtk.SIZE_GROUP_HORIZONTAL)
column_sizes.set_ignore_hidden(True)
for widget in self.selector_hbox:
column_sizes.add_widget(widget)
self.warned_bad_comparison = False
# Some sourceviews bind their own undo mechanism, which we replace
gtk.binding_entry_remove(srcviewer.GtkTextView, gtk.keysyms.z,
gtk.gdk.CONTROL_MASK)
gtk.binding_entry_remove(srcviewer.GtkTextView, gtk.keysyms.z,
gtk.gdk.CONTROL_MASK | gtk.gdk.SHIFT_MASK)
for v in self.textview:
v.set_buffer(meldbuffer.MeldBuffer())
v.set_show_line_numbers(self.prefs.show_line_numbers)
v.set_insert_spaces_instead_of_tabs(self.prefs.spaces_instead_of_tabs)
v.set_wrap_mode(self.prefs.edit_wrap_lines)
if self.prefs.show_whitespace:
v.set_draw_spaces(srcviewer.spaces_flag)
srcviewer.set_tab_width(v, self.prefs.tab_size)
self._keymask = 0
self.load_font()
self.deleted_lines_pending = -1
self.textview_overwrite = 0
self.focus_pane = None
self.textview_overwrite_handlers = [ t.connect("toggle-overwrite", self.on_textview_toggle_overwrite) for t in self.textview ]
self.textbuffer = [v.get_buffer() for v in self.textview]
self.buffer_texts = [meldbuffer.BufferLines(b) for b in self.textbuffer]
self.undosequence = undo.UndoSequence()
self.text_filters = []
self.create_text_filters()
self.app_handlers = [app.connect("text-filters-changed",
self.on_text_filters_changed)]
self.buffer_filtered = [meldbuffer.BufferLines(b, self._filter_text)
for b in self.textbuffer]
for (i, w) in enumerate(self.scrolledwindow):
w.get_vadjustment().connect("value-changed", self._sync_vscroll, i)
w.get_hadjustment().connect("value-changed", self._sync_hscroll)
self._connect_buffer_handlers()
self._sync_vscroll_lock = False
self._sync_hscroll_lock = False
self._scroll_lock = False
self.linediffer = self.differ()
self.linediffer.ignore_blanks = self.prefs.ignore_blank_lines
self.force_highlight = False
self.syncpoints = []
self.in_nested_textview_gutter_expose = False
self._cached_match = CachedSequenceMatcher()
self.anim_source_id = [None for buf in self.textbuffer]
self.animating_chunks = [[] for buf in self.textbuffer]
for buf in self.textbuffer:
buf.create_tag("inline")
buf.connect("notify::has-selection",
self.update_text_actions_sensitivity)
actions = (
("MakePatch", None, _("Format as Patch..."), None,
_("Create a patch using differences between files"),
self.make_patch),
("SaveAll", None, _("Save A_ll"), "<Ctrl><Shift>L",
_("Save all files in the current comparison"),
self.on_save_all_activate),
("Revert", gtk.STOCK_REVERT_TO_SAVED, None, None,
_("Revert files to their saved versions"),
self.on_revert_activate),
("SplitAdd", None, _("Add Synchronization Point"), None,
_("Add a manual point for synchronization of changes between "
"files"),
self.add_sync_point),
("SplitClear", None, _("Clear Synchronization Points"), None,
_("Clear manual change sychronization points"),
self.clear_sync_points),
("PrevConflict", None, _("Previous Conflict"), "<Ctrl>I",
_("Go to the previous conflict"),
lambda x: self.on_next_conflict(gtk.gdk.SCROLL_UP)),
("NextConflict", None, _("Next Conflict"), "<Ctrl>K",
_("Go to the next conflict"),
lambda x: self.on_next_conflict(gtk.gdk.SCROLL_DOWN)),
("PushLeft", gtk.STOCK_GO_BACK, _("Push to Left"), "<Alt>Left",
_("Push current change to the left"),
lambda x: self.push_change(-1)),
("PushRight", gtk.STOCK_GO_FORWARD,
_("Push to Right"), "<Alt>Right",
_("Push current change to the right"),
lambda x: self.push_change(1)),
# FIXME: using LAST and FIRST is terrible and unreliable icon abuse
("PullLeft", gtk.STOCK_GOTO_LAST,
_("Pull from Left"), "<Alt><Shift>Right",
_("Pull change from the left"),
lambda x: self.pull_change(-1)),
("PullRight", gtk.STOCK_GOTO_FIRST,
_("Pull from Right"), "<Alt><Shift>Left",
_("Pull change from the right"),
lambda x: self.pull_change(1)),
("CopyLeftUp", None, _("Copy Above Left"), "<Alt>bracketleft",
_("Copy change above the left chunk"),
lambda x: self.copy_change(-1, -1)),
("CopyLeftDown", None, _("Copy Below Left"), "<Alt>semicolon",
_("Copy change below the left chunk"),
lambda x: self.copy_change(-1, 1)),
("CopyRightUp", None, _("Copy Above Right"), "<Alt>bracketright",
_("Copy change above the right chunk"),
lambda x: self.copy_change(1, -1)),
("CopyRightDown", None, _("Copy Below Right"), "<Alt>quoteright",
_("Copy change below the right chunk"),
lambda x: self.copy_change(1, 1)),
("Delete", gtk.STOCK_DELETE, _("Delete"), "<Alt>Delete",
_("Delete change"),
self.delete_change),
("MergeFromLeft", None, _("Merge All from Left"), None,
_("Merge all non-conflicting changes from the left"),
lambda x: self.pull_all_non_conflicting_changes(-1)),
("MergeFromRight", None, _("Merge All from Right"), None,
_("Merge all non-conflicting changes from the right"),
lambda x: self.pull_all_non_conflicting_changes(1)),
("MergeAll", None, _("Merge All"), None,
_("Merge all non-conflicting changes from left and right "
"panes"),
lambda x: self.merge_all_non_conflicting_changes()),
("CycleDocuments", None,
_("Cycle Through Documents"), "<control>Escape",
_("Move keyboard focus to the next document in this "
"comparison"),
self.action_cycle_documents),
)
toggle_actions = (
("LockScrolling", None, _("Lock Scrolling"), None,
_("Lock scrolling of all panes"),
self.on_action_lock_scrolling_toggled, True),
)
self.ui_file = paths.ui_dir("filediff-ui.xml")
self.actiongroup = gtk.ActionGroup('FilediffPopupActions')
self.actiongroup.set_translation_domain("meld")
self.actiongroup.add_actions(actions)
self.actiongroup.add_toggle_actions(toggle_actions)
self.main_actiongroup = None
self.findbar = findbar.FindBar(self.table)
self.widget.ensure_style()
self.on_style_set(self.widget, None)
self.widget.connect("style-set", self.on_style_set)
self.set_num_panes(num_panes)
gobject.idle_add( lambda *args: self.load_font()) # hack around Bug 316730
gnomeglade.connect_signal_handlers(self)
self.cursor = CursorDetails()
self.connect("current-diff-changed", self.on_current_diff_changed)
for t in self.textview:
t.connect("focus-in-event", self.on_current_diff_changed)
t.connect("focus-out-event", self.on_current_diff_changed)
self.linediffer.connect("diffs-changed", self.on_diffs_changed)
self.undosequence.connect("checkpointed", self.on_undo_checkpointed)
self.connect("next-conflict-changed", self.on_next_conflict_changed)
overwrite_label = gtk.Label()
overwrite_label.show()
cursor_label = gtk.Label()
cursor_label.show()
self.status_info_labels = [overwrite_label, cursor_label]
def get_keymask(self):
return self._keymask
def set_keymask(self, value):
if value & MASK_SHIFT:
mode = MODE_DELETE
elif value & MASK_CTRL:
mode = MODE_INSERT
else:
mode = MODE_REPLACE
self._keymask = value
self.emit("action-mode-changed", mode)
keymask = property(get_keymask, set_keymask)
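# Resolve themed colours for chunk fills, outlines, inline-change tags, the
# current-line highlight and syncpoints, falling back to hard-coded defaults,
# then hand the colour scheme to the diffmaps and linkmaps.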
def on_style_set(self, widget, prev_style):
style = widget.get_style()
lookup = lambda color_id, default: style.lookup_color(color_id) or \
gtk.gdk.color_parse(default)
for buf in self.textbuffer:
tag = buf.get_tag_table().lookup("inline")
tag.props.background = lookup("inline-bg", "LightSteelBlue2")
tag.props.foreground = lookup("inline-fg", "Red")
self.fill_colors = {"insert" : lookup("insert-bg", "DarkSeaGreen1"),
"delete" : lookup("insert-bg", "DarkSeaGreen1"),
"conflict": lookup("conflict-bg", "Pink"),
"replace" : lookup("replace-bg", "#ddeeff"),
"current-chunk-highlight":
lookup("current-chunk-highlight", '#ffffff')}
self.line_colors = {"insert" : lookup("insert-outline", "#77f077"),
"delete" : lookup("insert-outline", "#77f077"),
"conflict": lookup("conflict-outline", "#f0768b"),
"replace" : lookup("replace-outline", "#8bbff3")}
self.highlight_color = lookup("current-line-highlight", "#ffff00")
self.syncpoint_color = lookup("syncpoint-outline", "#555555")
for associated in self.diffmap + self.linkmap:
associated.set_color_scheme([self.fill_colors, self.line_colors])
self.queue_draw()
def on_focus_change(self):
self.keymask = 0
def on_container_switch_in_event(self, ui):
self.main_actiongroup = [a for a in ui.get_action_groups()
if a.get_name() == "MainActions"][0]
melddoc.MeldDoc.on_container_switch_in_event(self, ui)
# FIXME: If no focussed textview, action sensitivity will be unset
def on_text_filters_changed(self, app):
relevant_change = self.create_text_filters()
if relevant_change:
self.refresh_comparison()
def create_text_filters(self):
# In contrast to file filters, ordering of text filters can matter
old_active = [f.filter_string for f in self.text_filters if f.active]
new_active = [f.filter_string for f in app.text_filters if f.active]
active_filters_changed = old_active != new_active
self.text_filters = [copy.copy(f) for f in app.text_filters]
return active_filters_changed
def _disconnect_buffer_handlers(self):
for textview in self.textview:
textview.set_editable(0)
for buf in self.textbuffer:
assert hasattr(buf,"handlers")
for h in buf.handlers:
buf.disconnect(h)
def _connect_buffer_handlers(self):
for textview, buf in zip(self.textview, self.textbuffer):
textview.set_editable(buf.data.editable)
for buf in self.textbuffer:
id0 = buf.connect("insert-text", self.on_text_insert_text)
id1 = buf.connect("delete-range", self.on_text_delete_range)
id2 = buf.connect_after("insert-text", self.after_text_insert_text)
id3 = buf.connect_after("delete-range", self.after_text_delete_range)
id4 = buf.connect("notify::cursor-position",
self.on_cursor_position_changed)
buf.handlers = id0, id1, id2, id3, id4
# Abbreviations for insert and overwrite that fit in the status bar
_insert_overwrite_text = (_("INS"), _("OVR"))
# Abbreviation for line, column so that it will fit in the status bar
_line_column_text = _("Ln %i, Col %i")
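# Track the cursor per pane, update the Ln/Col and INS/OVR status labels,
# and emit the current-diff / next-diff / next-conflict signals whenever the
# chunk containing the cursor changes.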
def on_cursor_position_changed(self, buf, pspec, force=False):
pane = self.textbuffer.index(buf)
pos = buf.props.cursor_position
if pane == self.cursor.pane and pos == self.cursor.pos and not force:
return
self.cursor.pane, self.cursor.pos = pane, pos
cursor_it = buf.get_iter_at_offset(pos)
offset = cursor_it.get_line_offset()
line = cursor_it.get_line()
insert_overwrite = self._insert_overwrite_text[self.textview_overwrite]
line_column = self._line_column_text % (line + 1, offset + 1)
self.status_info_labels[0].set_text(insert_overwrite)
self.status_info_labels[1].set_text(line_column)
if line != self.cursor.line or force:
chunk, prev, next_ = self.linediffer.locate_chunk(pane, line)
if chunk != self.cursor.chunk or force:
self.cursor.chunk = chunk
self.emit("current-diff-changed")
if prev != self.cursor.prev or next_ != self.cursor.next or force:
self.emit("next-diff-changed", prev is not None,
next_ is not None)
prev_conflict, next_conflict = None, None
for conflict in self.linediffer.conflicts:
if prev is not None and conflict <= prev:
prev_conflict = conflict
if next_ is not None and conflict >= next_:
next_conflict = conflict
break
if prev_conflict != self.cursor.prev_conflict or \
next_conflict != self.cursor.next_conflict or force:
self.emit("next-conflict-changed", prev_conflict is not None,
next_conflict is not None)
self.cursor.prev, self.cursor.next = prev, next_
self.cursor.prev_conflict = prev_conflict
self.cursor.next_conflict = next_conflict
self.cursor.line, self.cursor.offset = line, offset
def on_current_diff_changed(self, widget, *args):
pane = self._get_focused_pane()
if pane != -1:
# While this *should* be redundant, it's possible for focus pane
# and cursor pane to be different in several situations.
pane = self.cursor.pane
chunk_id = self.cursor.chunk
if pane == -1 or chunk_id is None:
push_left, push_right, pull_left, pull_right, delete, \
copy_left, copy_right = (False,) * 7
else:
push_left, push_right, pull_left, pull_right, delete, \
copy_left, copy_right = (True,) * 7
# Push and Delete are active if the current pane has something to
# act on, and the target pane exists and is editable. Pull is
# sensitive if the source pane has something to get, and the
# current pane is editable. Copy actions are sensitive if the
# conditions for push are met, *and* there is some content in the
# target pane.
editable = self.textview[pane].get_editable()
editable_left = pane > 0 and self.textview[pane - 1].get_editable()
editable_right = pane < self.num_panes - 1 and \
self.textview[pane + 1].get_editable()
if pane == 0 or pane == 2:
chunk = self.linediffer.get_chunk(chunk_id, pane)
insert_chunk = chunk[1] == chunk[2]
delete_chunk = chunk[3] == chunk[4]
push_left = editable_left and not insert_chunk
push_right = editable_right and not insert_chunk
pull_left = pane == 2 and editable and not delete_chunk
pull_right = pane == 0 and editable and not delete_chunk
delete = editable and not insert_chunk
copy_left = push_left and not delete_chunk
copy_right = push_right and not delete_chunk
elif pane == 1:
chunk0 = self.linediffer.get_chunk(chunk_id, 1, 0)
chunk2 = None
if self.num_panes == 3:
chunk2 = self.linediffer.get_chunk(chunk_id, 1, 2)
left_mid_exists = chunk0 is not None and chunk0[1] != chunk0[2]
left_exists = chunk0 is not None and chunk0[3] != chunk0[4]
right_mid_exists = chunk2 is not None and chunk2[1] != chunk2[2]
right_exists = chunk2 is not None and chunk2[3] != chunk2[4]
push_left = editable_left and left_mid_exists
push_right = editable_right and right_mid_exists
pull_left = editable and left_exists
pull_right = editable and right_exists
delete = editable and (left_mid_exists or right_mid_exists)
copy_left = push_left and left_exists
copy_right = push_right and right_exists
self.actiongroup.get_action("PushLeft").set_sensitive(push_left)
self.actiongroup.get_action("PushRight").set_sensitive(push_right)
self.actiongroup.get_action("PullLeft").set_sensitive(pull_left)
self.actiongroup.get_action("PullRight").set_sensitive(pull_right)
self.actiongroup.get_action("Delete").set_sensitive(delete)
self.actiongroup.get_action("CopyLeftUp").set_sensitive(copy_left)
self.actiongroup.get_action("CopyLeftDown").set_sensitive(copy_left)
self.actiongroup.get_action("CopyRightUp").set_sensitive(copy_right)
self.actiongroup.get_action("CopyRightDown").set_sensitive(copy_right)
# FIXME: don't queue_draw() on everything... just on what changed
self.queue_draw()
def on_next_conflict_changed(self, doc, have_prev, have_next):
self.actiongroup.get_action("PrevConflict").set_sensitive(have_prev)
self.actiongroup.get_action("NextConflict").set_sensitive(have_next)
def on_next_conflict(self, direction):
if direction == gtk.gdk.SCROLL_DOWN:
target = self.cursor.next_conflict
else: # direction == gtk.gdk.SCROLL_UP
target = self.cursor.prev_conflict
if target is None:
return
buf = self.textbuffer[self.cursor.pane]
chunk = self.linediffer.get_chunk(target, self.cursor.pane)
buf.place_cursor(buf.get_iter_at_line(chunk[1]))
self.textview[self.cursor.pane].scroll_to_mark(buf.get_insert(), 0.1)
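# The chunk-action handlers below resolve the chunk under the cursor between
# the focused pane and a neighbouring pane, then delegate to replace_chunk(),
# copy_chunk() or delete_chunk().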
def push_change(self, direction):
src = self._get_focused_pane()
dst = src + direction
chunk = self.linediffer.get_chunk(self.cursor.chunk, src, dst)
assert(src != -1 and self.cursor.chunk is not None)
assert(dst in (0, 1, 2))
assert(chunk is not None)
self.replace_chunk(src, dst, chunk)
def pull_change(self, direction):
dst = self._get_focused_pane()
src = dst + direction
chunk = self.linediffer.get_chunk(self.cursor.chunk, src, dst)
assert(dst != -1 and self.cursor.chunk is not None)
assert(src in (0, 1, 2))
assert(chunk is not None)
self.replace_chunk(src, dst, chunk)
def copy_change(self, direction, copy_direction):
src = self._get_focused_pane()
dst = src + direction
chunk = self.linediffer.get_chunk(self.cursor.chunk, src, dst)
assert(src != -1 and self.cursor.chunk is not None)
assert(dst in (0, 1, 2))
assert(chunk is not None)
copy_up = True if copy_direction < 0 else False
self.copy_chunk(src, dst, chunk, copy_up)
def pull_all_non_conflicting_changes(self, direction):
assert direction in (-1, 1)
dst = self._get_focused_pane()
src = dst + direction
assert src in range(self.num_panes)
merger = merge.Merger()
merger.differ = self.linediffer
merger.texts = self.buffer_texts
for mergedfile in merger.merge_2_files(src, dst):
pass
self._sync_vscroll_lock = True
self.on_textbuffer__begin_user_action()
self.textbuffer[dst].set_text(mergedfile)
self.on_textbuffer__end_user_action()
def resync():
self._sync_vscroll_lock = False
self._sync_vscroll(self.scrolledwindow[src].get_vadjustment(), src)
self.scheduler.add_task(resync)
def merge_all_non_conflicting_changes(self):
dst = 1
merger = merge.Merger()
merger.differ = self.linediffer
merger.texts = self.buffer_texts
for mergedfile in merger.merge_3_files(False):
pass
self._sync_vscroll_lock = True
self.on_textbuffer__begin_user_action()
self.textbuffer[dst].set_text(mergedfile)
self.on_textbuffer__end_user_action()
def resync():
self._sync_vscroll_lock = False
self._sync_vscroll(self.scrolledwindow[0].get_vadjustment(), 0)
self.scheduler.add_task(resync)
def delete_change(self, widget):
pane = self._get_focused_pane()
chunk = self.linediffer.get_chunk(self.cursor.chunk, pane)
assert(pane != -1 and self.cursor.chunk is not None)
assert(chunk is not None)
self.delete_chunk(pane, chunk)
def _synth_chunk(self, pane0, pane1, line):
"""Returns the Same chunk that would exist at
the given location if we didn't remove Same chunks"""
# This method is a hack around our existing diffutil data structures;
# getting rid of the Same chunk removal is difficult, as several places
# have baked in the assumption of only being given changed blocks.
buf0, buf1 = self.textbuffer[pane0], self.textbuffer[pane1]
start0, end0 = 0, buf0.get_line_count() - 1
start1, end1 = 0, buf1.get_line_count() - 1
# This hack is required when pane0's prev/next chunk doesn't exist
# (i.e., is Same) between pane0 and pane1.
prev_chunk0, prev_chunk1, next_chunk0, next_chunk1 = (None,) * 4
_, prev, next_ = self.linediffer.locate_chunk(pane0, line)
if prev is not None:
while prev >= 0:
prev_chunk0 = self.linediffer.get_chunk(prev, pane0, pane1)
prev_chunk1 = self.linediffer.get_chunk(prev, pane1, pane0)
if None not in (prev_chunk0, prev_chunk1):
start0 = prev_chunk0[2]
start1 = prev_chunk1[2]
break
prev -= 1
if next_ is not None:
while next_ < self.linediffer.diff_count():
next_chunk0 = self.linediffer.get_chunk(next_, pane0, pane1)
next_chunk1 = self.linediffer.get_chunk(next_, pane1, pane0)
if None not in (next_chunk0, next_chunk1):
end0 = next_chunk0[1]
end1 = next_chunk1[1]
break
next_ += 1
return "Same", start0, end0, start1, end1
def _corresponding_chunk_line(self, chunk, line, pane, new_pane):
"""Approximates the corresponding line between panes"""
old_buf, new_buf = self.textbuffer[pane], self.textbuffer[new_pane]
# Special-case cross-pane jumps
if (pane == 0 and new_pane == 2) or (pane == 2 and new_pane == 0):
proxy = self._corresponding_chunk_line(chunk, line, pane, 1)
return self._corresponding_chunk_line(chunk, proxy, 1, new_pane)
# Either we are currently in an identifiable chunk, or we are in a Same
# chunk; if we establish the start/end of that chunk in both panes, we
# can figure out what our new offset should be.
cur_chunk = None
if chunk is not None:
cur_chunk = self.linediffer.get_chunk(chunk, pane, new_pane)
if cur_chunk is None:
cur_chunk = self._synth_chunk(pane, new_pane, line)
cur_start, cur_end, new_start, new_end = cur_chunk[1:5]
# If the new buffer's current cursor is already in the correct chunk,
# assume that we have in-progress editing, and don't move it.
cursor_it = new_buf.get_iter_at_mark(new_buf.get_insert())
cursor_line = cursor_it.get_line()
cursor_chunk, _, _ = self.linediffer.locate_chunk(new_pane, cursor_line)
if cursor_chunk is not None:
already_in_chunk = cursor_chunk == chunk
else:
cursor_chunk = self._synth_chunk(pane, new_pane, cursor_line)
already_in_chunk = cursor_chunk[3] == new_start and \
cursor_chunk[4] == new_end
if already_in_chunk:
new_line = cursor_line
else:
# Guess where to put the cursor: in the same chunk, at about the
# same place within the chunk, calculated proportionally by line.
# Insert chunks and one-line chunks are placed at the top.
if cur_end == cur_start:
chunk_offset = 0.0
else:
chunk_offset = (line - cur_start) / float(cur_end - cur_start)
new_line = new_start + int(chunk_offset * (new_end - new_start))
return new_line
def action_cycle_documents(self, widget):
pane = self._get_focused_pane()
new_pane = (pane + 1) % self.num_panes
chunk, line = self.cursor.chunk, self.cursor.line
new_line = self._corresponding_chunk_line(chunk, line, pane, new_pane)
new_buf = self.textbuffer[new_pane]
self.textview[new_pane].grab_focus()
new_buf.place_cursor(new_buf.get_iter_at_line(new_line))
self.textview[new_pane].scroll_to_mark(new_buf.get_insert(), 0.1)
def on_textview_focus_in_event(self, view, event):
self.focus_pane = view
self.findbar.textview = view
self.on_cursor_position_changed(view.get_buffer(), None, True)
self._set_save_action_sensitivity()
self._set_merge_action_sensitivity()
self.update_text_actions_sensitivity()
def on_textview_focus_out_event(self, view, event):
self._set_merge_action_sensitivity()
def _after_text_modified(self, buffer, startline, sizechange):
if self.num_panes > 1:
pane = self.textbuffer.index(buffer)
self.linediffer.change_sequence(pane, startline, sizechange,
self.buffer_filtered)
# FIXME: diff-changed signal for the current buffer would be cleaner
focused_pane = self._get_focused_pane()
if focused_pane != -1:
self.on_cursor_position_changed(self.textbuffer[focused_pane],
None, True)
self.queue_draw()
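# _filter_text() applies the active text filters before comparison: matches
# with capture groups have only the grouped text removed, other matches are
# removed entirely; filters must never change the number of lines.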
def _filter_text(self, txt):
def killit(m):
assert m.group().count("\n") == 0
if len(m.groups()):
s = m.group()
for g in m.groups():
if g:
s = s.replace(g,"")
return s
else:
return ""
try:
for filt in self.text_filters:
if filt.active:
txt = filt.filter.sub(killit, txt)
except AssertionError:
if not self.warned_bad_comparison:
misc.run_dialog(_("Filter '%s' changed the number of lines in the file. "
"Comparison will be incorrect. See the user manual for more details.") % filt.label)
self.warned_bad_comparison = True
return txt
def after_text_insert_text(self, buf, it, newtext, textlen):
start_mark = buf.get_mark("insertion-start")
starting_at = buf.get_iter_at_mark(start_mark).get_line()
buf.delete_mark(start_mark)
lines_added = it.get_line() - starting_at
self._after_text_modified(buf, starting_at, lines_added)
def after_text_delete_range(self, buffer, it0, it1):
starting_at = it0.get_line()
assert self.deleted_lines_pending != -1
self._after_text_modified(buffer, starting_at, -self.deleted_lines_pending)
self.deleted_lines_pending = -1
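# Recompute line-height and character-width metrics and tab stops from the
# preferred font, apply them to every textview, and redraw the linkmaps,
# whose geometry depends on these metrics.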
def load_font(self):
fontdesc = pango.FontDescription(self.prefs.get_current_font())
context = self.textview0.get_pango_context()
metrics = context.get_metrics( fontdesc, context.get_language() )
line_height_points = metrics.get_ascent() + metrics.get_descent()
self.pixels_per_line = line_height_points // 1024
self.pango_char_width = metrics.get_approximate_char_width()
tabs = pango.TabArray(10, 0)
tab_size = self.prefs.tab_size
for i in range(10):
tabs.set_tab(i, pango.TAB_LEFT, i*tab_size*self.pango_char_width)
for i in range(3):
self.textview[i].modify_font(fontdesc)
self.textview[i].set_tabs(tabs)
for i in range(2):
self.linkmap[i].queue_draw()
def on_preference_changed(self, key, value):
if key == "tab_size":
tabs = pango.TabArray(10, 0)
for i in range(10):
tabs.set_tab(i, pango.TAB_LEFT, i*value*self.pango_char_width)
for i in range(3):
self.textview[i].set_tabs(tabs)
for t in self.textview:
srcviewer.set_tab_width(t, value)
elif key == "use_custom_font" or key == "custom_font":
self.load_font()
elif key == "show_line_numbers":
for t in self.textview:
t.set_show_line_numbers( value )
elif key == "show_whitespace":
spaces_flag = srcviewer.spaces_flag if value else 0
for v in self.textview:
v.set_draw_spaces(spaces_flag)
elif key == "use_syntax_highlighting":
for i in range(self.num_panes):
srcviewer.set_highlight_syntax(self.textbuffer[i], value)
elif key == "edit_wrap_lines":
for t in self.textview:
t.set_wrap_mode(self.prefs.edit_wrap_lines)
# FIXME: On changing wrap mode, we get one redraw using cached
# coordinates, followed by a second redraw (e.g., on refocus) with
# correct coordinates. Overly-aggressive textview lazy calculation?
self.diffmap0.queue_draw()
self.diffmap1.queue_draw()
elif key == "spaces_instead_of_tabs":
for t in self.textview:
t.set_insert_spaces_instead_of_tabs(value)
elif key == "ignore_blank_lines":
self.linediffer.ignore_blanks = self.prefs.ignore_blank_lines
self.refresh_comparison()
def on_key_press_event(self, object, event):
# The correct way to handle these modifiers would be to use
# gdk_keymap_get_modifier_state method, available from GDK 3.4.
keymap = gtk.gdk.keymap_get_default()
x = self.keylookup.get(keymap.translate_keyboard_state(
event.hardware_keycode, 0, event.group)[0], 0)
if self.keymask | x != self.keymask:
self.keymask |= x
elif event.keyval == gtk.keysyms.Escape:
self.findbar.hide()
def on_key_release_event(self, object, event):
keymap = gtk.gdk.keymap_get_default()
x = self.keylookup.get(keymap.translate_keyboard_state(
event.hardware_keycode, 0, event.group)[0], 0)
if self.keymask & ~x != self.keymask:
self.keymask &= ~x
def check_save_modified(self, label=None):
response = gtk.RESPONSE_OK
modified = [b.data.modified for b in self.textbuffer]
if True in modified:
ui_path = paths.ui_dir("filediff.ui")
dialog = gnomeglade.Component(ui_path, "check_save_dialog")
dialog.widget.set_transient_for(self.widget.get_toplevel())
if label:
dialog.widget.props.text = label
# FIXME: Should be packed into dialog.widget.get_message_area(),
# but this is unbound on currently required PyGTK.
buttons = []
for i in range(self.num_panes):
button = gtk.CheckButton(self.textbuffer[i].data.label)
button.set_use_underline(False)
button.set_sensitive(modified[i])
button.set_active(modified[i])
dialog.extra_vbox.pack_start(button, expand=True, fill=True)
buttons.append(button)
dialog.extra_vbox.show_all()
response = dialog.widget.run()
try_save = [b.get_active() for b in buttons]
dialog.widget.destroy()
if response == gtk.RESPONSE_OK:
for i in range(self.num_panes):
if try_save[i]:
if not self.save_file(i):
return gtk.RESPONSE_CANCEL
elif response == gtk.RESPONSE_DELETE_EVENT:
response = gtk.RESPONSE_CANCEL
return response
def on_delete_event(self, appquit=0):
response = self.check_save_modified()
if response == gtk.RESPONSE_OK:
for h in self.app_handlers:
app.disconnect(h)
return response
#
# text buffer undo/redo
#
def on_undo_activate(self):
if self.undosequence.can_undo():
self.undosequence.undo()
def on_redo_activate(self):
if self.undosequence.can_redo():
self.undosequence.redo()
def on_textbuffer__begin_user_action(self, *buffer):
self.undosequence.begin_group()
def on_textbuffer__end_user_action(self, *buffer):
self.undosequence.end_group()
def on_text_insert_text(self, buf, it, text, textlen):
text = text_type(text, 'utf8')
self.undosequence.add_action(
meldbuffer.BufferInsertionAction(buf, it.get_offset(), text))
buf.create_mark("insertion-start", it, True)
def on_text_delete_range(self, buf, it0, it1):
text = text_type(buf.get_text(it0, it1, False), 'utf8')
assert self.deleted_lines_pending == -1
self.deleted_lines_pending = it1.get_line() - it0.get_line()
self.undosequence.add_action(
meldbuffer.BufferDeletionAction(buf, it0.get_offset(), text))
def on_undo_checkpointed(self, undosequence, buf, checkpointed):
self.set_buffer_modified(buf, not checkpointed)
#
#
#
def open_external(self):
pane = self._get_focused_pane()
if pane >= 0:
if self.textbuffer[pane].data.filename:
pos = self.textbuffer[pane].props.cursor_position
cursor_it = self.textbuffer[pane].get_iter_at_offset(pos)
line = cursor_it.get_line() + 1
self._open_files([self.textbuffer[pane].data.filename], line)
def update_text_actions_sensitivity(self, *args):
widget = self.focus_pane
if not widget:
cut, copy, paste = False, False, False
else:
cut = copy = widget.get_buffer().get_has_selection()
# Ideally, this would check whether the clipboard included
# something pasteable. However, there is no changed signal.
# widget.get_clipboard(
# gtk.gdk.SELECTION_CLIPBOARD).wait_is_text_available()
paste = widget.get_editable()
for action, sens in zip(("Cut", "Copy", "Paste"), (cut, copy, paste)):
self.main_actiongroup.get_action(action).set_sensitive(sens)
def get_selected_text(self):
"""Returns selected text of active pane"""
pane = self._get_focused_pane()
if pane != -1:
buf = self.textbuffer[pane]
sel = buf.get_selection_bounds()
if sel:
return text_type(buf.get_text(sel[0], sel[1], False), 'utf8')
return None
def on_find_activate(self, *args):
selected_text = self.get_selected_text()
self.findbar.start_find(self.focus_pane, selected_text)
self.keymask = 0
def on_replace_activate(self, *args):
selected_text = self.get_selected_text()
self.findbar.start_replace(self.focus_pane, selected_text)
self.keymask = 0
def on_find_next_activate(self, *args):
self.findbar.start_find_next(self.focus_pane)
def on_find_previous_activate(self, *args):
self.findbar.start_find_previous(self.focus_pane)
def on_filediff__key_press_event(self, entry, event):
if event.keyval == gtk.keysyms.Escape:
self.findbar.hide()
def on_scrolledwindow__size_allocate(self, scrolledwindow, allocation):
index = self.scrolledwindow.index(scrolledwindow)
if index == 0 or index == 1:
self.linkmap[0].queue_draw()
if index == 1 or index == 2:
self.linkmap[1].queue_draw()
def on_textview_popup_menu(self, textview):
self.popup_menu.popup(None, None, None, 0,
gtk.get_current_event_time())
return True
def on_textview_button_press_event(self, textview, event):
if event.button == 3:
textview.grab_focus()
self.popup_menu.popup(None, None, None, event.button, event.time)
return True
return False
def on_textview_toggle_overwrite(self, view):
self.textview_overwrite = not self.textview_overwrite
for v,h in zip(self.textview, self.textview_overwrite_handlers):
v.disconnect(h)
if v != view:
v.emit("toggle-overwrite")
self.textview_overwrite_handlers = [ t.connect("toggle-overwrite", self.on_textview_toggle_overwrite) for t in self.textview ]
self.on_cursor_position_changed(view.get_buffer(), None, True)
#
# text buffer loading/saving
#
def set_labels(self, labels):
labels = labels[:len(self.textbuffer)]
for label, buf in zip(labels, self.textbuffer):
if label:
buf.data.label = label
def set_merge_output_file(self, filename):
if len(self.textbuffer) < 2:
return
buf = self.textbuffer[1]
buf.data.savefile = os.path.abspath(filename)
buf.data.set_label(filename)
self.set_buffer_writable(buf, os.access(buf.data.savefile, os.W_OK))
self.fileentry[1].set_filename(os.path.abspath(filename))
self.recompute_label()
def _set_save_action_sensitivity(self):
pane = self._get_focused_pane()
modified = False if pane == -1 else self.textbuffer[pane].data.modified
if self.main_actiongroup:
self.main_actiongroup.get_action("Save").set_sensitive(modified)
any_modified = any(b.data.modified for b in self.textbuffer)
self.actiongroup.get_action("SaveAll").set_sensitive(any_modified)
def recompute_label(self):
self._set_save_action_sensitivity()
filenames = []
for i in range(self.num_panes):
filenames.append(self.textbuffer[i].data.label)
shortnames = misc.shorten_names(*filenames)
for i in range(self.num_panes):
stock = None
if self.textbuffer[i].data.modified:
shortnames[i] += "*"
if self.textbuffer[i].data.writable:
stock = gtk.STOCK_SAVE
else:
stock = gtk.STOCK_SAVE_AS
if stock:
self.statusimage[i].show()
self.statusimage[i].set_from_stock(stock, gtk.ICON_SIZE_MENU)
self.statusimage[i].set_size_request(self.diffmap[0].size_request()[0],-1)
else:
self.statusimage[i].hide()
self.label_text = " : ".join(shortnames)
self.tooltip_text = self.label_text
self.label_changed()
def set_files(self, files):
"""Set num panes to len(files) and load each file given.
If an element is None, the text of a pane is left as is.
"""
self._disconnect_buffer_handlers()
files = list(files)
for i, f in enumerate(files):
if not f:
continue
if not isinstance(f, unicode):
files[i] = f = f.decode('utf8')
absfile = os.path.abspath(f)
self.fileentry[i].set_filename(absfile)
self.fileentry[i].prepend_history(absfile)
self.textbuffer[i].reset_buffer(absfile)
self.msgarea_mgr[i].clear()
self.recompute_label()
self.textview[len(files) >= 2].grab_focus()
self._connect_buffer_handlers()
self.scheduler.add_task(self._set_files_internal(files))
def get_comparison(self):
files = [b.data.filename for b in self.textbuffer[:self.num_panes]]
return recent.TYPE_FILE, files
def _load_files(self, files, textbuffers):
self.undosequence.clear()
yield _("[%s] Set num panes") % self.label_text
self.set_num_panes( len(files) )
self._disconnect_buffer_handlers()
self.linediffer.clear()
self.queue_draw()
try_codecs = self.prefs.text_codecs.split() or ['utf_8', 'utf_16']
yield _("[%s] Opening files") % self.label_text
tasks = []
def add_dismissable_msg(pane, icon, primary, secondary):
msgarea = self.msgarea_mgr[pane].new_from_text_and_icon(
icon, primary, secondary)
button = msgarea.add_stock_button_with_text(_("Hi_de"),
gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE)
msgarea.connect("response",
lambda *args: self.msgarea_mgr[pane].clear())
msgarea.show_all()
return msgarea
for pane, filename in enumerate(files):
buf = textbuffers[pane]
if filename:
try:
handle = io.open(filename, "r", encoding=try_codecs[0])
task = TaskEntry(filename, handle, buf, try_codecs[:],
pane, False)
tasks.append(task)
except (IOError, LookupError) as e:
buf.delete(*buf.get_bounds())
add_dismissable_msg(pane, gtk.STOCK_DIALOG_ERROR,
_("Could not read file"), str(e))
yield _("[%s] Reading files") % self.label_text
while len(tasks):
for t in tasks[:]:
try:
nextbit = t.file.read(4096)
if nextbit.find("\x00") != -1:
t.buf.delete(*t.buf.get_bounds())
filename = gobject.markup_escape_text(t.filename)
add_dismissable_msg(t.pane, gtk.STOCK_DIALOG_ERROR,
_("Could not read file"),
_("%s appears to be a binary file.") % filename)
tasks.remove(t)
continue
except ValueError as err:
t.codec.pop(0)
if len(t.codec):
t.buf.delete(*t.buf.get_bounds())
t.file = io.open(t.filename, "r", encoding=t.codec[0])
else:
t.buf.delete(*t.buf.get_bounds())
filename = gobject.markup_escape_text(t.filename)
add_dismissable_msg(t.pane, gtk.STOCK_DIALOG_ERROR,
_("Could not read file"),
_("%s is not in encodings: %s") %
(filename, try_codecs))
tasks.remove(t)
except IOError as ioerr:
add_dismissable_msg(t.pane, gtk.STOCK_DIALOG_ERROR,
_("Could not read file"), str(ioerr))
tasks.remove(t)
else:
# The handling here avoids inserting split CR/LF pairs into
# GtkTextBuffers; this is relevant only when universal
# newline support is unavailable or broken.
if t.was_cr:
nextbit = "\r" + nextbit
t.was_cr = False
if len(nextbit):
if nextbit[-1] == "\r" and len(nextbit) > 1:
t.was_cr = True
nextbit = nextbit[0:-1]
t.buf.insert(t.buf.get_end_iter(), nextbit)
else:
if t.buf.data.savefile:
writable = os.access(t.buf.data.savefile, os.W_OK)
else:
writable = os.access(t.filename, os.W_OK)
self.set_buffer_writable(t.buf, writable)
t.buf.data.encoding = t.codec[0]
if hasattr(t.file, "newlines"):
t.buf.data.newlines = t.file.newlines
tasks.remove(t)
yield 1
for b in self.textbuffer:
self.undosequence.checkpoint(b)
def _diff_files(self, refresh=False):
yield _("[%s] Computing differences") % self.label_text
texts = self.buffer_filtered[:self.num_panes]
step = self.linediffer.set_sequences_iter(texts)
while next(step) is None:
yield 1
if not refresh:
chunk, prev, next_ = self.linediffer.locate_chunk(1, 0)
self.cursor.next = chunk
if self.cursor.next is None:
self.cursor.next = next_
for buf in self.textbuffer:
buf.place_cursor(buf.get_start_iter())
if self.cursor.next is not None:
self.scheduler.add_task(
lambda: self.next_diff(gtk.gdk.SCROLL_DOWN, True), True)
else:
buf = self.textbuffer[1 if self.num_panes > 1 else 0]
self.on_cursor_position_changed(buf, None, True)
self.queue_draw()
self._connect_buffer_handlers()
self._set_merge_action_sensitivity()
langs = []
for i in range(self.num_panes):
filename = self.textbuffer[i].data.filename
if filename:
langs.append(srcviewer.get_language_from_file(filename))
else:
langs.append(None)
# If we have only one identified language then we assume that all of
# the files are actually of that type.
real_langs = [l for l in langs if l]
if real_langs and real_langs.count(real_langs[0]) == len(real_langs):
langs = (real_langs[0],) * len(langs)
for i in range(self.num_panes):
srcviewer.set_language(self.textbuffer[i], langs[i])
srcviewer.set_highlight_syntax(self.textbuffer[i],
self.prefs.use_syntax_highlighting)
def _set_files_internal(self, files):
for i in self._load_files(files, self.textbuffer):
yield i
for i in self._diff_files():
yield i
def refresh_comparison(self):
"""Refresh the view by clearing and redoing all comparisons"""
self._disconnect_buffer_handlers()
self.linediffer.clear()
for buf in self.textbuffer:
tag = buf.get_tag_table().lookup("inline")
buf.remove_tag(tag, buf.get_start_iter(), buf.get_end_iter())
self.queue_draw()
self.scheduler.add_task(self._diff_files(refresh=True))
def _set_merge_action_sensitivity(self):
pane = self._get_focused_pane()
if pane != -1:
editable = self.textview[pane].get_editable()
mergeable = self.linediffer.has_mergeable_changes(pane)
else:
editable = False
mergeable = (False, False)
self.actiongroup.get_action("MergeFromLeft").set_sensitive(mergeable[0] and editable)
self.actiongroup.get_action("MergeFromRight").set_sensitive(mergeable[1] and editable)
if self.num_panes == 3 and self.textview[1].get_editable():
mergeable = self.linediffer.has_mergeable_changes(1)
else:
mergeable = (False, False)
self.actiongroup.get_action("MergeAll").set_sensitive(mergeable[0] or mergeable[1])
def on_diffs_changed(self, linediffer, chunk_changes):
removed_chunks, added_chunks, modified_chunks = chunk_changes
# We need to clear removed and modified chunks, and need to
# re-highlight added and modified chunks.
need_clearing = sorted(list(removed_chunks))
need_highlighting = sorted(list(added_chunks) + [modified_chunks])
alltags = [b.get_tag_table().lookup("inline") for b in self.textbuffer]
for chunk in need_clearing:
for i, c in enumerate(chunk):
if not c or c[0] != "replace":
continue
to_idx = 2 if i == 1 else 0
bufs = self.textbuffer[1], self.textbuffer[to_idx]
tags = alltags[1], alltags[to_idx]
starts = [b.get_iter_at_line_or_eof(l) for b, l in
zip(bufs, (c[1], c[3]))]
ends = [b.get_iter_at_line_or_eof(l) for b, l in
zip(bufs, (c[2], c[4]))]
bufs[0].remove_tag(tags[0], starts[0], ends[0])
bufs[1].remove_tag(tags[1], starts[1], ends[1])
for chunk in need_highlighting:
clear = chunk == modified_chunks
for i, c in enumerate(chunk):
if not c or c[0] != "replace":
continue
to_idx = 2 if i == 1 else 0
bufs = self.textbuffer[1], self.textbuffer[to_idx]
tags = alltags[1], alltags[to_idx]
starts = [b.get_iter_at_line_or_eof(l) for b, l in
zip(bufs, (c[1], c[3]))]
ends = [b.get_iter_at_line_or_eof(l) for b, l in
zip(bufs, (c[2], c[4]))]
# We don't use self.buffer_texts here, as removing line
# breaks messes with inline highlighting in CRLF cases
text1 = bufs[0].get_text(starts[0], ends[0], False)
text1 = text_type(text1, 'utf8')
textn = bufs[1].get_text(starts[1], ends[1], False)
textn = text_type(textn, 'utf8')
# Bail on long sequences, rather than try a slow comparison
inline_limit = 10000
if len(text1) + len(textn) > inline_limit and \
not self.force_highlight:
for i in range(2):
bufs[i].apply_tag(tags[i], starts[i], ends[i])
self._prompt_long_highlighting()
continue
def apply_highlight(bufs, tags, starts, ends, texts, matches):
starts = [bufs[0].get_iter_at_mark(starts[0]),
bufs[1].get_iter_at_mark(starts[1])]
ends = [bufs[0].get_iter_at_mark(ends[0]),
bufs[1].get_iter_at_mark(ends[1])]
text1 = bufs[0].get_text(starts[0], ends[0], False)
text1 = text_type(text1, 'utf8')
textn = bufs[1].get_text(starts[1], ends[1], False)
textn = text_type(textn, 'utf8')
if texts != (text1, textn):
return
# Remove equal matches of size less than 3; highlight
# the remainder.
matches = [m for m in matches if m.tag != "equal" or
(m.end_a - m.start_a < 3) or
(m.end_b - m.start_b < 3)]
for i in range(2):
start, end = starts[i].copy(), starts[i].copy()
offset = start.get_offset()
for o in matches:
start.set_offset(offset + o[1 + 2 * i])
end.set_offset(offset + o[2 + 2 * i])
bufs[i].apply_tag(tags[i], start, end)
if clear:
bufs[0].remove_tag(tags[0], starts[0], ends[0])
bufs[1].remove_tag(tags[1], starts[1], ends[1])
starts = [bufs[0].create_mark(None, starts[0], True),
bufs[1].create_mark(None, starts[1], True)]
ends = [bufs[0].create_mark(None, ends[0], True),
bufs[1].create_mark(None, ends[1], True)]
match_cb = functools.partial(apply_highlight, bufs, tags,
starts, ends, (text1, textn))
self._cached_match.match(text1, textn, match_cb)
self._cached_match.clean(self.linediffer.diff_count())
self._set_merge_action_sensitivity()
if self.linediffer.sequences_identical():
error_message = True in [m.has_message() for m in self.msgarea_mgr]
if self.num_panes == 1 or error_message:
return
for index, mgr in enumerate(self.msgarea_mgr):
secondary_text = None
# TODO: Currently this only checks to see whether text filters
# are active, and may be altering the comparison. It would be
# better if we only showed this message if the filters *did*
# change the text in question.
active_filters = any([f.active for f in self.text_filters])
if active_filters:
secondary_text = _("Text filters are being used, and may "
"be masking differences between files. "
"Would you like to compare the "
"unfiltered files?")
msgarea = mgr.new_from_text_and_icon(gtk.STOCK_INFO,
_("Files are identical"),
secondary_text)
mgr.set_msg_id(FileDiff.MSG_SAME)
button = msgarea.add_stock_button_with_text(_("Hide"),
gtk.STOCK_CLOSE,
gtk.RESPONSE_CLOSE)
if index == 0:
button.props.label = _("Hi_de")
if active_filters:
msgarea.add_button(_("Show without filters"),
gtk.RESPONSE_OK)
msgarea.connect("response", self.on_msgarea_identical_response)
msgarea.show_all()
else:
for m in self.msgarea_mgr:
if m.get_msg_id() == FileDiff.MSG_SAME:
m.clear()
def _prompt_long_highlighting(self):
def on_msgarea_highlighting_response(msgarea, respid):
for mgr in self.msgarea_mgr:
mgr.clear()
if respid == gtk.RESPONSE_OK:
self.force_highlight = True
self.refresh_comparison()
for index, mgr in enumerate(self.msgarea_mgr):
msgarea = mgr.new_from_text_and_icon(
gtk.STOCK_INFO,
_("Change highlighting incomplete"),
_("Some changes were not highlighted because they were too "
"large. You can force Meld to take longer to highlight "
"larger changes, though this may be slow."))
mgr.set_msg_id(FileDiff.MSG_SLOW_HIGHLIGHT)
button = msgarea.add_stock_button_with_text(
_("Hide"), gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE)
if index == 0:
button.props.label = _("Hi_de")
button = msgarea.add_button(
_("Keep highlighting"), gtk.RESPONSE_OK)
if index == 0:
button.props.label = _("_Keep highlighting")
msgarea.connect("response",
on_msgarea_highlighting_response)
msgarea.show_all()
def on_msgarea_identical_response(self, msgarea, respid):
for mgr in self.msgarea_mgr:
mgr.clear()
if respid == gtk.RESPONSE_OK:
self.text_filters = []
self.refresh_comparison()
def on_textview_expose_event(self, textview, event):
if self.num_panes == 1:
return
if event.window != textview.get_window(gtk.TEXT_WINDOW_TEXT) \
and event.window != textview.get_window(gtk.TEXT_WINDOW_LEFT):
return
# Hack to redraw the line number gutter used by post-2.10 GtkSourceView
if event.window == textview.get_window(gtk.TEXT_WINDOW_LEFT) and \
self.in_nested_textview_gutter_expose:
self.in_nested_textview_gutter_expose = False
return
visible = textview.get_visible_rect()
pane = self.textview.index(textview)
textbuffer = textview.get_buffer()
area = event.area
x, y = textview.window_to_buffer_coords(gtk.TEXT_WINDOW_WIDGET,
area.x, area.y)
bounds = (textview.get_line_num_for_y(y),
textview.get_line_num_for_y(y + area.height + 1))
width, height = textview.allocation.width, textview.allocation.height
context = event.window.cairo_create()
context.rectangle(area.x, area.y, area.width, area.height)
context.clip()
context.set_line_width(1.0)
for change in self.linediffer.single_changes(pane, bounds):
ypos0 = textview.get_y_for_line_num(change[1]) - visible.y
ypos1 = textview.get_y_for_line_num(change[2]) - visible.y
context.rectangle(-0.5, ypos0 - 0.5, width + 1, ypos1 - ypos0)
if change[1] != change[2]:
context.set_source_color(self.fill_colors[change[0]])
context.fill_preserve()
if self.linediffer.locate_chunk(pane, change[1])[0] == self.cursor.chunk:
h = self.fill_colors['current-chunk-highlight']
context.set_source_rgba(
h.red_float, h.green_float, h.blue_float, 0.5)
context.fill_preserve()
context.set_source_color(self.line_colors[change[0]])
context.stroke()
if textview.is_focus() and self.cursor.line is not None:
it = textbuffer.get_iter_at_line(self.cursor.line)
ypos, line_height = textview.get_line_yrange(it)
context.save()
context.rectangle(0, ypos - visible.y, width, line_height)
context.clip()
context.set_source_color(self.highlight_color)
context.paint_with_alpha(0.25)
context.restore()
for syncpoint in [p[pane] for p in self.syncpoints]:
if bounds[0] <= syncpoint <= bounds[1]:
ypos = textview.get_y_for_line_num(syncpoint) - visible.y
context.rectangle(-0.5, ypos - 0.5, width + 1, 1)
context.set_source_color(self.syncpoint_color)
context.stroke()
current_time = glib.get_current_time()
new_anim_chunks = []
for c in self.animating_chunks[pane]:
percent = min(1.0, (current_time - c.start_time) / c.duration)
rgba_pairs = zip(c.start_rgba, c.end_rgba)
rgba = [s + (e - s) * percent for s, e in rgba_pairs]
it = textbuffer.get_iter_at_mark(c.start_mark)
ystart, _ = textview.get_line_yrange(it)
it = textbuffer.get_iter_at_mark(c.end_mark)
yend, _ = textview.get_line_yrange(it)
if ystart == yend:
ystart -= 1
context.set_source_rgba(*rgba)
context.rectangle(0, ystart - visible.y, width, yend - ystart)
context.fill()
if current_time <= c.start_time + c.duration:
new_anim_chunks.append(c)
else:
textbuffer.delete_mark(c.start_mark)
textbuffer.delete_mark(c.end_mark)
self.animating_chunks[pane] = new_anim_chunks
if self.animating_chunks[pane] and self.anim_source_id[pane] is None:
def anim_cb():
textview.queue_draw()
return True
# Using timeout_add interferes with recalculation of inline
# highlighting; this mechanism could be improved.
self.anim_source_id[pane] = gobject.idle_add(anim_cb)
elif not self.animating_chunks[pane] and self.anim_source_id[pane]:
gobject.source_remove(self.anim_source_id[pane])
self.anim_source_id[pane] = None
if event.window == textview.get_window(gtk.TEXT_WINDOW_LEFT):
self.in_nested_textview_gutter_expose = True
textview.emit("expose-event", event)
def _get_filename_for_saving(self, title ):
dialog = gtk.FileChooserDialog(title,
parent=self.widget.get_toplevel(),
action=gtk.FILE_CHOOSER_ACTION_SAVE,
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OK, gtk.RESPONSE_OK) )
dialog.set_default_response(gtk.RESPONSE_OK)
response = dialog.run()
filename = None
if response == gtk.RESPONSE_OK:
filename = dialog.get_filename()
dialog.destroy()
if filename:
if os.path.exists(filename):
response = misc.run_dialog(
_('"%s" exists!\nOverwrite?') % os.path.basename(filename),
parent = self,
buttonstype = gtk.BUTTONS_YES_NO)
if response == gtk.RESPONSE_NO:
return None
return filename
return None
def _save_text_to_filename(self, filename, text):
try:
open(filename, "wb").write(text)
except IOError as e:
misc.run_dialog(
_("Error writing to %s\n\n%s.") % (filename, e),
self, gtk.MESSAGE_ERROR, gtk.BUTTONS_OK)
return False
return True
def save_file(self, pane, saveas=False):
buf = self.textbuffer[pane]
bufdata = buf.data
if saveas or not (bufdata.filename or bufdata.savefile) \
or not bufdata.writable:
if pane == 0:
prompt = _("Save Left Pane As")
elif pane == 1 and self.num_panes == 3:
prompt = _("Save Middle Pane As")
else:
prompt = _("Save Right Pane As")
filename = self._get_filename_for_saving(prompt)
if filename:
bufdata.filename = bufdata.label = os.path.abspath(filename)
bufdata.savefile = None
self.fileentry[pane].set_filename(bufdata.filename)
self.fileentry[pane].prepend_history(bufdata.filename)
else:
return False
start, end = buf.get_bounds()
text = text_type(buf.get_text(start, end, False), 'utf8')
if bufdata.newlines:
if isinstance(bufdata.newlines, basestring):
if bufdata.newlines != '\n':
text = text.replace("\n", bufdata.newlines)
else:
buttons = {
'\n': ("UNIX (LF)", 0),
'\r\n': ("DOS/Windows (CR-LF)", 1),
'\r': ("Mac OS (CR)", 2),
}
newline = misc.run_dialog( _("This file '%s' contains a mixture of line endings.\n\nWhich format would you like to use?") % bufdata.label,
self, gtk.MESSAGE_WARNING, buttonstype=gtk.BUTTONS_CANCEL,
extrabuttons=[ buttons[b] for b in bufdata.newlines ] )
if newline < 0:
return
for k,v in buttons.items():
if v[1] == newline:
bufdata.newlines = k
if k != '\n':
text = text.replace('\n', k)
break
if bufdata.encoding:
try:
text = text.encode(bufdata.encoding)
except UnicodeEncodeError:
if misc.run_dialog(
_("'%s' contains characters not encodable with '%s'\nWould you like to save as UTF-8?") % (bufdata.label, bufdata.encoding),
self, gtk.MESSAGE_ERROR, gtk.BUTTONS_YES_NO) != gtk.RESPONSE_YES:
return False
save_to = bufdata.savefile or bufdata.filename
if self._save_text_to_filename(save_to, text):
self.emit("file-changed", save_to)
self.undosequence.checkpoint(buf)
return True
else:
return False
def make_patch(self, *extra):
dialog = patchdialog.PatchDialog(self)
dialog.run()
def set_buffer_writable(self, buf, writable):
buf.data.writable = writable
self.recompute_label()
index = self.textbuffer.index(buf)
self.readonlytoggle[index].props.visible = not writable
self.set_buffer_editable(buf, writable)
def set_buffer_modified(self, buf, yesno):
buf.data.modified = yesno
self.recompute_label()
def set_buffer_editable(self, buf, editable):
buf.data.editable = editable
index = self.textbuffer.index(buf)
self.readonlytoggle[index].set_active(not editable)
self.textview[index].set_editable(editable)
self.on_cursor_position_changed(buf, None, True)
for linkmap in self.linkmap:
linkmap.queue_draw()
def save(self):
pane = self._get_focused_pane()
if pane >= 0:
self.save_file(pane)
def save_as(self):
pane = self._get_focused_pane()
if pane >= 0:
self.save_file(pane, True)
def on_save_all_activate(self, action):
for i in range(self.num_panes):
if self.textbuffer[i].data.modified:
self.save_file(i)
def on_fileentry_activate(self, entry):
if self.check_save_modified() != gtk.RESPONSE_CANCEL:
entries = self.fileentry[:self.num_panes]
paths = [e.get_full_path() for e in entries]
paths = [p.decode('utf8') for p in paths]
self.set_files(paths)
return True
def _get_focused_pane(self):
for i in range(self.num_panes):
if self.textview[i].is_focus():
return i
return -1
def on_revert_activate(self, *extra):
response = gtk.RESPONSE_OK
unsaved = [b.data.label for b in self.textbuffer if b.data.modified]
if unsaved:
ui_path = paths.ui_dir("filediff.ui")
dialog = gnomeglade.Component(ui_path, "revert_dialog")
dialog.widget.set_transient_for(self.widget.get_toplevel())
# FIXME: Should be packed into dialog.widget.get_message_area(),
# but this is unbound on currently required PyGTK.
filelist = "\n".join(["\t" + f for f in unsaved])
dialog.widget.props.secondary_text += filelist
response = dialog.widget.run()
dialog.widget.destroy()
if response == gtk.RESPONSE_OK:
files = [b.data.filename for b in self.textbuffer[:self.num_panes]]
self.set_files(files)
def on_refresh_activate(self, *extra):
self.refresh_comparison()
def queue_draw(self, junk=None):
for t in self.textview:
t.queue_draw()
for i in range(self.num_panes-1):
self.linkmap[i].queue_draw()
self.diffmap0.queue_draw()
self.diffmap1.queue_draw()
def on_action_lock_scrolling_toggled(self, action):
self.toggle_scroll_lock(action.get_active())
def on_lock_button_toggled(self, button):
self.toggle_scroll_lock(not button.get_active())
def toggle_scroll_lock(self, locked):
icon_name = "meld-locked" if locked else "meld-unlocked"
self.lock_button_image.props.icon_name = icon_name
self.lock_button.set_active(not locked)
self.actiongroup.get_action("LockScrolling").set_active(locked)
self._scroll_lock = not locked
def on_readonly_button_toggled(self, button):
index = self.readonlytoggle.index(button)
buf = self.textbuffer[index]
self.set_buffer_editable(buf, not button.get_active())
#
# scrollbars
#
def _sync_hscroll(self, adjustment):
if self._sync_hscroll_lock or self._scroll_lock:
return
self._sync_hscroll_lock = True
val = adjustment.get_value()
for sw in self.scrolledwindow[:self.num_panes]:
adj = sw.get_hadjustment()
if adj is not adjustment:
adj.set_value(val)
self._sync_hscroll_lock = False
def _sync_vscroll(self, adjustment, master):
# only allow one scrollbar to be here at a time
if self._sync_vscroll_lock:
return
if not self._scroll_lock and (self.keymask & MASK_SHIFT) == 0:
self._sync_vscroll_lock = True
syncpoint = 0.5
# the line to search for in the 'master' text
master_y = adjustment.value + adjustment.page_size * syncpoint
it = self.textview[master].get_line_at_y(int(master_y))[0]
line_y, height = self.textview[master].get_line_yrange(it)
line = it.get_line() + ((master_y-line_y)/height)
# scrollbar influence 0->1->2 or 0<-1->2 or 0<-1<-2
scrollbar_influence = ((1, 2), (0, 2), (1, 0))
for i in scrollbar_influence[master][:self.num_panes - 1]:
adj = self.scrolledwindow[i].get_vadjustment()
mbegin, mend = 0, self.textbuffer[master].get_line_count()
obegin, oend = 0, self.textbuffer[i].get_line_count()
# look for the chunk containing 'line'
for c in self.linediffer.pair_changes(master, i):
if c[1] >= line:
mend = c[1]
oend = c[3]
break
elif c[2] >= line:
mbegin, mend = c[1], c[2]
obegin, oend = c[3], c[4]
break
else:
mbegin = c[2]
obegin = c[4]
fraction = (line - mbegin) / ((mend - mbegin) or 1)
other_line = (obegin + fraction * (oend - obegin))
it = self.textbuffer[i].get_iter_at_line(int(other_line))
val, height = self.textview[i].get_line_yrange(it)
val -= (adj.page_size) * syncpoint
val += (other_line-int(other_line)) * height
val = min(max(val, adj.lower), adj.upper - adj.page_size)
adj.set_value( val )
# If we just changed the central bar, make it the master
if i == 1:
master, line = 1, other_line
self._sync_vscroll_lock = False
for lm in self.linkmap:
if lm.window:
lm.window.invalidate_rect(None, True)
lm.window.process_updates(True)
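        # Worked illustration of the chunk-based mapping in _sync_vscroll above
        # (hypothetical numbers, not taken from a real diff): if the master
        # pane's centre line is 12.0 and the surrounding chunk spans
        # mbegin=10, mend=20 in the master and obegin=30, oend=35 in the other
        # pane, then fraction = (12.0 - 10) / 10 = 0.2 and
        # other_line = 30 + 0.2 * 5 = 31.0, so the other pane is scrolled to
        # keep line 31 at the same relative position in its viewport.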
def set_num_panes(self, n):
if n != self.num_panes and n in (1,2,3):
self.num_panes = n
toshow = self.scrolledwindow[:n] + self.fileentry[:n]
toshow += self.vbox[:n] + self.msgarea_mgr[:n]
toshow += self.linkmap[:n-1] + self.diffmap[:n]
toshow += self.selector_hbox[:n]
for widget in toshow:
widget.show()
tohide = self.statusimage + self.scrolledwindow[n:] + self.fileentry[n:]
tohide += self.vbox[n:] + self.msgarea_mgr[n:]
tohide += self.linkmap[n-1:] + self.diffmap[n:]
tohide += self.selector_hbox[n:]
for widget in tohide:
widget.hide()
right_attach = 2 * n
if self.findbar.widget in self.table:
self.table.remove(self.findbar.widget)
self.table.attach(self.findbar.widget, 1, right_attach, 2, 3,
gtk.FILL, gtk.FILL)
self.actiongroup.get_action("MakePatch").set_sensitive(n > 1)
self.actiongroup.get_action("CycleDocuments").set_sensitive(n > 1)
def coords_iter(i):
buf_index = 2 if i == 1 and self.num_panes == 3 else i
get_end_iter = self.textbuffer[buf_index].get_end_iter
get_iter_at_line = self.textbuffer[buf_index].get_iter_at_line
get_line_yrange = self.textview[buf_index].get_line_yrange
def coords_by_chunk():
y, h = get_line_yrange(get_end_iter())
max_y = float(y + h)
for c in self.linediffer.single_changes(i):
y0, _ = get_line_yrange(get_iter_at_line(c[1]))
if c[1] == c[2]:
y, h = y0, 0
else:
y, h = get_line_yrange(get_iter_at_line(c[2] - 1))
yield c[0], y0 / max_y, (y + h) / max_y
return coords_by_chunk
for (w, i) in zip(self.diffmap, (0, self.num_panes - 1)):
scroll = self.scrolledwindow[i].get_vscrollbar()
w.setup(scroll, coords_iter(i), [self.fill_colors, self.line_colors])
for (w, i) in zip(self.linkmap, (0, self.num_panes - 2)):
w.associate(self, self.textview[i], self.textview[i + 1])
for i in range(self.num_panes):
if self.textbuffer[i].data.modified:
self.statusimage[i].show()
self.queue_draw()
self.recompute_label()
def next_diff(self, direction, centered=False):
pane = self._get_focused_pane()
if pane == -1:
if len(self.textview) > 1:
pane = 1
else:
pane = 0
buf = self.textbuffer[pane]
if direction == gtk.gdk.SCROLL_DOWN:
target = self.cursor.next
else: # direction == gtk.gdk.SCROLL_UP
target = self.cursor.prev
if target is None:
return
c = self.linediffer.get_chunk(target, pane)
if c:
# Warp the cursor to the first line of next chunk
if self.cursor.line != c[1]:
buf.place_cursor(buf.get_iter_at_line(c[1]))
if centered:
self.textview[pane].scroll_to_mark(buf.get_insert(), 0.0,
True)
else:
self.textview[pane].scroll_to_mark(buf.get_insert(), 0.2)
def copy_chunk(self, src, dst, chunk, copy_up):
b0, b1 = self.textbuffer[src], self.textbuffer[dst]
start = b0.get_iter_at_line_or_eof(chunk[1])
end = b0.get_iter_at_line_or_eof(chunk[2])
t0 = text_type(b0.get_text(start, end, False), 'utf8')
if copy_up:
if chunk[2] >= b0.get_line_count() and \
chunk[3] < b1.get_line_count():
# TODO: We need to insert a linebreak here, but there is no
# way to be certain what kind of linebreak to use.
t0 = t0 + "\n"
dst_start = b1.get_iter_at_line_or_eof(chunk[3])
mark0 = b1.create_mark(None, dst_start, True)
new_end = b1.insert_at_line(chunk[3], t0)
else: # copy down
dst_start = b1.get_iter_at_line_or_eof(chunk[4])
mark0 = b1.create_mark(None, dst_start, True)
new_end = b1.insert_at_line(chunk[4], t0)
mark1 = b1.create_mark(None, new_end, True)
# FIXME: If the inserted chunk ends up being an insert chunk, then
# this animation is not visible; this happens often in three-way diffs
rgba0 = misc.gdk_to_cairo_color(self.fill_colors['insert']) + (1.0,)
rgba1 = misc.gdk_to_cairo_color(self.fill_colors['insert']) + (0.0,)
anim = TextviewLineAnimation(mark0, mark1, rgba0, rgba1, 0.5)
self.animating_chunks[dst].append(anim)
def replace_chunk(self, src, dst, chunk):
b0, b1 = self.textbuffer[src], self.textbuffer[dst]
src_start = b0.get_iter_at_line_or_eof(chunk[1])
src_end = b0.get_iter_at_line_or_eof(chunk[2])
dst_start = b1.get_iter_at_line_or_eof(chunk[3])
dst_end = b1.get_iter_at_line_or_eof(chunk[4])
t0 = text_type(b0.get_text(src_start, src_end, False), 'utf8')
mark0 = b1.create_mark(None, dst_start, True)
self.on_textbuffer__begin_user_action()
b1.delete(dst_start, dst_end)
new_end = b1.insert_at_line(chunk[3], t0)
self.on_textbuffer__end_user_action()
mark1 = b1.create_mark(None, new_end, True)
# FIXME: If the inserted chunk ends up being an insert chunk, then
# this animation is not visible; this happens often in three-way diffs
rgba0 = misc.gdk_to_cairo_color(self.fill_colors['insert']) + (1.0,)
rgba1 = misc.gdk_to_cairo_color(self.fill_colors['insert']) + (0.0,)
anim = TextviewLineAnimation(mark0, mark1, rgba0, rgba1, 0.5)
self.animating_chunks[dst].append(anim)
def delete_chunk(self, src, chunk):
b0 = self.textbuffer[src]
it = b0.get_iter_at_line_or_eof(chunk[1])
if chunk[2] >= b0.get_line_count():
it.backward_char()
b0.delete(it, b0.get_iter_at_line_or_eof(chunk[2]))
mark0 = b0.create_mark(None, it, True)
mark1 = b0.create_mark(None, it, True)
# TODO: Need a more specific colour here; conflict is wrong
rgba0 = misc.gdk_to_cairo_color(self.fill_colors['conflict']) + (1.0,)
rgba1 = misc.gdk_to_cairo_color(self.fill_colors['conflict']) + (0.0,)
anim = TextviewLineAnimation(mark0, mark1, rgba0, rgba1, 0.5)
self.animating_chunks[src].append(anim)
def add_sync_point(self, action):
pane = self._get_focused_pane()
if pane == -1:
return
# Find a non-complete syncpoint, or create a new one
if self.syncpoints and None in self.syncpoints[-1]:
syncpoint = self.syncpoints.pop()
else:
syncpoint = [None] * self.num_panes
cursor_it = self.textbuffer[pane].get_iter_at_mark(
self.textbuffer[pane].get_insert())
syncpoint[pane] = cursor_it.get_line()
self.syncpoints.append(syncpoint)
valid_points = [p for p in self.syncpoints if all(p)]
if valid_points and self.num_panes == 2:
self.linediffer.syncpoints = [
((p[1], p[0]), ) for p in valid_points]
elif valid_points and self.num_panes == 3:
self.linediffer.syncpoints = [
((p[1], p[0]), (p[1], p[2])) for p in valid_points]
self.refresh_comparison()
def clear_sync_points(self, action):
self.syncpoints = []
self.linediffer.syncpoints = []
self.refresh_comparison()
| gpl-2.0 | -4,633,573,906,481,092,000 | 42.910952 | 154 | 0.556036 | false |
WmHHooper/aima-python | submissions/Thompson/myLogic.py | 1 | 1132 | ##
farmer = {
'kb': '''
Farmer(Mac)
Rabbit(Pete)
Mother(MrsMac, Mac)
Mother(MrsRabbit, Pete)
(Rabbit(r) & Farmer(f)) ==> Hates(f, r)
(Mother(m, c)) ==> Loves(m, c)
(Mother(m, r) & Rabbit(r)) ==> Rabbit(m)
(Farmer(f)) ==> Human(f)
(Mother(m, h) & Human(h)) ==> Human(m)
''',
# Note that this order of conjuncts
# would result in infinite recursion:
# '(Human(h) & Mother(m, h)) ==> Human(m)'
'queries':'''
Human(x)
Hates(x, y)
''',
# 'limit': 1,
}
weapons = {
'kb': '''
(American(x) & Weapon(y) & Sells(x, y, z) & Hostile(z)) ==> Criminal(x)
Owns(Nono, M1)
Missile(M1)
(Missile(x) & Owns(Nono, x)) ==> Sells(West, x, Nono)
Missile(x) ==> Weapon(x)
Enemy(x, America) ==> Hostile(x)
American(West)
Enemy(Nono, America)
''',
'queries':'''
Criminal(x)
''',
}
wrath = {
'kb': '''
Father(Terenas)
DeathKnight(Arthas)
Living(Alliance)
Living(Horde)
Dead(Scourge)
(Living(f) & Dead(e) & DeathKnight(s)) ==> Kills(s, f, e)
(Father(f) & DeathKnight(s)) ==> Father(f, s)
''',
'queries': '''
Kills(x,y,z)
Father(x,y)
''',
}
Examples = {
# 'farmer': farmer,
# 'weapons': weapons,
'wrath': wrath,
}
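# Illustrative harness sketch (not part of the submission): shows how the
# Examples dict above might be loaded into aima-python's FolKB and queried by
# forward chaining. The import locations and the line-by-line expr() parsing
# are assumptions about the course test harness, which is not defined in this
# file; the guard keeps the sketch from running when the module is imported.
if __name__ == '__main__':
    from logic import FolKB, fol_fc_ask
    from utils import expr
    for name, example in Examples.items():
        kb = FolKB([expr(line.strip())
                    for line in example['kb'].splitlines() if line.strip()])
        for query in example['queries'].splitlines():
            if query.strip():
                for subst in fol_fc_ask(kb, expr(query.strip())):
                    print(name, query.strip(), subst)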
#
# | mit | -1,356,765,412,437,163,300 | 16.703125 | 71 | 0.560954 | false |
DemocracyClub/UK-Polling-Stations | polling_stations/apps/data_importers/tests/test_address_list.py | 1 | 18783 | from django.test import TestCase
from addressbase.tests.factories import AddressFactory
from data_importers.data_types import AddressList
class MockLogger:
logs = []
def log_message(self, level, message, variable=None, pretty=False):
self.logs.append(message)
def clear_logs(self):
self.logs = []
class AddressListTest(TestCase):
def test_append(self):
in_list = [
{
"address": "foo",
"postcode": "AA11AA",
"council": "AAA",
"polling_station_id": "01",
"uprn": "1",
},
{ # Doesn't need a uprn - this should be added
"address": "bar",
"postcode": "AA11AA",
"council": "AAA",
"polling_station_id": "01",
"uprn": "",
},
{ # Does need a postcode - this shouldn't
"address": "baz",
"postcode": "",
"council": "AAA",
"polling_station_id": "01",
"uprn": "1",
},
]
expected = [
{
"address": "foo",
"postcode": "AA11AA",
"council": "AAA",
"polling_station_id": "01",
"uprn": "1",
},
{
"address": "bar",
"postcode": "AA11AA",
"council": "AAA",
"polling_station_id": "01",
"uprn": "",
},
]
address_list = AddressList(MockLogger())
for el in in_list:
address_list.append(el)
self.assertEqual(expected, address_list.elements)
def test_add_with_duplicates(self):
in_list = [
{
"address": "foo",
"postcode": "AA11AA",
"council": "AAA",
"polling_station_id": "01",
"uprn": "1",
},
{
"address": "bar",
"postcode": "AA11AA",
"council": "AAA",
"polling_station_id": "01",
"uprn": "2",
},
{
"address": "foo",
"postcode": "AA11AA",
"council": "AAA",
"polling_station_id": "01",
"uprn": "1",
},
]
expected = [
{
"address": "foo",
"postcode": "AA11AA",
"council": "AAA",
"polling_station_id": "01",
"uprn": "1",
},
{
"address": "bar",
"postcode": "AA11AA",
"council": "AAA",
"polling_station_id": "01",
"uprn": "2",
},
{ # This is correct we deal with duplicates later.
"address": "foo",
"postcode": "AA11AA",
"council": "AAA",
"polling_station_id": "01",
"uprn": "1",
},
]
address_list = AddressList(MockLogger())
for el in in_list:
address_list.append(el)
self.assertEqual(expected, address_list.elements)
def test_get_uprn_lookup(self):
in_list = [
{
"polling_station_id": "01",
"address": "foo 1",
"postcode": "AA1 2BB",
"council": "AAA",
"uprn": "1",
},
{
"polling_station_id": "01",
"address": "foo 2",
"postcode": "AA1 2BB",
"council": "AAA",
"uprn": "2",
},
{
"polling_station_id": "01",
"address": "foo 3",
"postcode": "AA1 2BB",
"council": "AAA",
"uprn": "3",
},
{
"polling_station_id": "02",
"address": "foo 4",
"postcode": "AA1 2BB",
"council": "AAA",
"uprn": "4",
},
{
"polling_station_id": "02",
"address": "foo 5",
"postcode": "AA1 2BB",
"council": "AAA",
"uprn": "5",
},
{
"polling_station_id": "01",
"address": "foo 5",
"postcode": "AA1 2BB",
"council": "AAA",
"uprn": "5",
},
]
expected = {
"1": {"01"},
"2": {"01"},
"3": {"01"},
"4": {"02"},
"5": {"02", "01"},
}
address_list = AddressList(MockLogger())
for el in in_list:
address_list.append(el)
self.assertEqual(expected, address_list.get_uprn_lookup())
def test_remove_duplicate_uprns(self):
in_list = [
{
"polling_station_id": "01",
"address": "foo 1",
"postcode": "AA1 2BB",
"council": "AAA",
"uprn": "1",
},
{
"polling_station_id": "01",
"address": "foo 2",
"postcode": "AA1 2BB",
"council": "AAA",
"uprn": "2",
},
{
"polling_station_id": "01",
"address": "foo 3",
"postcode": "AA1 2BB",
"council": "AAA",
"uprn": "3",
},
{
"polling_station_id": "02",
"address": "foo 4",
"postcode": "AA1 2BB",
"council": "AAA",
"uprn": "4",
},
{
"polling_station_id": "02",
"address": "foo 5",
"postcode": "AA1 2BB",
"council": "AAA",
"uprn": "5",
},
{
"polling_station_id": "01",
"address": "foo 5",
"postcode": "AA1 2BB",
"council": "AAA",
"uprn": "5",
},
]
expected = [
{
"polling_station_id": "01",
"address": "foo 1",
"postcode": "AA1 2BB",
"council": "AAA",
"uprn": "1",
},
{
"polling_station_id": "01",
"address": "foo 2",
"postcode": "AA1 2BB",
"council": "AAA",
"uprn": "2",
},
{
"polling_station_id": "01",
"address": "foo 3",
"postcode": "AA1 2BB",
"council": "AAA",
"uprn": "3",
},
{
"polling_station_id": "02",
"address": "foo 4",
"postcode": "AA1 2BB",
"council": "AAA",
"uprn": "4",
},
]
address_list = AddressList(MockLogger())
for el in in_list:
address_list.append(el)
address_list.remove_duplicate_uprns()
self.assertEqual(expected, address_list.elements)
def test_get_polling_station_lookup(self):
in_list = [
{
"polling_station_id": "01",
"address": "foo 1",
"postcode": "AA1 2BB",
"council": "AAA",
"uprn": "01",
},
{
"polling_station_id": "01",
"address": "foo 2",
"postcode": "AA1 2BB",
"council": "AAA",
"uprn": "02",
},
{
"polling_station_id": "01",
"address": "foo 3",
"postcode": "AA1 2BB",
"council": "AAA",
"uprn": "03",
},
{
"polling_station_id": "02",
"address": "foo 4",
"postcode": "AA1 2BB",
"council": "AAA",
"uprn": "04",
},
]
expected = {
"01": {
"01",
"02",
"03",
},
"02": {"04"},
}
address_list = AddressList(MockLogger())
for el in in_list:
address_list.append(el)
self.assertEqual(expected, address_list.get_polling_station_lookup())
def test_remove_records_not_in_addressbase(self):
in_list = [
{
"polling_station_id": "01",
"address": "foo 1",
"postcode": "AA1 2BB",
"council": "AAA",
"uprn": "1",
},
{
"polling_station_id": "01",
"address": "foo 2",
"postcode": "AA1 2BB",
"council": "AAA",
"uprn": "2",
},
]
addressbase_data = {"1": {"postcode": "AA1 2BB"}}
expected = [
{
"polling_station_id": "01",
"address": "foo 1",
"postcode": "AA1 2BB",
"council": "AAA",
"uprn": "1",
},
]
address_list = AddressList(MockLogger())
for el in in_list:
address_list.append(el)
address_list.remove_records_not_in_addressbase(addressbase_data)
self.assertEqual(expected, address_list.elements)
def test_remove_records_that_dont_match_addressbase(self):
in_list = [
{
"polling_station_id": "01",
"address": "foo 1",
"postcode": "AA1 2BB",
"council": "AAA",
"uprn": "1",
},
{
"polling_station_id": "01",
"address": "foo 2",
"postcode": "AA1 2BB",
"council": "AAA",
"uprn": "2",
},
]
addressbase_data = {"1": {"postcode": "AA1 2BB"}, "2": {"postcode": "AA1 2CC"}}
expected = [
{
"polling_station_id": "01",
"address": "foo 1",
"postcode": "AA1 2BB",
"council": "AAA",
"uprn": "1",
},
]
address_list = AddressList(MockLogger())
for el in in_list:
address_list.append(el)
address_list.remove_records_that_dont_match_addressbase(addressbase_data)
self.assertEqual(expected, address_list.elements)
def test_remove_records_that_dont_match_addressbase_with_duplicates(self):
in_list = [
{
"polling_station_id": "01",
"address": "foo 1",
"postcode": "AA1 2BB",
"council": "AAA",
"uprn": "10",
},
{
"polling_station_id": "01",
"address": "foo 2",
"postcode": "AA1 2BB",
"council": "AAA",
"uprn": "20",
},
{
"polling_station_id": "01",
"address": "foo 2",
"postcode": "AA1 2BB",
"council": "AAA",
"uprn": "20",
},
]
addressbase_data = {
"10": {"postcode": "AA1 2BB"},
"20": {"postcode": "AA1 2CC"},
}
expected = [
{
"polling_station_id": "01",
"address": "foo 1",
"postcode": "AA1 2BB",
"council": "AAA",
"uprn": "10",
},
]
address_list = AddressList(MockLogger())
for el in in_list:
address_list.append(el)
address_list.remove_records_that_dont_match_addressbase(addressbase_data)
self.assertEqual(expected, address_list.elements)
def test_get_council_split_postcodes(self):
in_list = [
{
"address": "foo",
"postcode": "AA11AA",
"council": "AAA",
"polling_station_id": "01",
"uprn": "1",
},
{
"address": "bar",
"postcode": "AA11AA",
"council": "AAA",
"polling_station_id": "02",
"uprn": "2",
},
{
"address": "baz",
"postcode": "BB11BB",
"council": "AAA",
"polling_station_id": "03",
"uprn": "1",
},
]
address_list = AddressList(MockLogger())
for el in in_list:
address_list.append(el)
expected = ["AA11AA"]
self.assertListEqual(expected, address_list.get_council_split_postcodes())
def test_remove_records_missing_uprns(self):
in_list = [
{
"address": "foo",
"postcode": "AA11AA",
"council": "AAA",
"polling_station_id": "01",
"uprn": "1",
},
            {  # no uprn: append() keeps it, but remove_records_missing_uprns drops it
"address": "bar",
"postcode": "AA11AA",
"council": "AAA",
"polling_station_id": "01",
"uprn": "",
},
            {  # no postcode: append() never adds it in the first place
"address": "baz",
"postcode": "",
"council": "AAA",
"polling_station_id": "01",
"uprn": "1",
},
]
expected = [
{
"address": "foo",
"postcode": "AA11AA",
"council": "AAA",
"polling_station_id": "01",
"uprn": "1",
},
]
address_list = AddressList(MockLogger())
for el in in_list:
address_list.append(el)
address_list.remove_records_missing_uprns()
self.assertEqual(expected, address_list.elements)
def test_check_split_postcodes_are_split(self):
"""
AddressBase | Council Data
|
uprn | Address | Postcode | uprn | Address | Postcode | Station
---------------------------------|------------------------------------------
123 | 1 Foo Street | AA11AA | 123 | 1 Foo Street | AA11AA | A1
124 | 2 Foo Street | AA11AA | 124 | 2 Foo Street | AA11AA | A1
125 | 3 Foo Street | AA11AA | 125 | 3 Foo Street | AA22AA | A2
---------------------------------|------------------------------------------
223 | 1 Bar Street | BB11BB | 223 | 1 Bar Street | BB11BB | B1
224 | 2 Bar Street | BB11BB | 224 | 2 Bar Street | BB11BB | B1
| 225 | 3 Bar Street | BB11BB | B2
---------------------------------|------------------------------------------
323 | 1 Baz Street | CC11CC | 323 | 1 Baz Street | CC11CC | C1
324 | 2 Baz Street | CC11CC | 324 | 2 Baz Street | CC11CC | C1
325 | 3 Baz Street | CC22CC | 325 | 3 Baz Street | CC11CC | C2
        BB11BB and CC11CC are the postcodes the importer will not detect as split, even though they really are.
Our checks should remove 225 and 325 because there aren't matches for them
in addressbase. Therefore they're not in the in_list.
"""
in_list = [
{
"address": "1 Foo Street",
"postcode": "AA11AA",
"council": "AAA",
"polling_station_id": "A1",
"uprn": "123",
},
{
"address": "2 Foo Street",
"postcode": "AA11AA",
"council": "AAA",
"polling_station_id": "A1",
"uprn": "124",
},
{
"address": "3 Foo Street",
"postcode": "AA22AA",
"council": "AAA",
"polling_station_id": "A2",
"uprn": "125",
},
{
"address": "1 Bar Street",
"postcode": "BB11BB",
"council": "AAA",
"polling_station_id": "B1",
"uprn": "223",
},
{
"address": "2 Bar Street",
"postcode": "BB11BB",
"council": "AAA",
"polling_station_id": "B1",
"uprn": "224",
},
{
"address": "1 Baz Street",
"postcode": "CC11CC",
"council": "AAA",
"polling_station_id": "C1",
"uprn": "323",
},
{
"address": "2 Baz Street",
"postcode": "CC11CC",
"council": "AAA",
"polling_station_id": "C1",
"uprn": "324",
},
]
addressbase = [
# AA11A
{"uprn": "123", "address": "1 Foo Street", "postcode": "AA11AA"},
{"uprn": "124", "address": "2 Foo Street", "postcode": "AA11AA"},
{"uprn": "125", "address": "3 Foo Street", "postcode": "AA11AA"},
# BB11BB
{"uprn": "223", "address": "1 Bar Street ", "postcode": "BB11BB"},
{"uprn": "224", "address": "2 Bar Street ", "postcode": "BB11BB"},
# CC11CC
{"uprn": "323", "address": "1 Baz Street", "postcode": "CC11CC"},
{"uprn": "324", "address": "2 Baz Street", "postcode": "CC11CC"},
# CC22CC
{"uprn": "325", "address": "3 Baz Street", "postcode": "CC22CC"},
]
for address in addressbase:
AddressFactory(**address)
address_list = AddressList(MockLogger())
address_list.logger.clear_logs()
for el in in_list:
address_list.append(el)
split_postcodes = ["BB11BB", "CC11CC"]
address_list.check_split_postcodes_are_split(split_postcodes)
self.assertListEqual(
address_list.logger.logs,
[
'These postcodes are split in council data: "BB11BB", "CC11CC", but won\'t be in the db once imported.'
],
)
def test_check_records(self):
pass
| bsd-3-clause | -7,876,145,225,259,177,000 | 30.568067 | 119 | 0.369909 | false |
ContextLab/quail | quail/analysis/lagcrp.py | 1 | 4765 | import numpy as np
import pandas as pd
from .recmat import recall_matrix
from scipy.spatial.distance import cdist
from ..helpers import check_nan
def lagcrp_helper(egg, match='exact', distance='euclidean',
ts=None, features=None):
"""
Computes probabilities for each transition distance (probability that a word
recalled will be a given distance--in presentation order--from the previous
recalled word).
Parameters
----------
egg : quail.Egg
Data to analyze
match : str (exact, best or smooth)
Matching approach to compute recall matrix. If exact, the presented and
recalled items must be identical (default). If best, the recalled item
that is most similar to the presented items will be selected. If smooth,
a weighted average of all presented items will be used, where the
weights are derived from the similarity between the recalled item and
each presented item.
distance : str
The distance function used to compare presented and recalled items.
Applies only to 'best' and 'smooth' matching approaches. Can be any
        distance function supported by scipy.spatial.distance.cdist.
Returns
----------
prec : numpy array
      each float is the probability of a transition of that distance (distances
      indexed by position, from -(n-1) to (n-1), excluding zero)
"""
def lagcrp(rec, lstlen):
"""Computes lag-crp for a given recall list"""
def check_pair(a, b):
if (a>0 and b>0) and (a!=b):
return True
else:
return False
def compute_actual(rec, lstlen):
arr=pd.Series(data=np.zeros((lstlen)*2),
index=list(range(-lstlen,0))+list(range(1,lstlen+1)))
recalled=[]
for trial in range(0,len(rec)-1):
a=rec[trial]
b=rec[trial+1]
if check_pair(a, b) and (a not in recalled) and (b not in recalled):
arr[b-a]+=1
recalled.append(a)
return arr
def compute_possible(rec, lstlen):
arr=pd.Series(data=np.zeros((lstlen)*2),
index=list(range(-lstlen,0))+list(range(1,lstlen+1)))
recalled=[]
for trial in rec:
if np.isnan(trial):
pass
else:
lbound=int(1-trial)
ubound=int(lstlen-trial)
chances=list(range(lbound,0))+list(range(1,ubound+1))
for each in recalled:
if each-trial in chances:
chances.remove(each-trial)
arr[chances]+=1
recalled.append(trial)
return arr
actual = compute_actual(rec, lstlen)
possible = compute_possible(rec, lstlen)
crp = [0.0 if j == 0 else i / j for i, j in zip(actual, possible)]
crp.insert(int(len(crp) / 2), np.nan)
return crp
def nlagcrp(distmat, ts=None):
def lagcrp_model(s):
idx = list(range(0, -s, -1))
return np.array([list(range(i, i+s)) for i in idx])
# remove nan columns
distmat = distmat[:,~np.all(np.isnan(distmat), axis=0)].T
model = lagcrp_model(distmat.shape[1])
lagcrp = np.zeros(ts * 2)
for rdx in range(len(distmat)-1):
item = distmat[rdx, :]
next_item = distmat[rdx+1, :]
if not np.isnan(item).any() and not np.isnan(next_item).any():
outer = np.outer(item, next_item)
lagcrp += np.array(list(map(lambda lag: np.mean(outer[model==lag]), range(-ts, ts))))
lagcrp /= ts
lagcrp = list(lagcrp)
lagcrp.insert(int(len(lagcrp) / 2), np.nan)
return np.array(lagcrp)
def _format(p, r):
p = np.matrix([np.array(i) for i in p])
if p.shape[0]==1:
p=p.T
r = map(lambda x: [np.nan]*p.shape[1] if check_nan(x) else x, r)
r = np.matrix([np.array(i) for i in r])
if r.shape[0]==1:
r=r.T
return p, r
opts = dict(match=match, distance=distance, features=features)
    if match == 'exact':
opts.update({'features' : 'item'})
recmat = recall_matrix(egg, **opts)
if not ts:
ts = egg.pres.shape[1]
if match in ['exact', 'best']:
lagcrp = [lagcrp(lst, egg.list_length) for lst in recmat]
    elif match == 'smooth':
lagcrp = np.atleast_2d(np.mean([nlagcrp(r, ts=ts) for r in recmat], 0))
else:
raise ValueError('Match must be set to exact, best or smooth.')
return np.nanmean(lagcrp, axis=0)
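# Toy illustration (not used by quail itself): mirrors compute_actual() above
# for a single recall sequence, counting how often consecutive recalls are k
# study positions apart. Items are 1-indexed study positions and the
# Egg/recall-matrix machinery is bypassed, so this is only a sketch of the
# idea, e.g. _toy_actual_lag_counts([1, 2, 5, 4], 8) counts lags +1, +3 and -1
# once each.
def _toy_actual_lag_counts(recalled_positions, list_length):
    lags = list(range(-list_length, 0)) + list(range(1, list_length + 1))
    counts = {lag: 0 for lag in lags}
    already_recalled = []
    for a, b in zip(recalled_positions[:-1], recalled_positions[1:]):
        valid = (a > 0 and b > 0 and a != b
                 and a not in already_recalled and b not in already_recalled)
        if valid:
            counts[b - a] += 1
            already_recalled.append(a)
    return counts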
| mit | 8,503,094,469,382,664,000 | 35.937984 | 101 | 0.555089 | false |
SuLab/scheduled-bots | scheduled_bots/query_tester/validators.py | 1 | 1080 |
class Validator:
description = '' # Plain text description of what is being checked
expected_result = [] # optional
def __init__(self):
self.success = None # True or False
self.result_message = '' # optional extra information about test result
def validate(self, result):
raise NotImplementedError("Implement a Validator Subclass")
class OneOrMoreResultsValidator(Validator):
description = "Checks for at least 1 result"
def validate(self, result):
self.success = True if len(result) >= 1 else False
class NoResultsValidator(Validator):
description = "Checks for no results"
def validate(self, result):
self.success = True if len(result) == 0 else False
class NoValidator(Validator):
description = "No validation"
def validate(self, result):
self.success = None
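# Usage sketch (illustration only, not used by the query tester itself): a
# validator instance is handed the rows returned by a query and afterwards
# exposes .success and .result_message. How the surrounding runner obtains the
# rows is an assumption; only the validator classes above come from this file.
def _example_validator_usage(rows):
    checker = OneOrMoreResultsValidator()
    checker.validate(rows)
    return checker.success, checker.result_message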
class FailValidator(Validator):
description = "Always returns FAIL"
expected_result = [{'a': 4}]
def validate(self, result):
self.success = False
self.result_message = "this is more info" | mit | 5,172,143,440,517,708,000 | 28.216216 | 80 | 0.669444 | false |
beiko-lab/gengis | bin/Lib/site-packages/scipy/signal/signaltools.py | 1 | 56054 | # Author: Travis Oliphant
# 1999 -- 2002
from __future__ import division, print_function, absolute_import
import warnings
from . import sigtools
from scipy.lib.six import callable
from scipy import linalg
from scipy.fftpack import fft, ifft, ifftshift, fft2, ifft2, fftn, \
ifftn, fftfreq
from numpy.fft import rfftn, irfftn
from numpy import polyadd, polymul, polydiv, polysub, roots, \
poly, polyval, polyder, cast, asarray, isscalar, atleast_1d, \
ones, real_if_close, zeros, array, arange, where, rank, \
newaxis, product, ravel, sum, r_, iscomplexobj, take, \
argsort, allclose, expand_dims, unique, prod, sort, reshape, \
transpose, dot, mean, ndarray, atleast_2d
import numpy as np
from scipy.misc import factorial
from .windows import get_window
from ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext
__all__ = ['correlate', 'fftconvolve', 'convolve', 'convolve2d', 'correlate2d',
'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter',
'lfiltic', 'deconvolve', 'hilbert', 'hilbert2', 'cmplx_sort',
'unique_roots', 'invres', 'invresz', 'residue', 'residuez',
'resample', 'detrend', 'lfilter_zi', 'filtfilt', 'decimate']
_modedict = {'valid': 0, 'same': 1, 'full': 2}
_boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1,
'symmetric': 1, 'reflect': 4}
def _valfrommode(mode):
try:
val = _modedict[mode]
except KeyError:
if mode not in [0, 1, 2]:
raise ValueError("Acceptable mode flags are 'valid' (0),"
" 'same' (1), or 'full' (2).")
val = mode
return val
def _bvalfromboundary(boundary):
try:
val = _boundarydict[boundary] << 2
except KeyError:
        if boundary not in [0, 1, 2]:
raise ValueError("Acceptable boundary flags are 'fill', 'wrap'"
" (or 'circular'), \n and 'symm' (or 'symmetric').")
val = boundary << 2
return val
def _check_valid_mode_shapes(shape1, shape2):
for d1, d2 in zip(shape1, shape2):
if not d1 >= d2:
raise ValueError(
"in1 should have at least as many items as in2 in "
"every dimension for 'valid' mode.")
def correlate(in1, in2, mode='full'):
"""
Cross-correlate two N-dimensional arrays.
Cross-correlate `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`;
if sizes of `in1` and `in2` are not equal then `in1` has to be the
larger array.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
correlate : array
An N-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
Notes
-----
The correlation z of two arrays x and y of rank d is defined as:
z[...,k,...] = sum[..., i_l, ...]
x[..., i_l,...] * conj(y[..., i_l + k,...])
"""
in1 = asarray(in1)
in2 = asarray(in2)
val = _valfrommode(mode)
if rank(in1) == rank(in2) == 0:
return in1 * in2
elif not in1.ndim == in2.ndim:
raise ValueError("in1 and in2 should have the same rank")
if mode == 'valid':
_check_valid_mode_shapes(in1.shape, in2.shape)
ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)]
out = np.empty(ps, in1.dtype)
z = sigtools._correlateND(in1, in2, out, val)
else:
ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)]
# zero pad input
in1zpadded = np.zeros(ps, in1.dtype)
sc = [slice(0, i) for i in in1.shape]
in1zpadded[sc] = in1.copy()
if mode == 'full':
out = np.empty(ps, in1.dtype)
elif mode == 'same':
out = np.empty(in1.shape, in1.dtype)
z = sigtools._correlateND(in1zpadded, in2, out, val)
return z
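# Illustrative helper (not part of the public API and never called at import
# time): a minimal use of correlate() in 'full' mode, where the output holds
# len(in1) + len(in2) - 1 points, one per possible shift of the template.
def _correlate_usage_example():
    sig = np.array([1.0, 2.0, 3.0, 4.0])
    template = np.array([1.0, 2.0])
    return correlate(sig, template, mode='full')  # 5 output points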
def _centered(arr, newsize):
# Return the center newsize portion of the array.
newsize = asarray(newsize)
currsize = array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def fftconvolve(in1, in2, mode="full"):
"""Convolve two N-dimensional arrays using FFT.
Convolve `in1` and `in2` using the fast Fourier transform method, with
the output size determined by the `mode` argument.
This is generally much faster than `convolve` for large arrays (n > ~500),
but can be slower when only a few output values are needed, and can only
output float arrays (int or object array inputs will be cast to float).
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`;
if sizes of `in1` and `in2` are not equal then `in1` has to be the
larger array.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
out : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
"""
in1 = asarray(in1)
in2 = asarray(in2)
if rank(in1) == rank(in2) == 0: # scalar inputs
return in1 * in2
elif not in1.ndim == in2.ndim:
raise ValueError("in1 and in2 should have the same rank")
elif in1.size == 0 or in2.size == 0: # empty arrays
return array([])
s1 = array(in1.shape)
s2 = array(in2.shape)
complex_result = (np.issubdtype(in1.dtype, np.complex) or
np.issubdtype(in2.dtype, np.complex))
size = s1 + s2 - 1
if mode == "valid":
_check_valid_mode_shapes(s1, s2)
# Always use 2**n-sized FFT
fsize = 2 ** np.ceil(np.log2(size)).astype(int)
fslice = tuple([slice(0, int(sz)) for sz in size])
if not complex_result:
ret = irfftn(rfftn(in1, fsize) *
rfftn(in2, fsize), fsize)[fslice].copy()
ret = ret.real
else:
ret = ifftn(fftn(in1, fsize) * fftn(in2, fsize))[fslice].copy()
if mode == "full":
return ret
elif mode == "same":
return _centered(ret, s1)
elif mode == "valid":
return _centered(ret, s1 - s2 + 1)
def convolve(in1, in2, mode='full'):
"""
Convolve two N-dimensional arrays.
Convolve `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`;
if sizes of `in1` and `in2` are not equal then `in1` has to be the
larger array.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
convolve : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
"""
volume = asarray(in1)
kernel = asarray(in2)
if rank(volume) == rank(kernel) == 0:
return volume * kernel
slice_obj = [slice(None, None, -1)] * len(kernel.shape)
if np.iscomplexobj(kernel):
return correlate(volume, kernel[slice_obj].conj(), mode)
else:
return correlate(volume, kernel[slice_obj], mode)
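# Illustrative helper (not part of the public API and never called at import
# time): convolve() and fftconvolve() compute the same linear convolution, the
# latter via FFT multiplication, so their outputs agree to numerical precision.
def _convolve_fftconvolve_agree():
    a = np.array([1.0, 2.0, 3.0])
    b = np.array([0.0, 1.0, 0.5])
    return np.allclose(convolve(a, b, mode='full'),
                       fftconvolve(a, b, mode='full'))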
def order_filter(a, domain, rank):
"""
Perform an order filter on an N-dimensional array.
Perform an order filter on the array in. The domain argument acts as a
mask centered over each pixel. The non-zero elements of domain are
used to select elements surrounding each input pixel which are placed
in a list. The list is sorted, and the output for that pixel is the
element corresponding to rank in the sorted list.
Parameters
----------
a : ndarray
The N-dimensional input array.
domain : array_like
A mask array with the same number of dimensions as `in`.
Each dimension should have an odd number of elements.
rank : int
A non-negative integer which selects the element from the
sorted list (0 corresponds to the smallest element, 1 is the
next smallest element, etc.).
Returns
-------
out : ndarray
The results of the order filter in an array with the same
shape as `in`.
Examples
--------
>>> from scipy import signal
>>> x = np.arange(25).reshape(5, 5)
>>> domain = np.identity(3)
>>> x
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
>>> signal.order_filter(x, domain, 0)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 2., 0.],
[ 0., 5., 6., 7., 0.],
[ 0., 10., 11., 12., 0.],
[ 0., 0., 0., 0., 0.]])
>>> signal.order_filter(x, domain, 2)
array([[ 6., 7., 8., 9., 4.],
[ 11., 12., 13., 14., 9.],
[ 16., 17., 18., 19., 14.],
[ 21., 22., 23., 24., 19.],
[ 20., 21., 22., 23., 24.]])
"""
domain = asarray(domain)
size = domain.shape
for k in range(len(size)):
if (size[k] % 2) != 1:
raise ValueError("Each dimension of domain argument "
" should have an odd number of elements.")
return sigtools._order_filterND(a, domain, rank)
def medfilt(volume, kernel_size=None):
"""
Perform a median filter on an N-dimensional array.
Apply a median filter to the input array using a local window-size
given by `kernel_size`.
Parameters
----------
volume : array_like
An N-dimensional input array.
kernel_size : array_like, optional
A scalar or an N-length list giving the size of the median filter
window in each dimension. Elements of `kernel_size` should be odd.
If `kernel_size` is a scalar, then this scalar is used as the size in
each dimension. Default size is 3 for each dimension.
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
"""
volume = atleast_1d(volume)
if kernel_size is None:
kernel_size = [3] * len(volume.shape)
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), volume.ndim)
for k in range(len(volume.shape)):
if (kernel_size[k] % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
domain = ones(kernel_size)
numels = product(kernel_size, axis=0)
order = numels // 2
return sigtools._order_filterND(volume, domain, order)
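# Illustrative helper (not part of the public API and never called at import
# time): a 3-point median filter suppresses isolated single-sample spikes
# while leaving the surrounding samples untouched.
def _medfilt_spike_example():
    noisy = np.array([0.0, 0.0, 9.0, 0.0, 0.0, 7.0, 0.0])
    return medfilt(noisy, kernel_size=3)  # both spikes are replaced by 0.0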
def wiener(im, mysize=None, noise=None):
"""
Perform a Wiener filter on an N-dimensional array.
Apply a Wiener filter to the N-dimensional array `im`.
Parameters
----------
im : ndarray
An N-dimensional array.
mysize : int or arraylike, optional
A scalar or an N-length list giving the size of the Wiener filter
window in each dimension. Elements of mysize should be odd.
If mysize is a scalar, then this scalar is used as the size
in each dimension.
noise : float, optional
The noise-power to use. If None, then noise is estimated as the
average of the local variance of the input.
Returns
-------
out : ndarray
Wiener filtered result with the same shape as `im`.
"""
im = asarray(im)
if mysize is None:
mysize = [3] * len(im.shape)
mysize = asarray(mysize)
if mysize.shape == ():
mysize = np.repeat(mysize.item(), im.ndim)
# Estimate the local mean
lMean = correlate(im, ones(mysize), 'same') / product(mysize, axis=0)
# Estimate the local variance
lVar = (correlate(im ** 2, ones(mysize), 'same') / product(mysize, axis=0)
- lMean ** 2)
# Estimate the noise power if needed.
if noise is None:
noise = mean(ravel(lVar), axis=0)
res = (im - lMean)
res *= (1 - noise / lVar)
res += lMean
out = where(lVar < noise, lMean, res)
return out
def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Convolve two 2-dimensional arrays.
Convolve `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1, in2 : array_like
Two-dimensional input arrays to be convolved.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
out : ndarray
A 2-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
"""
in1 = asarray(in1)
in2 = asarray(in2)
if mode == 'valid':
_check_valid_mode_shapes(in1.shape, in2.shape)
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
# FIXME: some cast generates a warning here
out = sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue)
return out
def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Cross-correlate two 2-dimensional arrays.
Cross correlate `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1, in2 : array_like
Two-dimensional input arrays to be convolved.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
correlate2d : ndarray
A 2-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
"""
in1 = asarray(in1)
in2 = asarray(in2)
if mode == 'valid':
_check_valid_mode_shapes(in1.shape, in2.shape)
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
# FIXME: some cast generates a warning here
out = sigtools._convolve2d(in1, in2, 0, val, bval, fillvalue)
return out
def medfilt2d(input, kernel_size=3):
"""
Median filter a 2-dimensional array.
Apply a median filter to the `input` array using a local window-size
given by `kernel_size` (must be odd).
Parameters
----------
input : array_like
A 2-dimensional input array.
kernel_size : array_like, optional
A scalar or a list of length 2, giving the size of the
median filter window in each dimension. Elements of
`kernel_size` should be odd. If `kernel_size` is a scalar,
then this scalar is used as the size in each dimension.
Default is a kernel of size (3, 3).
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
"""
image = asarray(input)
if kernel_size is None:
kernel_size = [3] * 2
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), 2)
for size in kernel_size:
if (size % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
return sigtools._medfilt2d(image, kernel_size)
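# A small sketch of `medfilt2d` removing isolated outliers ("salt" noise);
# the spike positions and values are arbitrary.
def _medfilt2d_usage_sketch():
    image = np.zeros((8, 8))
    image[2, 3] = image[5, 6] = 100.0   # isolated spikes
    return medfilt2d(image, kernel_size=3)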
def lfilter(b, a, x, axis=-1, zi=None):
"""
Filter data along one-dimension with an IIR or FIR filter.
Filter a data sequence, `x`, using a digital filter. This works for many
fundamental data types (including Object type). The filter is a direct
form II transposed implementation of the standard difference equation
(see Notes).
Parameters
----------
b : array_like
The numerator coefficient vector in a 1-D sequence.
a : array_like
The denominator coefficient vector in a 1-D sequence. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
An N-dimensional input array.
axis : int
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the filter delays. It is a vector
(or array of vectors for an N-dimensional input) of length
``max(len(a),len(b))-1``. If `zi` is None or is not given then
initial rest is assumed. See `lfiltic` for more information.
Returns
-------
y : array
The output of the digital filter.
zf : array, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
Notes
-----
The filter function is implemented as a direct form II transposed structure.
This means that the filter implements::
a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[nb]*x[n-nb]
- a[1]*y[n-1] - ... - a[na]*y[n-na]
using the following difference equations::
y[m] = b[0]*x[m] + z[0,m-1]
z[0,m] = b[1]*x[m] + z[1,m-1] - a[1]*y[m]
...
z[n-3,m] = b[n-2]*x[m] + z[n-2,m-1] - a[n-2]*y[m]
z[n-2,m] = b[n-1]*x[m] - a[n-1]*y[m]
where m is the output sample number and n=max(len(a),len(b)) is the
model order.
The rational transfer function describing this filter in the
z-transform domain is::
Y(z) = X(z) * (b[0] + b[1]*z**-1 + ... + b[nb]*z**-nb)
            / (a[0] + a[1]*z**-1 + ... + a[na]*z**-na)
"""
if isscalar(a):
a = [a]
if zi is None:
return sigtools._linear_filter(b, a, x, axis)
else:
return sigtools._linear_filter(b, a, x, axis, zi)
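# A sketch of `lfilter` used as a 3-point moving average (an FIR filter, so
# the denominator is just [1.0]); the input sequence is illustrative.
def _lfilter_usage_sketch():
    b = np.ones(3) / 3.0
    a = [1.0]
    x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
    return lfilter(b, a, x)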
def lfiltic(b, a, y, x=None):
"""
Construct initial conditions for lfilter.
Given a linear filter (b, a) and initial conditions on the output `y`
and the input `x`, return the initial conditions on the state vector zi
which is used by `lfilter` to generate the output given the input.
Parameters
----------
b : array_like
Linear filter term.
a : array_like
Linear filter term.
y : array_like
Initial conditions.
If ``N=len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``.
If `y` is too short, it is padded with zeros.
x : array_like, optional
Initial conditions.
If ``M=len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``.
If `x` is not given, its initial conditions are assumed zero.
If `x` is too short, it is padded with zeros.
Returns
-------
zi : ndarray
The state vector ``zi``.
``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``, where ``K = max(M,N)``.
See Also
--------
lfilter
"""
N = np.size(a) - 1
M = np.size(b) - 1
K = max(M, N)
y = asarray(y)
zi = zeros(K, y.dtype.char)
if x is None:
x = zeros(M, y.dtype.char)
else:
x = asarray(x)
L = np.size(x)
if L < M:
x = r_[x, zeros(M - L)]
L = np.size(y)
if L < N:
y = r_[y, zeros(N - L)]
for m in range(M):
zi[m] = sum(b[m + 1:] * x[:M - m], axis=0)
for m in range(N):
zi[m] -= sum(a[m + 1:] * y[:N - m], axis=0)
return zi
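# A sketch of `lfiltic`: build the initial state for a first-order IIR filter
# so that `lfilter` continues from a previous output value of 1.0; the filter
# coefficients here are illustrative only.
def _lfiltic_usage_sketch():
    b = [0.5]
    a = [1.0, -0.5]
    zi = lfiltic(b, a, y=[1.0])
    return lfilter(b, a, np.zeros(5), zi=zi)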
def deconvolve(signal, divisor):
"""Deconvolves `divisor` out of `signal`.
Parameters
----------
signal : array
Signal input
divisor : array
Divisor input
Returns
-------
q : array
Quotient of the division
r : array
Remainder
Examples
--------
>>> from scipy import signal
>>> sig = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1,])
>>> filter = np.array([1,1,0])
>>> res = signal.convolve(sig, filter)
>>> signal.deconvolve(res, filter)
(array([ 0., 0., 0., 0., 0., 1., 1., 1., 1.]),
array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]))
"""
num = atleast_1d(signal)
den = atleast_1d(divisor)
N = len(num)
D = len(den)
if D > N:
quot = []
rem = num
else:
input = ones(N - D + 1, float)
input[1:] = 0
quot = lfilter(num, den, input)
rem = num - convolve(den, quot, mode='full')
return quot, rem
def hilbert(x, N=None, axis=-1):
"""
Compute the analytic signal, using the Hilbert transform.
The transformation is done along the last axis by default.
Parameters
----------
x : array_like
Signal data. Must be real.
N : int, optional
Number of Fourier components. Default: ``x.shape[axis]``
axis : int, optional
Axis along which to do the transformation. Default: -1.
Returns
-------
xa : ndarray
Analytic signal of `x`, of each 1-D array along `axis`
Notes
-----
The analytic signal ``x_a(t)`` of signal ``x(t)`` is:
.. math:: x_a = F^{-1}(F(x) 2U) = x + i y
where `F` is the Fourier transform, `U` the unit step function,
and `y` the Hilbert transform of `x`. [1]_
In other words, the negative half of the frequency spectrum is zeroed
out, turning the real-valued signal into a complex signal. The Hilbert
transformed signal can be obtained from ``np.imag(hilbert(x))``, and the
original signal from ``np.real(hilbert(x))``.
References
----------
.. [1] Wikipedia, "Analytic signal".
http://en.wikipedia.org/wiki/Analytic_signal
"""
x = asarray(x)
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape[axis]
if N <= 0:
raise ValueError("N must be positive.")
Xf = fft(x, N, axis=axis)
h = zeros(N)
if N % 2 == 0:
h[0] = h[N // 2] = 1
h[1:N // 2] = 2
else:
h[0] = 1
h[1:(N + 1) // 2] = 2
if len(x.shape) > 1:
ind = [newaxis] * x.ndim
ind[axis] = slice(None)
h = h[ind]
x = ifft(Xf * h, axis=axis)
return x
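# A sketch of the common envelope-detection use of `hilbert`: the magnitude of
# the analytic signal of an amplitude-modulated tone approximates its envelope.
# The frequencies chosen here are arbitrary.
def _hilbert_usage_sketch():
    t = np.linspace(0, 1.0, 1000)
    envelope = 1.0 + 0.5 * np.sin(2 * np.pi * 3 * t)
    x = envelope * np.sin(2 * np.pi * 50 * t)
    return np.abs(hilbert(x))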
def hilbert2(x, N=None):
"""
Compute the '2-D' analytic signal of `x`
Parameters
----------
x : array_like
2-D signal data.
N : int or tuple of two ints, optional
Number of Fourier components. Default is ``x.shape``
Returns
-------
xa : ndarray
Analytic signal of `x` taken along axes (0,1).
References
----------
.. [1] Wikipedia, "Analytic signal",
http://en.wikipedia.org/wiki/Analytic_signal
"""
x = atleast_2d(x)
if len(x.shape) > 2:
raise ValueError("x must be rank 2.")
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape
elif isinstance(N, int):
if N <= 0:
raise ValueError("N must be positive.")
N = (N, N)
elif len(N) != 2 or np.any(np.asarray(N) <= 0):
raise ValueError("When given as a tuple, N must hold exactly "
"two positive integers")
Xf = fft2(x, N, axes=(0, 1))
h1 = zeros(N[0], 'd')
h2 = zeros(N[1], 'd')
for p in range(2):
h = eval("h%d" % (p + 1))
N1 = N[p]
if N1 % 2 == 0:
h[0] = h[N1 // 2] = 1
h[1:N1 // 2] = 2
else:
h[0] = 1
h[1:(N1 + 1) // 2] = 2
exec("h%d = h" % (p + 1), globals(), locals())
h = h1[:, newaxis] * h2[newaxis, :]
k = len(x.shape)
while k > 2:
h = h[:, newaxis]
k -= 1
x = ifft2(Xf * h, axes=(0, 1))
return x
def cmplx_sort(p):
"sort roots based on magnitude."
p = asarray(p)
if iscomplexobj(p):
indx = argsort(abs(p))
else:
indx = argsort(p)
return take(p, indx, 0), indx
def unique_roots(p, tol=1e-3, rtype='min'):
"""
Determine unique roots and their multiplicities from a list of roots.
Parameters
----------
p : array_like
The list of roots.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
pout : ndarray
The list of unique roots, sorted from low to high.
mult : ndarray
The multiplicity of each root.
Notes
-----
This utility function is not specific to roots but can be used for any
sequence of values for which uniqueness and multiplicity has to be
determined. For a more general routine, see `numpy.unique`.
Examples
--------
>>> from scipy import signal
>>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3]
>>> uniq, mult = signal.unique_roots(vals, tol=2e-2, rtype='avg')
Check which roots have multiplicity larger than 1:
>>> uniq[mult > 1]
array([ 1.305])
"""
if rtype in ['max', 'maximum']:
comproot = np.max
elif rtype in ['min', 'minimum']:
comproot = np.min
elif rtype in ['avg', 'mean']:
comproot = np.mean
else:
raise ValueError("`rtype` must be one of "
"{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}")
p = asarray(p) * 1.0
tol = abs(tol)
p, indx = cmplx_sort(p)
pout = []
mult = []
indx = -1
curp = p[0] + 5 * tol
sameroots = []
for k in range(len(p)):
tr = p[k]
if abs(tr - curp) < tol:
sameroots.append(tr)
curp = comproot(sameroots)
pout[indx] = curp
mult[indx] += 1
else:
pout.append(tr)
curp = tr
sameroots = [tr]
indx += 1
mult.append(1)
return array(pout), array(mult)
def invres(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(s) and a(s) from partial fraction expansion: r,p,k
If ``M = len(b)`` and ``N = len(a)``::
H(s) = b(s) / a(s)
     = (b[0]*s**(M-1) + b[1]*s**(M-2) + ... + b[M-1]) /
       (a[0]*s**(N-1) + a[1]*s**(N-2) + ... + a[N-1])
     = r[0]/(s-p[0]) + r[1]/(s-p[1]) + ... + r[-1]/(s-p[-1]) + k(s)
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like::
r[i]/(s-p[i]) + r[i+1]/(s-p[i])**2 + ... + r[i+n-1]/(s-p[i])**n
Parameters
----------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
'max': pick the maximum of those roots.
'min': pick the minimum of those roots.
'avg': take the average of those roots.
See Also
--------
residue, unique_roots
"""
extra = k
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
for k in range(len(pout)):
temp = []
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
b = polyadd(b, r[indx] * poly(t2))
indx += 1
b = real_if_close(b)
while allclose(b[0], 0, rtol=1e-14) and (b.shape[-1] > 1):
b = b[1:]
return b, a
def residue(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(s) / a(s).
If ``M = len(b)`` and ``N = len(a)``, then the partial-fraction
expansion H(s) is defined as::
H(s) = b(s) / a(s)
     = (b[0]*s**(M-1) + b[1]*s**(M-2) + ... + b[M-1]) /
       (a[0]*s**(N-1) + a[1]*s**(N-2) + ... + a[N-1])
     = r[0]/(s-p[0]) + r[1]/(s-p[1]) + ... + r[-1]/(s-p[-1]) + k(s)
If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::
r[i]/(s-p[i]) + r[i+1]/(s-p[i])**2 + ... + r[i+n-1]/(s-p[i])**n
Returns
-------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
See Also
--------
invres, numpy.poly, unique_roots
"""
b, a = map(asarray, (b, a))
rscale = a[0]
k, b = polydiv(b, a)
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula
indx = 0
for n in range(len(pout)):
bn = b.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))
# bn(s) / an(s) is (s-po[n])**Nn * b(s) / a(s) where Nn is
# multiplicity of pole at po[n]
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = polyval(bn, pout[n]) / polyval(an, pout[n]) \
/ factorial(sig - m)
indx += sig
return r / rscale, p, k
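# A sketch of `residue` on a simple transfer function with two distinct poles,
# H(s) = 1 / (s**2 + 3*s + 2) = 1/(s+1) - 1/(s+2); the poles are -1 and -2
# with residues 1 and -1 (the returned ordering follows `cmplx_sort`).
def _residue_usage_sketch():
    b = [1.0]
    a = [1.0, 3.0, 2.0]
    return residue(b, a)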
def residuez(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(z) / a(z).
If ``M = len(b)`` and ``N = len(a)``::
H(z) = b(z) / a(z)
     = (b[0] + b[1]*z**(-1) + ... + b[M-1]*z**(-M+1)) /
       (a[0] + a[1]*z**(-1) + ... + a[N-1]*z**(-N+1))
     = r[0]/(1-p[0]*z**(-1)) + ... + r[-1]/(1-p[-1]*z**(-1))
       + k[0] + k[1]*z**(-1) + ...
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like::
r[i]/(1-p[i]*z**(-1)) + r[i+1]/(1-p[i]*z**(-1))**2 + ... + r[i+n-1]/(1-p[i]*z**(-1))**n
See also
--------
invresz, unique_roots
"""
b, a = map(asarray, (b, a))
gain = a[0]
brev, arev = b[::-1], a[::-1]
krev, brev = polydiv(brev, arev)
if krev == []:
k = []
else:
k = krev[::-1]
b = brev[::-1]
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula (for discrete-time)
# the polynomial is in z**(-1) and the multiplication is by terms
# like this (1-p[i] z**(-1))**mult[i]. After differentiation,
# we must divide by (-p[i])**(m-k) as well as (m-k)!
indx = 0
for n in range(len(pout)):
bn = brev.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))[::-1]
# bn(z) / an(z) is (1-po[n] z**(-1))**Nn * b(z) / a(z) where Nn is
# multiplicity of pole at po[n] and b(z) and a(z) are polynomials.
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = (polyval(bn, 1.0 / pout[n]) /
polyval(an, 1.0 / pout[n]) /
factorial(sig - m) / (-pout[n]) ** (sig - m))
indx += sig
return r / gain, p, k
def invresz(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(z) and a(z) from partial fraction expansion: r,p,k
If ``M = len(b)`` and ``N = len(a)``::
H(z) = b(z) / a(z)
     = (b[0] + b[1]*z**(-1) + ... + b[M-1]*z**(-M+1)) /
       (a[0] + a[1]*z**(-1) + ... + a[N-1]*z**(-N+1))
     = r[0]/(1-p[0]*z**(-1)) + ... + r[-1]/(1-p[-1]*z**(-1))
       + k[0] + k[1]*z**(-1) + ...
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like::
r[i]/(1-p[i]*z**(-1)) + r[i+1]/(1-p[i]*z**(-1))**2 + ... + r[i+n-1]/(1-p[i]*z**(-1))**n
See Also
--------
residuez, unique_roots
"""
extra = asarray(k)
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
brev = asarray(b)[::-1]
for k in range(len(pout)):
temp = []
# Construct polynomial which does not include any of this root
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
brev = polyadd(brev, (r[indx] * poly(t2))[::-1])
indx += 1
b = real_if_close(brev[::-1])
return b, a
def resample(x, num, t=None, axis=0, window=None):
"""
Resample `x` to `num` samples using Fourier method along the given axis.
The resampled signal starts at the same value as `x` but is sampled
with a spacing of ``len(x) / num * (spacing of x)``. Because a
Fourier method is used, the signal is assumed to be periodic.
Parameters
----------
x : array_like
The data to be resampled.
num : int
The number of samples in the resampled signal.
t : array_like, optional
If `t` is given, it is assumed to be the sample positions
associated with the signal data in `x`.
axis : int, optional
The axis of `x` that is resampled. Default is 0.
window : array_like, callable, string, float, or tuple, optional
Specifies the window applied to the signal in the Fourier
domain. See below for details.
Returns
-------
resampled_x or (resampled_x, resampled_t)
Either the resampled array, or, if `t` was given, a tuple
containing the resampled array and the corresponding resampled
positions.
Notes
-----
The argument `window` controls a Fourier-domain window that tapers
the Fourier spectrum before zero-padding to alleviate ringing in
the resampled values for sampled signals you didn't intend to be
interpreted as band-limited.
If `window` is a function, then it is called with a vector of inputs
indicating the frequency bins (i.e. fftfreq(x.shape[axis]) ).
If `window` is an array of the same length as `x.shape[axis]` it is
assumed to be the window to be applied directly in the Fourier
domain (with dc and low-frequency first).
For any other type of `window`, the function `scipy.signal.get_window`
is called to generate the window.
The first sample of the returned vector is the same as the first
sample of the input vector. The spacing between samples is changed
from dx to:
dx * len(x) / num
If `t` is not None, then it represents the old sample positions,
and the new sample positions will be returned as well as the new
samples.
"""
x = asarray(x)
X = fft(x, axis=axis)
Nx = x.shape[axis]
if window is not None:
if callable(window):
W = window(fftfreq(Nx))
elif isinstance(window, ndarray) and window.shape == (Nx,):
W = window
else:
W = ifftshift(get_window(window, Nx))
newshape = ones(len(x.shape))
newshape[axis] = len(W)
W.shape = newshape
X = X * W
sl = [slice(None)] * len(x.shape)
newshape = list(x.shape)
newshape[axis] = num
N = int(np.minimum(num, Nx))
Y = zeros(newshape, 'D')
sl[axis] = slice(0, (N + 1) // 2)
Y[sl] = X[sl]
sl[axis] = slice(-(N - 1) // 2, None)
Y[sl] = X[sl]
y = ifft(Y, axis=axis) * (float(num) / float(Nx))
if x.dtype.char not in ['F', 'D']:
y = y.real
if t is None:
return y
else:
new_t = arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0]
return y, new_t
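# A sketch of Fourier resampling with `resample`: a periodic signal sampled at
# 100 points is re-sampled to 25 points; values chosen purely for illustration.
def _resample_usage_sketch():
    t = np.linspace(0, 1.0, 100, endpoint=False)
    x = np.sin(2 * np.pi * 4 * t)
    return resample(x, 25)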
def detrend(data, axis=-1, type='linear', bp=0):
"""
Remove linear trend along axis from data.
Parameters
----------
data : array_like
The input data.
axis : int, optional
The axis along which to detrend the data. By default this is the
last axis (-1).
type : {'linear', 'constant'}, optional
The type of detrending. If ``type == 'linear'`` (default),
the result of a linear least-squares fit to `data` is subtracted
from `data`.
If ``type == 'constant'``, only the mean of `data` is subtracted.
bp : array_like of ints, optional
A sequence of break points. If given, an individual linear fit is
performed for each part of `data` between two break points.
Break points are specified as indices into `data`.
Returns
-------
ret : ndarray
The detrended input data.
Examples
--------
>>> from scipy import signal
>>> randgen = np.random.RandomState(9)
>>> npoints = 1e3
>>> noise = randgen.randn(npoints)
>>> x = 3 + 2*np.linspace(0, 1, npoints) + noise
>>> (signal.detrend(x) - noise).max() < 0.01
True
"""
if type not in ['linear', 'l', 'constant', 'c']:
raise ValueError("Trend type must be 'linear' or 'constant'.")
data = asarray(data)
dtype = data.dtype.char
if dtype not in 'dfDF':
dtype = 'd'
if type in ['constant', 'c']:
ret = data - expand_dims(mean(data, axis), axis)
return ret
else:
dshape = data.shape
N = dshape[axis]
bp = sort(unique(r_[0, bp, N]))
if np.any(bp > N):
raise ValueError("Breakpoints must be less than length "
"of data along given axis.")
Nreg = len(bp) - 1
# Restructure data so that axis is along first dimension and
# all other dimensions are collapsed into second dimension
rnk = len(dshape)
if axis < 0:
axis = axis + rnk
newdims = r_[axis, 0:axis, axis + 1:rnk]
newdata = reshape(transpose(data, tuple(newdims)),
(N, prod(dshape, axis=0) // N))
newdata = newdata.copy() # make sure we have a copy
if newdata.dtype.char not in 'dfDF':
newdata = newdata.astype(dtype)
# Find leastsq fit and remove it for each piece
for m in range(Nreg):
Npts = bp[m + 1] - bp[m]
A = ones((Npts, 2), dtype)
A[:, 0] = cast[dtype](arange(1, Npts + 1) * 1.0 / Npts)
sl = slice(bp[m], bp[m + 1])
coef, resids, rank, s = linalg.lstsq(A, newdata[sl])
newdata[sl] = newdata[sl] - dot(A, coef)
# Put data back in original shape.
tdshape = take(dshape, newdims, 0)
ret = reshape(newdata, tuple(tdshape))
vals = list(range(1, rnk))
olddims = vals[:axis] + [0] + vals[axis:]
ret = transpose(ret, tuple(olddims))
return ret
def lfilter_zi(b, a):
"""
Compute an initial state `zi` for the lfilter function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
b, a : array_like (1-D)
The IIR filter coefficients. See `lfilter` for more
information.
Returns
-------
zi : 1-D ndarray
The initial state for the filter.
Notes
-----
A linear filter with order m has a state space representation (A, B, C, D),
for which the output y of the filter can be expressed as::
z(n+1) = A*z(n) + B*x(n)
y(n) = C*z(n) + D*x(n)
where z(n) is a vector of length m, A has shape (m, m), B has shape
(m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is
a scalar). lfilter_zi solves::
zi = A*zi + B
In other words, it finds the initial condition for which the response
to an input of all ones is a constant.
Given the filter coefficients `a` and `b`, the state space matrices
for the transposed direct form II implementation of the linear filter,
which is the implementation used by scipy.signal.lfilter, are::
A = scipy.linalg.companion(a).T
B = b[1:] - a[1:]*b[0]
assuming `a[0]` is 1.0; if `a[0]` is not 1, `a` and `b` are first
divided by a[0].
Examples
--------
The following code creates a lowpass Butterworth filter. Then it
applies that filter to an array whose values are all 1.0; the
output is also all 1.0, as expected for a lowpass filter. If the
`zi` argument of `lfilter` had not been given, the output would have
shown the transient signal.
>>> from numpy import array, ones
>>> from scipy.signal import lfilter, lfilter_zi, butter
>>> b, a = butter(5, 0.25)
>>> zi = lfilter_zi(b, a)
>>> y, zo = lfilter(b, a, ones(10), zi=zi)
>>> y
array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
Another example:
>>> x = array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0])
>>> y, zf = lfilter(b, a, x, zi=zi*x[0])
>>> y
array([ 0.5 , 0.5 , 0.5 , 0.49836039, 0.48610528,
0.44399389, 0.35505241])
Note that the `zi` argument to `lfilter` was computed using
`lfilter_zi` and scaled by `x[0]`. Then the output `y` has no
transient until the input drops from 0.5 to 0.0.
"""
# FIXME: Can this function be replaced with an appropriate
# use of lfiltic? For example, when b,a = butter(N,Wn),
# lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)).
#
# We could use scipy.signal.normalize, but it uses warnings in
# cases where a ValueError is more appropriate, and it allows
# b to be 2D.
b = np.atleast_1d(b)
if b.ndim != 1:
raise ValueError("Numerator b must be rank 1.")
a = np.atleast_1d(a)
if a.ndim != 1:
raise ValueError("Denominator a must be rank 1.")
while len(a) > 1 and a[0] == 0.0:
a = a[1:]
if a.size < 1:
raise ValueError("There must be at least one nonzero `a` coefficient.")
if a[0] != 1.0:
# Normalize the coefficients so a[0] == 1.
a = a / a[0]
b = b / a[0]
n = max(len(a), len(b))
# Pad a or b with zeros so they are the same length.
if len(a) < n:
a = np.r_[a, np.zeros(n - len(a))]
elif len(b) < n:
b = np.r_[b, np.zeros(n - len(b))]
IminusA = np.eye(n - 1) - linalg.companion(a).T
B = b[1:] - a[1:] * b[0]
# Solve zi = A*zi + B
zi = np.linalg.solve(IminusA, B)
# For future reference: we could also use the following
# explicit formulas to solve the linear system:
#
# zi = np.zeros(n - 1)
# zi[0] = B.sum() / IminusA[:,0].sum()
# asum = 1.0
# csum = 0.0
# for k in range(1,n-1):
# asum += a[k]
# csum += b[k] - a[k]*b[0]
# zi[k] = asum*zi[0] - csum
return zi
def filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None):
"""
A forward-backward filter.
This function applies a linear filter twice, once forward
and once backwards. The combined filter has linear phase.
Before applying the filter, the function can pad the data along the
given axis in one of three ways: odd, even or constant. The odd
and even extensions have the corresponding symmetry about the end point
of the data. The constant extension extends the data with the values
at end points. On both the forward and backwards passes, the
initial condition of the filter is found by using `lfilter_zi` and
scaling it by the end point of the extended data.
Parameters
----------
b : (N,) array_like
The numerator coefficient vector of the filter.
a : (N,) array_like
The denominator coefficient vector of the filter. If a[0]
is not 1, then both a and b are normalized by a[0].
x : array_like
The array of data to be filtered.
axis : int, optional
The axis of `x` to which the filter is applied.
Default is -1.
padtype : str or None, optional
Must be 'odd', 'even', 'constant', or None. This determines the
type of extension to use for the padded signal to which the filter
is applied. If `padtype` is None, no padding is used. The default
is 'odd'.
padlen : int or None, optional
The number of elements by which to extend `x` at both ends of
`axis` before applying the filter. This value must be less than
`x.shape[axis]-1`. `padlen=0` implies no padding.
The default value is 3*max(len(a),len(b)).
Returns
-------
y : ndarray
The filtered output, an array of type numpy.float64 with the same
shape as `x`.
See Also
--------
lfilter_zi, lfilter
Examples
--------
First we create a one second signal that is the sum of two pure sine
waves, with frequencies 5 Hz and 250 Hz, sampled at 2000 Hz.
>>> t = np.linspace(0, 1.0, 2001)
>>> xlow = np.sin(2 * np.pi * 5 * t)
>>> xhigh = np.sin(2 * np.pi * 250 * t)
>>> x = xlow + xhigh
Now create a lowpass Butterworth filter with a cutoff of 0.125 times
the Nyquist rate, or 125 Hz, and apply it to x with filtfilt. The
result should be approximately xlow, with no phase shift.
>>> from scipy import signal
>>> b, a = signal.butter(8, 0.125)
>>> y = signal.filtfilt(b, a, x, padlen=150)
>>> np.abs(y - xlow).max()
9.1086182074789912e-06
We get a fairly clean result for this artificial example because
the odd extension is exact, and with the moderately long padding,
the filter's transients have dissipated by the time the actual data
is reached. In general, transient effects at the edges are
unavoidable.
"""
if padtype not in ['even', 'odd', 'constant', None]:
raise ValueError(("Unknown value '%s' given to padtype. padtype must "
"be 'even', 'odd', 'constant', or None.") %
padtype)
b = np.asarray(b)
a = np.asarray(a)
x = np.asarray(x)
ntaps = max(len(a), len(b))
if padtype is None:
padlen = 0
if padlen is None:
# Original padding; preserved for backwards compatibility.
edge = ntaps * 3
else:
edge = padlen
# x's 'axis' dimension must be bigger than edge.
if x.shape[axis] <= edge:
raise ValueError("The length of the input vector x must be at least "
"padlen, which is %d." % edge)
if padtype is not None and edge > 0:
# Make an extension of length `edge` at each
# end of the input array.
if padtype == 'even':
ext = even_ext(x, edge, axis=axis)
elif padtype == 'odd':
ext = odd_ext(x, edge, axis=axis)
else:
ext = const_ext(x, edge, axis=axis)
else:
ext = x
# Get the steady state of the filter's step response.
zi = lfilter_zi(b, a)
# Reshape zi and create x0 so that zi*x0 broadcasts
# to the correct value for the 'zi' keyword argument
# to lfilter.
zi_shape = [1] * x.ndim
zi_shape[axis] = zi.size
zi = np.reshape(zi, zi_shape)
x0 = axis_slice(ext, stop=1, axis=axis)
# Forward filter.
(y, zf) = lfilter(b, a, ext, axis=axis, zi=zi * x0)
# Backward filter.
# Create y0 so zi*y0 broadcasts appropriately.
y0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0)
# Reverse y.
y = axis_reverse(y, axis=axis)
if edge > 0:
# Slice the actual signal from the extended signal.
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
from scipy.signal.filter_design import cheby1
from scipy.signal.fir_filter_design import firwin
def decimate(x, q, n=None, ftype='iir', axis=-1):
"""
Downsample the signal by using a filter.
By default, an order 8 Chebyshev type I filter is used. A 30 point FIR
filter with hamming window is used if `ftype` is 'fir'.
Parameters
----------
x : ndarray
The signal to be downsampled, as an N-dimensional array.
q : int
The downsampling factor.
n : int, optional
The order of the filter (1 less than the length for 'fir').
ftype : str {'iir', 'fir'}, optional
The type of the lowpass filter.
axis : int, optional
The axis along which to decimate.
Returns
-------
y : ndarray
The down-sampled signal.
See also
--------
resample
"""
if not isinstance(q, int):
raise TypeError("q must be an integer")
if n is None:
if ftype == 'fir':
n = 30
else:
n = 8
if ftype == 'fir':
b = firwin(n + 1, 1. / q, window='hamming')
a = 1.
else:
b, a = cheby1(n, 0.05, 0.8 / q)
y = lfilter(b, a, x, axis=axis)
sl = [slice(None)] * y.ndim
sl[axis] = slice(None, None, q)
return y[sl]
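# A sketch of `decimate` reducing the sample rate by a factor of 4 with the
# default order-8 Chebyshev type I lowpass filter; the test tone is arbitrary.
def _decimate_usage_sketch():
    t = np.linspace(0, 1.0, 400, endpoint=False)
    x = np.sin(2 * np.pi * 5 * t)
    return decimate(x, 4)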
| gpl-3.0 | -7,537,377,946,632,358,000 | 29.86697 | 80 | 0.52478 | false |
roderickmackenzie/gpvdm | gpvdm_gui/gui/token_lib.py | 1 | 59716 | #
# General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for 1st, 2nd and 3rd generation solar cells.
# Copyright (C) 2012-2017 Roderick C. I. MacKenzie r.c.i.mackenzie at googlemail.com
#
# https://www.gpvdm.com
# Room B86 Coates, University Park, Nottingham, NG7 2RD, UK
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#
## @package token_lib
# A library of all tokens used in the model.
#
import re
import i18n
_ = i18n.language.gettext
class my_data():
token=""
units=""
info=""
def __init__(self,file_name,a,b,info,e,f,widget,defaults=None,units_widget="QLabel",min=None,max=None,hidden=False,hide_on_true_token="none",hide_on_false_token=[],data_type=None,hide_on_token_eq=None):
self.file_name=file_name
self.token=a
self.units=b
self.info=info
self.defaults=defaults
self.number_type=e
self.number_mul=f
self.widget=widget
self.units_widget=units_widget
self.hidden=hidden
self.hide_on_true_token=hide_on_true_token
self.hide_on_false_token=hide_on_false_token
self.hide_on_token_eq=hide_on_token_eq
self.data_type=data_type
self.min=min
self.max=max
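# A minimal sketch of how a single token entry is described with my_data();
# the file name and token below are hypothetical and not part of the real list:
#
#   example = my_data("example.inp", "#example_token", "au",
#                     _("Example token"), "e", 1.0, "QLineEdit")
#
# Each entry records the .inp file it belongs to, the token name, its units,
# a translatable description, and the widget used to edit it.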
lib=[]
def build_token_lib():
global lib
#light.inp
lib.append(my_data("","#light_wavelength_auto_mesh",_("True/False"),_("Automatically mesh wavelength space"),"e",1.0,"gtkswitch"))
lib.append(my_data("light.inp","#lpoints","au",_("Mesh points (lambda)"),"e",1.0,"QLineEdit",hide_on_true_token="#light_wavelength_auto_mesh"))
lib.append(my_data("light.inp","#lstart","m",_("Lambda start"),"e",1.0,"QLineEdit",hide_on_true_token="#light_wavelength_auto_mesh"))
lib.append(my_data("light.inp","#lstop","m",_("Lambda stop"),"e",1.0,"QLineEdit",hide_on_true_token="#light_wavelength_auto_mesh"))
lib.append(my_data("light.inp","#electron_eff","0-1",_("Electron generation efficiency"),"e",1.0,"QLineEdit"))
lib.append(my_data("light.inp","#hole_eff","0-1",_("Hole generation efficiency"),"e",1.0,"QLineEdit"))
lib.append(my_data("light.inp","#sun",_("filename"),_("Sun's spectra"),"e",1.0,"QLineEdit"))
lib.append(my_data("light.inp","#light_file_generation","file_name",_("File containing generation rate"),"e",1.0,"gpvdm_select"))
lib.append(my_data("light.inp","#Dphotoneff","0-1",_("Photon efficiency"),"e",1.0,"QLineEdit",min=0.001,max=1.2))
lib.append(my_data("light.inp","#light_file_qe_spectra","au",_("QE spectra file"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#light_profile","au",_("Profile of light beam"),"s",1.0,"QComboBoxShape"))
#filter.inp
lib.append(my_data("filter.inp","#filter_material","...",_("Optical filter material"),"e",1.0,"gpvdm_select_material" ,units_widget="QPushButton"))
lib.append(my_data("filter.inp","#filter_db","0-1000dB",_("dB"),"e",1.0,"QLineEdit"))
#laser?.inp
lib.append(my_data("","#laserwavelength","m",_("Laser wavelength"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#laser_pulse_width","s",_("Length of pulse"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#spotx","m",_("Spot size x"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#spoty","m",_("Spot size y"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#pulseJ","J",_("Energy in pulse"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#laser_photon_efficiency","0-1",_("Efficiency of photons"),"e",1.0,"QLineEdit"))
#dos?.inp
lib.append(my_data("","#dostype","Edit",_("DoS distribution"),"s",1.0,"generic_switch",units_widget="QPushButton",defaults=[[_("Complex"),"complex"],[_("Exponential"),"exponential"]]))
lib.append(my_data("","#dos_free_carrier_stats","type",_("Free carrier statistics"),"e",1.0,"QComboBoxLang",defaults=[[("mb_equation"),_("Maxwell Boltzmann - analytic")],["mb_look_up_table_analytic",_("Maxwell Boltzmann - numerical+analytic")],["mb_look_up_table",_("Maxwell Boltzmann - full numerical")],["fd_look_up_table",_("Fermi-Dirac - numerical")]]))
lib.append(my_data("","#Nc","m^{-3}",_("Effective density of free electron states (@300K)"),"e",1.0,"QLineEdit",min=1e10,max=1e27 ))
lib.append(my_data("","#Nv","m^{-3}",_("Effective density of free hole states (@300K)"),"e",1.0,"QLineEdit",min=1e10,max=1e27 ))
lib.append(my_data("","#symmetric_mobility_e","m^{2}V^{-1}s^{-1}",_("Electron mobility"),"e",1.0,"mobility_widget",min=1.0,max=1e-1,defaults=[True]))
lib.append(my_data("","#symmetric_mobility_h","m^{2}V^{-1}s^{-1}",_("Hole mobility"),"e",1.0,"mobility_widget",min=1.0,max=1e-14, defaults=[False] ))
lib.append(my_data("","#mue_z","m^{2}V^{-1}s^{-1}",_("Electron mobility z"),"e",1.0,"mobility_widget",min=1.0,max=1e-1,hidden=True))
lib.append(my_data("","#mue_x","m^{2}V^{-1}s^{-1}",_("Electron mobility x"),"e",1.0,"mobility_widget",min=1.0,max=1e-1,hidden=True))
lib.append(my_data("","#mue_y","m^{2}V^{-1}s^{-1}",_("Electron mobility y"),"e",1.0,"mobility_widget",min=1.0,max=1e-1,hidden=True))
lib.append(my_data("","#muh_z","m^{2}V^{-1}s^{-1}",_("Hole mobility z"),"e",1.0,"mobility_widget",min=1.0,max=1e-1,hidden=True))
lib.append(my_data("","#muh_x","m^{2}V^{-1}s^{-1}",_("Hole mobility x"),"e",1.0,"mobility_widget",min=1.0,max=1e-1,hidden=True))
lib.append(my_data("","#muh_y","m^{2}V^{-1}s^{-1}",_("Hole mobility y"),"e",1.0,"mobility_widget",min=1.0,max=1e-1,hidden=True))
lib.append(my_data("","#ion_density","m^{-3}",_("Perovskite ion density"),"e",1.0,"QLineEdit",min=1e10,max=1e27,hidden=True))
#lib.append(my_data("","#ion_mobility","m^{2}V^{-1}s^{-1}",_("Perovskite ion mobility"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#doping_start","m^{-3}",_("Doping density (x=0)"),"e",1.0,"QLineEdit",min=1.0,max=1e27,hidden=True))
lib.append(my_data("","#doping_stop","m^{-3}",_("Doping density (x=max)"),"e",1.0,"QLineEdit",min=1.0,max=1e27,hidden=True))
lib.append(my_data("","#Ntrape","m^{-3} eV^{-1}",_("Electron trap density"),"e",1.0,"QLineEdit",min=1e10,max=1e27 ))
lib.append(my_data("","#Ntraph","m^{-3} eV^{-1}",_("Hole trap density"),"e",1.0,"QLineEdit",min=1e10,max=1e27 ))
lib.append(my_data("","#Etrape","eV",_("Electron tail slope"),"e",1.0,"QLineEdit",min=20e-3,max=150e-3 ))
lib.append(my_data("","#Etraph","eV",_("Hole tail slope"),"e",1.0,"QLineEdit",min=20e-3,max=150e-3 ))
lib.append(my_data("","#epsilonr","au",_("Relative permittivity"),"e",1.0,"QLineEdit",min=1.0,max=10.0 ))
lib.append(my_data("","#srhsigman_e","m^{-2}",_("Free electron to Trapped electron"),"e",1.0,"QLineEdit",min=1e-27,max=1e-15 ))
lib.append(my_data("","#srhsigmap_e","m^{-2}",_("Trapped electron to Free hole"),"e",1.0,"QLineEdit",min=1e-27,max=1e-15 ))
lib.append(my_data("","#srhsigman_h","m^{-2}",_("Trapped hole to Free electron"),"e",1.0,"QLineEdit",min=1e-27,max=1e-15 ))
lib.append(my_data("","#srhsigmap_h","m^{-2}",_("Free hole to Trapped hole"),"e",1.0,"QLineEdit",min=1e-27,max=1e-15))
lib.append(my_data("","#free_to_free_recombination","m^{3}s^{-1}",_("n_{free} to p_{free} Recombination rate constant"),"e",1.0,"QLineEdit",min=1e-27,max=1e-15 ))
#electrical?.inp
lib.append(my_data("","#electrical_component","type",_("Component"),"e",1.0,"QComboBoxLang",defaults=[[("resistance"),_("Resistance")],["diode",_("Diode")],["link",_("Link")]]))
lib.append(my_data("","#electrical_shunt","Ohm m",_("Shunt resistivity"),"e",1.0,"QLineEdit",min=0.1,max=1e20, hide_on_token_eq=[["#electrical_component","resistance"],["#electrical_component","link"]] ))
#lib.append(my_data("","#electrical_series","Ohm m",_("Series resistivity"),"e",1.0,"QLineEdit",min=0.1,max=1e20, hide_on_token_eq=[["#electrical_component","link"]] ))
lib.append(my_data("","#electrical_symmetrical_resistance","Ohm m",_("Series resistivity"),"e",1.0,"mobility_widget", defaults=[False] ))
lib.append(my_data("","#electrical_series_z","Ohm m",_("Series resistivity z"),"e",1.0,"mobility_widget",min=1.0,max=1e-1,hidden=True))
lib.append(my_data("","#electrical_series_x","Ohm m",_("Series resistivity x"),"e",1.0,"mobility_widget",min=1.0,max=1e-1,hidden=True))
lib.append(my_data("","#electrical_series_y","Ohm m",_("Series resistivity y"),"e",1.0,"mobility_widget",min=1.0,max=1e-1,hidden=True))
lib.append(my_data("","#electrical_n","au",_("Layer ideality factor"),"e",1.0,"QLineEdit",min=0.0,max=1.0, hide_on_token_eq=[["#electrical_component","resistance"],["#electrical_component","link"]] ))
lib.append(my_data("","#electrical_J0","A m^{-2}",_("Reverse bias current"),"e",1.0,"QLineEdit",min=0.0,max=1e6, hide_on_token_eq=[["#electrical_component","resistance"],["#electrical_component","link"]] ))
lib.append(my_data("","#electrical_enable_generation",_("True/False"),_("Enable optical charge\ncarrier generation"),"e",1.0,"gtkswitch" ))
#shape?.inp
lib.append(my_data("","#shape_type","au",_("Shape type"),"s",1.0,"QComboBoxShape"))
lib.append(my_data("","#shape_dx","m",_("dx of the object"),"e",1.0,"QLineEdit",data_type="float"))
lib.append(my_data("","#shape_dy","m",_("dy of the object"),"e",1.0,"QLineEdit",data_type="float"))
lib.append(my_data("","#shape_dz","m",_("dz of the object"),"e",1.0,"QLineEdit",data_type="float"))
lib.append(my_data("","#shape_padding_dx","m",_("dx padding"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#shape_padding_dy","m",_("dy padding"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#shape_padding_dz","m",_("dz padding"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#shape_nx","au",_("Number of objects x"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#shape_ny","au",_("Number of objects y"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#shape_nz","au",_("Number of objects z"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#shape_x0","m",_("x offset"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#shape_y0","m",_("y offset"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#shape_z0","m",_("z offset"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#shape_remove_layer",_("True/False"),_("Remove layer"),"e",1.0,"gtkswitch"))
lib.append(my_data("","#shape_dos",_("Edit"),_("Electrical parameters"),"e",1.0,"shape_dos_switch",units_widget="QPushButton"))
lib.append(my_data("","#shape_electrical",_("Edit"),_("Electrical parameters"),"e",1.0,"shape_dos_switch",units_widget="QPushButton"))
lib.append(my_data("","#shape_optical_material",_("Edit"),_("Optical material"),"e",1.0,"gpvdm_select_material" ,units_widget="QPushButton"))
lib.append(my_data("","#shape_flip_y",_("True/False"),_("Flip y"),"e",1.0,"gtkswitch"))
#interface?.inp
lib.append(my_data("","#interface_model","type",_("Interface model"),"e",1.0,"QComboBoxLang",defaults=[[("none"),_("None")],["recombination",_("Recombination")]]))
lib.append(my_data("","#interface_eh_tau","m^{3}s^{-1}",_("Recombination constant"),"e",1.0,"QLineEdit",hide_on_token_eq=[["#interface_model","none"]]))
#stark.inp
lib.append(my_data("","#stark_startime","s",_("startime"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_ea_factor","au",_("ea_factor"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_Np","1/0",_("Np"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_den","1/0",_("den"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_externalv","V",_("externalv"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_dt_neg_time","s",_("dt_neg_time"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_dt","s",_("dt"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_dt_mull","au",_("dt_mull"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_stop","s",_("stop"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_stark","1/0",_("stark"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_lasereff","1/0",_("lasereff"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_probe_wavelength","nm",_("wavelength"),"e",1e9,"QLineEdit"))
lib.append(my_data("","#stark_sim_contacts","1/0",_("sim_contacts"),"e",1.0,"QLineEdit"))
#ref
lib.append(my_data("","#ref_url","au",_("Website"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#ref_author","au",_("Author"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#ref_jounral","au",_("Journal"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#ref_title","au",_("Title"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#ref_volume","au",_("Volume"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#ref_pages","au",_("Pages"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#ref_year","au",_("Year"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#ref_DOI","au",_("DOI"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#ref_booktitle","au",_("Book title"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#ref_publisher","au",_("Publisher"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#ref_isbn","au",_("ISBN"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#ref_unformatted","au",_("Scraped text"),"e",1.0,"QLineEdit"))
#pulse
lib.append(my_data("","#Rshort_pulse","Ohms",_("R_{short}"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#pulse_bias","V",_("V_{bias}"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#pulse_light_efficiency","au",_("Efficiency of light"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#pulse_subtract_dc",_("True/False"),_("subtract DC"),"e",1.0,"gtkswitch"))
#mat.inp
lib.append(my_data("","#material_type","type",_("Material type"),"e",1.0,"QComboBoxLang",defaults=[[("organic"),_("Organic")],["oxide",_("Oxide")],["inorganic",_("Inorganic")],["metal",_("Metal")],["other",_("Other")]]))
lib.append(my_data("","#mat_alpha","0-1.0",_("Alpha channel"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#red_green_blue","rgb",_("Color"),"e",1.0,"QColorPicker"))
lib.append(my_data("","#mat_alpha","0-1",_("Transparency"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#status","type",_("Publish material data?"),"e",1.0,"QComboBoxLang",defaults=[[("public"),_("Public")],["private",_("Private")]]))
lib.append(my_data("","#changelog","au",_("Change log"),"e",1.0,"QChangeLog"))
#jv.inp
lib.append(my_data("","#jv_step_mul","0-2.0",_("JV voltage step multiplier"),"e",1.0,"QLineEdit",hide_on_true_token="#jv_single_point"))
lib.append(my_data("","#jv_max_j","A m^{-2}",_("Maximum current density"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#jv_light_efficiency","au",_("JV curve photon generation efficiency"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#jv_pmax_n","m^{-3}",_("Average carrier density at P_{max}"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#jv_pmax_tau","m^{-1}",_("Recombination time constant"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#Vstart","V",_("Start voltage"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#Vstop","V",_("Stop voltage"),"e",1.0,"QLineEdit",hide_on_true_token="#jv_single_point"))
lib.append(my_data("","#Vstep","V",_("Voltage step"),"e",1.0,"QLineEdit",hide_on_true_token="#jv_single_point"))
lib.append(my_data("","#jv_Rcontact","V",_("Contact resistance"),"e",1.0,"QParasitic"))
lib.append(my_data("","#jv_Rshunt","V",_("Shunt resistance"),"e",1.0,"QParasitic"))
lib.append(my_data("","#jv_single_point",_("True/False"),_("Single point"),"e",1.0,"gtkswitch"))
lib.append(my_data("","#jv_use_external_voltage_as_stop",_("True/False"),_("Use external\nvoltage as stop"),"e",1.0,"gtkswitch"))
#sim_info.dat (jv plugin)
lib.append(my_data("","#voc","V",_("V_{oc}"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#pce","Percent",_("Power conversion efficiency"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#ff","a.u.",_("Fill factor"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#Pmax","W m^{-2}",_("Max power"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#v_pmax","V",_("Voltage at max power"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#j_pmax","Am^{-2}",_("Current density at max power"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#voc_nt","m^{-3}",_("Trapped electrons at Voc"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#voc_pt","m^{-3}",_("Trapped holes at Voc"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#voc_nf","m^{-3}",_("Free electrons at Voc"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#voc_pf","m^{-3}",_("Free holes at Voc"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#voc_np_tot","m^{-3}",_("Total carriers (n+p)/2 at Voc"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#voc_tau","s",_("Recombination time constant at Voc"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#voc_R","m^{-3}s^{-1}",_("Recombination rate at Voc"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#voc_J","A m^{-2}",_("Current density at Voc"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#jsc","A m^{-2}",_("J_{sc}"),"e",1.0,"QLineEdit"))
#sim_info.dat (optics plugin)
lib.append(my_data("","#light_photons_in_active_layer","m^{-2}",_("Photons absorbed in active layer"),"e",1.0,"QLineEdit"))
#object_stats.dat (optics plugin)
lib.append(my_data("object_stats.dat","#Rp[0-9]","m",_("Peak height Rp"),"e",1.0,"QLineEdit"))
lib.append(my_data("object_stats.dat","#Rq[0-9]","m",_("RMS height Rq"),"e",1.0,"QLineEdit"))
lib.append(my_data("object_stats.dat","#Ra[0-9]","m",_("Average height Ra"),"e",1.0,"QLineEdit"))
#cv?.inp
lib.append(my_data("","#cv_start_voltage","Volts",_("Start voltage"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#cv_stop_voltage","Volts",_("Stop voltage"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#cv_dv_step","Volts",_("dV step"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#cv_fx","Hz",_("Frequency"),"e",1.0,"QLineEdit"))
#sim_info.dat (equlibrium)
lib.append(my_data("","#left_holes","m^{-3}",_("Left hole density"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#left_electrons","m^{-3}",_("Left electron density"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#right_holes","m^{-3}",_("Right hole density"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#right_electrons","m^{-3}",_("Right electron density"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#Vbi","m^{-3}",_("Built in potential"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#electron_affinity_left","eV",_("Electron affinity left"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#electron_affinity_right","eV",_("Electron affinity right"),"e",1.0,"QLineEdit"))
#tab
lib.append(my_data("","#english_name","name",_("English name"),"e",1.0,"QLineEdit"))
#server.inp
lib.append(my_data("","#gpvdm_core_max_threads","au",_("Number of gpvdm_core threads"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#max_gpvdm_instances","au",_("Maximum number of gpvdm_core instances"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#server_stall_time","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#server_exit_on_dos_error","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#server_max_run_time","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#server_auto_cpus","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#server_min_cpus","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#server_steel","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#port","au","Cluster port","e",1.0,"QLineEdit"))
lib.append(my_data("","#path_to_src","au",_("Path to source code"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#path_to_libs","au",_("Path to compiled libs for cluster"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#make_command","au",_("Make command"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#exe_name","au",_("exe name"),"e",1.0,"QLineEdit"))
#cluster.inp
lib.append(my_data("","#cluster_user_name","au","User name","e",1.0,"QLineEdit"))
lib.append(my_data("","#cluster_ip","au","Cluster IP","e",1.0,"QLineEdit"))
lib.append(my_data("","#cluster_cluster_dir","au",_("Remote cluster directory"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#nodes","au",_("Remote node list"),"e",1.0,"QLineEdit"))
#triangle mesh editor
lib.append(my_data("","#mesh_gen_nx","au",_("x-triangles"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#mesh_gen_ny","au",_("y-triangles"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#mesh_gen_opp","au",_("Method"),"e",1.0,"QComboBoxLang",defaults=[["node_reduce",_("Node reduce")],["square_mesh_gen",_("No reduce")]]))
lib.append(my_data("","#shape_import_blur","width pixels",_("Gaussian blur"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#shape_import_y_norm_percent","percent",_("Percent of histogram to ignore"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#gauss_sigma","pixels",_("Sigma of gaussian"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#gauss_offset_x","pixels",_("Gaussian offset x"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#gauss_offset_y","pixels",_("Gaussian offset y"),"e",1.0,"QLineEdit"))
#honeycomb
lib.append(my_data("","#honeycomb_dx","pixels",_("dx of Honeycomb"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#honeycomb_dy","pixels",_("dy of Honeycomb"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#honeycomb_line_width","pixels",_("Line width"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#honeycomb_x_shift","pixels",_("x shift"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#honeycomb_y_shift","pixels",_("y shift"),"e",1.0,"QLineEdit"))
#boundary
lib.append(my_data("","#image_boundary_x0","pixels",_("Boundary x0"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#image_boundary_x1","pixels",_("Boundary x1"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#image_boundary_y0","pixels",_("Boundary y0"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#image_boundary_y1","pixels",_("Boundary y1"),"e",1.0,"QLineEdit"))
#math.inp
lib.append(my_data("math.inp","#maxelectricalitt_first","au",_("Max electrical iterations (first step)"),"e",1.0,"QLineEdit"))
lib.append(my_data("math.inp","#electricalclamp_first","au",_("Electrical clamp (first step)"),"e",1.0,"QLineEdit"))
lib.append(my_data("math.inp","#math_electrical_error_first","au",_("Desired electrical solver error (first step)"),"e",1.0,"QLineEdit"))
lib.append(my_data("math.inp","#math_enable_pos_solver",_("True/False"),_("Enable poisson solver"),"e",1.0,"gtkswitch"))
lib.append(my_data("math.inp","#maxelectricalitt","au",_("Max electrical iterations"),"e",1.0,"QLineEdit"))
lib.append(my_data("math.inp","#electricalclamp","au",_("Electrical clamp"),"e",1.0,"QLineEdit"))
lib.append(my_data("math.inp","#posclamp","au",_("Poisson clamping"),"e",1.0,"QLineEdit"))
lib.append(my_data("math.inp","#electricalerror","au",_("Minimum electrical error"),"e",1.0,"QLineEdit"))
lib.append(my_data("math.inp","#pos_max_ittr","au",_("Poisson solver max iterations"),"e",1.0,"QLineEdit"))
lib.append(my_data("math.inp","#newton_clever_exit",_("True/False"),"Newton solver clever exit","e",1.0,"gtkswitch"))
lib.append(my_data("math.inp","#newton_min_itt","au",_("Newton minimum iterations"),"e",1.0,"QLineEdit"))
lib.append(my_data("math.inp","#complex_solver_name",_("dll name"),_("Complex matrix solver to use"),"e",1.0,"QLineEdit"))
lib.append(my_data("math.inp","#solver_name",_("dll name"),_("Matrix solver"),"e",1.0,"QComboBoxNewtonSelect",defaults=["umfpack","external_solver","superlu","nr_d","nr_ld"]))
lib.append(my_data("math.inp","#newton_name",_("dll name"),_("Newton solver to use"),"e",1.0,"QComboBoxNewtonSelect",defaults=["none","newton_2d","newton_simple","newton_norm","newton"]))
lib.append(my_data("math.inp","#math_t0","au",_("Slotboom T0"),"e",1.0,"QLineEdit"))
lib.append(my_data("math.inp","#math_d0","au",_("Slotboom D0"),"e",1.0,"QLineEdit"))
lib.append(my_data("math.inp","#math_n0","au",_("Slotboom n0"),"e",1.0,"QLineEdit"))
lib.append(my_data("math.inp","#math_newton_cache","au",_("Use newton cache (experimental)"),"e",1.0,"gtkswitch"))
#fit.inp
lib.append(my_data("fit.inp","#fit_error_mul","au",_("Fit error multiplier"),"e",1.0,"QLineEdit"))
lib.append(my_data("fit.inp","#fit_randomize",_("True/False"),_("Randomize fit"),"e",1.0,"gtkswitch"))
lib.append(my_data("fit.inp","#fit_random_reset_ittr","au",_("Number of iterations between random reset"),"e",1.0,"QLineEdit"))
lib.append(my_data("fit.inp","#fit_stall_steps","au",_("Stall steps"),"e",1.0,"QLineEdit"))
lib.append(my_data("fit.inp","#fit_disable_reset_at","au",_("Disable reset at level"),"e",1.0,"QLineEdit"))
lib.append(my_data("fit.inp","#fit_converge_error","au",_("Fit define convergence"),"e",1.0,"QLineEdit"))
lib.append(my_data("fit.inp","#fit_enable_simple_reset","au",_("Enable simplex reset"),"e",1.0,"gtkswitch"))
lib.append(my_data("fit.inp","#fit_method","au",_("Fitting method"),"e",1.0,"QComboBox",defaults=["simplex","newton"]))
lib.append(my_data("fit.inp","#fit_simplexmul","au",_("Start simplex step multiplication"),"e",1.0,"QLineEdit"))
lib.append(my_data("fit.inp","#fit_simplex_reset","au",_("Simplex reset steps"),"e",1.0,"QLineEdit"))
#fit?.inp
lib.append(my_data("","#fit_subtract_lowest_point",_("True/False"),_("Subtract lowest point"),"e",1.0,"gtkswitch"))
lib.append(my_data("","#fit_set_first_point_to_zero",_("True/False"),_("Set first point to zero"),"e",1.0,"gtkswitch"))
#eqe.inp
lib.append(my_data("eqe.inp","#eqe_voltage","au",_("EQE Voltage"),"e",1.0,"QLineEdit"))
#thermal.inp
lib.append(my_data("thermal.inp","#thermal_model_type","au",_("Thermal model type"),"s",1.0,"QComboBoxLang",defaults=[["thermal_hydrodynamic",_("Hydrodynamic")],["thermal_lattice",_("Lattice heat")]], hide_on_false_token=["#thermal"]))
lib.append(my_data("thermal.inp","#Ty0","Kelvin",_("Device temperature at y_{min}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Ty0_boundry", "neumann"]]))
lib.append(my_data("thermal.inp","#Ty0_boundry","au",_("Boundary condition for y_{min}"),"s",1.0,"QComboBoxLang",defaults=[["isothermal",_("Isothermal")],["neumann",_("Neumann (==0)")],["heatsink",_("Heatsink")]], hide_on_false_token=["#thermal"]))
lib.append(my_data("thermal.inp","#heatsink_y0","W m^{-2}K^{-1}",_("Conductivity of heat sink y_{min}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Ty0_boundry", "neumann"],["#Ty0_boundry", "isothermal"]]))
lib.append(my_data("thermal.inp","#heatsink_length_y0","m",_("Heat sink length y_{min}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Ty0_boundry", "neumann"],["#Ty0_boundry", "isothermal"]]))
lib.append(my_data("thermal.inp","#Ty1","Kelvin",_("Device temperature at y_{max}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Ty1_boundry", "neumann"]]))
lib.append(my_data("thermal.inp","#Ty1_boundry","au",_("Boundary condition for y_{max}"),"s",1.0,"QComboBoxLang",defaults=[["isothermal",_("Isothermal")],["neumann",_("Neumann (==0)")],["heatsink",_("Heatsink")]], hide_on_false_token=["#thermal"]))
lib.append(my_data("thermal.inp","#heatsink_y1","W m^{-2}K^{-1}",_("Conductivity of heat sink y_{max}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Ty1_boundry", "neumann"],["#Ty1_boundry", "isothermal"]]))
lib.append(my_data("thermal.inp","#heatsink_length_y1","m",_("Heat sink length y_{max}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Ty1_boundry", "neumann"],["#Ty1_boundry", "isothermal"]]))
lib.append(my_data("thermal.inp","#Tx0","Kelvin",_("Device temperature at x_{min}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Tx0_boundry", "neumann"]]))
lib.append(my_data("thermal.inp","#Tx0_boundry","au",_("Boundary condition for x_{min}"),"s",1.0,"QComboBoxLang",defaults=[["isothermal",_("Isothermal")],["neumann",_("Neumann (==0)")],["heatsink",_("Heatsink")]], hide_on_false_token=["#thermal"]))
lib.append(my_data("thermal.inp","#heatsink_x0","W m^{-2}K^{-1}",_("Conductivity of heat sink x_{min}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Tx0_boundry", "neumann"],["#Tx0_boundry", "isothermal"]]))
lib.append(my_data("thermal.inp","#heatsink_length_x0","m",_("Heat sink length x_{min}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Tx0_boundry", "neumann"],["#Tx0_boundry", "isothermal"]]))
lib.append(my_data("thermal.inp","#Tx1","Kelvin",_("Device temperature at x_{max}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Tx1_boundry", "neumann"]]))
lib.append(my_data("thermal.inp","#Tx1_boundry","au",_("Boundary condition for x_{max}"),"s",1.0,"QComboBoxLang",defaults=[["isothermal",_("Isothermal")],["neumann",_("Neumann (==0)")],["heatsink",_("Heatsink")]], hide_on_false_token=["#thermal"]))
lib.append(my_data("thermal.inp","#heatsink_x1","W m^{-2}K^{-1}",_("Conductivity of heat sink x_{max}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Tx1_boundry", "neumann"],["#Tx1_boundry", "isothermal"]]))
lib.append(my_data("thermal.inp","#heatsink_length_x1","m",_("Heat sink length x_{max}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Tx1_boundry", "neumann"],["#Tx1_boundry", "isothermal"]]))
lib.append(my_data("thermal.inp","#Tz0","Kelvin",_("Device temperature at z_{min}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Tz0_boundry", "neumann"]]))
lib.append(my_data("thermal.inp","#Tz0_boundry","au",_("Boundary condition for z_{min}"),"s",1.0,"QComboBoxLang",defaults=[["isothermal",_("Isothermal")],["neumann",_("Neumann (==0)")],["heatsink",_("Heatsink")]], hide_on_false_token=["#thermal"]))
lib.append(my_data("thermal.inp","#heatsink_z0","W m^{-2}K^{-1}",_("Conductivity of heat sink z_{min}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Tz0_boundry", "neumann"],["#Tz0_boundry", "isothermal"]]))
lib.append(my_data("thermal.inp","#heatsink_length_z0","m",_("Heat sink length z_{min}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Tz0_boundry", "neumann"],["#Tz0_boundry", "isothermal"]]))
lib.append(my_data("thermal.inp","#Tz1","Kelvin",_("Device temperature at z_{max}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Tz1_boundry", "neumann"]]))
lib.append(my_data("thermal.inp","#Tz1_boundry","au",_("Boundary condition for z_{max}"),"s",1.0,"QComboBoxLang",defaults=[["isothermal",_("Isothermal")],["neumann",_("Neumann (==0)")],["heatsink",_("Heatsink")]], hide_on_false_token=["#thermal"]))
lib.append(my_data("thermal.inp","#heatsink_z1","W m^{-2}K^{-1}",_("Conductivity of heat sink z_{max}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Tz1_boundry", "neumann"],["#Tz1_boundry", "isothermal"]]))
lib.append(my_data("thermal.inp","#heatsink_length_z1","m",_("Heat sink length z_{max}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Tz1_boundry", "neumann"],["#Tz1_boundry", "isothermal"]]))
lib.append(my_data("thermal.inp","#thermal_l",_("True/False"),_("Lattice heat model"),"e",1.0,"gtkswitch",hide_on_token_eq=[["#thermal_model_type", "thermal_lattice"]], hide_on_false_token=["#thermal"]))
lib.append(my_data("thermal.inp","#thermal_e",_("True/False"),_("Electron heat model"),"e",1.0,"gtkswitch",hide_on_token_eq=[["#thermal_model_type", "thermal_lattice"]], hide_on_false_token=["#thermal"]))
lib.append(my_data("thermal.inp","#thermal_h",_("True/False"),_("Hole heat model"),"e",1.0,"gtkswitch",hide_on_token_eq=[["#thermal_model_type", "thermal_lattice"]], hide_on_false_token=["#thermal"]))
lib.append(my_data("thermal.inp","#thermal_tau_e","s",_("Electron relaxation time"),"e",1.0,"QLineEdit",hide_on_token_eq=[["#thermal_model_type", "thermal_lattice"]], hide_on_false_token=["#thermal"]))
lib.append(my_data("thermal.inp","#thermal_tau_h","s",_("Hole relaxation time"),"e",1.0,"QLineEdit",hide_on_token_eq=[["#thermal_model_type", "thermal_lattice"]], hide_on_false_token=["#thermal"]))
lib.append(my_data("thermal.inp","#thermal_kl","W m^{-1} C^{-1}",_("Thermal conductivity"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"]))
lib.append(my_data("thermal.inp","#Tliso",_("True/False"),_("Isothermal boundary on left"),"e",1.0,"gtkswitch", hide_on_false_token=["#thermal"]))
lib.append(my_data("thermal.inp","#Triso",_("True/False"),_("Isothermal boundary on right"),"e",1.0,"gtkswitch", hide_on_false_token=["#thermal"]))
#dump.inp
lib.append(my_data("dump.inp","#newton_dump",_("True/False"),_("Dump from newton solver"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#plot",_("True/False"),_("Plot bands etc.. "),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_band_structure","","","e",1.0,"QLineEdit"))
lib.append(my_data("dump.inp","#dump_slices_by_time",_("True/False"),_("dump slices by time"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_slices",_("True/False"),_("Dump slices"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_dynamic",_("True/False"),_("Dump dynamic"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_zip_files",_("True/False"),_("Dump zip files"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_write_out_band_structure",_("True/False"),_("Write out band structure"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_optics",_("True/False"),_("Dump optical information"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_optics_verbose",_("True/False"),_("Dump optics verbose"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_print_newtonerror",_("True/False"),_("Print newton error"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_print_converge",_("True/False"),_("Print solver convergence"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_write_converge",_("True/False"),_("Write newton solver convergence to disk"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_print_pos_error",_("True/False"),_("Print poisson solver convergence"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_norm_time_to_one",_("True/False"),_("Normalize output x-time to one"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_built_in_voltage",_("True/False"),_("Dump the built in voltage."),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_optical_probe_spectrum",_("True/False"),_("Dump optical probe spectrum"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_optics_summary",_("True/False"),_("Dump optical summary"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_ray_trace_map",_("True/False"),_("Dump raytrace plots"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dumpitdos","","","e",1.0,"QLineEdit"))
lib.append(my_data("dump.inp","#dump_workbook",_("True/False"),_("Dump an excel workbook for each simulation run congaing the results."),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_file_access_log",_("True/False"),_("Write file access log to disk."),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_use_cache",_("True/False"),_("Use cache for file writes"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_write_headers",_("True/False"),_("Write headers to output files"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_first_guess",_("True/False"),_("Write first guess to equations"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_log_level","au",_("Log verbocity"),"s",1.0,"QComboBoxLang",defaults=[[("none"),_("None")],["screen",_("Screen")],["disk",_("Disk")],["screen_and_disk",_("Screen and disk")]]))
lib.append(my_data("dump.inp","#dump_log_level","au",_("Log verbocity"),"s",1.0,"QComboBoxLang",defaults=[[("none"),_("None")],["screen",_("Screen")],["disk",_("Disk")],["screen_and_disk",_("Screen and disk")]]))
lib.append(my_data("dump.inp","#dump_dynamic_pl_energy","au",_("PL dump Energy"),"s",1.0,"QLineEdit"))
lib.append(my_data("dump.inp","#dump_remove_dos_cache",_("True/False"),_("Clean up DoS cache files"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_verbose_electrical_solver_results",_("True/False"),_("Dump verbose electrical solver results"),"e",1.0,"gtkswitch"))
#pl_ss?.inp
lib.append(my_data("","#pl_mode","au",_("Device state"),"s",1.0,"QComboBoxLang",defaults=[[("voc"),_("Voc")],["Jsc",_("Jsc")]]))
#ray
lib.append(my_data("ray.inp","#ray_wavelength_points","au",_("Wavelength points"),"e",1.0,"QLineEdit"))
lib.append(my_data("ray.inp","#ray_auto_run","au",_("Run the ray tracer"),"s",1.0,"QComboBoxLang",defaults=[[("ray_run_never"),_("Never")],["ray_run_once",_("Once per simulation")],["ray_run_step",_("Each simulation step")]]))
lib.append(my_data("ray.inp","#ray_theta_steps","au",_("Theta steps"),"e",1.0,"QLineEdit"))
lib.append(my_data("ray.inp","#ray_theta_start","Degrees",_("Theta start"),"e",1.0,"QLineEdit"))
lib.append(my_data("ray.inp","#ray_theta_stop","Degrees",_("Theta stop"),"e",1.0,"QLineEdit"))
lib.append(my_data("ray.inp","#ray_phi_steps","au",_("Phi steps"),"e",1.0,"QLineEdit"))
lib.append(my_data("ray.inp","#ray_phi_start","Degrees",_("Phi start"),"e",1.0,"QLineEdit"))
lib.append(my_data("ray.inp","#ray_phi_stop","Degrees",_("Phi stop"),"e",1.0,"QLineEdit"))
lib.append(my_data("ray.inp","#ray_escape_bins","au",_("Escape bins"),"e",1.0,"QLineEdit"))
lib.append(my_data("ray.inp","#ray_auto_wavelength_range",_("True/False"),_("Automatic wavelength range"),"e",1.0,"gtkswitch"))
lib.append(my_data("ray.inp","#ray_lambda_start","nm",_("Start wavelength"),"e",1.0,"QLineEdit",hide_on_true_token="#ray_auto_wavelength_range"))
lib.append(my_data("ray.inp","#ray_lambda_stop","nm",_("Stop wavelength"),"e",1.0,"QLineEdit",hide_on_true_token="#ray_auto_wavelength_range"))
lib.append(my_data("ray.inp","#ray_emission_source","au",_("Emit from"),"s",1.0,"QComboBoxLang",defaults=[[("ray_emission_electrical_mesh"),_("Each electrical mesh point")],["ray_emission_single_point",_("Center of each layer")]]))
#viewpoint.inp
lib.append(my_data("view_point.inp","#viewpoint_enabled",_("True/False"),_("Enable viewpoint"),"e",1.0,"gtkswitch"))
lib.append(my_data("view_point.inp","#viewpoint_size","au",_("View point size"),"e",1.0,"QLineEdit"))
lib.append(my_data("view_point.inp","#viewpoint_dz","au",_("View point dz"),"e",1.0,"QLineEdit"))
lib.append(my_data("view_point.inp","#viewpoint_nx","au",_("Mesh points x"),"e",1.0,"QLineEdit"))
lib.append(my_data("view_point.inp","#viewpoint_nz","au",_("Mesh points z"),"e",1.0,"QLineEdit"))
#led.inp
lib.append(my_data("","#led_extract_eff","0.0-1.0",_("LED extraction efficiency"),"e",1.0,"QLineEdit"))
#device.inp
#lib.append(my_data("","#invert_applied_bias","au",_("Invert applied bias"),"e",1.0,"gtkswitch"))
#lib.append(my_data("","#lcharge","m^{-3}",_("Charge on left contact"),"e",1.0,"QLineEdit"))
#lib.append(my_data("","#rcharge","m^{-3}",_("Charge on right contact"),"e",1.0,"QLineEdit"))
#parasitic.inp
lib.append(my_data("parasitic.inp","#Rshunt","Ohms m^{2}",_("Shunt resistance"),"e",1.0,"QLineEdit",min=1e-3,max=1e6))
lib.append(my_data("parasitic.inp","#Rcontact","Ohms",_("Series resistance"),"e",1.0,"QLineEdit",min=1.0,max=200))
lib.append(my_data("parasitic.inp","#otherlayers","m",_("Other layers"),"e",1.0,"QLineEdit"))
lib.append(my_data("parasitic.inp","#test_param","m",_("debug (ignore)"),"e",1.0,"QLineEdit",hidden=True))
#mesh?.inp
lib.append(my_data("","#remesh_enable","au",_("Automatic remesh"),"e",1.0,"gtkswitch"))
#lib.append(my_data("mesh_y.inp","#mesh_layer_points0","s","Mesh points y0","e",1.0,"QLineEdit"))
#lib.append(my_data("mesh_y.inp","#mesh_layer_points1","s","Mesh points y1","e",1.0,"QLineEdit"))
#pl?.inp
lib.append(my_data("","#pl_enabled",_("True/False"),_("Turn on luminescence"),"e",1.0,"gtkswitch"))
lib.append(my_data("","#pl_fe_fh","0.0-1.0",_("n_{free} to p_{free} photon generation efficiency"),"e",1.0,"QLineEdit", hide_on_true_token="#pl_use_experimental_emission_spectra"))
lib.append(my_data("","#pl_fe_te","0.0-1.0",_("n_{free} to n_{trap} photon generation efficiency"),"e",1.0,"QLineEdit", hide_on_true_token="#pl_use_experimental_emission_spectra"))
lib.append(my_data("","#pl_te_fh","0.0-1.0",_("n_{trap} to p_{free} photon generation efficiency"),"e",1.0,"QLineEdit", hide_on_true_token="#pl_use_experimental_emission_spectra"))
lib.append(my_data("","#pl_th_fe","0.0-1.0",_("p_{trap} to n_{free} photon generation efficiency"),"e",1.0,"QLineEdit", hide_on_true_token="#pl_use_experimental_emission_spectra"))
lib.append(my_data("","#pl_fh_th","0.0-1.0",_("p_{free} to p_{trap} photon generation efficiency"),"e",1.0,"QLineEdit", hide_on_true_token="#pl_use_experimental_emission_spectra"))
lib.append(my_data("","#pl_input_spectrum",_("Edit"),_("Experimental emission spectra"),"e",1.0,"gpvdm_select_emission" ,units_widget="QPushButton", hide_on_false_token=["#pl_use_experimental_emission_spectra"]))
lib.append(my_data("","#pl_experimental_emission_efficiency","0.0-1.0",_("Experimental emission efficiency"),"e",1.0,"QLineEdit", hide_on_false_token="#pl_use_experimental_emission_spectra"))
lib.append(my_data("","#pl_emission_enabled",_("True/False"),_("Emission enabled from this layer"),"e",1.0,"gtkswitch"))
#pl_experimental_emission_efficiency
lib.append(my_data("","#pl_use_experimental_emission_spectra",_("True/False"),_("Use experimental emission spectra"),"e",1.0,"gtkswitch"))
#fxdomain?.inp
lib.append(my_data("","#fxdomain_Rload","Ohms",_("Load resistor"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#fxdomain_points","au",_("fx domain mesh points"),"e",1.0,"QLineEdit",hide_on_token_eq=[["#fxdomain_large_signal","small_signal"]]))
lib.append(my_data("","#fxdomain_n","au",_("Cycles to simulate"),"e",1.0,"QLineEdit",hide_on_token_eq=[["#fxdomain_large_signal","small_signal"]]))
lib.append(my_data("","#fxdomain_voltage_modulation_max","V",_("Voltage modulation depth"),"e",1.0,"QLineEdit",hide_on_token_eq=[["#fx_modulation_type","optical"],["#fxdomain_large_signal","small_signal"]]))
lib.append(my_data("","#fx_modulation_type","au",_("Excite with"),"e",1.0,"QComboBoxLang",defaults=[[("voltage"),_("Voltage")],[("optical"),_("Light")]]))
lib.append(my_data("","#fxdomain_measure","au",_("Measure"),"e",1.0,"QComboBoxLang",defaults=[[("measure_voltage"),_("Voltage")],[("measure_current"),_("Current")]]))
lib.append(my_data("","#fxdomain_light_modulation_depth","au",_("Light modulation depth"),"e",1.0,"QLineEdit",hide_on_token_eq=[["#fx_modulation_type","voltage"]]))
lib.append(my_data("","#fxdomain_do_fit","au",_("Run fit after simulation"),"e",1.0,"gtkswitch",hide_on_token_eq=[["#fxdomain_large_signal","small_signal"],["#fxdomain_large_signal","fourier"]]))
lib.append(my_data("","#periods_to_fit","au",_("Periods to fit"),"e",1.0,"QLineEdit",hide_on_token_eq=[["#fxdomain_large_signal","small_signal"],["#fxdomain_large_signal","fourier"]]))
lib.append(my_data("","#fxdomain_r","",_("Re(i)"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#fxdomain_i","V",_("Im(i)"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#fxdomain_Jr","Am^{-2}",_("Re(J)"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#fxdomain_Ji","Am^{-2}",_("Im(J)"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#fxdomain_fx","Hz",_("fx"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#fxdomain_delta_i","s",_("di"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#fxdomain_delta_g","s",_("dmodulation"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#fxdomain_delta_phase","rads",_("dphase"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#fxdomain_large_signal","au",_("Simulation type"),"e",1.0,"QComboBoxLang",defaults=[[("large_signal"),_("Large signal")],[("fourier"),_("Fourier")]])) #,[("small_signal"),_("Small signal")]
#is?.inp
lib.append(my_data("","#is_Vexternal","Volts",_("V_{external}"),"e",1.0,"QLineEdit"))
#node_list.inp
lib.append(my_data("","#node_list","au",_("Node list"),"e",1.0,"QChangeLog"))
#crypto.inp
lib.append(my_data("","#iv","au",_("Initialization vector"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#key","au",_("Cryptographic key"),"e",1.0,"QLineEdit"))
#lumo?.inp
lib.append(my_data("","#function_\d+","au","Function","e",1.0,"QLineEdit"))
lib.append(my_data("","#function_enable_\d+","au","Enabled","e",1.0,"QLineEdit"))
lib.append(my_data("","#function_a_\d+","au","a","e",1.0,"QLineEdit"))
lib.append(my_data("","#function_b_\d+","au","b","e",1.0,"QLineEdit"))
lib.append(my_data("","#function_c_\d+","au","c","e",1.0,"QLineEdit"))
lib.append(my_data("","#Psun","Sun",_("Intensity of the sun"),"e",1.0,"QLineEdit",hidden=True))
lib.append(my_data("","#saturation_n0","#saturation_n0",_("#saturation_n0"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#saturation_rate","#saturation_rate",_("#saturation_rate"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#imps_saturate","#imps_saturate",_("#imps_saturate"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#simplephotondensity","m^{-2}s^{-1}",_("Photon density"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#simple_alpha","m^{-1}",_("Absorption of material"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#simmode","au",_("#simmode"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#meshpoints","au",_("Mesh points (x)"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#function","au",_("#function"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#Vexternal","V",_("start voltage"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#Vmax","V",_("Max voltage"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#Eg","eV",_("Eg"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#Xi","eV",_("Xi"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#start_stop_time","s",_("Time of pause"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#stopstart","1/0",_("Pause between iterations"),"e",1.0,"QComboBox",defaults=["1","0"]))
lib.append(my_data("","#invert_current",_("True/False"),_("Invert output"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#use_capacitor","1/0",_("Use capacitor"),"e",1.0,"QComboBox",defaults=["1","0"]))
#
lib.append(my_data("","#Rshort_imps","Ohms",_("R_{short}"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#imps_sun","1=1 Sun",_("Backgroud light bias"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#imps_modulation_max","1=1 Sun",_("Modulation depth"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#imps_modulation_fx","Hz",_("Modulation frequency"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#high_sun_scale","au",_("High light multiplyer"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#imps_r","Amps",_("Re(i)"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#imps_i","Amps",_("Im(i)"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#imps_Jr","Amps $m^{-2}$",_("Re(J)"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#imps_Ji","Amps $m^{-2}$",_("Im(J)"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#imps_fx","Hz",_("Frequency"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#imps_delta_i","s",_("Phase shift"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#imps_delta_g","s",_("Phase shift"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#imps_delta_phase","s",_("Phase shift"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#imps_points","s",_("points"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#imps_n","s",_("Wavelengths to simulate"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#imps_Vexternal","Volts",_("External voltage"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#Cext","C",_("External C"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#Rext","Ohms",_("External R"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#Rscope","Ohms",_("Resistance of scope"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#srh_bands","bands",_("Number of traps"),"s",1.0,"QLineEdit"))
#suns_voc
lib.append(my_data("","#sun_voc_single_point","True/False",_("Single point"),"e",1.0,"gtkswitch"))
lib.append(my_data("","#sun_voc_Psun_start","Suns",_("Start intensity"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#sun_voc_Psun_stop","Suns",_("Stop intensity"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#sun_voc_Psun_mul","au",_("step multiplier"),"e",1.0,"QLineEdit"))
#suns_jsc
lib.append(my_data("suns_voc.inp","#sunstart","Suns",_("Start intensity"), "e",1.0,"QLineEdit"))
lib.append(my_data("suns_voc.inp","#sunstop","Suns",_("Stop intensity"), "e",1.0,"QLineEdit"))
lib.append(my_data("suns_voc.inp","#sundp","au",_("Step"), "e",1.0,"QLineEdit"))
lib.append(my_data("suns_voc.inp","#sundpmul","au",_("step multiplier"), "e",1.0,"QLineEdit"))
lib.append(my_data("","#simplephotondensity","m^{-2}s^{-1}",_("Photon Flux"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#simple_alpha","m^{-1}",_("Absorption"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#xlen","m",_("device width"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#zlen","m",_("device breadth"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#ver","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#dostype","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#me","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#mh","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#gendos","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#notused","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#notused","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#Tstart","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#Tstop","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#Tpoints","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#nstart","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#nstop","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#npoints","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#nstart","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#nstop","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#npoints","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#srhbands","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#srh_start","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#srhvth_e","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#srhvth_h","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#srh_cut","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#lumodelstart","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#lumodelstop","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#homodelstart","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#homodelstop","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#gaus_mull","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#Esteps","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#Rshort","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#Dphoton","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#interfaceleft","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#interfaceright","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#phibleft","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#phibright","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#vl_e","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#vl_h","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#vr_e","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#vr_h","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#light_model","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#NDfilter","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#plottime","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#startstop","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#plotfile","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#Rshort","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#solve_at_Vbi","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#remesh","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#newmeshsize","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#epitaxy","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#alignmesh","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_start_time","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#voc_J_to_Jr","au","Ratio of conduction current to recombination current","e",1.0,"QLineEdit"))
lib.append(my_data("","#voc_i","au",_("Current"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#kl_in_newton","1/0",_("Solve Kirchhoff's current law in Newton solver"),"e",1.0,"QComboBox",defaults=["1","0"]))
lib.append(my_data("","#simplexmul","au","simplex mull","e",1.0,"QLineEdit"))
lib.append(my_data("","#simplex_reset","au","Reset steps","e",1.0,"QLineEdit"))
lib.append(my_data("","#max_nfree_to_ptrap","m^{-3}s^{-1}","nfree_to_ptrap","e",1.0,"QLineEdit"))
lib.append(my_data("","#max_pfree_to_ntrap","m^{-3}s^{-1}","max_pfree_to_ntrap","e",1.0,"QLineEdit"))
lib.append(my_data("","#max_nrelax","m^{-3}s^{-1}","max_nrelax","e",1.0,"QLineEdit"))
lib.append(my_data("","#max_prelax","m^{-3}s^{-1}","max_prelax","e",1.0,"QLineEdit"))
lib.append(my_data("","#max_nfree","m^{-3}","max_nfree","e",1.0,"QLineEdit"))
lib.append(my_data("","#max_pfree","m^{-3}","max_pfree","e",1.0,"QLineEdit"))
lib.append(my_data("","#max_ntrap","m^{-3}","max_ntrap","e",1.0,"QLineEdit"))
lib.append(my_data("","#max_ptrap","m^{-3}","max_ptrap","e",1.0,"QLineEdit"))
lib.append(my_data("","#alpha_max_reduction","m^{-1}","alpha_max_reduction","e",1.0,"QLineEdit"))
lib.append(my_data("","#alpha_max_increase","m^{-1}","alpha_max_increase","e",1.0,"QLineEdit"))
lib.append(my_data("","#srh_n_r1","m^{-3}s^{-1}","srh electron rate 1","e",1.0,"QLineEdit"))
lib.append(my_data("","#srh_n_r2","m^{-3}s^{-1}","srh electron rate 2","e",1.0,"QLineEdit"))
lib.append(my_data("","#srh_n_r3","m^{-3}s^{-1}","srh electron rate 3","e",1.0,"QLineEdit"))
lib.append(my_data("","#srh_n_r4","m^{-3}s^{-1}","srh electron rate 4","e",1.0,"QLineEdit"))
lib.append(my_data("","#srh_p_r1","m^{-3}s^{-1}","srh hole rate 1","e",1.0,"QLineEdit"))
lib.append(my_data("","#srh_p_r2","m^{-3}s^{-1}","srh hole rate 2","e",1.0,"QLineEdit"))
lib.append(my_data("","#srh_p_r3","m^{-3}s^{-1}","srh hole rate 3","e",1.0,"QLineEdit"))
lib.append(my_data("","#srh_p_r4","m^{-3}s^{-1}","srh hole rate 4","e",1.0,"QLineEdit"))
lib.append(my_data("","#band_bend_max","percent","band bend max","e",1.0,"QLineEdit"))
#config.inp
lib.append(my_data("","#gui_config_3d_enabled",_("True/False"),_("Enable 3d effects"),"e",1.0,"gtkswitch"))
lib.append(my_data("","#gui_use_icon_theme",_("True/False"),_("Use icons from OS"),"e",1.0,"gtkswitch"))
lib.append(my_data("","#matlab_interpreter",_("au"),_("Matlab interpreter"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#gl_render_grid",_("True/False"),_("Render grid"),"e",1.0,"gtkswitch"))
lib.append(my_data("","#gl_bg_color",_("rgb"),_("Color used for 3d background"),"e",1.0,"QColorPicker"))
lib.append(my_data("","#gl_render_text",_("au"),_("Render text in 3d"),"e",1.0,"gtkswitch"))
lib.append(my_data("","#gl_device_height",_("au"),_("Device Heigh (display only)"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#gl_dy_layer_offset",_("au"),_("Layer offset (display only)"),"e",1.0,"QLineEdit"))
#fit
lib.append(my_data("","#time_shift","s","time shift","e",1.0,"QLineEdit"))
lib.append(my_data("","#start","s","start","e",1.0,"QLineEdit"))
lib.append(my_data("","#stop","s","stop","e",1.0,"QLineEdit"))
lib.append(my_data("","#log_x",_("True/False"),_("log x"),"e",1.0,"gtkswitch"))
lib.append(my_data("","#log_y",_("True/False"),_("log y"),"e",1.0,"gtkswitch"))
lib.append(my_data("","#sim_data",_("filename"),"Fit file name","e",1.0,"QLineEdit"))
lib.append(my_data("","#fit_invert_simulation_y",_("True/False"),_("Invert simulated data (y)"),"e",1.0,"gtkswitch"))
#epitaxy.inp
lib.append(my_data("epitaxy.inp","#layer_width0","nm","start","e",1e9,"QLineEdit"))
lib.append(my_data("epitaxy.inp","#layer_width1","nm","start","e",1e9,"QLineEdit"))
lib.append(my_data("epitaxy.inp","#layer_width2","nm","start","e",1e9,"QLineEdit"))
#
lib.append(my_data("","#layer0","m",_("Active layer width"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_saturate","au","Stark saturate","e",1.0,"QLineEdit"))
lib.append(my_data("","#n_mul","au","n mul","e",1.0,"QLineEdit"))
lib.append(my_data("","#alpha_mul","m^{-1}","Alpha mul","e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_point0","au","DR/R","e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_point1","au","DR/R","e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_point2","au","DR/R","e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_point3","au","DR/R","e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_point4","au","DR/R","e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_subtracted_value","s","subtracted value","e",1.0,"QLineEdit"))
lib.append(my_data("","#light_energy","eV","Energy","e",1.0,"QLineEdit"))
lib.append(my_data("","#sim_id","au","sim id","e",1.0,"QLineEdit"))
lib.append(my_data("","#Rload","Ohms",_("External load resistor"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#pulse_shift","s","Shift of TPC signal","e",1.0,"QLineEdit"))
lib.append(my_data("","#light_illuminate_from","au",_("Illuminate from"),"e",1.0,"QComboBoxLang",defaults=[[("top"),_("Top")],[("bottom"),_("Bottom")]]))
#time_mesh_config*.inp
lib.append(my_data("","#fs_laser_time","s","Laser start time","e",1.0,"QLineEdit"))
#fdtd.inp
lib.append(my_data("fdtd.inp","#use_gpu","au",_("OpenCL GPU acceleration"),"e",1.0,"gtkswitch"))
lib.append(my_data("fdtd.inp","#fdtd_lambda_start","m",_("Start wavelength"),"e",1.0,"QLineEdit"))
lib.append(my_data("fdtd.inp","#fdtd_lambda_stop","m",_("Stop wavelength"),"e",1.0,"QLineEdit"))
lib.append(my_data("fdtd.inp","#fdtd_lambda_points","m",_("Wavelength steps"),"e",1.0,"QLineEdit"))
#any files
lib.append(my_data("","#dump_verbosity","au",_("Ouput verbosity to disk"),"e",1.0,"QComboBoxLang",defaults=[["0",_("Key results")],[("1"),_("Write everything to disk")],[("2"),_("Write everything to disk every 2nd step")],[("5"),_("Write everything to disk every 5th step")],[("10"),_("Write everything to disk every 10th step")]]))
lib.append(my_data("","#dump_screen_verbosity", "au", _("Ouput verbosity to screen"),"e",1.0,"QComboBoxLang",defaults=[[("dump_verbosity_everything"),_("Show lots")],["dump_verbosity_key_results",_("Show key results")]]))
#circuit diagram
lib.append(my_data("","#resistance","Ohms",_("Resistor"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#capacitance","F",_("Capacitor"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#inductance","H",_("Inductance"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#J0","Apms m^{-2}",_("J0"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#nid","(a.u.)",_("Ideality factor"),"e",1.0,"QLineEdit"))
class tokens:
def __init__(self):
global lib
if len(lib)==0:
build_token_lib()
def find(self,token):
global lib
search_token=token.strip()
if search_token.startswith("#"):
search_token=search_token[1:]
for i in range(0, len(lib)):
if bool(re.match(lib[i].token[1:]+"$",search_token))==True:
if lib[i].units=="" and lib[i].info=="":
return False
else:
return lib[i]
#sys.stdout.write("Add -> lib.append(my_data(\""+token+"\",\"\",\"\",[\"text\"]))\n")
return False
def dump_lib(self):
global lib
for i in range(0, len(lib)):
print(">",lib[i].token,"<>",lib[i].info,"<")
def get_lib(self):
global lib
return lib
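# Illustrative usage of the token library (a sketch added for clarity, not part
# of the original source):
#
#     my_tokens = tokens()            # builds the global lib on first use
#     entry = my_tokens.find("#fit_method")
#     if entry is not False:
#         print(entry.units, entry.info)
#
# find() strips a leading "#", matches the remaining text against each
# registered token as an anchored regular expression and returns the my_data
# record, or False when the token is unknown or carries no units/info text.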
| gpl-2.0 | -7,389,480,505,478,320,000 | 71.82439 | 357 | 0.624824 | false |
znes/renpass_gis | renpass/components/electrical.py | 1 | 4361 | # -*- coding: utf-8 -*-
""" This module is designed to contain classes that act as simplified / reduced
energy specific interfaces (facades) for solph components to simplify its
application and work with the oemof datapackage - reader functionality
SPDX-License-Identifier: GPL-3.0-or-later
"""
import logging
from pyomo.core.base.block import SimpleBlock
from pyomo.environ import Var, Constraint, Set, BuildAction
from oemof.network import Node, Edge, Transformer
from oemof.solph import Flow, Bus
from oemof.solph.plumbing import sequence
from renpass.facades import Facade
class ElectricalBus(Bus):
"""
Parameters
-----------
slack: boolean
True if object is slack bus of network
v_max: numeric
Maximum value of voltage angle at electrical bus
v_min: numeric
        Minimum value of voltage angle at electrical bus
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.slack = kwargs.get('slack', False)
self.v_max = kwargs.get('v_max', 1000)
self.v_min = kwargs.get('v_min', -1000)
class Line(Facade, Flow):
"""
    Parameters
    ----------
    from_bus: ElectricalBus object
        Bus to which the input of the Line object is connected
    to_bus: ElectricalBus object
        Bus to which the output of the Line object is connected
reactance: numeric
Reactance of Line object
capacity: numeric
Capacity of the Line object
capacity_cost: numeric
Cost of capacity for 1 Unit of capacity
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.from_bus = kwargs.get('from_bus')
self.to_bus = kwargs.get('to_bus')
self.reactance = sequence(kwargs.get('reactance', 0.00001))
self.capacity = kwargs.get('capacity')
self.capacity_cost = kwargs.get('capacity_cost')
# oemof related attribute setting of 'Flow-object'
self.input = self.from_bus
self.output = self.to_bus
self.bidirectional = True
self.nominal_value = self.capacity
self.min = sequence(-1)
self.investment = self._investment()
def constraint_group(self):
return ElectricalLineConstraints
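# Minimal wiring sketch (illustrative only; the labels and numbers are made up
# and the surrounding oemof energy-system boilerplate is omitted):
#
#     b_el1 = ElectricalBus(label="region_1", slack=True)
#     b_el2 = ElectricalBus(label="region_2")
#     line = Line(from_bus=b_el1, to_bus=b_el2, reactance=0.01, capacity=100)
#
# The Line facade maps onto an oemof Flow: its input/output are the two buses,
# bidirectional transfer is enabled and min is set to -1 so the flow can take
# either sign up to the installed capacity.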
class ElectricalLineConstraints(SimpleBlock):
"""
"""
CONSTRAINT_GROUP = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _create(self, group=None):
"""
"""
if group is None:
return None
m = self.parent_block()
# create voltage angle variables
self.ELECTRICAL_BUSES = Set(initialize=[n for n in m.es.nodes
if isinstance(n, ElectricalBus)])
def _voltage_angle_bounds(block, b, t):
return b.v_min, b.v_max
self.voltage_angle = Var(self.ELECTRICAL_BUSES, m.TIMESTEPS,
bounds=_voltage_angle_bounds)
if True not in [b.slack for b in self.ELECTRICAL_BUSES]:
# TODO: Make this robust to select the same slack bus for
# the same problems
bus = [b for b in self.ELECTRICAL_BUSES][0]
logging.info(
"No slack bus set,setting bus {0} as slack bus".format(
bus.label))
bus.slack = True
def _voltage_angle_relation(block):
for t in m.TIMESTEPS:
for n in group:
if n.input.slack is True:
self.voltage_angle[n.output, t].value = 0
self.voltage_angle[n.output, t].fix()
try:
lhs = m.flow[n.input, n.output, t]
rhs = 1 / n.reactance[t] * (
self.voltage_angle[n.input, t] -
self.voltage_angle[n.output, t])
except:
raise ValueError("Error in constraint creation",
"of node {}".format(n.label))
block.electrical_flow.add((n, t), (lhs == rhs))
self.electrical_flow = Constraint(group, m.TIMESTEPS, noruleinit=True)
self.electrical_flow_build = BuildAction(
rule=_voltage_angle_relation)
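        # The constraint added above is the linearised DC power-flow relation
        #     flow = (1 / reactance) * (voltage_angle_in - voltage_angle_out)
        # e.g. a reactance of 0.01 and an angle difference of 0.05 give a flow
        # of 5 in the model's per-unit system. Fixing the slack bus angle to
        # zero gives the angle variables a common reference.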
| gpl-3.0 | -7,568,780,707,150,623,000 | 30.15 | 79 | 0.569136 | false |
tylerclair/py3canvas | py3canvas/apis/accounts.py | 1 | 26185 | """Accounts API Version 1.0.
This API client was generated using a template. Make sure this code is valid before using it.
"""
import logging
from datetime import date, datetime
from .base import BaseCanvasAPI
from .base import BaseModel
class AccountsAPI(BaseCanvasAPI):
"""Accounts API Version 1.0."""
def __init__(self, *args, **kwargs):
"""Init method for AccountsAPI."""
super(AccountsAPI, self).__init__(*args, **kwargs)
self.logger = logging.getLogger("py3canvas.AccountsAPI")
def list_accounts(self, include=None):
"""
List accounts.
List accounts that the current user can view or manage. Typically,
students and even teachers will get an empty list in response, only
account admins can view the accounts that they are in.
"""
path = {}
data = {}
params = {}
# OPTIONAL - include
"""Array of additional information to include.
"lti_guid":: the 'tool_consumer_instance_guid' that will be sent for this account on LTI launches
"registration_settings":: returns info about the privacy policy and terms of use
"services":: returns services and whether they are enabled (requires account management permissions)"""
if include is not None:
self._validate_enum(include, ["lti_guid", "registration_settings", "services"])
params["include"] = include
self.logger.debug("GET /api/v1/accounts with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/accounts".format(**path), data=data, params=params, all_pages=True)
def list_accounts_for_course_admins(self):
"""
List accounts for course admins.
List accounts that the current user can view through their admin course enrollments.
(Teacher, TA, or designer enrollments).
Only returns "id", "name", "workflow_state", "root_account_id" and "parent_account_id"
"""
path = {}
data = {}
params = {}
self.logger.debug("GET /api/v1/course_accounts with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/course_accounts".format(**path), data=data, params=params, all_pages=True)
def get_single_account(self, id):
"""
Get a single account.
        Retrieve information on an individual account, given by id or
        sis_account_id.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
self.logger.debug("GET /api/v1/accounts/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/accounts/{id}".format(**path), data=data, params=params, single_item=True)
def get_sub_accounts_of_account(self, account_id, recursive=None):
"""
Get the sub-accounts of an account.
List accounts that are sub-accounts of the given account.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - account_id
"""ID"""
path["account_id"] = account_id
# OPTIONAL - recursive
"""If true, the entire account tree underneath
this account will be returned (though still paginated). If false, only
direct sub-accounts of this account will be returned. Defaults to false."""
if recursive is not None:
params["recursive"] = recursive
self.logger.debug("GET /api/v1/accounts/{account_id}/sub_accounts with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/accounts/{account_id}/sub_accounts".format(**path), data=data, params=params, all_pages=True)
def list_active_courses_in_account(self, account_id, blueprint=None, blueprint_associated=None, by_subaccounts=None, by_teachers=None, completed=None, enrollment_term_id=None, enrollment_type=None, hide_enrollmentless_courses=None, include=None, order=None, published=None, search_by=None, search_term=None, sort=None, state=None, with_enrollments=None):
"""
List active courses in an account.
Retrieve the list of courses in this account.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - account_id
"""ID"""
path["account_id"] = account_id
# OPTIONAL - with_enrollments
"""If true, include only courses with at least one enrollment. If false,
include only courses with no enrollments. If not present, do not filter
on course enrollment status."""
if with_enrollments is not None:
params["with_enrollments"] = with_enrollments
# OPTIONAL - enrollment_type
"""If set, only return courses that have at least one user enrolled in
in the course with one of the specified enrollment types."""
if enrollment_type is not None:
self._validate_enum(enrollment_type, ["teacher", "student", "ta", "observer", "designer"])
params["enrollment_type"] = enrollment_type
# OPTIONAL - published
"""If true, include only published courses. If false, exclude published
courses. If not present, do not filter on published status."""
if published is not None:
params["published"] = published
# OPTIONAL - completed
"""If true, include only completed courses (these may be in state
'completed', or their enrollment term may have ended). If false, exclude
completed courses. If not present, do not filter on completed status."""
if completed is not None:
params["completed"] = completed
# OPTIONAL - blueprint
"""If true, include only blueprint courses. If false, exclude them.
If not present, do not filter on this basis."""
if blueprint is not None:
params["blueprint"] = blueprint
# OPTIONAL - blueprint_associated
"""If true, include only courses that inherit content from a blueprint course.
If false, exclude them. If not present, do not filter on this basis."""
if blueprint_associated is not None:
params["blueprint_associated"] = blueprint_associated
# OPTIONAL - by_teachers
"""List of User IDs of teachers; if supplied, include only courses taught by
one of the referenced users."""
if by_teachers is not None:
params["by_teachers"] = by_teachers
# OPTIONAL - by_subaccounts
"""List of Account IDs; if supplied, include only courses associated with one
of the referenced subaccounts."""
if by_subaccounts is not None:
params["by_subaccounts"] = by_subaccounts
# OPTIONAL - hide_enrollmentless_courses
"""If present, only return courses that have at least one enrollment.
Equivalent to 'with_enrollments=true'; retained for compatibility."""
if hide_enrollmentless_courses is not None:
params["hide_enrollmentless_courses"] = hide_enrollmentless_courses
# OPTIONAL - state
"""If set, only return courses that are in the given state(s). By default,
all states but "deleted" are returned."""
if state is not None:
self._validate_enum(state, ["created", "claimed", "available", "completed", "deleted", "all"])
params["state"] = state
# OPTIONAL - enrollment_term_id
"""If set, only includes courses from the specified term."""
if enrollment_term_id is not None:
params["enrollment_term_id"] = enrollment_term_id
# OPTIONAL - search_term
"""The partial course name, code, or full ID to match and return in the results list. Must be at least 3 characters."""
if search_term is not None:
params["search_term"] = search_term
# OPTIONAL - include
"""- All explanations can be seen in the {api:CoursesController#index Course API index documentation}
- "sections", "needs_grading_count" and "total_scores" are not valid options at the account level"""
if include is not None:
self._validate_enum(include, ["syllabus_body", "term", "course_progress", "storage_quota_used_mb", "total_students", "teachers"])
params["include"] = include
# OPTIONAL - sort
"""The column to sort results by."""
if sort is not None:
self._validate_enum(sort, ["course_name", "sis_course_id", "teacher", "subaccount", "enrollments"])
params["sort"] = sort
# OPTIONAL - order
"""The order to sort the given column by."""
if order is not None:
self._validate_enum(order, ["asc", "desc"])
params["order"] = order
# OPTIONAL - search_by
"""The filter to search by. "course" searches for course names, course codes,
and SIS IDs. "teacher" searches for teacher names"""
if search_by is not None:
self._validate_enum(search_by, ["course", "teacher"])
params["search_by"] = search_by
self.logger.debug("GET /api/v1/accounts/{account_id}/courses with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/accounts/{account_id}/courses".format(**path), data=data, params=params, all_pages=True)
def update_account(self, id, account_default_group_storage_quota_mb=None, account_default_storage_quota_mb=None, account_default_time_zone=None, account_default_user_storage_quota_mb=None, account_name=None, account_services=None, account_settings_lock_all_announcements_locked=None, account_settings_lock_all_announcements_value=None, account_settings_restrict_student_future_listing_locked=None, account_settings_restrict_student_future_listing_value=None, account_settings_restrict_student_future_view_locked=None, account_settings_restrict_student_future_view_value=None, account_settings_restrict_student_past_view_locked=None, account_settings_restrict_student_past_view_value=None, account_sis_account_id=None):
"""
Update an account.
Update an existing account.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - account[name]
"""Updates the account name"""
if account_name is not None:
data["account[name]"] = account_name
# OPTIONAL - account[sis_account_id]
"""Updates the account sis_account_id
Must have manage_sis permission and must not be a root_account."""
if account_sis_account_id is not None:
data["account[sis_account_id]"] = account_sis_account_id
# OPTIONAL - account[default_time_zone]
"""The default time zone of the account. Allowed time zones are
{http://www.iana.org/time-zones IANA time zones} or friendlier
{http://api.rubyonrails.org/classes/ActiveSupport/TimeZone.html Ruby on Rails time zones}."""
if account_default_time_zone is not None:
data["account[default_time_zone]"] = account_default_time_zone
# OPTIONAL - account[default_storage_quota_mb]
"""The default course storage quota to be used, if not otherwise specified."""
if account_default_storage_quota_mb is not None:
data["account[default_storage_quota_mb]"] = account_default_storage_quota_mb
# OPTIONAL - account[default_user_storage_quota_mb]
"""The default user storage quota to be used, if not otherwise specified."""
if account_default_user_storage_quota_mb is not None:
data["account[default_user_storage_quota_mb]"] = account_default_user_storage_quota_mb
# OPTIONAL - account[default_group_storage_quota_mb]
"""The default group storage quota to be used, if not otherwise specified."""
if account_default_group_storage_quota_mb is not None:
data["account[default_group_storage_quota_mb]"] = account_default_group_storage_quota_mb
# OPTIONAL - account[settings][restrict_student_past_view][value]
"""Restrict students from viewing courses after end date"""
if account_settings_restrict_student_past_view_value is not None:
data["account[settings][restrict_student_past_view][value]"] = account_settings_restrict_student_past_view_value
# OPTIONAL - account[settings][restrict_student_past_view][locked]
"""Lock this setting for sub-accounts and courses"""
if account_settings_restrict_student_past_view_locked is not None:
data["account[settings][restrict_student_past_view][locked]"] = account_settings_restrict_student_past_view_locked
# OPTIONAL - account[settings][restrict_student_future_view][value]
"""Restrict students from viewing courses before start date"""
if account_settings_restrict_student_future_view_value is not None:
data["account[settings][restrict_student_future_view][value]"] = account_settings_restrict_student_future_view_value
# OPTIONAL - account[settings][restrict_student_future_view][locked]
"""Lock this setting for sub-accounts and courses"""
if account_settings_restrict_student_future_view_locked is not None:
data["account[settings][restrict_student_future_view][locked]"] = account_settings_restrict_student_future_view_locked
# OPTIONAL - account[settings][lock_all_announcements][value]
"""Disable comments on announcements"""
if account_settings_lock_all_announcements_value is not None:
data["account[settings][lock_all_announcements][value]"] = account_settings_lock_all_announcements_value
# OPTIONAL - account[settings][lock_all_announcements][locked]
"""Lock this setting for sub-accounts and courses"""
if account_settings_lock_all_announcements_locked is not None:
data["account[settings][lock_all_announcements][locked]"] = account_settings_lock_all_announcements_locked
# OPTIONAL - account[settings][restrict_student_future_listing][value]
"""Restrict students from viewing future enrollments in course list"""
if account_settings_restrict_student_future_listing_value is not None:
data["account[settings][restrict_student_future_listing][value]"] = account_settings_restrict_student_future_listing_value
# OPTIONAL - account[settings][restrict_student_future_listing][locked]
"""Lock this setting for sub-accounts and courses"""
if account_settings_restrict_student_future_listing_locked is not None:
data["account[settings][restrict_student_future_listing][locked]"] = account_settings_restrict_student_future_listing_locked
# OPTIONAL - account[services]
"""Give this a set of keys and boolean values to enable or disable services matching the keys"""
if account_services is not None:
data["account[services]"] = account_services
self.logger.debug("PUT /api/v1/accounts/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/accounts/{id}".format(**path), data=data, params=params, single_item=True)
def delete_user_from_root_account(self, user_id, account_id):
"""
Delete a user from the root account.
Delete a user record from a Canvas root account. If a user is associated
with multiple root accounts (in a multi-tenant instance of Canvas), this
action will NOT remove them from the other accounts.
WARNING: This API will allow a user to remove themselves from the account.
If they do this, they won't be able to make API calls or log into Canvas at
that account.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - account_id
"""ID"""
path["account_id"] = account_id
# REQUIRED - PATH - user_id
"""ID"""
path["user_id"] = user_id
self.logger.debug("DELETE /api/v1/accounts/{account_id}/users/{user_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("DELETE", "/api/v1/accounts/{account_id}/users/{user_id}".format(**path), data=data, params=params, single_item=True)
def create_new_sub_account(self, account_id, account_name, account_default_group_storage_quota_mb=None, account_default_storage_quota_mb=None, account_default_user_storage_quota_mb=None, account_sis_account_id=None):
"""
Create a new sub-account.
Add a new sub-account to a given account.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - account_id
"""ID"""
path["account_id"] = account_id
# REQUIRED - account[name]
"""The name of the new sub-account."""
data["account[name]"] = account_name
# OPTIONAL - account[sis_account_id]
"""The account's identifier in the Student Information System."""
if account_sis_account_id is not None:
data["account[sis_account_id]"] = account_sis_account_id
# OPTIONAL - account[default_storage_quota_mb]
"""The default course storage quota to be used, if not otherwise specified."""
if account_default_storage_quota_mb is not None:
data["account[default_storage_quota_mb]"] = account_default_storage_quota_mb
# OPTIONAL - account[default_user_storage_quota_mb]
"""The default user storage quota to be used, if not otherwise specified."""
if account_default_user_storage_quota_mb is not None:
data["account[default_user_storage_quota_mb]"] = account_default_user_storage_quota_mb
# OPTIONAL - account[default_group_storage_quota_mb]
"""The default group storage quota to be used, if not otherwise specified."""
if account_default_group_storage_quota_mb is not None:
data["account[default_group_storage_quota_mb]"] = account_default_group_storage_quota_mb
self.logger.debug("POST /api/v1/accounts/{account_id}/sub_accounts with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/accounts/{account_id}/sub_accounts".format(**path), data=data, params=params, single_item=True)
class Account(BaseModel):
"""Account Model."""
def __init__(self, integration_id=None, default_time_zone=None, uuid=None, default_storage_quota_mb=None, sis_account_id=None, root_account_id=None, default_group_storage_quota_mb=None, id=None, sis_import_id=None, lti_guid=None, workflow_state=None, default_user_storage_quota_mb=None, parent_account_id=None, name=None):
"""Init method for Account class."""
self._integration_id = integration_id
self._default_time_zone = default_time_zone
self._uuid = uuid
self._default_storage_quota_mb = default_storage_quota_mb
self._sis_account_id = sis_account_id
self._root_account_id = root_account_id
self._default_group_storage_quota_mb = default_group_storage_quota_mb
self._id = id
self._sis_import_id = sis_import_id
self._lti_guid = lti_guid
self._workflow_state = workflow_state
self._default_user_storage_quota_mb = default_user_storage_quota_mb
self._parent_account_id = parent_account_id
self._name = name
self.logger = logging.getLogger('py3canvas.Account')
@property
def integration_id(self):
"""The account's identifier in the Student Information System. Only included if the user has permission to view SIS information."""
return self._integration_id
@integration_id.setter
def integration_id(self, value):
"""Setter for integration_id property."""
self.logger.warn("Setting values on integration_id will NOT update the remote Canvas instance.")
self._integration_id = value
@property
def default_time_zone(self):
"""The default time zone of the account. Allowed time zones are {http://www.iana.org/time-zones IANA time zones} or friendlier {http://api.rubyonrails.org/classes/ActiveSupport/TimeZone.html Ruby on Rails time zones}."""
return self._default_time_zone
@default_time_zone.setter
def default_time_zone(self, value):
"""Setter for default_time_zone property."""
self.logger.warn("Setting values on default_time_zone will NOT update the remote Canvas instance.")
self._default_time_zone = value
@property
def uuid(self):
"""The UUID of the account."""
return self._uuid
@uuid.setter
def uuid(self, value):
"""Setter for uuid property."""
self.logger.warn("Setting values on uuid will NOT update the remote Canvas instance.")
self._uuid = value
@property
def default_storage_quota_mb(self):
"""The storage quota for the account in megabytes, if not otherwise specified."""
return self._default_storage_quota_mb
@default_storage_quota_mb.setter
def default_storage_quota_mb(self, value):
"""Setter for default_storage_quota_mb property."""
self.logger.warn("Setting values on default_storage_quota_mb will NOT update the remote Canvas instance.")
self._default_storage_quota_mb = value
@property
def sis_account_id(self):
"""The account's identifier in the Student Information System. Only included if the user has permission to view SIS information."""
return self._sis_account_id
@sis_account_id.setter
def sis_account_id(self, value):
"""Setter for sis_account_id property."""
self.logger.warn("Setting values on sis_account_id will NOT update the remote Canvas instance.")
self._sis_account_id = value
@property
def root_account_id(self):
"""The ID of the root account, or null if this is the root account."""
return self._root_account_id
@root_account_id.setter
def root_account_id(self, value):
"""Setter for root_account_id property."""
self.logger.warn("Setting values on root_account_id will NOT update the remote Canvas instance.")
self._root_account_id = value
@property
def default_group_storage_quota_mb(self):
"""The storage quota for a group in the account in megabytes, if not otherwise specified."""
return self._default_group_storage_quota_mb
@default_group_storage_quota_mb.setter
def default_group_storage_quota_mb(self, value):
"""Setter for default_group_storage_quota_mb property."""
self.logger.warn("Setting values on default_group_storage_quota_mb will NOT update the remote Canvas instance.")
self._default_group_storage_quota_mb = value
@property
def id(self):
"""the ID of the Account object."""
return self._id
@id.setter
def id(self, value):
"""Setter for id property."""
self.logger.warn("Setting values on id will NOT update the remote Canvas instance.")
self._id = value
@property
def sis_import_id(self):
"""The id of the SIS import if created through SIS. Only included if the user has permission to manage SIS information."""
return self._sis_import_id
@sis_import_id.setter
def sis_import_id(self, value):
"""Setter for sis_import_id property."""
self.logger.warn("Setting values on sis_import_id will NOT update the remote Canvas instance.")
self._sis_import_id = value
@property
def lti_guid(self):
"""The account's identifier that is sent as context_id in LTI launches."""
return self._lti_guid
@lti_guid.setter
def lti_guid(self, value):
"""Setter for lti_guid property."""
self.logger.warn("Setting values on lti_guid will NOT update the remote Canvas instance.")
self._lti_guid = value
@property
def workflow_state(self):
"""The state of the account. Can be 'active' or 'deleted'."""
return self._workflow_state
@workflow_state.setter
def workflow_state(self, value):
"""Setter for workflow_state property."""
self.logger.warn("Setting values on workflow_state will NOT update the remote Canvas instance.")
self._workflow_state = value
@property
def default_user_storage_quota_mb(self):
"""The storage quota for a user in the account in megabytes, if not otherwise specified."""
return self._default_user_storage_quota_mb
@default_user_storage_quota_mb.setter
def default_user_storage_quota_mb(self, value):
"""Setter for default_user_storage_quota_mb property."""
self.logger.warn("Setting values on default_user_storage_quota_mb will NOT update the remote Canvas instance.")
self._default_user_storage_quota_mb = value
@property
def parent_account_id(self):
"""The account's parent ID, or null if this is the root account."""
return self._parent_account_id
@parent_account_id.setter
def parent_account_id(self, value):
"""Setter for parent_account_id property."""
self.logger.warn("Setting values on parent_account_id will NOT update the remote Canvas instance.")
self._parent_account_id = value
@property
def name(self):
"""The display name of the account."""
return self._name
@name.setter
def name(self, value):
"""Setter for name property."""
self.logger.warn("Setting values on name will NOT update the remote Canvas instance.")
self._name = value
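    # Illustrative sketch (not from the original source): the setters above only
    # mutate the local object and emit a warning; nothing is pushed to Canvas.
    # Assuming `account` is an already-deserialized instance of this class:
    #
    #     account.name             # -> cached display name from the last fetch
    #     account.name = "Design"  # logs the warning, updates only the local copy
    #
    # Propagating a change to the remote Canvas instance has to go through the
    # API client, not through these properties.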
| mit | -5,909,541,274,754,134,000 | 46.010772 | 722 | 0.652015 | false |
akalipetis/djoser | testproject/testapp/tests/social/test_provider_auth.py | 1 | 3270 | from django.contrib.sessions.middleware import SessionMiddleware
from django.utils import six
from rest_framework import status
from djet import assertions, restframework
import djoser.social.views
from social_core.exceptions import AuthException
from ..common import create_user, mock
class ProviderAuthViewTestCase(restframework.APIViewTestCase,
assertions.StatusCodeAssertionsMixin):
view_class = djoser.social.views.ProviderAuthView
middleware = [SessionMiddleware]
def test_get_facebook_provider_fails_if_no_redirect_uri(self):
request = self.factory.get()
response = self.view(request, provider='facebook')
self.assert_status_equal(response, status.HTTP_400_BAD_REQUEST)
def test_get_facebook_provider_fails_if_wrong_redirect_uri(self):
request = self.factory.get(data={'redirect_uri': 'http://yolo.com/'})
response = self.view(request, provider='facebook')
self.assert_status_equal(response, status.HTTP_400_BAD_REQUEST)
def test_get_facebook_provider_provides_valid_authorization_url(self):
request = self.factory.get(data={
'redirect_uri': 'http://test.localhost/'
})
response = self.view(request, provider='facebook')
self.assert_status_equal(response, status.HTTP_200_OK)
self.assertIn('authorization_url', response.data)
def test_post_facebook_provider_success_returns_token(self):
data = {'code': 'XYZ', 'state': 'ABC'}
mock.patch(
'social_core.backends.facebook.FacebookOAuth2.auth_complete',
return_value=create_user()
).start()
mock.patch(
'social_core.backends.oauth.OAuthAuth.get_session_state',
return_value=data['state']
).start()
request = self.factory.post()
request.GET = {k: v for k, v in six.iteritems(data)}
response = self.view(request, provider='facebook')
self.assert_status_equal(response, status.HTTP_201_CREATED)
self.assertEqual(set(response.data.keys()), {'token', 'user'})
def test_post_facebook_provider_code_validation_fails(self):
data = {'code': 'XYZ', 'state': 'ABC'}
mock.patch(
'social_core.backends.facebook.FacebookOAuth2.auth_complete',
side_effect=AuthException(backend=None)
).start()
mock.patch(
'social_core.backends.oauth.OAuthAuth.get_session_state',
return_value=data['state']
).start()
request = self.factory.post()
request.GET = {k: v for k, v in six.iteritems(data)}
response = self.view(request, provider='facebook')
self.assert_status_equal(response, status.HTTP_400_BAD_REQUEST)
def test_post_facebook_provider_validation_fails_if_invalid_state(self):
data = {'code': 'XYZ', 'state': 'ABC'}
mock.patch(
'social_core.backends.oauth.OAuthAuth.get_session_state',
return_value=data['state'][::-1]
).start()
request = self.factory.post()
request.GET = {k: v for k, v in six.iteritems(data)}
response = self.view(request, provider='facebook')
self.assert_status_equal(response, status.HTTP_400_BAD_REQUEST)
| mit | -147,925,745,511,879,970 | 37.928571 | 77 | 0.649847 | false |
sbhal/be-fruitful | pythonProject/qlearning_tf.py | 1 | 5122 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 8 19:14:33 2017
@author: sbhal
"""
import numpy as np
import pandas as pd
import random
import tensorflow as tf
class qlearningTF:
def __init__(self, m_criteria, initialWeights=None):
        if initialWeights is None:
self.weights = np.full(m_criteria, 3) #assign dtype
else:
self.weights = initialWeights
        self.weightBins = 3  # three possible values per weight: 3, 5, 7
self.e = 0.5
self.lr = .8
self.y = .95
self.m_criteria = m_criteria
self.actionStatesCount = 3 #+-0
# initialize Q table
self.currState = "33"
self.Qrows = pow(self.weightBins,self.m_criteria)
self.Qcols = self.m_criteria* self.actionStatesCount
# These lines establish the feed-forward part of the network used to choose actions
self.inputs1 = tf.placeholder(shape=[1, self.Qrows], dtype=tf.float32)
#self.W = tf.Variable(tf.random_uniform([self.Qrows, self.Qcols], 0, 0.01))
self.W = tf.Variable(tf.random_uniform([self.Qrows, self.Qcols], 0, 0.00))
self.Qout = tf.matmul(self.inputs1, self.W)
self.predict = tf.argmax(self.Qout, 1)
# Below we obtain the loss by taking the sum of squares difference between the target and prediction Q values.
self.nextQ = tf.placeholder(shape=[1, self.Qcols], dtype=tf.float32)
loss = tf.reduce_sum(tf.square(self.nextQ - self.Qout))
trainer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
self.updateModel = trainer.minimize(loss)
self.sess = tf.Session()
self.sess.run(tf.initialize_all_variables())
def learn(self, s, a, reward, s1): #curState ----action----> finalState (+reward)
allQ = self.sess.run(self.Qout, feed_dict={self.inputs1: np.identity(self.Qrows)[s:s + 1]})
value2 = np.max(self.sess.run(self.Qout,feed_dict={self.inputs1:np.identity(self.Qrows)[s1:s1+1]}))
allQ[0, a] = reward + self.y * value2
_, W1 = self.sess.run([self.updateModel, self.W], feed_dict={self.inputs1: np.identity(self.Qrows)[s:s + 1], self.nextQ: allQ})
# print(self.sess.run(self.W), " weight updated @ state", self.currState)
self.currState = self.state_num_to_string(s1)
def currToFinalState (self, a, c):
c_num = list(map(int, c))
if a[2] == "+":
c_num[int(a[1])] = min(7, c_num[int(a[1])]+2)
else:
c_num[int(a[1])] = max(3, c_num[int(a[1])] - 2)
return "".join(map(str,c_num))
def update(self, action, latency):
        reward = 0 if latency == 0 else 1.0 / latency  # float division; 1/latency truncates to 0 under Python 2
finalState = self.currToFinalState(action, self.currState)
s = self.state_string_to_num(self.currState)
s1 = self.state_string_to_num(finalState)
a = self.action_string_to_num(action)
self.learn (s, a, reward, s1)
def choose_action(self, currState):
#verify if currState has correct format
s = self.state_string_to_num(currState)
if np.random.rand(1) < self.e:
# print("Random action Chosen")
return self.action_num_to_string(random.randrange(0, self.Qcols))
else:
a = np.argmax(self.sess.run(self.Qout,feed_dict={self.inputs1:np.identity(self.Qrows)[s:s+1]}))
return self.action_num_to_string(a)
def state_string_to_num(self, s):
dict = {'3': 0,
'5': 1,
'7': 2}
        sum = 0
for i, c in enumerate(reversed(s)):
sum += pow(self.weightBins,i) * dict[c]
return sum
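    # Worked example for the base-3 encoding above (digits 3/5/7 map to 0/1/2,
    # right-most character is the least-significant digit):
    #   "33" -> 0*3 + 0 = 0
    #   "53" -> 1*3 + 0 = 3
    #   "77" -> 2*3 + 2 = 8
    # state_num_to_string() below is the inverse mapping.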
def state_num_to_string(self, num):
dict = {'0':'3',
'1':'5',
'2':'7'}
mynum = num
strr = ""
string = ""
for i in reversed(range(0,self.m_criteria)):
strr += str(mynum // pow(self.weightBins, i))
mynum = mynum % pow(self.weightBins, i)
for i,c in enumerate(strr):
string += dict[strr[i]]
return string
def action_num_to_string(self, num):
dict = {0: "+",
1: "-",
2: "0"}
quotient = num // self.weightBins
remainder = num % self.weightBins
return "w"+ str(quotient) + dict[remainder]
def action_string_to_num(self, s):
dict = { "+": 0,
"-": 1,
"0": 2}
return (int(s[1]) * self.weightBins) + dict[s[2]]
if __name__ == "__main__":
myIns = qlearningTF(m_criteria=2)
print (myIns.state_string_to_num("33"))
print(myIns.state_string_to_num("53"))
print(myIns.state_string_to_num("77"))
print(myIns.action_num_to_string(0))
print(myIns.action_num_to_string(4))
print(myIns.state_num_to_string(0))
print(myIns.state_num_to_string(3))
print(myIns.state_num_to_string(8))
print("From here:")
action = myIns.choose_action("33")
print("Action given is", action)
myIns.update(action, 300)
print("new")
action = myIns.choose_action("77")
myIns.update(action, 300)
print(myIns.choose_action("33"))
| mit | 606,855,611,802,600,200 | 36.115942 | 135 | 0.577509 | false |
sarahdunbar/Multiplication-table | multiplication-table.py | 1 | 1123 | """
multiplication-table.py
Author: Sarah Dunbar
Credit: http://stackoverflow.com/questions/12102749/how-can-i-suppress-the-newline-after-a-print-statement,
https://docs.python.org/3.3/library/functions.html#print, Mr. Dennison
Assignment:
Write and submit a Python program that prints a multiplication table. The user
must be able to determine the width and height of the table before it is printed.
The final multiplication table should look like this:
Width of multiplication table: 10
Height of multiplication table: 8
1 2 3 4 5 6 7 8 9 10
2 4 6 8 10 12 14 16 18 20
3 6 9 12 15 18 21 24 27 30
4 8 12 16 20 24 28 32 36 40
5 10 15 20 25 30 35 40 45 50
6 12 18 24 30 36 42 48 54 60
7 14 21 28 35 42 49 56 63 70
8 16 24 32 40 48 56 64 72 80
"""
i = input ("Width of multiplication table: ")
i = int(i)
j = input ("Height of multiplication table: ")
j = int(j)
r = 1
while r <= j:
t = 1
while t <= i:
print("{0:>3}".format(r*t), " ", end="")
t = t + 1
print(" ", end="\n")
r = r + 1
| mit | 5,617,071,994,563,114,000 | 28.552632 | 107 | 0.610864 | false |
arne-cl/discoursegraphs | src/discoursegraphs/readwrite/__init__.py | 1 | 2684 | # discoursegraphs.readwrite: input/output functionality
"""
The ``readwrite`` package contains importers, exporters and other
output functionality. Basically, it allows you to convert annotated
linguistic documents into a graph-based representation for further
processing.
"""
from discoursegraphs.readwrite.anaphoricity import AnaphoraDocumentGraph, read_anaphoricity
from discoursegraphs.readwrite.brackets import write_brackets
from discoursegraphs.readwrite.brat import write_brat
from discoursegraphs.readwrite.conano import ConanoDocumentGraph, read_conano
from discoursegraphs.readwrite.conll import ConllDocumentGraph, read_conll, write_conll
from discoursegraphs.readwrite.decour import DecourDocumentGraph, read_decour
from discoursegraphs.readwrite.dot import write_dot
from discoursegraphs.readwrite.exmaralda import (
ExmaraldaDocumentGraph, read_exb, read_exmaralda, write_exmaralda, write_exb)
from discoursegraphs.readwrite.exportxml import read_exportxml
from discoursegraphs.readwrite.freqt import docgraph2freqt, write_freqt
from discoursegraphs.readwrite.gexf import write_gexf
from discoursegraphs.readwrite.graphml import write_graphml
from discoursegraphs.readwrite.mmax2 import MMAXDocumentGraph, read_mmax2
from discoursegraphs.readwrite.neo4j import write_neo4j, write_geoff
from discoursegraphs.readwrite.paulaxml.paula import PaulaDocument, write_paula
from discoursegraphs.readwrite.ptb import PTBDocumentGraph, read_ptb, read_mrg
from discoursegraphs.readwrite.rst.rs3 import RSTGraph, RSTTree, read_rst, read_rs3
from discoursegraphs.readwrite.rst.rs3.rs3tree import read_rs3tree
from discoursegraphs.readwrite.rst.rs3.rs3filewriter import RS3FileWriter, write_rs3
from discoursegraphs.readwrite.rst.rstlatex import RSTLatexFileWriter, write_rstlatex
from discoursegraphs.readwrite.rst.hilda import HILDARSTTree, read_hilda
from discoursegraphs.readwrite.rst.heilman_sagae_2015 import HS2015RSTTree, read_hs2015tree
from discoursegraphs.readwrite.rst.stagedp import StageDPRSTTree, read_stagedp
from discoursegraphs.readwrite.rst.dis.disgraph import read_dis
from discoursegraphs.readwrite.rst.dis.distree import read_distree
from discoursegraphs.readwrite.rst.dis.disfilewriter import DisFileWriter, write_dis
from discoursegraphs.readwrite.rst.dplp import DPLPRSTTree, read_dplp
from discoursegraphs.readwrite.rst.dis.codra import read_codra
from discoursegraphs.readwrite.rst.urml import URMLDocumentGraph, read_urml
from discoursegraphs.readwrite.salt.saltxmi import SaltDocument, SaltXMIGraph
from discoursegraphs.readwrite.tiger import TigerDocumentGraph, read_tiger
from discoursegraphs.readwrite.tree import t, tree2bracket, write_svgtree
| bsd-3-clause | -3,401,569,570,732,145,000 | 62.904762 | 91 | 0.861028 | false |
kyleabeauchamp/EnsemblePaper | code/model_building/evaluate_BW_entropy.py | 1 | 1791 | import pandas as pd
import numpy as np
from fitensemble import bayesian_weighting, belt
import experiment_loader
import ALA3
prior = "BW"
ff = "amber96"
stride = 1000
regularization_strength = 10.0
thin = 400
factor = 50
steps = 1000000
predictions_framewise, measurements, uncertainties = experiment_loader.load(ff, stride=stride)
phi, psi, ass_raw0, state_ind0 = experiment_loader.load_rama(ff, stride)
num_states = len(phi)
assignments = np.arange(num_states)
prior_pops = np.ones(num_states)
predictions = pd.DataFrame(bayesian_weighting.framewise_to_statewise(predictions_framewise, assignments), columns=predictions_framewise.columns)
model = bayesian_weighting.MaxentBayesianWeighting(predictions.values, measurements.values, uncertainties.values, assignments, regularization_strength)
model.sample(steps * factor, thin=thin * factor)
model2 = belt.MaxEntBELT(predictions.values, measurements.values, uncertainties.values, regularization_strength)
model2.sample(steps, thin=thin)
pi = model.mcmc.trace("matrix_populations")[:, 0]
num_samples = len(pi)
data = np.zeros((num_samples, num_samples))
for i, p in enumerate(model.iterate_populations()):
print(i)
for j, p2 in enumerate(model2.iterate_populations()):
data[i, j] = p.dot(np.log(p / p2))
p_bw = model.accumulate_populations()
p_BELT = model2.accumulate_populations()
chi2 = []
prior = []
H_terms = []
for j, p2 in enumerate(model2.iterate_populations()):
mu = predictions.T.dot(p2)
chi2.append(0.5 * (((mu - measurements) / uncertainties) ** 2).sum())
prior.append(regularization_strength * -1.0 * p2.dot(np.log(p2)))
H = -np.diag(p2[:-1] ** -1.) - p[-1] ** -1.
H_terms.append(0.5 * np.linalg.slogdet(H)[1])
R = pd.DataFrame({"chi2":chi2, "prior":prior, "H":H_terms})
| gpl-3.0 | 1,257,935,225,862,620,400 | 31.563636 | 151 | 0.719151 | false |
inmcm/Simon_Speck_Ciphers | Python/simonspeckciphers/tests/test_simonspeck.py | 1 | 31076 | import pytest
from random import randint
from speck import SpeckCipher
from simon import SimonCipher
# Official Test Vectors
class TestOfficialTestVectors:
"""
    Official test vectors from the original paper
"The SIMON and SPECK Families of Lightweight Block Ciphers"
"""
# Speck Test Vectors
def test_speck32_64(self):
key = 0x1918111009080100
plaintxt = 0x6574694c
ciphertxt = 0xa86842f2
block_size = 32
key_size = 64
c = SpeckCipher(key, key_size, block_size, 'ECB')
assert c.encrypt(plaintxt) == ciphertxt
assert c.decrypt(ciphertxt) == plaintxt
def test_speck48_72(self):
key = 0x1211100a0908020100
plaintxt = 0x20796c6c6172
ciphertxt = 0xc049a5385adc
block_size = 48
key_size = 72
c = SpeckCipher(key, key_size, block_size, 'ECB')
assert c.encrypt(plaintxt) == ciphertxt
assert c.decrypt(ciphertxt) == plaintxt
def test_speck48_96(self):
key = 0x1a19181211100a0908020100
plaintxt = 0x6d2073696874
ciphertxt = 0x735e10b6445d
block_size = 48
key_size = 96
c = SpeckCipher(key, key_size, block_size, 'ECB')
assert c.encrypt(plaintxt) == ciphertxt
assert c.decrypt(ciphertxt) == plaintxt
def test_speck64_96(self):
key = 0x131211100b0a090803020100
plaintxt = 0x74614620736e6165
ciphertxt = 0x9f7952ec4175946c
block_size = 64
key_size = 96
c = SpeckCipher(key, key_size, block_size, 'ECB')
assert c.encrypt(plaintxt) == ciphertxt
assert c.decrypt(ciphertxt) == plaintxt
def test_speck64_128(self):
key = 0x1b1a1918131211100b0a090803020100
plaintxt = 0x3b7265747475432d
ciphertxt = 0x8c6fa548454e028b
block_size = 64
key_size = 128
c = SpeckCipher(key, key_size, block_size, 'ECB')
assert c.encrypt(plaintxt) == ciphertxt
assert c.decrypt(ciphertxt) == plaintxt
def test_speck96_96(self):
key = 0x0d0c0b0a0908050403020100
plaintxt = 0x65776f68202c656761737520
ciphertxt = 0x9e4d09ab717862bdde8f79aa
block_size = 96
key_size = 96
c = SpeckCipher(key, key_size, block_size, 'ECB')
assert c.encrypt(plaintxt) == ciphertxt
assert c.decrypt(ciphertxt) == plaintxt
def test_speck96_144(self):
key = 0x1514131211100d0c0b0a0908050403020100
plaintxt = 0x656d6974206e69202c726576
ciphertxt = 0x2bf31072228a7ae440252ee6
block_size = 96
key_size = 144
c = SpeckCipher(key, key_size, block_size, 'ECB')
assert c.encrypt(plaintxt) == ciphertxt
assert c.decrypt(ciphertxt) == plaintxt
def test_speck128_128(self):
key = 0x0f0e0d0c0b0a09080706050403020100
plaintxt = 0x6c617669757165207469206564616d20
ciphertxt = 0xa65d9851797832657860fedf5c570d18
block_size = 128
key_size = 128
c = SpeckCipher(key, key_size, block_size, 'ECB')
assert c.encrypt(plaintxt) == ciphertxt
assert c.decrypt(ciphertxt) == plaintxt
def test_speck128_192(self):
key = 0x17161514131211100f0e0d0c0b0a09080706050403020100
plaintxt = 0x726148206665696843206f7420746e65
ciphertxt = 0x1be4cf3a13135566f9bc185de03c1886
block_size = 128
key_size = 192
c = SpeckCipher(key, key_size, block_size, 'ECB')
assert c.encrypt(plaintxt) == ciphertxt
assert c.decrypt(ciphertxt) == plaintxt
def test_speck128_256(self):
key = 0x1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100
plaintxt = 0x65736f6874206e49202e72656e6f6f70
ciphertxt = 0x4109010405c0f53e4eeeb48d9c188f43
block_size = 128
key_size = 256
c = SpeckCipher(key, key_size, block_size, 'ECB')
assert c.encrypt(plaintxt) == ciphertxt
assert c.decrypt(ciphertxt) == plaintxt
# Simon Test Vectors
def test_simon32_64(self):
key = 0x1918111009080100
plaintxt = 0x65656877
ciphertxt = 0xc69be9bb
block_size = 32
key_size = 64
c = SimonCipher(key, key_size, block_size, 'ECB')
assert c.encrypt(plaintxt) == ciphertxt
assert c.decrypt(ciphertxt) == plaintxt
def test_simon48_72(self):
key = 0x1211100a0908020100
plaintxt = 0x6120676e696c
ciphertxt = 0xdae5ac292cac
block_size = 48
key_size = 72
c = SimonCipher(key, key_size, block_size, 'ECB')
assert c.encrypt(plaintxt) == ciphertxt
assert c.decrypt(ciphertxt) == plaintxt
def test_simon48_96(self):
key = 0x1a19181211100a0908020100
plaintxt = 0x72696320646e
ciphertxt = 0x6e06a5acf156
block_size = 48
key_size = 96
c = SimonCipher(key, key_size, block_size, 'ECB')
assert c.encrypt(plaintxt) == ciphertxt
assert c.decrypt(ciphertxt) == plaintxt
def test_simon64_96(self):
key = 0x131211100b0a090803020100
plaintxt = 0x6f7220676e696c63
ciphertxt = 0x5ca2e27f111a8fc8
block_size = 64
key_size = 96
c = SimonCipher(key, key_size, block_size, 'ECB')
assert c.encrypt(plaintxt) == ciphertxt
assert c.decrypt(ciphertxt) == plaintxt
def test_simon64_128(self):
key = 0x1b1a1918131211100b0a090803020100
plaintxt = 0x656b696c20646e75
ciphertxt = 0x44c8fc20b9dfa07a
block_size = 64
key_size = 128
c = SimonCipher(key, key_size, block_size, 'ECB')
assert c.encrypt(plaintxt) == ciphertxt
assert c.decrypt(ciphertxt) == plaintxt
def test_simon96_96(self):
key = 0x0d0c0b0a0908050403020100
plaintxt = 0x2072616c6c69702065687420
ciphertxt = 0x602807a462b469063d8ff082
block_size = 96
key_size = 96
c = SimonCipher(key, key_size, block_size, 'ECB')
assert c.encrypt(plaintxt) == ciphertxt
assert c.decrypt(ciphertxt) == plaintxt
def test_simon96_144(self):
key = 0x1514131211100d0c0b0a0908050403020100
plaintxt = 0x74616874207473756420666f
ciphertxt = 0xecad1c6c451e3f59c5db1ae9
block_size = 96
key_size = 144
c = SimonCipher(key, key_size, block_size, 'ECB')
assert c.encrypt(plaintxt) == ciphertxt
assert c.decrypt(ciphertxt) == plaintxt
def test_simon128_128(self):
key = 0x0f0e0d0c0b0a09080706050403020100
plaintxt = 0x63736564207372656c6c657661727420
ciphertxt = 0x49681b1e1e54fe3f65aa832af84e0bbc
block_size = 128
key_size = 128
c = SimonCipher(key, key_size, block_size, 'ECB')
assert c.encrypt(plaintxt) == ciphertxt
assert c.decrypt(ciphertxt) == plaintxt
def test_simon128_192(self):
key = 0x17161514131211100f0e0d0c0b0a09080706050403020100
plaintxt = 0x206572656874206e6568772065626972
ciphertxt = 0xc4ac61effcdc0d4f6c9c8d6e2597b85b
block_size = 128
key_size = 192
c = SimonCipher(key, key_size, block_size, 'ECB')
assert c.encrypt(plaintxt) == ciphertxt
assert c.decrypt(ciphertxt) == plaintxt
def test_simon128_256(self):
key = 0x1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100
plaintxt = 0x74206e69206d6f6f6d69732061207369
ciphertxt = 0x8d2b5579afc8a3a03bf72a87efe7b868
block_size = 128
key_size = 256
c = SimonCipher(key, key_size, block_size, 'ECB')
assert c.encrypt(plaintxt) == ciphertxt
assert c.decrypt(ciphertxt) == plaintxt
class TestRandomTestVectors:
"""
    Unofficial test vectors, randomly generated.
    Key/plaintext are printed out with each test in case of failure.
"""
test_cnt = 500
def test_speck32_64(self):
block_size = 32
key_size = 64
for x in range(self.test_cnt):
key = randint(0, (2**key_size) - 1)
plaintxt = randint(0, (2**block_size) - 1)
c = SpeckCipher(key, key_size, block_size, 'ECB')
assert c.decrypt(c.encrypt(plaintxt)) == plaintxt, 'Test %r Failed with Random Key %r and Random Plaintext %r' % (x, hex(key), hex(plaintxt))
def test_speck48_72(self):
block_size = 48
key_size = 72
for x in range(self.test_cnt):
key = randint(0, (2**key_size) - 1)
plaintxt = randint(0, (2**block_size) - 1)
c = SpeckCipher(key, key_size, block_size, 'ECB')
assert c.decrypt(c.encrypt(plaintxt)) == plaintxt, 'Test %r Failed with Random Key %r and Random Plaintext %r' % (x, hex(key), hex(plaintxt))
def test_speck48_96(self):
block_size = 48
key_size = 96
for x in range(self.test_cnt):
key = randint(0, (2**key_size) - 1)
plaintxt = randint(0, (2**block_size) - 1)
c = SpeckCipher(key, key_size, block_size, 'ECB')
assert c.decrypt(c.encrypt(plaintxt)) == plaintxt, 'Test %r Failed with Random Key %r and Random Plaintext %r' % (x, hex(key), hex(plaintxt))
def test_speck64_96(self):
block_size = 64
key_size = 96
for x in range(self.test_cnt):
key = randint(0, (2**key_size) - 1)
plaintxt = randint(0, (2**block_size) - 1)
c = SpeckCipher(key, key_size, block_size, 'ECB')
assert c.decrypt(c.encrypt(plaintxt)) == plaintxt, 'Test %r Failed with Random Key %r and Random Plaintext %r' % (x, hex(key), hex(plaintxt))
def test_speck64_128(self):
block_size = 64
key_size = 128
for x in range(self.test_cnt):
key = randint(0, (2**key_size) - 1)
plaintxt = randint(0, (2**block_size) - 1)
c = SpeckCipher(key, key_size, block_size, 'ECB')
assert c.decrypt(c.encrypt(plaintxt)) == plaintxt, 'Test %r Failed with Random Key %r and Random Plaintext %r' % (x, hex(key), hex(plaintxt))
def test_speck96_96(self):
block_size = 96
key_size = 96
for x in range(self.test_cnt):
key = randint(0, (2**key_size) - 1)
plaintxt = randint(0, (2**block_size) - 1)
c = SpeckCipher(key, key_size, block_size, 'ECB')
assert c.decrypt(c.encrypt(plaintxt)) == plaintxt, 'Test %r Failed with Random Key %r and Random Plaintext %r' % (x, hex(key), hex(plaintxt))
def test_speck96_144(self):
block_size = 96
key_size = 144
for x in range(self.test_cnt):
key = randint(0, (2**key_size) - 1)
plaintxt = randint(0, (2**block_size) - 1)
c = SpeckCipher(key, key_size, block_size, 'ECB')
assert c.decrypt(c.encrypt(plaintxt)) == plaintxt, 'Test %r Failed with Random Key %r and Random Plaintext %r' % (x, hex(key), hex(plaintxt))
def test_speck128_128(self):
block_size = 128
key_size = 128
for x in range(self.test_cnt):
key = randint(0, (2**key_size) - 1)
plaintxt = randint(0, (2**block_size) - 1)
c = SpeckCipher(key, key_size, block_size, 'ECB')
assert c.decrypt(c.encrypt(plaintxt)) == plaintxt, 'Test %r Failed with Random Key %r and Random Plaintext %r' % (x, hex(key), hex(plaintxt))
def test_speck128_192(self):
block_size = 128
key_size = 192
for x in range(self.test_cnt):
key = randint(0, (2**key_size) - 1)
plaintxt = randint(0, (2**block_size) - 1)
c = SpeckCipher(key, key_size, block_size, 'ECB')
assert c.decrypt(c.encrypt(plaintxt)) == plaintxt, 'Test %r Failed with Random Key %r and Random Plaintext %r' % (x, hex(key), hex(plaintxt))
def test_speck128_256(self):
block_size = 128
key_size = 256
for x in range(self.test_cnt):
key = randint(0, (2**key_size) - 1)
plaintxt = randint(0, (2**block_size) - 1)
c = SpeckCipher(key, key_size, block_size, 'ECB')
assert c.decrypt(c.encrypt(plaintxt)) == plaintxt, 'Test %r Failed with Random Key %r and Random Plaintext %r' % (x, hex(key), hex(plaintxt))
def test_simon32_64(self):
block_size = 32
key_size = 64
for x in range(self.test_cnt):
key = randint(0, (2**key_size) - 1)
plaintxt = randint(0, (2**block_size) - 1)
c = SimonCipher(key, key_size, block_size, 'ECB')
assert c.decrypt(c.encrypt(plaintxt)) == plaintxt, 'Test %r Failed with Random Key %r and Random Plaintext %r' % (x, hex(key), hex(plaintxt))
def test_simon48_72(self):
block_size = 48
key_size = 72
for x in range(self.test_cnt):
key = randint(0, (2**key_size) - 1)
plaintxt = randint(0, (2**block_size) - 1)
c = SimonCipher(key, key_size, block_size, 'ECB')
assert c.decrypt(c.encrypt(plaintxt)) == plaintxt, 'Test %r Failed with Random Key %r and Random Plaintext %r' % (x, hex(key), hex(plaintxt))
def test_simon48_96(self):
block_size = 48
key_size = 96
for x in range(self.test_cnt):
key = randint(0, (2**key_size) - 1)
plaintxt = randint(0, (2**block_size) - 1)
c = SimonCipher(key, key_size, block_size, 'ECB')
assert c.decrypt(c.encrypt(plaintxt)) == plaintxt, 'Test %r Failed with Random Key %r and Random Plaintext %r' % (x, hex(key), hex(plaintxt))
def test_simon64_96(self):
block_size = 64
key_size = 96
for x in range(self.test_cnt):
key = randint(0, (2**key_size) - 1)
plaintxt = randint(0, (2**block_size) - 1)
c = SimonCipher(key, key_size, block_size, 'ECB')
assert c.decrypt(c.encrypt(plaintxt)) == plaintxt, 'Test %r Failed with Random Key %r and Random Plaintext %r' % (x, hex(key), hex(plaintxt))
def test_simon64_128(self):
block_size = 64
key_size = 128
for x in range(self.test_cnt):
key = randint(0, (2**key_size) - 1)
plaintxt = randint(0, (2**block_size) - 1)
c = SimonCipher(key, key_size, block_size, 'ECB')
assert c.decrypt(c.encrypt(plaintxt)) == plaintxt, 'Test %r Failed with Random Key %r and Random Plaintext %r' % (x, hex(key), hex(plaintxt))
def test_simon96_96(self):
block_size = 96
key_size = 96
for x in range(self.test_cnt):
key = randint(0, (2**key_size) - 1)
plaintxt = randint(0, (2**block_size) - 1)
c = SimonCipher(key, key_size, block_size, 'ECB')
assert c.decrypt(c.encrypt(plaintxt)) == plaintxt, 'Test %r Failed with Random Key %r and Random Plaintext %r' % (x, hex(key), hex(plaintxt))
def test_simon96_144(self):
block_size = 96
key_size = 144
for x in range(self.test_cnt):
key = randint(0, (2**key_size) - 1)
plaintxt = randint(0, (2**block_size) - 1)
c = SimonCipher(key, key_size, block_size, 'ECB')
assert c.decrypt(c.encrypt(plaintxt)) == plaintxt, 'Test %r Failed with Random Key %r and Random Plaintext %r' % (x, hex(key), hex(plaintxt))
def test_simon128_128(self):
block_size = 128
key_size = 128
for x in range(self.test_cnt):
key = randint(0, (2**key_size) - 1)
plaintxt = randint(0, (2**block_size) - 1)
c = SimonCipher(key, key_size, block_size, 'ECB')
assert c.decrypt(c.encrypt(plaintxt)) == plaintxt, 'Test %r Failed with Random Key %r and Random Plaintext %r' % (x, hex(key), hex(plaintxt))
def test_simon128_192(self):
block_size = 128
key_size = 192
for x in range(self.test_cnt):
key = randint(0, (2**key_size) - 1)
plaintxt = randint(0, (2**block_size) - 1)
c = SimonCipher(key, key_size, block_size, 'ECB')
assert c.decrypt(c.encrypt(plaintxt)) == plaintxt, 'Test %r Failed with Random Key %r and Random Plaintext %r' % (x, hex(key), hex(plaintxt))
def test_simon128_256(self):
block_size = 128
key_size = 256
for x in range(self.test_cnt):
key = randint(0, (2**key_size) - 1)
plaintxt = randint(0, (2**block_size) - 1)
c = SimonCipher(key, key_size, block_size, 'ECB')
assert c.decrypt(c.encrypt(plaintxt)) == plaintxt, 'Test %r Failed with Random Key %r and Random Plaintext %r' % (x, hex(key), hex(plaintxt))
class TestCipherInitialization:
not_ints = [6.22, 'hello', bytearray(b'stuffandbytes'), bytearray([12, 34, 0xAA, 00, 0x00, 34]), '0x1234567']
def test_bad_keys_speck(self):
for bad_key in self.not_ints:
with pytest.raises(TypeError):
SpeckCipher(bad_key)
def test_bad_keys_simon(self):
for bad_key in self.not_ints:
with pytest.raises(TypeError):
SimonCipher(bad_key)
def test_bad_counters_speck(self):
for bad_counter in self.not_ints:
with pytest.raises(TypeError):
SpeckCipher(0, counter=bad_counter)
def test_bad_counters_simon(self):
for bad_counters in self.not_ints:
with pytest.raises(TypeError):
SimonCipher(0, counter=bad_counters)
def test_bad_ivs_speck(self):
for bad_iv in self.not_ints:
with pytest.raises(TypeError):
SpeckCipher(0, init=bad_iv)
def test_bad_ivs_simon(self):
for bad_iv in self.not_ints:
with pytest.raises(TypeError):
SimonCipher(0, init=bad_iv)
not_block_modes = [7.1231, 'ERT', 11]
def test_bad_modes_speck(self):
for bad_mode in self.not_block_modes:
with pytest.raises(ValueError):
SpeckCipher(0, mode=bad_mode)
def test_bad_modes_simon(self):
for bad_mode in self.not_block_modes:
with pytest.raises(ValueError):
SimonCipher(0, mode=bad_mode)
not_block_sizes = [10, 'steve', 11.8]
def test_bad_blocksizes_speck(self):
for bad_bsize in self.not_block_sizes:
with pytest.raises(KeyError):
SpeckCipher(0, block_size=bad_bsize)
def test_bad_blocksizes_simon(self):
for bad_bsize in self.not_block_sizes:
with pytest.raises(KeyError):
SimonCipher(0, block_size=bad_bsize)
not_key_sizes = [100000, 'eve', 11.8, 127]
def test_bad_keysize_speck(self):
for bad_ksize in self.not_key_sizes:
with pytest.raises(KeyError):
SpeckCipher(0, key_size=bad_ksize)
def test_bad_keysize_simon(self):
for bad_ksize in self.not_key_sizes:
with pytest.raises(KeyError):
SimonCipher(0, key_size=bad_ksize)
class TestCipherModesSpeck:
key = 0x1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100
plaintxt = 0x65736f6874206e49202e72656e6f6f70
iv = 0x123456789ABCDEF0
counter = 0x1
block_size = 128
key_size = 256
def test_ctr_mode_equivalent(self):
c1 = SpeckCipher(self.key, self.key_size, self.block_size, 'CTR', init=self.iv, counter=self.counter)
ctr_out = c1.encrypt(self.plaintxt)
c2 = SpeckCipher(self.key, self.key_size, self.block_size, 'ECB')
ecb_out = c2.encrypt(self.iv + self.counter)
ctr_equivalent = ecb_out ^ self.plaintxt
assert ctr_out == ctr_equivalent
c1 = SpeckCipher(self.key, self.key_size, self.block_size, 'CTR', init=self.iv, counter=self.counter)
c2 = SpeckCipher(self.key, self.key_size, self.block_size, 'ECB')
ecb_out = c2.encrypt(self.iv + self.counter)
ctr_equivalent ^= ecb_out
ctr_out = c1.decrypt(ctr_out)
assert self.plaintxt == ctr_equivalent == ctr_out
def test_ctr_mode_single_cycle(self):
self.counter = 0x01
c = SpeckCipher(self.key, self.key_size, self.block_size, 'CTR', init=self.iv, counter=self.counter)
ctr_out = c.encrypt(self.plaintxt)
self.counter = 0x01
c = SpeckCipher(self.key, self.key_size, self.block_size, 'CTR', init=self.iv, counter=self.counter)
output_plaintext = c.decrypt(ctr_out)
assert output_plaintext == self.plaintxt
def test_ctr_mode_chain(self):
c = SpeckCipher(self.key, self.key_size, self.block_size, 'CTR', init=self.iv, counter=self.counter)
ctr_out = 0
for x in range(1000):
ctr_out = c.encrypt(self.plaintxt)
c = SpeckCipher(self.key, self.key_size, self.block_size, 'ECB')
ctr_equivalent = 0
for x in range(1000):
ecb_out = c.encrypt(self.iv + self.counter)
self.counter += 1
ctr_equivalent = ecb_out ^ self.plaintxt
assert ctr_out == ctr_equivalent
def test_cbc_mode_single(self):
c = SpeckCipher(self.key, self.key_size, self.block_size, 'CBC', init=self.iv)
cbc_out = c.encrypt(self.plaintxt)
c = SpeckCipher(self.key, self.key_size, self.block_size, 'ECB')
cbc_equivalent = c.encrypt(self.iv ^ self.plaintxt)
assert cbc_out == cbc_equivalent
c = SpeckCipher(self.key, self.key_size, self.block_size, 'CBC', init=self.iv)
cbc_out = c.decrypt(cbc_out)
c = SpeckCipher(self.key, self.key_size, self.block_size, 'ECB')
cbc_equivalent = c.decrypt(cbc_equivalent) ^ self.iv
assert hex(cbc_out) == hex(cbc_equivalent) == hex(self.plaintxt)
def test_cbc_mode_chain(self):
c1 = SpeckCipher(self.key, self.key_size, self.block_size, 'CBC', init=self.iv)
c2 = SpeckCipher(self.key, self.key_size, self.block_size, 'ECB')
cbc_iv_equivalent = self.iv
for x in range(1000):
cbc_input = self.plaintxt ^ cbc_iv_equivalent
cbc_iv_equivalent = c2.encrypt(cbc_input)
cbc_out = c1.encrypt(self.plaintxt)
assert cbc_out == cbc_iv_equivalent
def test_pcbc_mode_single(self):
c = SpeckCipher(self.key, self.key_size, self.block_size, 'PCBC', init=self.iv)
pcbc_out = c.encrypt(self.plaintxt)
c = SpeckCipher(self.key, self.key_size, self.block_size, 'ECB')
pcbc_equivalent = c.encrypt(self.iv ^ self.plaintxt)
assert pcbc_out == pcbc_equivalent
def test_pcbc_mode_chain(self):
c = SpeckCipher(self.key, self.key_size, self.block_size, 'PCBC', init=self.iv)
pcbc_out = 0
for x in range(1000):
pcbc_out = c.encrypt(self.plaintxt)
c = SpeckCipher(self.key, self.key_size, self.block_size, 'ECB')
pcbc_equivalent = 0
for x in range(1000):
pcbc_input = self.plaintxt ^ self.iv
pcbc_equivalent = c.encrypt(pcbc_input)
self.iv = pcbc_equivalent ^ self.plaintxt
assert pcbc_out == pcbc_equivalent
def test_cfb_mode_equivalent(self):
c = SpeckCipher(self.key, self.key_size, self.block_size, 'CFB', init=self.iv)
cfb_encrypt = c.encrypt(self.plaintxt)
c = SpeckCipher(self.key, self.key_size, self.block_size, 'CFB', init=self.iv)
cfb_decrypt = c.decrypt(cfb_encrypt)
c = SpeckCipher(self.key, self.key_size, self.block_size, 'ECB')
ecb_out = c.encrypt(self.iv)
cfb_equivalent_encrypt = ecb_out ^ self.plaintxt
cfb_equivalent_decrypt = ecb_out ^ cfb_equivalent_encrypt
assert cfb_encrypt == cfb_equivalent_encrypt
assert cfb_decrypt == cfb_equivalent_decrypt
def test_cfb_mode_chain(self):
plaintxts = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
c = SpeckCipher(self.key, self.key_size, self.block_size, 'CFB', init=self.iv)
ciphertexts = [c.encrypt(x) for x in plaintxts]
c = SpeckCipher(self.key, self.key_size, self.block_size, 'CFB', init=self.iv)
decryptexts = [c.decrypt(x) for x in ciphertexts]
assert plaintxts == decryptexts
def test_ofb_mode_equivalent(self):
c = SpeckCipher(self.key, self.key_size, self.block_size, 'OFB', init=self.iv)
ofb_encrypt = c.encrypt(self.plaintxt)
c = SpeckCipher(self.key, self.key_size, self.block_size, 'OFB', init=self.iv)
ofb_decrypt = c.decrypt(ofb_encrypt)
c = SpeckCipher(self.key, self.key_size, self.block_size, 'ECB')
ecb_out = c.encrypt(self.iv)
ofb_equivalent_encrypt = ecb_out ^ self.plaintxt
ofb_equivalent_decrypt = ecb_out ^ ofb_equivalent_encrypt
assert ofb_encrypt == ofb_equivalent_encrypt
assert ofb_decrypt == ofb_equivalent_decrypt
def test_ofb_mode_chain(self):
plaintxts = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
c = SpeckCipher(self.key, self.key_size, self.block_size, 'OFB', init=self.iv)
ciphertexts = [c.encrypt(x) for x in plaintxts]
c = SpeckCipher(self.key, self.key_size, self.block_size, 'OFB', init=self.iv)
decryptexts = [c.decrypt(x) for x in ciphertexts]
assert plaintxts == decryptexts
class TestCipherModesSimon:
key = 0x1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100
plaintxt = 0x65736f6874206e49202e72656e6f6f70
iv = 0x123456789ABCDEF0
counter = 0x1
block_size = 128
key_size = 256
def test_ctr_mode_equivalent(self):
c = SimonCipher(self.key, self.key_size, self.block_size, 'CTR', init=self.iv, counter=self.counter)
ctr_out = c.encrypt(self.plaintxt)
c = SimonCipher(self.key, self.key_size, self.block_size, 'ECB')
ecb_out = c.encrypt(self.iv + self.counter)
ctr_equivalent = ecb_out ^ self.plaintxt
assert ctr_out == ctr_equivalent
def test_ctr_mode_single_cycle(self):
self.counter = 0x01
c = SimonCipher(self.key, self.key_size, self.block_size, 'CTR', init=self.iv, counter=self.counter)
ctr_out = c.encrypt(self.plaintxt)
self.counter = 0x01
c = SimonCipher(self.key, self.key_size, self.block_size, 'CTR', init=self.iv, counter=self.counter)
output_plaintext = c.decrypt(ctr_out)
assert output_plaintext == self.plaintxt
def test_ctr_mode_chain(self):
c = SimonCipher(self.key, self.key_size, self.block_size, 'CTR', init=self.iv, counter=self.counter)
ctr_out = 0
for x in range(1000):
ctr_out = c.encrypt(self.plaintxt)
c = SimonCipher(self.key, self.key_size, self.block_size, 'ECB')
ctr_equivalent = 0
for x in range(1000):
ecb_out = c.encrypt(self.iv + self.counter)
self.counter += 1
ctr_equivalent = ecb_out ^ self.plaintxt
assert ctr_out == ctr_equivalent
def test_cbc_mode_single(self):
c = SimonCipher(self.key, self.key_size, self.block_size, 'CBC', init=self.iv)
cbc_out = c.encrypt(self.plaintxt)
c = SimonCipher(self.key, self.key_size, self.block_size, 'ECB')
cbc_equivalent = c.encrypt(self.iv ^ self.plaintxt)
assert cbc_out == cbc_equivalent
c = SimonCipher(self.key, self.key_size, self.block_size, 'CBC', init=self.iv)
cbc_out = c.decrypt(cbc_out)
c = SimonCipher(self.key, self.key_size, self.block_size, 'ECB')
cbc_equivalent = c.decrypt(cbc_equivalent) ^ self.iv
assert hex(cbc_out) == hex(cbc_equivalent) == hex(self.plaintxt)
def test_cbc_mode_chain(self):
c1 = SimonCipher(self.key, self.key_size, self.block_size, 'CBC', init=self.iv)
c2 = SimonCipher(self.key, self.key_size, self.block_size, 'ECB')
cbc_iv_equivalent = self.iv
for x in range(1000):
cbc_input = self.plaintxt ^ cbc_iv_equivalent
cbc_iv_equivalent = c2.encrypt(cbc_input)
cbc_out = c1.encrypt(self.plaintxt)
assert cbc_out == cbc_iv_equivalent
def test_pcbc_mode_single(self):
c = SimonCipher(self.key, self.key_size, self.block_size, 'PCBC', init=self.iv)
pcbc_out = c.encrypt(self.plaintxt)
c = SimonCipher(self.key, self.key_size, self.block_size, 'ECB')
pcbc_equivalent = c.encrypt(self.iv ^ self.plaintxt)
assert pcbc_out == pcbc_equivalent
def test_pcbc_mode_chain(self):
c = SimonCipher(self.key, self.key_size, self.block_size, 'PCBC', init=self.iv)
cbc_out = 0
for x in range(1000):
cbc_out = c.encrypt(self.plaintxt)
c = SimonCipher(self.key, self.key_size, self.block_size, 'ECB')
pcbc_equivalent = 0
for x in range(1000):
pcbc_input = self.plaintxt ^ self.iv
pcbc_equivalent = c.encrypt(pcbc_input)
self.iv = pcbc_equivalent ^ self.plaintxt
assert cbc_out == pcbc_equivalent
def test_cfb_mode_equivalent(self):
c = SimonCipher(self.key, self.key_size, self.block_size, 'CFB', init=self.iv)
cfb_encrypt = c.encrypt(self.plaintxt)
c = SimonCipher(self.key, self.key_size, self.block_size, 'CFB', init=self.iv)
cfb_decrypt = c.decrypt(cfb_encrypt)
c = SimonCipher(self.key, self.key_size, self.block_size, 'ECB')
ecb_out = c.encrypt(self.iv)
cfb_equivalent_encrypt = ecb_out ^ self.plaintxt
cfb_equivalent_decrypt = ecb_out ^ cfb_equivalent_encrypt
assert cfb_encrypt == cfb_equivalent_encrypt
assert cfb_decrypt == cfb_equivalent_decrypt
def test_cfb_mode_chain(self):
plaintxts = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
c = SimonCipher(self.key, self.key_size, self.block_size, 'CFB', init=self.iv)
ciphertexts = [c.encrypt(x) for x in plaintxts]
c = SimonCipher(self.key, self.key_size, self.block_size, 'CFB', init=self.iv)
decryptexts = [c.decrypt(x) for x in ciphertexts]
assert plaintxts == decryptexts
def test_ofb_mode_equivalent(self):
c = SimonCipher(self.key, self.key_size, self.block_size, 'OFB', init=self.iv)
ofb_encrypt = c.encrypt(self.plaintxt)
c = SimonCipher(self.key, self.key_size, self.block_size, 'OFB', init=self.iv)
ofb_decrypt = c.decrypt(ofb_encrypt)
c = SimonCipher(self.key, self.key_size, self.block_size, 'ECB')
ecb_out = c.encrypt(self.iv)
ofb_equivalent_encrypt = ecb_out ^ self.plaintxt
ofb_equivalent_decrypt = ecb_out ^ ofb_equivalent_encrypt
assert ofb_encrypt == ofb_equivalent_encrypt
assert ofb_decrypt == ofb_equivalent_decrypt
def test_ofb_mode_chain(self):
plaintxts = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
c = SimonCipher(self.key, self.key_size, self.block_size, 'OFB', init=self.iv)
ciphertexts = [c.encrypt(x) for x in plaintxts]
c = SimonCipher(self.key, self.key_size, self.block_size, 'OFB', init=self.iv)
decryptexts = [c.decrypt(x) for x in ciphertexts]
assert plaintxts == decryptexts
| mit | -6,233,174,175,237,877,000 | 38.138539 | 153 | 0.609731 | false |
simonacca/TelegramLogHandler | TelegramLogHandler/handler.py | 1 | 1167 | import logging
class TelegramHandler(logging.Handler):
"""
A handler class which sends a Telegram message for each logging event.
"""
def __init__(self, token, ids):
"""
        Initialize the handler with the bot's token and a list of chat_id(s)
        of the conversations that should be notified by the handler.
"""
logging.Handler.__init__(self)
self.token = token
self.ids = ids
def emit(self, record):
"""
Emit a record.
Format the record and send it to the specified chats.
"""
try:
import requests
requests_handler = logging.getLogger("requests")
url = 'https://api.telegram.org/bot{}/sendMessage'.format(self.token)
requests_handler.propagate = False
for chat_id in self.ids:
payload = {
'chat_id':chat_id,
'text': self.format(record)
}
requests.post(url, data=payload)
requests_handler.propagate = True
except:
self.handleError(record)
| mit | -6,455,856,852,596,988,000 | 28.175 | 81 | 0.547558 | false |
mradamcox/arc2arches | scripts/shapefile_local.py | 1 | 45114 | """
shapefile.py
Provides read and write support for ESRI Shapefiles.
author: jlawhead<at>geospatialpython.com
date: 20140507
version: 1.2.1
Compatible with Python versions 2.4-3.x
version changelog: Fixed u() to just return the byte sequence on exception
"""
__version__ = "1.2.1"
from struct import pack, unpack, calcsize, error
import os
import sys
import time
import array
import tempfile
print "local pyshp"
#
# Constants for shape types
NULL = 0
POINT = 1
POLYLINE = 3
POLYGON = 5
MULTIPOINT = 8
POINTZ = 11
POLYLINEZ = 13
POLYGONZ = 15
MULTIPOINTZ = 18
POINTM = 21
POLYLINEM = 23
POLYGONM = 25
MULTIPOINTM = 28
MULTIPATCH = 31
PYTHON3 = sys.version_info[0] == 3
if PYTHON3:
xrange = range
def b(v):
if PYTHON3:
if isinstance(v, str):
# For python 3 encode str to bytes.
return v.encode('utf-8')
elif isinstance(v, bytes):
# Already bytes.
return v
else:
# Error.
raise Exception('Unknown input type')
else:
# For python 2 assume str passed in and return str.
return v
def u(v):
if PYTHON3:
# try/catch added 2014/05/07
# returned error on dbf of shapefile
# from www.naturalearthdata.com named
# "ne_110m_admin_0_countries".
# Just returning v as is seemed to fix
# the problem. This function could
# be condensed further.
try:
if isinstance(v, bytes):
# For python 3 decode bytes to str.
return v.decode('utf-8')
elif isinstance(v, str):
# Already str.
return v
else:
# Error.
raise Exception('Unknown input type')
except: return v
else:
# For python 2 assume str passed in and return str.
return v
def is_string(v):
if PYTHON3:
return isinstance(v, str)
else:
return isinstance(v, basestring)
class _Array(array.array):
"""Converts python tuples to lits of the appropritate type.
Used to unpack different shapefile header parts."""
def __repr__(self):
return str(self.tolist())
def signed_area(coords):
"""Return the signed area enclosed by a ring using the linear time
algorithm at http://www.cgafaq.info/wiki/Polygon_Area. A value >= 0
indicates a counter-clockwise oriented ring.
"""
xs, ys = map(list, zip(*coords))
xs.append(xs[1])
ys.append(ys[1])
return sum(xs[i]*(ys[i+1]-ys[i-1]) for i in range(1, len(coords)))/2.0
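# Worked example (illustrative), using closed rings as stored in shapefiles:
#   signed_area([(0,0), (1,0), (1,1), (0,1), (0,0)])  ->  1.0   (counter-clockwise)
#   signed_area([(0,0), (0,1), (1,1), (1,0), (0,0)])  -> -1.0   (clockwise)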
class _Shape:
def __init__(self, shapeType=None):
"""Stores the geometry of the different shape types
specified in the Shapefile spec. Shape types are
usually point, polyline, or polygons. Every shape type
except the "Null" type contains points at some level for
example verticies in a polygon. If a shape type has
multiple shapes containing points within a single
geometry record then those shapes are called parts. Parts
are designated by their starting index in geometry record's
list of shapes."""
self.shapeType = shapeType
self.points = []
@property
def __geo_interface__(self):
if self.shapeType in [POINT, POINTM, POINTZ]:
return {
'type': 'Point',
'coordinates': tuple(self.points[0])
}
elif self.shapeType in [MULTIPOINT, MULTIPOINTM, MULTIPOINTZ]:
return {
'type': 'MultiPoint',
'coordinates': tuple([tuple(p) for p in self.points])
}
elif self.shapeType in [POLYLINE, POLYLINEM, POLYLINEZ]:
if len(self.parts) == 1:
return {
'type': 'LineString',
'coordinates': tuple([tuple(p) for p in self.points])
}
else:
ps = None
coordinates = []
for part in self.parts:
if ps == None:
ps = part
continue
else:
coordinates.append(tuple([tuple(p) for p in self.points[ps:part]]))
ps = part
else:
coordinates.append(tuple([tuple(p) for p in self.points[part:]]))
return {
'type': 'MultiLineString',
'coordinates': tuple(coordinates)
}
elif self.shapeType in [POLYGON, POLYGONM, POLYGONZ]:
if len(self.parts) == 1:
return {
'type': 'Polygon',
'coordinates': (tuple([tuple(p) for p in self.points]),)
}
else:
ps = None
coordinates = []
for part in self.parts:
if ps == None:
ps = part
continue
else:
coordinates.append(tuple([tuple(p) for p in self.points[ps:part]]))
ps = part
else:
coordinates.append(tuple([tuple(p) for p in self.points[part:]]))
polys = []
poly = [coordinates[0]]
for coord in coordinates[1:]:
if signed_area(coord) < 0:
polys.append(poly)
poly = [coord]
else:
poly.append(coord)
polys.append(poly)
if len(polys) == 1:
return {
'type': 'Polygon',
'coordinates': tuple(polys[0])
}
elif len(polys) > 1:
return {
'type': 'MultiPolygon',
'coordinates': polys
}
class _ShapeRecord:
"""A shape object of any type."""
def __init__(self, shape=None, record=None):
self.shape = shape
self.record = record
class ShapefileException(Exception):
"""An exception to handle shapefile specific problems."""
pass
class Reader:
"""Reads the three files of a shapefile as a unit or
separately. If one of the three files (.shp, .shx,
.dbf) is missing no exception is thrown until you try
to call a method that depends on that particular file.
The .shx index file is used if available for efficiency
but is not required to read the geometry from the .shp
file. The "shapefile" argument in the constructor is the
name of the file you want to open.
You can instantiate a Reader without specifying a shapefile
and then specify one later with the load() method.
Only the shapefile headers are read upon loading. Content
within each file is only accessed when required and as
efficiently as possible. Shapefiles are usually not large
but they can be.
"""
def __init__(self, *args, **kwargs):
self.shp = None
self.shx = None
self.dbf = None
self.shapeName = "Not specified"
self._offsets = []
self.shpLength = None
self.numRecords = None
self.fields = []
self.__dbfHdrLength = 0
# See if a shapefile name was passed as an argument
if len(args) > 0:
if is_string(args[0]):
self.load(args[0])
return
if "shp" in kwargs.keys():
if hasattr(kwargs["shp"], "read"):
self.shp = kwargs["shp"]
if hasattr(self.shp, "seek"):
self.shp.seek(0)
if "shx" in kwargs.keys():
if hasattr(kwargs["shx"], "read"):
self.shx = kwargs["shx"]
if hasattr(self.shx, "seek"):
self.shx.seek(0)
if "dbf" in kwargs.keys():
if hasattr(kwargs["dbf"], "read"):
self.dbf = kwargs["dbf"]
if hasattr(self.dbf, "seek"):
self.dbf.seek(0)
if self.shp or self.dbf:
self.load()
else:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object.")
def load(self, shapefile=None):
"""Opens a shapefile from a filename or file-like
object. Normally this method would be called by the
constructor with the file object or file name as an
argument."""
if shapefile:
(shapeName, ext) = os.path.splitext(shapefile)
self.shapeName = shapeName
try:
self.shp = open("%s.shp" % shapeName, "rb")
except IOError:
raise ShapefileException("Unable to open %s.shp" % shapeName)
try:
self.shx = open("%s.shx" % shapeName, "rb")
except IOError:
raise ShapefileException("Unable to open %s.shx" % shapeName)
try:
self.dbf = open("%s.dbf" % shapeName, "rb")
except IOError:
raise ShapefileException("Unable to open %s.dbf" % shapeName)
if self.shp:
self.__shpHeader()
if self.dbf:
self.__dbfHeader()
def __getFileObj(self, f):
"""Checks to see if the requested shapefile file object is
available. If not a ShapefileException is raised."""
if not f:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object.")
if self.shp and self.shpLength is None:
self.load()
if self.dbf and len(self.fields) == 0:
self.load()
return f
def __restrictIndex(self, i):
"""Provides list-like handling of a record index with a clearer
error message if the index is out of bounds."""
if self.numRecords:
rmax = self.numRecords - 1
if abs(i) > rmax:
raise IndexError("Shape or Record index out of range.")
if i < 0: i = range(self.numRecords)[i]
return i
def __shpHeader(self):
"""Reads the header information from a .shp or .shx file."""
if not self.shp:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object. (no shp file found")
shp = self.shp
# File length (16-bit word * 2 = bytes)
shp.seek(24)
self.shpLength = unpack(">i", shp.read(4))[0] * 2
# Shape type
shp.seek(32)
self.shapeType= unpack("<i", shp.read(4))[0]
# The shapefile's bounding box (lower left, upper right)
self.bbox = _Array('d', unpack("<4d", shp.read(32)))
# Elevation
self.elevation = _Array('d', unpack("<2d", shp.read(16)))
# Measure
self.measure = _Array('d', unpack("<2d", shp.read(16)))
def __shape(self):
"""Returns the header info and geometry for a single shape."""
f = self.__getFileObj(self.shp)
record = _Shape()
nParts = nPoints = zmin = zmax = mmin = mmax = None
(recNum, recLength) = unpack(">2i", f.read(8))
# Determine the start of the next record
next = f.tell() + (2 * recLength)
shapeType = unpack("<i", f.read(4))[0]
record.shapeType = shapeType
# For Null shapes create an empty points list for consistency
if shapeType == 0:
record.points = []
# All shape types capable of having a bounding box
elif shapeType in (3,5,8,13,15,18,23,25,28,31):
record.bbox = _Array('d', unpack("<4d", f.read(32)))
# Shape types with parts
if shapeType in (3,5,13,15,23,25,31):
nParts = unpack("<i", f.read(4))[0]
# Shape types with points
if shapeType in (3,5,8,13,15,23,25,31):
nPoints = unpack("<i", f.read(4))[0]
# Read parts
if nParts:
record.parts = _Array('i', unpack("<%si" % nParts, f.read(nParts * 4)))
# Read part types for Multipatch - 31
if shapeType == 31:
record.partTypes = _Array('i', unpack("<%si" % nParts, f.read(nParts * 4)))
# Read points - produces a list of [x,y] values
if nPoints:
record.points = [_Array('d', unpack("<2d", f.read(16))) for p in range(nPoints)]
# Read z extremes and values
if shapeType in (13,15,18,31):
(zmin, zmax) = unpack("<2d", f.read(16))
record.z = _Array('d', unpack("<%sd" % nPoints, f.read(nPoints * 8)))
# Read m extremes and values if header m values do not equal 0.0
if shapeType in (13,15,18,23,25,28,31) and not 0.0 in self.measure:
(mmin, mmax) = unpack("<2d", f.read(16))
# Measure values less than -10e38 are nodata values according to the spec
record.m = []
for m in _Array('d', unpack("<%sd" % nPoints, f.read(nPoints * 8))):
if m > -10e38:
record.m.append(m)
else:
record.m.append(None)
# Read a single point
if shapeType in (1,11,21):
record.points = [_Array('d', unpack("<2d", f.read(16)))]
# Read a single Z value
if shapeType == 11:
record.z = unpack("<d", f.read(8))
# Read a single M value
if shapeType in (11,21):
record.m = unpack("<d", f.read(8))
# Seek to the end of this record as defined by the record header because
# the shapefile spec doesn't require the actual content to meet the header
# definition. Probably allowed for lazy feature deletion.
f.seek(next)
return record
def __shapeIndex(self, i=None):
"""Returns the offset in a .shp file for a shape based on information
in the .shx index file."""
shx = self.shx
if not shx:
return None
if not self._offsets:
# File length (16-bit word * 2 = bytes) - header length
shx.seek(24)
shxRecordLength = (unpack(">i", shx.read(4))[0] * 2) - 100
numRecords = shxRecordLength // 8
# Jump to the first record.
shx.seek(100)
for r in range(numRecords):
# Offsets are 16-bit words just like the file length
self._offsets.append(unpack(">i", shx.read(4))[0] * 2)
shx.seek(shx.tell() + 4)
if not i == None:
return self._offsets[i]
def shape(self, i=0):
"""Returns a shape object for a shape in the the geometry
record file."""
shp = self.__getFileObj(self.shp)
i = self.__restrictIndex(i)
offset = self.__shapeIndex(i)
if not offset:
# Shx index not available so iterate the full list.
for j,k in enumerate(self.iterShapes()):
if j == i:
return k
shp.seek(offset)
return self.__shape()
def shapes(self):
"""Returns all shapes in a shapefile."""
shp = self.__getFileObj(self.shp)
# Found shapefiles which report incorrect
# shp file length in the header. Can't trust
# that so we seek to the end of the file
# and figure it out.
shp.seek(0,2)
self.shpLength = shp.tell()
shp.seek(100)
shapes = []
while shp.tell() < self.shpLength:
shapes.append(self.__shape())
return shapes
def iterShapes(self):
"""Serves up shapes in a shapefile as an iterator. Useful
for handling large shapefiles."""
shp = self.__getFileObj(self.shp)
shp.seek(0,2)
self.shpLength = shp.tell()
shp.seek(100)
while shp.tell() < self.shpLength:
yield self.__shape()
def __dbfHeaderLength(self):
"""Retrieves the header length of a dbf file header."""
if not self.__dbfHdrLength:
if not self.dbf:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object. (no dbf file found)")
dbf = self.dbf
(self.numRecords, self.__dbfHdrLength) = \
unpack("<xxxxLH22x", dbf.read(32))
return self.__dbfHdrLength
def __dbfHeader(self):
"""Reads a dbf header. Xbase-related code borrows heavily from ActiveState Python Cookbook Recipe 362715 by Raymond Hettinger"""
if not self.dbf:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object. (no dbf file found)")
dbf = self.dbf
headerLength = self.__dbfHeaderLength()
numFields = (headerLength - 33) // 32
for field in range(numFields):
fieldDesc = list(unpack("<11sc4xBB14x", dbf.read(32)))
name = 0
idx = 0
if b("\x00") in fieldDesc[name]:
idx = fieldDesc[name].index(b("\x00"))
else:
idx = len(fieldDesc[name]) - 1
fieldDesc[name] = fieldDesc[name][:idx]
fieldDesc[name] = u(fieldDesc[name])
fieldDesc[name] = fieldDesc[name].lstrip()
fieldDesc[1] = u(fieldDesc[1])
self.fields.append(fieldDesc)
terminator = dbf.read(1)
assert terminator == b("\r")
self.fields.insert(0, ('DeletionFlag', 'C', 1, 0))
def __recordFmt(self):
"""Calculates the size of a .shp geometry record."""
if not self.numRecords:
self.__dbfHeader()
fmt = ''.join(['%ds' % fieldinfo[2] for fieldinfo in self.fields])
fmtSize = calcsize(fmt)
return (fmt, fmtSize)
def __record(self):
"""Reads and returns a dbf record row as a list of values."""
f = self.__getFileObj(self.dbf)
recFmt = self.__recordFmt()
recordContents = unpack(recFmt[0], f.read(recFmt[1]))
if recordContents[0] != b(' '):
# deleted record
return None
record = []
for (name, typ, size, deci), value in zip(self.fields,
recordContents):
if name == 'DeletionFlag':
continue
elif not value.strip():
record.append(value)
continue
elif typ == "N":
value = value.replace(b('\0'), b('')).strip()
if value == b(''):
value = 0
elif deci:
value = float(value)
else:
value = int(value)
elif typ == b('D'):
try:
y, m, d = int(value[:4]), int(value[4:6]), int(value[6:8])
value = [y, m, d]
except:
value = value.strip()
elif typ == b('L'):
value = (value in b('YyTt') and b('T')) or \
(value in b('NnFf') and b('F')) or b('?')
else:
value = u(value)
value = value.strip()
record.append(value)
return record
def record(self, i=0):
"""Returns a specific dbf record based on the supplied index."""
f = self.__getFileObj(self.dbf)
if not self.numRecords:
self.__dbfHeader()
i = self.__restrictIndex(i)
recSize = self.__recordFmt()[1]
f.seek(0)
f.seek(self.__dbfHeaderLength() + (i * recSize))
return self.__record()
def records(self):
"""Returns all records in a dbf file."""
if not self.numRecords:
self.__dbfHeader()
records = []
f = self.__getFileObj(self.dbf)
f.seek(self.__dbfHeaderLength())
for i in range(self.numRecords):
r = self.__record()
if r:
records.append(r)
return records
def iterRecords(self):
"""Serves up records in a dbf file as an iterator.
Useful for large shapefiles or dbf files."""
if not self.numRecords:
self.__dbfHeader()
f = self.__getFileObj(self.dbf)
f.seek(self.__dbfHeaderLength())
for i in xrange(self.numRecords):
r = self.__record()
if r:
yield r
def shapeRecord(self, i=0):
"""Returns a combination geometry and attribute record for the
supplied record index."""
i = self.__restrictIndex(i)
return _ShapeRecord(shape=self.shape(i), record=self.record(i))
def shapeRecords(self):
"""Returns a list of combination geometry/attribute records for
all records in a shapefile."""
shapeRecords = []
return [_ShapeRecord(shape=rec[0], record=rec[1]) \
for rec in zip(self.shapes(), self.records())]
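# Illustrative sketch, not part of the original library: typical read-side use
# of the Reader API above. The base file name 'example_shapefile' is an
# assumption for demonstration only.
def _example_reader_usage(filename='example_shapefile'):
    r = Reader(filename)
    first = r.shapeRecord(0)          # geometry + attributes of record 0
    fields = r.fields                 # dbf field descriptors
    for shp, rec in zip(r.iterShapes(), r.iterRecords()):
        pass                          # stream through large files lazily
    return first, fields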
class Writer:
"""Provides write support for ESRI Shapefiles."""
def __init__(self, shapeType=None):
self._shapes = []
self.fields = []
self.records = []
self.shapeType = shapeType
self.shp = None
self.shx = None
self.dbf = None
# Geometry record offsets and lengths for writing shx file.
self._offsets = []
self._lengths = []
# Use deletion flags in dbf? Default is false (0).
self.deletionFlag = 0
def __getFileObj(self, f):
"""Safety handler to verify file-like objects"""
if not f:
raise ShapefileException("No file-like object available.")
elif hasattr(f, "write"):
return f
else:
pth = os.path.split(f)[0]
if pth and not os.path.exists(pth):
os.makedirs(pth)
return open(f, "wb")
def __shpFileLength(self):
"""Calculates the file length of the shp file."""
# Start with header length
size = 100
# Calculate size of all shapes
for s in self._shapes:
# Add in record header and shape type fields
size += 12
# nParts and nPoints do not apply to all shapes
#if self.shapeType not in (0,1):
# nParts = len(s.parts)
# nPoints = len(s.points)
if hasattr(s,'parts'):
nParts = len(s.parts)
if hasattr(s,'points'):
nPoints = len(s.points)
# All shape types capable of having a bounding box
if self.shapeType in (3,5,8,13,15,18,23,25,28,31):
size += 32
# Shape types with parts
if self.shapeType in (3,5,13,15,23,25,31):
# Parts count
size += 4
# Parts index array
size += nParts * 4
# Shape types with points
if self.shapeType in (3,5,8,13,15,23,25,31):
# Points count
size += 4
# Points array
size += 16 * nPoints
# Calc size of part types for Multipatch (31)
if self.shapeType == 31:
size += nParts * 4
# Calc z extremes and values
if self.shapeType in (13,15,18,31):
# z extremes
size += 16
# z array
size += 8 * nPoints
# Calc m extremes and values
if self.shapeType in (23,25,31):
# m extremes
size += 16
# m array
size += 8 * nPoints
# Calc a single point
if self.shapeType in (1,11,21):
size += 16
# Calc a single Z value
if self.shapeType == 11:
size += 8
# Calc a single M value
if self.shapeType in (11,21):
size += 8
# Calculate size as 16-bit words
size //= 2
return size
def __bbox(self, shapes, shapeTypes=[]):
x = []
y = []
for s in shapes:
shapeType = self.shapeType
if shapeTypes:
shapeType = shapeTypes[shapes.index(s)]
px, py = list(zip(*s.points))[:2]
x.extend(px)
y.extend(py)
return [min(x), min(y), max(x), max(y)]
def __zbox(self, shapes, shapeTypes=[]):
z = []
for s in shapes:
try:
for p in s.points:
z.append(p[2])
except IndexError:
pass
if not z: z.append(0)
return [min(z), max(z)]
def __mbox(self, shapes, shapeTypes=[]):
m = [0]
for s in shapes:
try:
for p in s.points:
m.append(p[3])
except IndexError:
pass
return [min(m), max(m)]
def bbox(self):
"""Returns the current bounding box for the shapefile which is
the lower-left and upper-right corners. It does not contain the
elevation or measure extremes."""
return self.__bbox(self._shapes)
def zbox(self):
"""Returns the current z extremes for the shapefile."""
return self.__zbox(self._shapes)
def mbox(self):
"""Returns the current m extremes for the shapefile."""
return self.__mbox(self._shapes)
def __shapefileHeader(self, fileObj, headerType='shp'):
"""Writes the specified header type to the specified file-like object.
Several of the shapefile formats are so similar that a single generic
method to read or write them is warranted."""
f = self.__getFileObj(fileObj)
f.seek(0)
# File code, Unused bytes
f.write(pack(">6i", 9994,0,0,0,0,0))
# File length (Bytes / 2 = 16-bit words)
if headerType == 'shp':
f.write(pack(">i", self.__shpFileLength()))
elif headerType == 'shx':
f.write(pack('>i', ((100 + (len(self._shapes) * 8)) // 2)))
# Version, Shape type
f.write(pack("<2i", 1000, self.shapeType))
# The shapefile's bounding box (lower left, upper right)
if self.shapeType != 0:
try:
f.write(pack("<4d", *self.bbox()))
except error:
raise ShapefileException("Failed to write shapefile bounding box. Floats required.")
else:
f.write(pack("<4d", 0,0,0,0))
# Elevation
z = self.zbox()
# Measure
m = self.mbox()
try:
f.write(pack("<4d", z[0], z[1], m[0], m[1]))
except error:
raise ShapefileException("Failed to write shapefile elevation and measure values. Floats required.")
def __dbfHeader(self):
"""Writes the dbf header and field descriptors."""
f = self.__getFileObj(self.dbf)
f.seek(0)
version = 3
year, month, day = time.localtime()[:3]
year -= 1900
# Remove deletion flag placeholder from fields
for field in self.fields:
if field[0].startswith("Deletion"):
self.fields.remove(field)
numRecs = len(self.records)
numFields = len(self.fields)
headerLength = numFields * 32 + 33
recordLength = sum([int(field[2]) for field in self.fields]) + 1
header = pack('<BBBBLHH20x', version, year, month, day, numRecs,
headerLength, recordLength)
f.write(header)
# Field descriptors
for field in self.fields:
name, fieldType, size, decimal = field
name = b(name)
name = name.replace(b(' '), b('_'))
name = name.ljust(11).replace(b(' '), b('\x00'))
fieldType = b(fieldType)
size = int(size)
fld = pack('<11sc4xBB14x', name, fieldType, size, decimal)
f.write(fld)
# Terminator
f.write(b('\r'))
def __shpRecords(self):
"""Write the shp records"""
f = self.__getFileObj(self.shp)
f.seek(100)
recNum = 1
for s in self._shapes:
self._offsets.append(f.tell())
# Record number, Content length place holder
f.write(pack(">2i", recNum, 0))
recNum += 1
start = f.tell()
# Shape Type
if self.shapeType != 31:
s.shapeType = self.shapeType
f.write(pack("<i", s.shapeType))
# All shape types capable of having a bounding box
if s.shapeType in (3,5,8,13,15,18,23,25,28,31):
try:
f.write(pack("<4d", *self.__bbox([s])))
except error:
raise ShapefileException("Falied to write bounding box for record %s. Expected floats." % recNum)
# Shape types with parts
if s.shapeType in (3,5,13,15,23,25,31):
# Number of parts
f.write(pack("<i", len(s.parts)))
# Shape types with multiple points per record
if s.shapeType in (3,5,8,13,15,23,25,31):
# Number of points
f.write(pack("<i", len(s.points)))
# Write part indexes
if s.shapeType in (3,5,13,15,23,25,31):
for p in s.parts:
f.write(pack("<i", p))
# Part types for Multipatch (31)
if s.shapeType == 31:
for pt in s.partTypes:
f.write(pack("<i", pt))
# Write points for multiple-point records
if s.shapeType in (3,5,8,13,15,23,25,31):
try:
[f.write(pack("<2d", *p[:2])) for p in s.points]
except error:
raise ShapefileException("Failed to write points for record %s. Expected floats." % recNum)
# Write z extremes and values
if s.shapeType in (13,15,18,31):
try:
f.write(pack("<2d", *self.__zbox([s])))
except error:
raise ShapefileException("Failed to write elevation extremes for record %s. Expected floats." % recNum)
try:
if hasattr(s,"z"):
f.write(pack("<%sd" % len(s.z), *s.z))
else:
[f.write(pack("<d", p[2])) for p in s.points]
except error:
raise ShapefileException("Failed to write elevation values for record %s. Expected floats." % recNum)
# Write m extremes and values
if s.shapeType in (13,15,18,23,25,28,31):
try:
if hasattr(s,"m"):
f.write(pack("<%sd" % len(s.m), *s.m))
else:
f.write(pack("<2d", *self.__mbox([s])))
except error:
raise ShapefileException("Failed to write measure extremes for record %s. Expected floats" % recNum)
try:
[f.write(pack("<d", p[3])) for p in s.points]
except error:
raise ShapefileException("Failed to write measure values for record %s. Expected floats" % recNum)
# Write a single point
if s.shapeType in (1,11,21):
try:
f.write(pack("<2d", s.points[0][0], s.points[0][1]))
except error:
raise ShapefileException("Failed to write point for record %s. Expected floats." % recNum)
# Write a single Z value
if s.shapeType == 11:
if hasattr(s, "z"):
try:
if not s.z:
s.z = (0,)
f.write(pack("<d", s.z[0]))
except error:
raise ShapefileException("Failed to write elevation value for record %s. Expected floats." % recNum)
else:
try:
if len(s.points[0])<3:
s.points[0].append(0)
f.write(pack("<d", s.points[0][2]))
except error:
raise ShapefileException("Failed to write elevation value for record %s. Expected floats." % recNum)
# Write a single M value
if s.shapeType in (11,21):
if hasattr(s, "m"):
try:
if not s.m:
s.m = (0,)
f.write(pack("<1d", s.m[0]))
except error:
raise ShapefileException("Failed to write measure value for record %s. Expected floats." % recNum)
else:
try:
if len(s.points[0])<4:
s.points[0].append(0)
f.write(pack("<1d", s.points[0][3]))
except error:
raise ShapefileException("Failed to write measure value for record %s. Expected floats." % recNum)
# Finalize record length as 16-bit words
finish = f.tell()
length = (finish - start) // 2
self._lengths.append(length)
# start - 4 bytes is the content length field
f.seek(start-4)
f.write(pack(">i", length))
f.seek(finish)
def __shxRecords(self):
"""Writes the shx records."""
f = self.__getFileObj(self.shx)
f.seek(100)
for i in range(len(self._shapes)):
f.write(pack(">i", self._offsets[i] // 2))
f.write(pack(">i", self._lengths[i]))
def __dbfRecords(self):
"""Writes the dbf records."""
f = self.__getFileObj(self.dbf)
for record in self.records:
if not self.fields[0][0].startswith("Deletion"):
f.write(b(' ')) # deletion flag
for (fieldName, fieldType, size, dec), value in zip(self.fields, record):
fieldType = fieldType.upper()
size = int(size)
if fieldType.upper() == "N":
value = str(value).rjust(size)
elif fieldType == 'L':
value = str(value)[0].upper()
else:
value = str(value)[:size].ljust(size)
assert len(value) == size
value = b(value)
f.write(value)
def null(self):
"""Creates a null shape."""
self._shapes.append(_Shape(NULL))
def point(self, x, y, z=0, m=0):
"""Creates a point shape."""
pointShape = _Shape(self.shapeType)
pointShape.points.append([x, y, z, m])
self._shapes.append(pointShape)
def line(self, parts=[], shapeType=POLYLINE):
"""Creates a line shape. This method is just a convienience method
which wraps 'poly()'.
"""
self.poly(parts, shapeType, [])
def poly(self, parts=[], shapeType=POLYGON, partTypes=[]):
"""Creates a shape that has multiple collections of points (parts)
including lines, polygons, and even multipoint shapes. If no shape type
is specified it defaults to 'polygon'. If no part types are specified
(which they normally won't be) then all parts default to the shape type.
"""
polyShape = _Shape(shapeType)
polyShape.parts = []
polyShape.points = []
# Make sure polygons are closed
if shapeType in (5,15,25,31):
for part in parts:
if part[0] != part[-1]:
part.append(part[0])
for part in parts:
polyShape.parts.append(len(polyShape.points))
for point in part:
# Ensure point is list
if not isinstance(point, list):
point = list(point)
# Make sure point has z and m values
while len(point) < 4:
point.append(0)
polyShape.points.append(point)
if polyShape.shapeType == 31:
if not partTypes:
for part in parts:
partTypes.append(polyShape.shapeType)
polyShape.partTypes = partTypes
self._shapes.append(polyShape)
def field(self, name, fieldType="C", size="50", decimal=0):
"""Adds a dbf field descriptor to the shapefile."""
self.fields.append((name, fieldType, size, decimal))
def record(self, *recordList, **recordDict):
"""Creates a dbf attribute record. You can submit either a sequence of
field values or keyword arguments of field names and values. Before
adding records you must add fields for the record values using the
fields() method. If the record values exceed the number of fields the
extra ones won't be added. In the case of using keyword arguments to specify
field/value pairs only fields matching the already registered fields
will be added."""
record = []
fieldCount = len(self.fields)
# Compensate for deletion flag
if self.fields[0][0].startswith("Deletion"): fieldCount -= 1
if recordList:
[record.append(recordList[i]) for i in range(fieldCount)]
elif recordDict:
for field in self.fields:
if field[0] in recordDict:
val = recordDict[field[0]]
if val is None:
record.append("")
else:
record.append(val)
if record:
self.records.append(record)
def shape(self, i):
return self._shapes[i]
def shapes(self):
"""Return the current list of shapes."""
return self._shapes
def saveShp(self, target):
"""Save an shp file."""
if not hasattr(target, "write"):
target = os.path.splitext(target)[0] + '.shp'
if not self.shapeType:
self.shapeType = self._shapes[0].shapeType
self.shp = self.__getFileObj(target)
self.__shapefileHeader(self.shp, headerType='shp')
self.__shpRecords()
def saveShx(self, target):
"""Save an shx file."""
if not hasattr(target, "write"):
target = os.path.splitext(target)[0] + '.shx'
if not self.shapeType:
self.shapeType = self._shapes[0].shapeType
self.shx = self.__getFileObj(target)
self.__shapefileHeader(self.shx, headerType='shx')
self.__shxRecords()
def saveDbf(self, target):
"""Save a dbf file."""
if not hasattr(target, "write"):
target = os.path.splitext(target)[0] + '.dbf'
self.dbf = self.__getFileObj(target)
self.__dbfHeader()
self.__dbfRecords()
def save(self, target=None, shp=None, shx=None, dbf=None):
"""Save the shapefile data to three files or
three file-like objects. SHP and DBF files can also
be written exclusively using saveShp, saveShx, and saveDbf respectively.
If target is specified but not shp,shx, or dbf then the target path and
file name are used. If no options or specified, a unique base file name
is generated to save the files and the base file name is returned as a
string.
"""
# Create a unique file name if one is not defined
if shp:
self.saveShp(shp)
if shx:
self.saveShx(shx)
if dbf:
self.saveDbf(dbf)
elif not shp and not shx and not dbf:
generated = False
if not target:
temp = tempfile.NamedTemporaryFile(prefix="shapefile_",dir=os.getcwd())
target = temp.name
generated = True
self.saveShp(target)
self.shp.close()
self.saveShx(target)
self.shx.close()
self.saveDbf(target)
self.dbf.close()
if generated:
return target
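# Illustrative sketch, not part of the original library: minimal write-side use
# of the Writer API above -- one field, one point geometry, one matching
# attribute record, then save. The base file name 'example_shapefile' is an
# assumption for demonstration only.
def _example_writer_usage():
    w = Writer(shapeType=POINT)
    w.field("NAME", "C", "40")        # dbf field descriptor
    w.point(10.0, 20.0)               # one point geometry
    w.record("first point")           # one attribute record per shape
    w.save("example_shapefile")       # writes the .shp, .shx and .dbf files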
class Editor(Writer):
def __init__(self, shapefile=None, shapeType=POINT, autoBalance=1):
self.autoBalance = autoBalance
if not shapefile:
Writer.__init__(self, shapeType)
elif is_string(shapefile):
base = os.path.splitext(shapefile)[0]
if os.path.isfile("%s.shp" % base):
r = Reader(base)
Writer.__init__(self, r.shapeType)
self._shapes = r.shapes()
self.fields = r.fields
self.records = r.records()
def select(self, expr):
"""Select one or more shapes (to be implemented)"""
# TODO: Implement expressions to select shapes.
pass
def delete(self, shape=None, part=None, point=None):
"""Deletes the specified part of any shape by specifying a shape
number, part number, or point number."""
# shape, part, point
if shape and part and point:
del self._shapes[shape][part][point]
# shape, part
elif shape and part and not point:
del self._shapes[shape][part]
# shape
elif shape and not part and not point:
del self._shapes[shape]
# point
elif not shape and not part and point:
for s in self._shapes:
if s.shapeType == 1:
del self._shapes[point]
else:
for part in s.parts:
del s[part][point]
# part, point
elif not shape and part and point:
for s in self._shapes:
del s[part][point]
# part
elif not shape and part and not point:
for s in self._shapes:
del s[part]
def point(self, x=None, y=None, z=None, m=None, shape=None, part=None, point=None, addr=None):
"""Creates/updates a point shape. The arguments allows
you to update a specific point by shape, part, point of any
shape type."""
# shape, part, point
if shape and part and point:
try: self._shapes[shape]
except IndexError: self._shapes.append([])
try: self._shapes[shape][part]
except IndexError: self._shapes[shape].append([])
try: self._shapes[shape][part][point]
except IndexError: self._shapes[shape][part].append([])
p = self._shapes[shape][part][point]
if x: p[0] = x
if y: p[1] = y
if z: p[2] = z
if m: p[3] = m
self._shapes[shape][part][point] = p
# shape, part
elif shape and part and not point:
try: self._shapes[shape]
except IndexError: self._shapes.append([])
try: self._shapes[shape][part]
except IndexError: self._shapes[shape].append([])
points = self._shapes[shape][part]
for i in range(len(points)):
p = points[i]
if x: p[0] = x
if y: p[1] = y
if z: p[2] = z
if m: p[3] = m
self._shapes[shape][part][i] = p
# shape
elif shape and not part and not point:
try: self._shapes[shape]
except IndexError: self._shapes.append([])
# point
# part
if addr:
shape, part, point = addr
self._shapes[shape][part][point] = [x, y, z, m]
else:
Writer.point(self, x, y, z, m)
if self.autoBalance:
self.balance()
def validate(self):
"""An optional method to try and validate the shapefile
as much as possible before writing it (not implemented)."""
#TODO: Implement validation method
pass
def balance(self):
"""Adds a corresponding empty attribute or null geometry record depending
on which type of record was created to make sure all three files
are in synch."""
if len(self.records) > len(self._shapes):
self.null()
elif len(self.records) < len(self._shapes):
self.record()
def __fieldNorm(self, fieldName):
"""Normalizes a dbf field name to fit within the spec and the
expectations of certain ESRI software."""
        if len(fieldName) > 11: fieldName = fieldName[:11]
        fieldName = fieldName.upper()
        fieldName = fieldName.replace(' ', '_')
        return fieldName
# Begin Testing
def test():
import doctest
doctest.NORMALIZE_WHITESPACE = 1
doctest.testfile("README.txt", verbose=1)
if __name__ == "__main__":
"""
Doctests are contained in the file 'README.txt'. This library was originally developed
using Python 2.3. Python 2.4 and above have some excellent improvements in the built-in
testing libraries but for now unit testing is done using what's available in
2.3.
"""
test()
| mit | -4,559,625,308,051,093,500 | 37.297114 | 136 | 0.519972 | false |
Septima/qgis-GeoDanmarkCheck | GeoDanmarkChecker/fot/rules/rule.py | 1 | 1036 | # -*- coding: utf-8 -*-
"""
Routines for quality control of GeoDanmark map data
Copyright (C) 2016
Developed by Septima.dk for the Danish Agency for Data Supply and Efficiency
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
class Rule(object):
def __init__(self, name):
self.name = name
def execute(self, beforerepo, afterrepo, errorreporter, progressreporter):
"""Executes the rule. Must be overridden"""
raise NotImplementedError() | gpl-3.0 | 7,447,081,774,711,142,000 | 37.407407 | 78 | 0.75 | false |
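# Illustrative sketch, not part of the original module: a minimal Rule subclass
# showing the expected override of execute(). The rule name and the no-op body
# are assumptions for demonstration only.
class ExampleNoopRule(Rule):
    def __init__(self):
        Rule.__init__(self, 'example-noop')
    def execute(self, beforerepo, afterrepo, errorreporter, progressreporter):
        # A real rule would compare features in beforerepo/afterrepo and
        # report problems through errorreporter while updating progressreporter.
        pass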
yugangzhang/chxanalys | chxanalys/chx_compress.py | 1 | 37856 | import os,shutil
from glob import iglob
import matplotlib.pyplot as plt
from chxanalys.chx_libs import (np, roi, time, datetime, os, getpass, db,
get_images,LogNorm, RUN_GUI)
from chxanalys.chx_generic_functions import (create_time_slice,get_detector, get_fields, get_sid_filenames,
load_data)
import struct
from tqdm import tqdm
from contextlib import closing
from multiprocessing import Pool
import dill
import sys
import gc
import pickle as pkl
from eiger_io.pims_reader import EigerImages
def run_dill_encoded(what):
fun, args = dill.loads(what)
return fun(*args)
def apply_async(pool, fun, args, callback=None):
return pool.apply_async( run_dill_encoded, (dill.dumps((fun, args)),), callback= callback)
def map_async(pool, fun, args ):
return pool.map_async(run_dill_encoded, (dill.dumps((fun, args)),))
def pass_FD(FD,n):
#FD.rdframe(n)
FD.seekimg(n)
def go_through_FD(FD):
for i in range(FD.beg, FD.end):
pass_FD(FD,i)
def compress_eigerdata( images, mask, md, filename=None, force_compress=False,
bad_pixel_threshold=1e15, bad_pixel_low_threshold=0,
hot_pixel_threshold=2**30, nobytes=4,bins=1, bad_frame_list=None,
para_compress= False, num_sub=100, dtypes='uid',reverse =True,
num_max_para_process=500, with_pickle=False, direct_load_data=False, data_path=None):
end= len(images)//bins
if filename is None:
filename= '/XF11ID/analysis/Compressed_Data' +'/uid_%s.cmp'%md['uid']
if dtypes!= 'uid':
para_compress= False
else:
if para_compress:
images='foo'
#para_compress= True
#print( dtypes )
if force_compress:
print ("Create a new compress file with filename as :%s."%filename)
if para_compress:
print( 'Using a multiprocess to compress the data.')
return para_compress_eigerdata( images, mask, md, filename,
bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold,
bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes= nobytes, bins=bins,
num_sub=num_sub, dtypes=dtypes, reverse=reverse,
num_max_para_process=num_max_para_process, with_pickle= with_pickle,
direct_load_data= direct_load_data,data_path=data_path)
else:
return init_compress_eigerdata( images, mask, md, filename,
bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold,
bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes= nobytes, bins=bins,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path )
else:
if not os.path.exists( filename ):
print ("Create a new compress file with filename as :%s."%filename)
if para_compress:
print( 'Using a multiprocess to compress the data.')
return para_compress_eigerdata( images, mask, md, filename,
bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold,
bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes= nobytes, bins=bins,
num_sub=num_sub, dtypes=dtypes, reverse=reverse,
num_max_para_process=num_max_para_process,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path)
else:
return init_compress_eigerdata( images, mask, md, filename,
bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold,
bad_pixel_low_threshold=bad_pixel_low_threshold, nobytes= nobytes, bins=bins,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path )
else:
print ("Using already created compressed file with filename as :%s."%filename)
beg=0
return read_compressed_eigerdata( mask, filename, beg, end,
bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold,
bad_pixel_low_threshold=bad_pixel_low_threshold ,bad_frame_list=bad_frame_list,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path )
def read_compressed_eigerdata( mask, filename, beg, end,
bad_pixel_threshold=1e15, hot_pixel_threshold=2**30,
bad_pixel_low_threshold=0,bad_frame_list=None,with_pickle= False,
direct_load_data=False,data_path=None):
'''
    Read already compressed eiger data
Return
mask
avg_img
        imgsum
bad_frame_list
'''
#should use try and except instead of with_pickle in the future!
CAL = False
if not with_pickle:
CAL = True
else:
try:
mask, avg_img, imgsum, bad_frame_list_ = pkl.load( open(filename + '.pkl', 'rb' ) )
except:
CAL = True
if CAL:
FD = Multifile( filename, beg, end)
imgsum = np.zeros( FD.end- FD.beg, dtype= np.float )
avg_img = np.zeros( [FD.md['ncols'], FD.md['nrows'] ] , dtype= np.float )
imgsum, bad_frame_list_ = get_each_frame_intensityc( FD, sampling = 1,
bad_pixel_threshold=bad_pixel_threshold, bad_pixel_low_threshold=bad_pixel_low_threshold,
hot_pixel_threshold=hot_pixel_threshold, plot_ = False,
bad_frame_list=bad_frame_list)
avg_img = get_avg_imgc( FD, beg=None,end=None,sampling = 1, plot_ = False,bad_frame_list=bad_frame_list_ )
FD.FID.close()
return mask, avg_img, imgsum, bad_frame_list_
def para_compress_eigerdata( images, mask, md, filename, num_sub=100,
bad_pixel_threshold=1e15, hot_pixel_threshold=2**30,
bad_pixel_low_threshold=0, nobytes=4, bins=1, dtypes='uid',reverse =True,
num_max_para_process=500, cpu_core_number=72, with_pickle=True,
direct_load_data=False, data_path=None):
if dtypes=='uid':
uid= md['uid'] #images
if not direct_load_data:
detector = get_detector( db[uid ] )
images_ = load_data( uid, detector, reverse= reverse )
else:
images_ = EigerImages(data_path, md)
N= len(images_)
else:
N = len(images)
N = int( np.ceil( N/ bins ) )
Nf = int( np.ceil( N/ num_sub ) )
if Nf > cpu_core_number:
print("The process number is larger than %s (XF11ID server core number)"%cpu_core_number)
num_sub_old = num_sub
num_sub = int( np.ceil(N/cpu_core_number))
Nf = int( np.ceil( N/ num_sub ) )
print ("The sub compressed file number was changed from %s to %s"%( num_sub_old, num_sub ))
create_compress_header( md, filename +'-header', nobytes, bins )
#print( 'done for header here')
results = para_segment_compress_eigerdata( images=images, mask=mask, md=md,filename=filename,
num_sub=num_sub, bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold,
bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes=nobytes, bins=bins, dtypes=dtypes,
num_max_para_process=num_max_para_process,
direct_load_data=direct_load_data, data_path=data_path)
res_ = np.array( [ results[k].get() for k in list(sorted(results.keys())) ] )
imgsum = np.zeros( N )
bad_frame_list = np.zeros( N, dtype=bool )
good_count = 1
for i in range( Nf ):
mask_, avg_img_, imgsum_, bad_frame_list_ = res_[i]
imgsum[i*num_sub: (i+1)*num_sub] = imgsum_
bad_frame_list[i*num_sub: (i+1)*num_sub] = bad_frame_list_
if i==0:
mask = mask_
avg_img = np.zeros_like( avg_img_ )
else:
mask *= mask_
if not np.sum( np.isnan( avg_img_)):
avg_img += avg_img_
good_count += 1
bad_frame_list = np.where( bad_frame_list )[0]
avg_img /= good_count
if len(bad_frame_list):
print ('Bad frame list are: %s' %bad_frame_list)
else:
print ('No bad frames are involved.')
    print( 'Combining the separated compressed files together...')
combine_compressed( filename, Nf, del_old=True)
del results
del res_
if with_pickle:
pkl.dump( [mask, avg_img, imgsum, bad_frame_list], open(filename + '.pkl', 'wb' ) )
return mask, avg_img, imgsum, bad_frame_list
def combine_compressed( filename, Nf, del_old=True):
old_files = np.concatenate( np.array([ [filename +'-header'],
[filename + '_temp-%i.tmp'%i for i in range(Nf) ]]))
combine_binary_files(filename, old_files, del_old )
def combine_binary_files(filename, old_files, del_old = False):
'''Combine binary files together'''
fn_ = open(filename, 'wb')
for ftemp in old_files:
shutil.copyfileobj( open(ftemp, 'rb'), fn_)
if del_old:
os.remove( ftemp )
fn_.close()
def para_segment_compress_eigerdata( images, mask, md, filename, num_sub=100,
bad_pixel_threshold=1e15, hot_pixel_threshold=2**30,
bad_pixel_low_threshold=0, nobytes=4, bins=1, dtypes='images',reverse =True,
num_max_para_process=50,direct_load_data=False, data_path=None):
'''
    Compress eiger data in segments without a header; this function is used for parallel compression
'''
if dtypes=='uid':
uid= md['uid'] #images
if not direct_load_data:
detector = get_detector( db[uid ] )
images_ = load_data( uid, detector, reverse= reverse )
else:
images_ = EigerImages(data_path, md)
N= len(images_)
else:
N = len(images)
#N = int( np.ceil( N/ bins ) )
num_sub *= bins
if N%num_sub:
Nf = N// num_sub +1
        print('The average image intensity will be slightly incorrect, about 1% error.')
        print( 'Please give a num_sub such that Num_images % num_sub == 0 to get a correct avg_image')
else:
Nf = N//num_sub
print( 'It will create %i temporary files for parallel compression.'%Nf)
if Nf> num_max_para_process:
N_runs = np.int( np.ceil( Nf/float(num_max_para_process)))
print('The parallel run number: %s is larger than num_max_para_process: %s'%(Nf, num_max_para_process ))
else:
N_runs= 1
result = {}
#print( mask_filename )# + '*'* 10 + 'here' )
for nr in range( N_runs ):
if (nr+1)*num_max_para_process > Nf:
inputs= range( num_max_para_process*nr, Nf )
else:
inputs= range( num_max_para_process*nr, num_max_para_process*(nr + 1 ) )
fns = [ filename + '_temp-%i.tmp'%i for i in inputs]
#print( nr, inputs, )
pool = Pool(processes= len(inputs) ) #, maxtasksperchild=1000 )
#print( inputs )
for i in inputs:
if i*num_sub <= N:
result[i] = pool.apply_async( segment_compress_eigerdata, [
images, mask, md, filename + '_temp-%i.tmp'%i,bad_pixel_threshold, hot_pixel_threshold, bad_pixel_low_threshold, nobytes, bins, i*num_sub, (i+1)*num_sub, dtypes, reverse,direct_load_data, data_path ] )
pool.close()
pool.join()
pool.terminate()
return result
def segment_compress_eigerdata( images, mask, md, filename,
bad_pixel_threshold=1e15, hot_pixel_threshold=2**30,
bad_pixel_low_threshold=0, nobytes=4, bins=1,
N1=None, N2=None, dtypes='images',reverse =True,direct_load_data=False, data_path=None ):
'''
    Create compressed eiger data without a header; this function is used for parallel compression.
    For parallel compression, don't pass any non-scalar parameters
'''
if dtypes=='uid':
uid= md['uid'] #images
if not direct_load_data:
detector = get_detector( db[uid ] )
images = load_data( uid, detector, reverse= reverse )[N1:N2]
else:
images = EigerImages(data_path, md)[N1:N2]
Nimg_ = len( images)
M,N = images[0].shape
avg_img = np.zeros( [M,N], dtype= np.float )
Nopix = float( avg_img.size )
n=0
good_count = 0
#frac = 0.0
if nobytes==2:
dtype= np.int16
elif nobytes==4:
dtype= np.int32
elif nobytes==8:
dtype=np.float64
else:
print ( "Wrong type of nobytes, only support 2 [np.int16] or 4 [np.int32]")
dtype= np.int32
#Nimg = Nimg_//bins
Nimg = int( np.ceil( Nimg_ / bins ) )
time_edge = np.array(create_time_slice( N= Nimg_,
slice_num= Nimg, slice_width= bins ))
#print( time_edge, Nimg_, Nimg, bins, N1, N2 )
imgsum = np.zeros( Nimg )
if bins!=1:
#print('The frames will be binned by %s'%bins)
dtype=np.float64
fp = open( filename,'wb' )
for n in range(Nimg):
t1,t2 = time_edge[n]
if bins!=1:
img = np.array( np.average( images[t1:t2], axis=0 ) , dtype= dtype)
else:
img = np.array( images[t1], dtype=dtype)
mask &= img < hot_pixel_threshold
p = np.where( (np.ravel(img)>0) * np.ravel(mask) )[0] #don't use masked data
v = np.ravel( np.array( img, dtype= dtype )) [p]
dlen = len(p)
imgsum[n] = v.sum()
if (dlen==0) or (imgsum[n] > bad_pixel_threshold) or (imgsum[n] <=bad_pixel_low_threshold):
dlen = 0
fp.write( struct.pack( '@I', dlen ))
else:
np.ravel( avg_img )[p] += v
good_count +=1
fp.write( struct.pack( '@I', dlen ))
fp.write( struct.pack( '@{}i'.format( dlen), *p))
if bins==1:
fp.write( struct.pack( '@{}{}'.format( dlen,'ih'[nobytes==2]), *v))
else:
fp.write( struct.pack( '@{}{}'.format( dlen,'dd'[nobytes==2] ), *v)) #n +=1
del p,v, img
fp.flush()
fp.close()
avg_img /= good_count
bad_frame_list = (np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold)
sys.stdout.write('#')
sys.stdout.flush()
#del images, mask, avg_img, imgsum, bad_frame_list
#print( 'Should release memory here')
return mask, avg_img, imgsum, bad_frame_list
def create_compress_header( md, filename, nobytes=4, bins=1 ):
'''
    Create the header for a compressed eiger data file; this function is used for parallel compression
'''
fp = open( filename,'wb' )
#Make Header 1024 bytes
#md = images.md
if bins!=1:
nobytes=8
Header = struct.pack('@16s8d7I916x',b'Version-COMP0001',
md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'],
md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'],
nobytes, md['pixel_mask'].shape[1], md['pixel_mask'].shape[0],
0, md['pixel_mask'].shape[1],
0, md['pixel_mask'].shape[0]
)
fp.write( Header)
fp.close()
def init_compress_eigerdata( images, mask, md, filename,
bad_pixel_threshold=1e15, hot_pixel_threshold=2**30,
bad_pixel_low_threshold=0,nobytes=4, bins=1, with_pickle=True,
direct_load_data=False, data_path=None):
'''
    Compress the eiger data
    Create a new mask by removing hot pixels
    Do image average
    Do each image sum
    Find bad_frame_list where the image sum is above bad_pixel_threshold
    Generate compressed data with filename
    if bins!=1, the images will be binned with bin number as bins
Header contains 1024 bytes ['Magic value', 'beam_center_x', 'beam_center_y', 'count_time', 'detector_distance',
'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size',
bytes per pixel (either 2 or 4 (Default)),
Nrows, Ncols, Rows_Begin, Rows_End, Cols_Begin, Cols_End ]
Return
mask
avg_img
imsum
bad_frame_list
'''
fp = open( filename,'wb' )
#Make Header 1024 bytes
#md = images.md
if bins!=1:
nobytes=8
Header = struct.pack('@16s8d7I916x',b'Version-COMP0001',
md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'],
md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'],
nobytes, md['pixel_mask'].shape[1], md['pixel_mask'].shape[0],
0, md['pixel_mask'].shape[1],
0, md['pixel_mask'].shape[0]
)
fp.write( Header)
Nimg_ = len( images)
avg_img = np.zeros_like( images[0], dtype= np.float )
Nopix = float( avg_img.size )
n=0
good_count = 0
frac = 0.0
if nobytes==2:
dtype= np.int16
elif nobytes==4:
dtype= np.int32
elif nobytes==8:
dtype=np.float64
else:
print ( "Wrong type of nobytes, only support 2 [np.int16] or 4 [np.int32]")
dtype= np.int32
Nimg = Nimg_//bins
time_edge = np.array(create_time_slice( N= Nimg_,
slice_num= Nimg, slice_width= bins ))
imgsum = np.zeros( Nimg )
if bins!=1:
print('The frames will be binned by %s'%bins)
for n in tqdm( range(Nimg) ):
t1,t2 = time_edge[n]
img = np.average( images[t1:t2], axis=0 )
mask &= img < hot_pixel_threshold
p = np.where( (np.ravel(img)>0) & np.ravel(mask) )[0] #don't use masked data
v = np.ravel( np.array( img, dtype= dtype )) [p]
dlen = len(p)
imgsum[n] = v.sum()
if (imgsum[n] >bad_pixel_threshold) or (imgsum[n] <=bad_pixel_low_threshold):
#if imgsum[n] >=bad_pixel_threshold :
dlen = 0
fp.write( struct.pack( '@I', dlen ))
else:
np.ravel(avg_img )[p] += v
good_count +=1
frac += dlen/Nopix
#s_fmt ='@I{}i{}{}'.format( dlen,dlen,'ih'[nobytes==2])
fp.write( struct.pack( '@I', dlen ))
fp.write( struct.pack( '@{}i'.format( dlen), *p))
if bins==1:
fp.write( struct.pack( '@{}{}'.format( dlen,'ih'[nobytes==2]), *v))
else:
fp.write( struct.pack( '@{}{}'.format( dlen,'dd'[nobytes==2] ), *v))
#n +=1
fp.close()
frac /=good_count
print( "The fraction of pixel occupied by photon is %6.3f%% "%(100*frac) )
avg_img /= good_count
bad_frame_list = np.where( (np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold) )[0]
#bad_frame_list1 = np.where( np.array(imgsum) > bad_pixel_threshold )[0]
#bad_frame_list2 = np.where( np.array(imgsum) < bad_pixel_low_threshold )[0]
#bad_frame_list = np.unique( np.concatenate( [bad_frame_list1, bad_frame_list2]) )
if len(bad_frame_list):
print ('Bad frame list are: %s' %bad_frame_list)
else:
print ('No bad frames are involved.')
if with_pickle:
pkl.dump( [mask, avg_img, imgsum, bad_frame_list], open(filename + '.pkl', 'wb' ) )
return mask, avg_img, imgsum, bad_frame_list
""" Description:
This is code that Mark wrote to open the multifile format
in compressed mode, translated to python.
This seems to work for DALSA, FCCD and EIGER in compressed mode.
It should be included in the respective detector.i files
Currently, this refers to the compression mode being '6'
Each file is image descriptor files chunked together as follows:
Header (1024 bytes)
|--------------IMG N begin--------------|
| Dlen
|---------------------------------------|
| Pixel positions (dlen*4 bytes |
| (0 based indexing in file) |
|---------------------------------------|
| Pixel data(dlen*bytes bytes) |
| (bytes is found in header |
| at position 116) |
|--------------IMG N end----------------|
|--------------IMG N+1 begin------------|
|----------------etc.....---------------|
Header contains 1024 bytes version name, 'beam_center_x', 'beam_center_y', 'count_time', 'detector_distance',
'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size',
bytes per pixel (either 2 or 4 (Default)),
Nrows, Ncols, Rows_Begin, Rows_End, Cols_Begin, Cols_End,
"""
class Multifile:
'''The class representing the multifile.
The recno is in 1 based numbering scheme (first record is 1)
This is efficient for reading in increasing order.
Note: reading same image twice in a row is like reading an earlier
numbered image and means the program starts for the beginning again.
'''
def __init__(self,filename,beg,end):
'''Multifile initialization. Open the file.
Here I use the read routine which returns byte objects
(everything is an object in python). I use struct.unpack
to convert the byte object to other data type (int object
etc)
NOTE: At each record n, the file cursor points to record n+1
'''
self.FID = open(filename,"rb")
# self.FID.seek(0,os.SEEK_SET)
self.filename = filename
#br: bytes read
br = self.FID.read(1024)
self.beg=beg
self.end=end
ms_keys = ['beam_center_x', 'beam_center_y', 'count_time', 'detector_distance',
'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size',
'bytes',
'nrows', 'ncols', 'rows_begin', 'rows_end', 'cols_begin', 'cols_end'
]
magic = struct.unpack('@16s', br[:16])
md_temp = struct.unpack('@8d7I916x', br[16:])
self.md = dict(zip(ms_keys, md_temp))
self.imgread=0
self.recno = 0
# some initialization stuff
self.byts = self.md['bytes']
if (self.byts==2):
self.valtype = np.uint16
elif (self.byts == 4):
self.valtype = np.uint32
elif (self.byts == 8):
self.valtype = np.float64
#now convert pieces of these bytes to our data
self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0]
# now read first image
#print "Opened file. Bytes per data is {0img.shape = (self.rows,self.cols)}".format(self.byts)
def _readHeader(self):
self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0]
def _readImageRaw(self):
p= np.fromfile(self.FID, dtype = np.int32,count= self.dlen)
v= np.fromfile(self.FID, dtype = self.valtype,count= self.dlen)
self.imgread=1
return(p,v)
def _readImage(self):
(p,v)=self._readImageRaw()
img = np.zeros( ( self.md['ncols'], self.md['nrows'] ) )
np.put( np.ravel(img), p, v )
return(img)
def seekimg(self,n=None):
        '''Position the file cursor to read the nth image.
        '''
# the logic involving finding the cursor position
if (n is None):
n = self.recno
if (n < self.beg or n > self.end):
raise IndexError('Error, record out of range')
#print (n, self.recno, self.FID.tell() )
if ((n == self.recno) and (self.imgread==0)):
pass # do nothing
else:
if (n <= self.recno): #ensure cursor less than search pos
self.FID.seek(1024,os.SEEK_SET)
self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0]
self.recno = 0
self.imgread=0
if n == 0:
return
#have to iterate on seeking since dlen varies
#remember for rec recno, cursor is always at recno+1
if(self.imgread==0 ): #move to next header if need to
self.FID.seek(self.dlen*(4+self.byts),os.SEEK_CUR)
for i in range(self.recno+1,n):
#the less seeks performed the faster
#print (i)
self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0]
#print 's',self.dlen
self.FID.seek(self.dlen*(4+self.byts),os.SEEK_CUR)
# we are now at recno in file, read the header and data
#self._clearImage()
self._readHeader()
self.imgread=0
self.recno = n
def rdframe(self,n):
if self.seekimg(n)!=-1:
return(self._readImage())
def rdrawframe(self,n):
if self.seekimg(n)!=-1:
return(self._readImageRaw())
def pass_FD(FD,n):
#FD.rdframe(n)
FD.seekimg(n)
class Multifile_Bins( object ):
'''
Bin a compressed file with bins number
See Multifile for details for Multifile_class
'''
def __init__(self, FD, bins=100):
'''
FD: the handler of a compressed Eiger frames
bins: bins number
'''
self.FD=FD
if (FD.end - FD.beg)%bins:
print ('Please give a better bins number and make the length of FD/bins= integer')
else:
self.bins = bins
self.md = FD.md
#self.beg = FD.beg
self.beg = 0
Nimg = (FD.end - FD.beg)
slice_num = Nimg//bins
self.end = slice_num
self.time_edge = np.array(create_time_slice( N= Nimg,
slice_num= slice_num, slice_width= bins )) + FD.beg
self.get_bin_frame()
def get_bin_frame(self):
FD= self.FD
self.frames = np.zeros( [ FD.md['ncols'],FD.md['nrows'], len(self.time_edge)] )
for n in tqdm( range(len(self.time_edge))):
#print (n)
t1,t2 = self.time_edge[n]
#print( t1, t2)
self.frames[:,:,n] = get_avg_imgc( FD, beg=t1,end=t2, sampling = 1,
plot_ = False, show_progress = False )
def rdframe(self,n):
return self.frames[:,:,n]
def rdrawframe(self,n):
x_= np.ravel( self.rdframe(n) )
p= np.where( x_ ) [0]
v = np.array( x_[ p ])
return ( np.array(p, dtype=np.int32), v)
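# Illustrative sketch with assumed file name and frame range: open a compressed
# file with Multifile, rebin it with Multifile_Bins, and read frames back as
# 2-D arrays.
def _example_open_and_bin(filename='uid_example.cmp', beg=0, end=1000, bins=100):
    FD = Multifile(filename, beg, end)
    img0 = FD.rdframe(beg)               # first frame, shape (ncols, nrows)
    FDbin = Multifile_Bins(FD, bins=bins)
    binned0 = FDbin.rdframe(0)           # average of the first `bins` frames
    FD.FID.close()
    return img0, binned0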
def get_avg_imgc( FD, beg=None,end=None, sampling = 100, plot_ = False, bad_frame_list=None,
show_progress=True, *argv,**kwargs):
    '''Get the average image from a data_series by sampling every N frames to save time'''
#avg_img = np.average(data_series[:: sampling], axis=0)
if beg is None:
beg = FD.beg
if end is None:
end = FD.end
avg_img = FD.rdframe(beg)
n=1
flag=True
if show_progress:
#print( sampling-1 + beg , end, sampling )
if bad_frame_list is None:
bad_frame_list =[]
fra_num = int( (end - beg )/sampling ) - len( bad_frame_list )
for i in tqdm(range( sampling-1 + beg , end, sampling ), desc= 'Averaging %s images'% fra_num):
if bad_frame_list is not None:
if i in bad_frame_list:
flag= False
else:
flag=True
#print(i, flag)
if flag:
(p,v) = FD.rdrawframe(i)
if len(p)>0:
np.ravel(avg_img )[p] += v
n += 1
else:
for i in range( sampling-1 + beg , end, sampling ):
if bad_frame_list is not None:
if i in bad_frame_list:
flag= False
else:
flag=True
if flag:
(p,v) = FD.rdrawframe(i)
if len(p)>0:
np.ravel(avg_img )[p] += v
n += 1
avg_img /= n
if plot_:
if RUN_GUI:
fig = Figure()
ax = fig.add_subplot(111)
else:
fig, ax = plt.subplots()
uid = 'uid'
if 'uid' in kwargs.keys():
uid = kwargs['uid']
im = ax.imshow(avg_img , cmap='viridis',origin='lower',
norm= LogNorm(vmin=0.001, vmax=1e2))
#ax.set_title("Masked Averaged Image")
ax.set_title('uid= %s--Masked-Averaged-Image-'%uid)
fig.colorbar(im)
        if kwargs.get('save', False):  # 'save' arrives via **kwargs; avoids a NameError since it is not a named parameter
#dt =datetime.now()
#CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute)
path = kwargs['path']
if 'uid' in kwargs:
uid = kwargs['uid']
else:
uid = 'uid'
#fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png'
fp = path + "uid=%s--avg-img-"%uid + '.png'
plt.savefig( fp, dpi=fig.dpi)
#plt.show()
return avg_img
def mean_intensityc(FD, labeled_array, sampling=1, index=None, multi_cor = False):
"""Compute the mean intensity for each ROI in the compressed file (FD), support parallel computation
Parameters
----------
FD: Multifile class
compressed file
labeled_array : array
labeled array; 0 is background.
Each ROI is represented by a nonzero integer. It is not required that
the ROI labels are contiguous
index : int, list, optional
The ROI's to use. If None, this function will extract averages for all
ROIs
Returns
-------
mean_intensity : array
The mean intensity of each ROI for all `images`
Dimensions:
len(mean_intensity) == len(index)
len(mean_intensity[0]) == len(images)
index : list
The labels for each element of the `mean_intensity` list
"""
qind, pixelist = roi.extract_label_indices( labeled_array )
if labeled_array.shape != ( FD.md['ncols'],FD.md['nrows']):
raise ValueError(
" `image` shape (%d, %d) in FD is not equal to the labeled_array shape (%d, %d)" %( FD.md['ncols'],FD.md['nrows'], labeled_array.shape[0], labeled_array.shape[1]) )
# handle various input for `index`
if index is None:
index = list(np.unique(labeled_array))
index.remove(0)
else:
try:
len(index)
except TypeError:
index = [index]
index = np.array( index )
#print ('here')
good_ind = np.zeros( max(qind), dtype= np.int32 )
good_ind[ index -1 ] = np.arange( len(index) ) +1
w = np.where( good_ind[qind -1 ] )[0]
qind = good_ind[ qind[w] -1 ]
pixelist = pixelist[w]
# pre-allocate an array for performance
# might be able to use list comprehension to make this faster
mean_intensity = np.zeros( [ int( ( FD.end - FD.beg)/sampling ) , len(index)] )
#fra_pix = np.zeros_like( pixelist, dtype=np.float64)
timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 )
timg[pixelist] = np.arange( 1, len(pixelist) + 1 )
#maxqind = max(qind)
norm = np.bincount( qind )[1:]
n= 0
#for i in tqdm(range( FD.beg , FD.end )):
if not multi_cor:
for i in tqdm(range( FD.beg, FD.end, sampling ), desc= 'Get ROI intensity of each frame' ):
(p,v) = FD.rdrawframe(i)
w = np.where( timg[p] )[0]
pxlist = timg[ p[w] ] -1
mean_intensity[n] = np.bincount( qind[pxlist], weights = v[w], minlength = len(index)+1 )[1:]
n +=1
else:
ring_masks = [ np.array(labeled_array==i, dtype = np.int64) for i in np.unique( labeled_array )[1:] ]
inputs = range( len(ring_masks) )
go_through_FD(FD)
pool = Pool(processes= len(inputs) )
        print( 'Starting to assign the tasks...')
results = {}
for i in tqdm ( inputs ):
results[i] = apply_async( pool, _get_mean_intensity_one_q, ( FD, sampling, ring_masks[i] ) )
pool.close()
        print( 'Starting to run the tasks...')
res = [ results[k].get() for k in tqdm( list(sorted(results.keys())) ) ]
#return res
for i in inputs:
mean_intensity[:,i] = res[i]
    print( 'ROI mean_intensity calculation is DONE!')
del results
del res
mean_intensity /= norm
return mean_intensity, index
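# Illustrative sketch with hypothetical ROI positions: build a labeled array
# matching the detector shape and compute per-ROI mean intensities with
# mean_intensityc above. The two square ROIs are assumptions for demonstration.
def _example_roi_mean_intensity(FD):
    labels = np.zeros((FD.md['ncols'], FD.md['nrows']), dtype=np.int64)
    labels[10:20, 10:20] = 1             # ROI 1
    labels[30:40, 30:40] = 2             # ROI 2
    mean_int, index = mean_intensityc(FD, labels, sampling=1)
    return mean_int, index               # mean_int: (n_frames, n_rois)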
def _get_mean_intensity_one_q( FD, sampling, labels ):
mi = np.zeros( int( ( FD.end - FD.beg)/sampling ) )
n=0
qind, pixelist = roi.extract_label_indices( labels )
# iterate over the images to compute multi-tau correlation
fra_pix = np.zeros_like( pixelist, dtype=np.float64)
timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 )
timg[pixelist] = np.arange( 1, len(pixelist) + 1 )
for i in range( FD.beg, FD.end, sampling ):
(p,v) = FD.rdrawframe(i)
w = np.where( timg[p] )[0]
pxlist = timg[ p[w] ] -1
mi[n] = np.bincount( qind[pxlist], weights = v[w], minlength = 2 )[1:]
n +=1
return mi
def get_each_frame_intensityc( FD, sampling = 1,
bad_pixel_threshold=1e10, bad_pixel_low_threshold=0,
hot_pixel_threshold=2**30,
plot_ = False, bad_frame_list=None, save=False, *argv,**kwargs):
    '''Get the total intensity of each frame by sampling every N frames
       Also get bad_frame_list by checking whether the frame sum is above bad_pixel_threshold
       Usage:
        imgsum, bad_frame_list = get_each_frame_intensityc(good_series, sampling = 1000,
                                bad_pixel_threshold=1e10, plot_ = True)
'''
#print ( argv, kwargs )
#mask &= img < hot_pixel_threshold
imgsum = np.zeros( int( (FD.end - FD.beg )/ sampling ) )
n=0
for i in tqdm(range( FD.beg, FD.end, sampling ), desc= 'Get each frame intensity' ):
(p,v) = FD.rdrawframe(i)
if len(p)>0:
imgsum[n] = np.sum( v )
n += 1
if plot_:
uid = 'uid'
if 'uid' in kwargs.keys():
uid = kwargs['uid']
fig, ax = plt.subplots()
ax.plot( imgsum,'bo')
ax.set_title('uid= %s--imgsum'%uid)
ax.set_xlabel( 'Frame_bin_%s'%sampling )
ax.set_ylabel( 'Total_Intensity' )
if save:
#dt =datetime.now()
#CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute)
path = kwargs['path']
if 'uid' in kwargs:
uid = kwargs['uid']
else:
uid = 'uid'
#fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png'
fp = path + "uid=%s--imgsum-"%uid + '.png'
fig.savefig( fp, dpi=fig.dpi)
plt.show()
bad_frame_list_ = np.where( ( np.array(imgsum) > bad_pixel_threshold ) | ( np.array(imgsum) <= bad_pixel_low_threshold) )[0] + FD.beg
if bad_frame_list is not None:
bad_frame_list = np.unique( np.concatenate([bad_frame_list, bad_frame_list_]) )
else:
bad_frame_list = bad_frame_list_
if len(bad_frame_list):
print ('Bad frame list length is: %s' %len(bad_frame_list))
else:
print ('No bad frames are involved.')
return imgsum,bad_frame_list
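# End-to-end sketch with assumed inputs (an in-memory image series, a boolean
# mask and a metadata dict containing the keys used by the header writer):
# compress the series, re-open the compressed file and inspect per-frame sums.
def _example_compress_and_inspect(images, mask, md, filename='/tmp/uid_example.cmp'):
    mask, avg_img, imgsum, bad_frames = compress_eigerdata(
        images, mask, md, filename=filename, force_compress=True,
        para_compress=False, bins=1)
    FD = Multifile(filename, 0, len(images))
    imgsum2, bad2 = get_each_frame_intensityc(FD, sampling=1, plot_=False)
    FD.FID.close()
    return avg_img, imgsum2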
| bsd-3-clause | -9,175,865,570,974,697,000 | 39.618026 | 231 | 0.520684 | false |
arangodb/arangodb | 3rdParty/rocksdb/6.8/tools/advisor/advisor/rule_parser_example.py | 14 | 3190 | # Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
# This source code is licensed under both the GPLv2 (found in the
# COPYING file in the root directory) and Apache 2.0 License
# (found in the LICENSE.Apache file in the root directory).
from advisor.rule_parser import RulesSpec
from advisor.db_log_parser import DatabaseLogs, DataSource
from advisor.db_options_parser import DatabaseOptions
from advisor.db_stats_fetcher import LogStatsParser, OdsStatsFetcher
import argparse
def main(args):
# initialise the RulesSpec parser
rule_spec_parser = RulesSpec(args.rules_spec)
rule_spec_parser.load_rules_from_spec()
rule_spec_parser.perform_section_checks()
# initialize the DatabaseOptions object
db_options = DatabaseOptions(args.rocksdb_options)
# Create DatabaseLogs object
db_logs = DatabaseLogs(
args.log_files_path_prefix, db_options.get_column_families()
)
# Create the Log STATS object
db_log_stats = LogStatsParser(
args.log_files_path_prefix, args.stats_dump_period_sec
)
data_sources = {
DataSource.Type.DB_OPTIONS: [db_options],
DataSource.Type.LOG: [db_logs],
DataSource.Type.TIME_SERIES: [db_log_stats]
}
if args.ods_client:
data_sources[DataSource.Type.TIME_SERIES].append(OdsStatsFetcher(
args.ods_client,
args.ods_entity,
args.ods_tstart,
args.ods_tend,
args.ods_key_prefix
))
triggered_rules = rule_spec_parser.get_triggered_rules(
data_sources, db_options.get_column_families()
)
rule_spec_parser.print_rules(triggered_rules)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Use this script to get\
suggestions for improving Rocksdb performance.')
parser.add_argument(
'--rules_spec', required=True, type=str,
help='path of the file containing the expert-specified Rules'
)
parser.add_argument(
'--rocksdb_options', required=True, type=str,
help='path of the starting Rocksdb OPTIONS file'
)
parser.add_argument(
'--log_files_path_prefix', required=True, type=str,
help='path prefix of the Rocksdb LOG files'
)
parser.add_argument(
'--stats_dump_period_sec', required=True, type=int,
help='the frequency (in seconds) at which STATISTICS are printed to ' +
'the Rocksdb LOG file'
)
# ODS arguments
parser.add_argument(
'--ods_client', type=str, help='the ODS client binary'
)
parser.add_argument(
'--ods_entity', type=str,
help='the servers for which the ODS stats need to be fetched'
)
parser.add_argument(
'--ods_key_prefix', type=str,
help='the prefix that needs to be attached to the keys of time ' +
'series to be fetched from ODS'
)
parser.add_argument(
'--ods_tstart', type=int,
help='start time of timeseries to be fetched from ODS'
)
parser.add_argument(
'--ods_tend', type=int,
help='end time of timeseries to be fetched from ODS'
)
args = parser.parse_args()
main(args)
| apache-2.0 | 7,147,545,253,902,652,000 | 34.842697 | 79 | 0.655172 | false |
scott-maddox/obpds | src/obpds/examples/interactive_schottky_diode.py | 1 | 1288 | #
# Copyright (c) 2015, Scott J Maddox
#
# This file is part of Open Band Parameters Device Simulator (OBPDS).
#
# OBPDS is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OBPDS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OBPDS. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
import logging; logging.basicConfig()
# Make sure we import the local obpds version
import os
import sys
sys.path.insert(0,
os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
from obpds import *
# Layers
n = Layer(0.3*um, GaAs, -1e17/cm3)
# Device
d = TwoTerminalDevice(layers=[n],
contacts=[SchottkyContact(), OhmicContact()],
Fn='right')
d.interactive_zero_current() | agpl-3.0 | -4,934,516,960,856,693,000 | 32.921053 | 77 | 0.653727 | false |
internetarchive/brozzler | tests/test_cluster.py | 1 | 33391 | #!/usr/bin/env python
'''
test_cluster.py - integration tests for a brozzler cluster, expects brozzler,
warcprox, pywb, rethinkdb and other dependencies to be running already
Copyright (C) 2016-2018 Internet Archive
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import pytest
import http.server
import threading
import urllib.request
import os
import socket
import doublethink
import time
import brozzler
import datetime
import requests
import subprocess
import http.server
import logging
import sys
import warcprox
# https://stackoverflow.com/questions/166506/finding-local-ip-addresses-using-pythons-stdlib
def _local_address():
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect(('10.255.255.255', 1)) # ip doesn't need to be reachable
return s.getsockname()[0]
except:
return '127.0.0.1'
finally:
s.close()
local_address = _local_address()
def start_service(service):
subprocess.check_call(['sudo', 'svc', '-u', '/etc/service/' + service])
def stop_service(service):
subprocess.check_call(['sudo', 'svc', '-d', '/etc/service/' + service])
while True:
status = subprocess.check_output(
['sudo', 'svstat', '/etc/service/' + service])
if b' down ' in status:
break
time.sleep(0.5)
@pytest.fixture(scope='module')
def httpd(request):
class RequestHandler(http.server.SimpleHTTPRequestHandler):
def do_POST(self):
logging.info('\n%s\n%s', self.requestline, self.headers)
self.do_GET()
def do_GET(self):
logging.info('\n%s\n%s', self.requestline, self.headers)
if self.path == '/site5/redirect/':
self.send_response(303, 'See other')
self.send_header('Connection', 'close')
self.send_header('Content-Length', 0)
self.send_header('Location', '/site5/destination/')
self.end_headers()
self.wfile.write(b'')
elif self.path == '/site9/redirect.html':
self.send_response(303, 'See other')
self.send_header('Connection', 'close')
self.send_header('Content-Length', 0)
self.send_header('Location', '/site9/destination.html')
self.end_headers()
self.wfile.write(b'')
elif self.path.startswith('/infinite/'):
payload = b'''
<html>
<head>
<title>infinite site</title>
</head>
<body>
<a href='a/'>a/</a> <a href='b/'>b/</a> <a href='c/'>c/</a>
<a href='d/'>d/</a> <a href='e/'>e/</a> <a href='f/'>f/</a>
<a href='g/'>g/</a> <a href='h/'>h/</a> <a href='i/'>i/</a>
</body>
</html>
'''
self.send_response(200, 'OK')
self.send_header('Connection', 'close')
self.send_header('Content-Length', len(payload))
self.end_headers()
self.wfile.write(payload)
else:
super().do_GET()
# SimpleHTTPRequestHandler always uses CWD so we have to chdir
os.chdir(os.path.join(os.path.dirname(__file__), 'htdocs'))
httpd = http.server.HTTPServer((local_address, 0), RequestHandler)
httpd_thread = threading.Thread(name='httpd', target=httpd.serve_forever)
httpd_thread.start()
def fin():
httpd.shutdown()
httpd.server_close()
httpd_thread.join()
request.addfinalizer(fin)
return httpd
def make_url(httpd, rel_url):
return 'http://%s:%s%s' % (local_address, httpd.server_port, rel_url)
def test_httpd(httpd):
'''
Tests that our http server is working as expected, and that two fetches
of the same url return the same payload, proving it can be used to test
deduplication.
'''
    payload1 = payload2 = None
url = make_url(httpd, '/site1/file1.txt')
with urllib.request.urlopen(url) as response:
assert response.status == 200
payload1 = response.read()
assert payload1
with urllib.request.urlopen(url) as response:
assert response.status == 200
payload2 = response.read()
assert payload2
assert payload1 == payload2
def test_services_up():
'''Check that the expected services are up and running.'''
# check that rethinkdb is listening and looks sane
rr = doublethink.Rethinker(db='rethinkdb') # built-in db
tbls = rr.table_list().run()
assert len(tbls) > 10
# check that warcprox is listening
with socket.socket() as s:
# if the connect fails an exception is raised and the test fails
s.connect(('localhost', 8000))
# check that pywb is listening
with socket.socket() as s:
# if the connect fails an exception is raised and the test fails
s.connect(('localhost', 8880))
# check that brozzler dashboard is listening
with socket.socket() as s:
# if the connect fails an exception is raised and the test fails
s.connect(('localhost', 8881))
def test_brozzle_site(httpd):
test_id = 'test_brozzle_site-%s' % datetime.datetime.utcnow().isoformat()
rr = doublethink.Rethinker('localhost', db='brozzler')
site = brozzler.Site(rr, {
'seed': make_url(httpd, '/site1/'),
'warcprox_meta': {'captures-table-extra-fields':{'test_id':test_id}}})
# the two pages we expect to be crawled
page1 = make_url(httpd, '/site1/')
page2 = make_url(httpd, '/site1/file1.txt')
robots = make_url(httpd, '/robots.txt')
# so we can examine rethinkdb before it does anything
try:
stop_service('brozzler-worker')
assert site.id is None
frontier = brozzler.RethinkDbFrontier(rr)
brozzler.new_site(frontier, site)
assert site.id is not None
assert len(list(frontier.site_pages(site.id))) == 1
finally:
start_service('brozzler-worker')
# the site should be brozzled fairly quickly
start = time.time()
while site.status != 'FINISHED' and time.time() - start < 300:
time.sleep(0.5)
site.refresh()
assert site.status == 'FINISHED'
# check that we got the two pages we expected
pages = list(frontier.site_pages(site.id))
assert len(pages) == 2
assert {page.url for page in pages} == {
make_url(httpd, '/site1/'), make_url(httpd, '/site1/file1.txt')}
time.sleep(2) # in case warcprox hasn't finished processing urls
# take a look at the captures table
captures = rr.table('captures').filter({'test_id':test_id}).run()
captures_by_url = {
c['url']: c for c in captures if c['http_method'] != 'HEAD'}
assert robots in captures_by_url
assert page1 in captures_by_url
assert page2 in captures_by_url
assert 'screenshot:%s' % page1 in captures_by_url
assert 'thumbnail:%s' % page1 in captures_by_url
# no screenshots of plaintext
# check pywb
t14 = captures_by_url[page2]['timestamp'].strftime('%Y%m%d%H%M%S')
wb_url = 'http://localhost:8880/brozzler/%s/%s' % (t14, page2)
expected_payload = open(os.path.join(
os.path.dirname(__file__), 'htdocs', 'site1', 'file1.txt'), 'rb').read()
assert requests.get(wb_url).content == expected_payload
url = 'screenshot:%s' % page1
t14 = captures_by_url[url]['timestamp'].strftime('%Y%m%d%H%M%S')
wb_url = 'http://localhost:8880/brozzler/%s/%s' % (t14, url)
response = requests.get(wb_url)
assert response.status_code == 200
assert response.headers['content-type'] == 'image/jpeg'
url = 'thumbnail:%s' % page1
t14 = captures_by_url[url]['timestamp'].strftime('%Y%m%d%H%M%S')
wb_url = 'http://localhost:8880/brozzler/%s/%s' % (t14, url)
response = requests.get(wb_url)
assert response.status_code == 200
assert response.headers['content-type'] == 'image/jpeg'
def test_proxy_warcprox(httpd):
'''Test --proxy with proxy that happens to be warcprox'''
try:
stop_service('brozzler-worker')
_test_proxy_setting(
httpd, proxy='localhost:8000', warcprox_auto=False,
is_warcprox=True)
finally:
start_service('brozzler-worker')
def test_proxy_non_warcprox(httpd):
'''Test --proxy with proxy that happens not to be warcprox'''
class DumbProxyRequestHandler(http.server.SimpleHTTPRequestHandler):
def do_HEAD(self):
if not hasattr(self.server, 'requests'):
self.server.requests = []
logging.info('%s %s', self.command, self.path)
self.server.requests.append('%s %s' % (self.command, self.path))
response = urllib.request.urlopen(self.path)
self.wfile.write(('HTTP/1.0 %s %s\r\n' % (
response.code, response.reason)).encode('ascii'))
for header in response.getheaders():
self.wfile.write(('%s: %s\r\n' % (
header[0], header[1])).encode('ascii'))
self.wfile.write(b'\r\n')
return response
def do_GET(self):
response = self.do_HEAD()
self.copyfile(response, self.wfile)
def do_WARCPROX_WRITE_RECORD(self):
if not hasattr(self.server, 'requests'):
self.server.requests = []
logging.info('%s %s', self.command, self.path)
self.send_error(400)
proxy = http.server.HTTPServer(('localhost', 0), DumbProxyRequestHandler)
th = threading.Thread(name='dumb-proxy', target=proxy.serve_forever)
th.start()
try:
stop_service('brozzler-worker')
_test_proxy_setting(
httpd, proxy='localhost:%s' % proxy.server_port,
warcprox_auto=False, is_warcprox=False)
finally:
start_service('brozzler-worker')
assert len(proxy.requests) <= 15
assert proxy.requests.count('GET /status') == 1
assert ('GET %s' % make_url(httpd, '/site1/')) in proxy.requests
assert ('GET %s' % make_url(httpd, '/site1/file1.txt')) in proxy.requests
assert [req for req in proxy.requests if req.startswith('WARCPROX_WRITE_RECORD')] == []
proxy.shutdown()
th.join()
def test_no_proxy(httpd):
try:
stop_service('brozzler-worker')
_test_proxy_setting(
httpd, proxy=None, warcprox_auto=False, is_warcprox=False)
finally:
start_service('brozzler-worker')
# XXX how to check that no proxy was used?
def test_warcprox_auto(httpd):
'''Test --warcprox-auto'''
try:
stop_service('brozzler-worker')
_test_proxy_setting(
httpd, proxy=None, warcprox_auto=True, is_warcprox=True)
finally:
start_service('brozzler-worker')
def test_proxy_conflict():
with pytest.raises(AssertionError) as excinfo:
worker = brozzler.worker.BrozzlerWorker(
None, None, warcprox_auto=True, proxy='localhost:12345')
def _test_proxy_setting(
httpd, proxy=None, warcprox_auto=False, is_warcprox=False):
test_id = 'test_proxy=%s_warcprox_auto=%s_is_warcprox=%s-%s' % (
proxy, warcprox_auto, is_warcprox,
datetime.datetime.utcnow().isoformat())
# the two pages we expect to be crawled
page1 = make_url(httpd, '/site1/')
page2 = make_url(httpd, '/site1/file1.txt')
robots = make_url(httpd, '/robots.txt')
rr = doublethink.Rethinker('localhost', db='brozzler')
service_registry = doublethink.ServiceRegistry(rr)
site = brozzler.Site(rr, {
'seed': make_url(httpd, '/site1/'),
'warcprox_meta': {'captures-table-extra-fields':{'test_id':test_id}}})
assert site.id is None
frontier = brozzler.RethinkDbFrontier(rr)
brozzler.new_site(frontier, site)
assert site.id is not None
assert len(list(frontier.site_pages(site.id))) == 1
worker = brozzler.worker.BrozzlerWorker(
frontier, service_registry, max_browsers=1,
chrome_exe=brozzler.suggest_default_chrome_exe(),
warcprox_auto=warcprox_auto, proxy=proxy)
browser = worker._browser_pool.acquire()
worker.brozzle_site(browser, site)
worker._browser_pool.release(browser)
# check proxy is set
assert site.status == 'FINISHED'
if warcprox_auto:
assert site.proxy[-5:] == ':8000'
else:
assert not site.proxy
site.refresh() # check that these things were persisted
assert site.status == 'FINISHED'
if warcprox_auto:
assert site.proxy[-5:] == ':8000'
else:
assert not site.proxy
# check that we got the two pages we expected
pages = list(frontier.site_pages(site.id))
assert len(pages) == 2
assert {page.url for page in pages} == {
make_url(httpd, '/site1/'),
make_url(httpd, '/site1/file1.txt')}
time.sleep(2) # in case warcprox hasn't finished processing urls
# take a look at the captures table
captures = rr.table('captures').filter({'test_id':test_id}).run()
captures_by_url = {
c['url']: c for c in captures if c['http_method'] != 'HEAD'}
if is_warcprox:
assert robots in captures_by_url
assert page1 in captures_by_url
assert page2 in captures_by_url
assert 'screenshot:%s' % page1 in captures_by_url
assert 'thumbnail:%s' % page1 in captures_by_url
# check pywb
t14 = captures_by_url[page2]['timestamp'].strftime('%Y%m%d%H%M%S')
wb_url = 'http://localhost:8880/brozzler/%s/%s' % (t14, page2)
expected_payload = open(os.path.join(
os.path.dirname(__file__), 'htdocs', 'site1', 'file1.txt'), 'rb').read()
assert requests.get(wb_url).content == expected_payload
else:
assert captures_by_url == {}
def test_obey_robots(httpd):
test_id = 'test_obey_robots-%s' % datetime.datetime.utcnow().isoformat()
rr = doublethink.Rethinker('localhost', db='brozzler')
site = brozzler.Site(rr, {
'seed': make_url(httpd, '/site1/'),
'user_agent': 'im a badbot', # robots.txt blocks badbot
'warcprox_meta': {'captures-table-extra-fields':{'test_id':test_id}}})
# so we can examine rethinkdb before it does anything
try:
stop_service('brozzler-worker')
assert site.id is None
frontier = brozzler.RethinkDbFrontier(rr)
brozzler.new_site(frontier, site)
assert site.id is not None
site_pages = list(frontier.site_pages(site.id))
assert len(site_pages) == 1
assert site_pages[0].url == site.seed
assert site_pages[0].needs_robots_check
finally:
start_service('brozzler-worker')
# the site should be brozzled fairly quickly
start = time.time()
while site.status != 'FINISHED' and time.time() - start < 300:
time.sleep(0.5)
site.refresh()
assert site.status == 'FINISHED'
# check that only the one page is in rethinkdb
pages = list(frontier.site_pages(site.id))
assert len(pages) == 1
page = pages[0]
assert page.url == make_url(httpd, '/site1/')
assert page.blocked_by_robots
# take a look at the captures table
time.sleep(2) # in case warcprox hasn't finished processing urls
robots_url = make_url(httpd, '/robots.txt')
captures = list(rr.table('captures').filter({'test_id':test_id}).run())
assert len(captures) == 1
assert captures[0]['url'] == robots_url
# check pywb
t14 = captures[0]['timestamp'].strftime('%Y%m%d%H%M%S')
wb_url = 'http://localhost:8880/brozzler/%s/%s' % (t14, robots_url)
expected_payload = open(os.path.join(
os.path.dirname(__file__), 'htdocs', 'robots.txt'), 'rb').read()
assert requests.get(
wb_url, allow_redirects=False).content == expected_payload
def test_login(httpd):
test_id = 'test_login-%s' % datetime.datetime.utcnow().isoformat()
rr = doublethink.Rethinker('localhost', db='brozzler')
site = brozzler.Site(rr, {
'seed': make_url(httpd, '/site2/'),
'warcprox_meta': {'captures-table-extra-fields':{'test_id':test_id}},
'username': 'test_username', 'password': 'test_password'})
frontier = brozzler.RethinkDbFrontier(rr)
brozzler.new_site(frontier, site)
# the site should be brozzled fairly quickly
start = time.time()
while site.status != 'FINISHED' and time.time() - start < 300:
time.sleep(0.5)
site.refresh()
assert site.status == 'FINISHED'
# take a look at the captures table
time.sleep(2) # in case warcprox hasn't finished processing urls
robots_url = make_url(httpd, '/robots.txt')
captures = list(rr.table('captures').filter(
{'test_id':test_id}).order_by('timestamp').run())
meth_url = ['%s %s' % (c['http_method'], c['url']) for c in captures]
    # there are several forms in htdocs/site2/login.html but only one
# that brozzler's heuristic should match and try to submit, and it has
# action='00', so we can check for that here
assert ('POST %s' % make_url(httpd, '/site2/00')) in meth_url
# sanity check the rest of the crawl
assert ('GET %s' % make_url(httpd, '/robots.txt')) in meth_url
assert ('GET %s' % make_url(httpd, '/site2/')) in meth_url
assert ('WARCPROX_WRITE_RECORD screenshot:%s' % make_url(httpd, '/site2/')) in meth_url
assert ('WARCPROX_WRITE_RECORD thumbnail:%s' % make_url(httpd, '/site2/')) in meth_url
assert ('GET %s' % make_url(httpd, '/site2/login.html')) in meth_url
assert ('WARCPROX_WRITE_RECORD screenshot:%s' % make_url(httpd, '/site2/login.html')) in meth_url
assert ('WARCPROX_WRITE_RECORD thumbnail:%s' % make_url(httpd, '/site2/login.html')) in meth_url
def test_seed_redirect(httpd):
test_id = 'test_seed_redirect-%s' % datetime.datetime.utcnow().isoformat()
rr = doublethink.Rethinker('localhost', db='brozzler')
seed_url = make_url(httpd, '/site5/redirect/')
site = brozzler.Site(rr, {
'seed': make_url(httpd, '/site5/redirect/'),
'warcprox_meta': {'captures-table-extra-fields':{'test_id':test_id}}})
assert site.scope == {'accepts': [{'ssurt': '%s//%s:http:/site5/redirect/' % (local_address, httpd.server_port)}]}
frontier = brozzler.RethinkDbFrontier(rr)
brozzler.new_site(frontier, site)
assert site.id
# the site should be brozzled fairly quickly
start = time.time()
while site.status != 'FINISHED' and time.time() - start < 300:
time.sleep(0.5)
site.refresh()
assert site.status == 'FINISHED'
# take a look at the pages table
pages = list(frontier.site_pages(site.id))
assert len(pages) == 2
pages.sort(key=lambda page: page.hops_from_seed)
assert pages[0].hops_from_seed == 0
assert pages[0].url == seed_url
assert pages[0].redirect_url == make_url(httpd, '/site5/destination/')
assert pages[1].hops_from_seed == 1
assert pages[1].url == make_url(httpd, '/site5/destination/page2.html')
# check that scope has been updated properly
assert site.scope == {'accepts': [
{'ssurt': '%s//%s:http:/site5/redirect/' % (local_address, httpd.server_port)},
{'ssurt': '%s//%s:http:/site5/destination/' % (local_address, httpd.server_port)}]}
def test_hashtags(httpd):
test_id = 'test_hashtags-%s' % datetime.datetime.utcnow().isoformat()
rr = doublethink.Rethinker('localhost', db='brozzler')
seed_url = make_url(httpd, '/site7/')
site = brozzler.Site(rr, {
'seed': seed_url,
'warcprox_meta': {'captures-table-extra-fields':{'test_id':test_id}}})
frontier = brozzler.RethinkDbFrontier(rr)
brozzler.new_site(frontier, site)
assert site.id
# the site should be brozzled fairly quickly
start = time.time()
while site.status != 'FINISHED' and time.time() - start < 300:
time.sleep(0.5)
site.refresh()
assert site.status == 'FINISHED'
    # check that we got the pages we expected
pages = sorted(list(frontier.site_pages(site.id)), key=lambda p: p.url)
assert len(pages) == 2
assert pages[0].url == seed_url
assert pages[0].hops_from_seed == 0
assert pages[0].brozzle_count == 1
assert pages[0].outlinks['accepted'] == [make_url(httpd, '/site7/foo.html')]
assert not pages[0].hashtags
assert pages[1].url == make_url(httpd, '/site7/foo.html')
assert pages[1].hops_from_seed == 1
assert pages[1].brozzle_count == 1
assert sorted(pages[1].hashtags) == ['#boosh','#ignored','#whee',]
time.sleep(2) # in case warcprox hasn't finished processing urls
# take a look at the captures table
captures = rr.table('captures').filter({'test_id':test_id}).run()
captures_by_url = {
c['url']: c for c in captures if c['http_method'] != 'HEAD'}
assert seed_url in captures_by_url
assert make_url(httpd, '/site7/foo.html') in captures_by_url
assert make_url(httpd, '/site7/whee.txt') in captures_by_url
assert make_url(httpd, '/site7/boosh.txt') in captures_by_url
assert 'screenshot:%s' % seed_url in captures_by_url
assert 'thumbnail:%s' % seed_url in captures_by_url
assert 'screenshot:%s' % make_url(httpd, '/site7/foo.html') in captures_by_url
assert 'thumbnail:%s' % make_url(httpd, '/site7/foo.html') in captures_by_url
def test_redirect_hashtags(httpd):
test_id = 'test_hashtags-%s' % datetime.datetime.utcnow().isoformat()
rr = doublethink.Rethinker('localhost', db='brozzler')
seed_url = make_url(httpd, '/site9/')
site = brozzler.Site(rr, {
'seed': seed_url,
'warcprox_meta': {'captures-table-extra-fields':{'test_id':test_id}}})
frontier = brozzler.RethinkDbFrontier(rr)
brozzler.new_site(frontier, site)
assert site.id
# the site should be brozzled fairly quickly
start = time.time()
while site.status != 'FINISHED' and time.time() - start < 300:
time.sleep(0.5)
site.refresh()
assert site.status == 'FINISHED'
    # check that we got the pages we expected
pages = sorted(list(frontier.site_pages(site.id)), key=lambda p: p.url)
assert len(pages) == 2
assert pages[0].url == seed_url
assert pages[0].hops_from_seed == 0
assert pages[0].brozzle_count == 1
assert pages[0].outlinks['accepted'] == [make_url(httpd, '/site9/redirect.html')]
assert not pages[0].hashtags
assert pages[1].url == make_url(httpd, '/site9/redirect.html')
assert pages[1].hops_from_seed == 1
assert pages[1].brozzle_count == 1
assert sorted(pages[1].hashtags) == ['#hash1','#hash2',]
time.sleep(2) # in case warcprox hasn't finished processing urls
# take a look at the captures table
captures = rr.table('captures').filter({'test_id':test_id}).run()
redirect_captures = [c for c in captures if c['url'] == make_url(httpd, '/site9/redirect.html') and c['http_method'] == 'GET']
assert len(redirect_captures) == 2 # youtube-dl + browser, no hashtags
# === expected captures ===
# 1. GET http://localhost:41243/favicon.ico
# 2. GET http://localhost:41243/robots.txt
# 3. GET http://localhost:41243/site9/
# 4. GET http://localhost:41243/site9/
# 5. GET http://localhost:41243/site9/destination.html
# 6. GET http://localhost:41243/site9/destination.html
# 7. GET http://localhost:41243/site9/redirect.html
# 8. GET http://localhost:41243/site9/redirect.html
# 9. HEAD http://localhost:41243/site9/
# 10. HEAD http://localhost:41243/site9/redirect.html
# 11. WARCPROX_WRITE_RECORD screenshot:http://localhost:41243/site9/
# 12. WARCPROX_WRITE_RECORD screenshot:http://localhost:41243/site9/redirect.html
# 13. WARCPROX_WRITE_RECORD thumbnail:http://localhost:41243/site9/
# 14. WARCPROX_WRITE_RECORD thumbnail:http://localhost:41243/site9/redirect.html
def test_stop_crawl(httpd):
test_id = 'test_stop_crawl_job-%s' % datetime.datetime.utcnow().isoformat()
rr = doublethink.Rethinker('localhost', db='brozzler')
frontier = brozzler.RethinkDbFrontier(rr)
# create a new job with three sites that could be crawled forever
job_conf = {'seeds': [
{'url': make_url(httpd, '/infinite/foo/')},
{'url': make_url(httpd, '/infinite/bar/')},
{'url': make_url(httpd, '/infinite/baz/')}]}
job = brozzler.new_job(frontier, job_conf)
assert job.id
sites = list(frontier.job_sites(job.id))
assert not sites[0].stop_requested
assert not sites[1].stop_requested
# request crawl stop for one site using the command line entrypoint
brozzler.cli.brozzler_stop_crawl([
'brozzler-stop-crawl', '--site=%s' % sites[0].id])
sites[0].refresh()
assert sites[0].stop_requested
# stop request should be honored quickly
start = time.time()
while not sites[0].status.startswith(
'FINISHED') and time.time() - start < 120:
time.sleep(0.5)
sites[0].refresh()
assert sites[0].status == 'FINISHED_STOP_REQUESTED'
# but the other sites and the job as a whole should still be crawling
sites[1].refresh()
assert sites[1].status == 'ACTIVE'
sites[2].refresh()
assert sites[2].status == 'ACTIVE'
job.refresh()
assert job.status == 'ACTIVE'
# request crawl stop for the job using the command line entrypoint
brozzler.cli.brozzler_stop_crawl([
'brozzler-stop-crawl', '--job=%s' % job.id])
job.refresh()
assert job.stop_requested
# stop request should be honored quickly
start = time.time()
while not job.status.startswith(
'FINISHED') and time.time() - start < 120:
time.sleep(0.5)
job.refresh()
assert job.status == 'FINISHED'
# the other sites should also be FINISHED_STOP_REQUESTED
sites[0].refresh()
assert sites[0].status == 'FINISHED_STOP_REQUESTED'
sites[1].refresh()
assert sites[1].status == 'FINISHED_STOP_REQUESTED'
sites[2].refresh()
assert sites[2].status == 'FINISHED_STOP_REQUESTED'
def test_warcprox_outage_resiliency(httpd):
'''
Tests resiliency to warcprox outage.
If no instances of warcprox are healthy when starting to crawl a site,
brozzler-worker should sit there and wait until a healthy instance appears.
If an instance goes down, sites assigned to that instance should bounce
over to a healthy instance.
If all instances of warcprox go down, brozzler-worker should sit and wait.
'''
rr = doublethink.Rethinker('localhost', db='brozzler')
frontier = brozzler.RethinkDbFrontier(rr)
svcreg = doublethink.ServiceRegistry(rr)
# run two instances of warcprox
opts = warcprox.Options()
opts.address = '0.0.0.0'
opts.port = 0
opts.rethinkdb_services_url = 'rethinkdb://localhost/brozzler/services'
warcprox1 = warcprox.controller.WarcproxController(opts)
warcprox2 = warcprox.controller.WarcproxController(opts)
warcprox1_thread = threading.Thread(
target=warcprox1.run_until_shutdown, name='warcprox1')
warcprox2_thread = threading.Thread(
target=warcprox2.run_until_shutdown, name='warcprox2')
# put together a site to crawl
test_id = 'test_warcprox_death-%s' % datetime.datetime.utcnow().isoformat()
site = brozzler.Site(rr, {
'seed': make_url(httpd, '/infinite/'),
'warcprox_meta': {'captures-table-extra-fields':{'test_id':test_id}}})
try:
# we manage warcprox instances ourselves, so stop the one running on
# the system, if any
try:
stop_service('warcprox')
except Exception as e:
logging.warning('problem stopping warcprox service: %s', e)
# queue the site for brozzling
brozzler.new_site(frontier, site)
# check that nothing happens
# XXX tail brozzler-worker.log or something?
time.sleep(30)
site.refresh()
assert site.status == 'ACTIVE'
assert not site.proxy
assert len(list(frontier.site_pages(site.id))) == 1
# start one instance of warcprox
warcprox1_thread.start()
# check that it started using that instance
start = time.time()
while not site.proxy and time.time() - start < 30:
time.sleep(0.5)
site.refresh()
assert site.proxy.endswith(':%s' % warcprox1.proxy.server_port)
# check that the site accumulates pages in the frontier, confirming
# that crawling is really happening
start = time.time()
while (len(list(frontier.site_pages(site.id))) <= 1
and time.time() - start < 60):
time.sleep(0.5)
site.refresh()
assert len(list(frontier.site_pages(site.id))) > 1
# stop warcprox #1, start warcprox #2
warcprox2_thread.start()
warcprox1.stop.set()
warcprox1_thread.join()
# check that it switched over to warcprox #2
start = time.time()
while ((not site.proxy
or not site.proxy.endswith(':%s' % warcprox2.proxy.server_port))
and time.time() - start < 30):
time.sleep(0.5)
site.refresh()
assert site.proxy.endswith(':%s' % warcprox2.proxy.server_port)
# stop warcprox #2
warcprox2.stop.set()
warcprox2_thread.join()
page_count = len(list(frontier.site_pages(site.id)))
assert page_count > 1
# check that it is waiting for a warcprox to appear
time.sleep(30)
site.refresh()
assert site.status == 'ACTIVE'
assert not site.proxy
assert len(list(frontier.site_pages(site.id))) == page_count
# stop crawling the site, else it can pollute subsequent test runs
brozzler.cli.brozzler_stop_crawl([
'brozzler-stop-crawl', '--site=%s' % site.id])
site.refresh()
assert site.stop_requested
# stop request should be honored quickly
start = time.time()
while not site.status.startswith(
'FINISHED') and time.time() - start < 120:
time.sleep(0.5)
site.refresh()
assert site.status == 'FINISHED_STOP_REQUESTED'
finally:
warcprox1.stop.set()
warcprox2.stop.set()
warcprox1_thread.join()
warcprox2_thread.join()
start_service('warcprox')
def test_time_limit(httpd):
test_id = 'test_time_limit-%s' % datetime.datetime.utcnow().isoformat()
rr = doublethink.Rethinker('localhost', db='brozzler')
frontier = brozzler.RethinkDbFrontier(rr)
# create a new job with one seed that could be crawled forever
job_conf = {'seeds': [{
'url': make_url(httpd, '/infinite/foo/'),
'time_limit': 20}]}
job = brozzler.new_job(frontier, job_conf)
assert job.id
sites = list(frontier.job_sites(job.id))
assert len(sites) == 1
site = sites[0]
# time limit should be enforced pretty soon
start = time.time()
while not sites[0].status.startswith(
'FINISHED') and time.time() - start < 120:
time.sleep(0.5)
sites[0].refresh()
assert sites[0].status == 'FINISHED_TIME_LIMIT'
# all sites finished so job should be finished too
start = time.time()
job.refresh()
while not job.status == 'FINISHED' and time.time() - start < 10:
time.sleep(0.5)
job.refresh()
assert job.status == 'FINISHED'
def test_ydl_stitching(httpd):
test_id = 'test_ydl_stitching-%s' % datetime.datetime.utcnow().isoformat()
rr = doublethink.Rethinker('localhost', db='brozzler')
frontier = brozzler.RethinkDbFrontier(rr)
site = brozzler.Site(rr, {
'seed': make_url(httpd, '/site10/'),
'warcprox_meta': {
'warc-prefix': 'test_ydl_stitching',
'captures-table-extra-fields': {'test_id':test_id}}})
brozzler.new_site(frontier, site)
# the site should be brozzled fairly quickly
start = time.time()
while site.status != 'FINISHED' and time.time() - start < 300:
time.sleep(0.5)
site.refresh()
assert site.status == 'FINISHED'
# check page.videos
pages = list(frontier.site_pages(site.id))
assert len(pages) == 1
page = pages[0]
assert len(page.videos) == 6
stitched_url = 'youtube-dl:00001:%s' % make_url(httpd, '/site10/')
assert {
'blame': 'youtube-dl',
'content-length': 267900,
'content-type': 'video/mp4',
'response_code': 204,
'url': stitched_url,
} in page.videos
time.sleep(2) # in case warcprox hasn't finished processing urls
# take a look at the captures table
captures = list(rr.table('captures').filter({'test_id':test_id}).run())
l = [c for c in captures if c['url'] == stitched_url]
assert len(l) == 1
c = l[0]
assert c['filename'].startswith('test_ydl_stitching')
assert c['content_type'] == 'video/mp4'
assert c['http_method'] == 'WARCPROX_WRITE_RECORD'
| apache-2.0 | 1,845,609,178,133,929,700 | 37.513264 | 130 | 0.627055 | false |
edwardgeorge/libgmail | demos/MakeTarBall.py | 1 | 1530 | #!/usr/bin/env python
# make tarball!
VERSION = '0.3'
PACKAGENAME = 'libgmail-docs_'
import os
print "\nCreate API docs"
os.system('epydoc -o API ../libgmail.py')
def cleanup(*args):
"""Used by os.path.walk to traverse the tree and remove CVS dirs"""
if os.path.split(args[1])[1] == "CVS":
print "Remove ",args[1]
os.system('rm -r %s' % args[1])
filelist = open('filelist', 'r')
folderlist = open('folderlist', 'r')
myFiles = filelist.readlines()
myFolders = folderlist.readlines()
os.system('mkdir %s%s' % (PACKAGENAME,VERSION))
for file in myFiles:
os.system('cp %s %s%s' % (file[:-1], PACKAGENAME,VERSION))
for folder in myFolders:
os.system('mkdir %s%s/%s' % (PACKAGENAME,VERSION, folder[:-1]))
os.system('cp -r %s %s%s' % (folder[:-1],PACKAGENAME, VERSION))
# removing the CVS stuff
os.path.walk('%s%s' % (PACKAGENAME,VERSION),cleanup,None)
print "\nCreate a GNU/Linux tarball..."
try:
execString = 'tar -czf %s%s.tgz %s%s/' % (PACKAGENAME,VERSION,PACKAGENAME, VERSION)
print execString
os.system(execString)
except Exception,info:
print info,"\nYou must have the tar package installed"
else:
print "Done.\n"
print "Create a Windows compatible zipfile..."
try:
execString = 'zip -rq %s%s.zip ./%s%s' % (PACKAGENAME,VERSION,PACKAGENAME, VERSION)
print execString
os.system(execString)
except Exception,info:
print info,"\nYou must have the zip package installed."
else:
print "Done\n"
os.system('rm -rf %s%s' % (PACKAGENAME,VERSION))
| gpl-2.0 | -953,915,241,889,044,600 | 29 | 87 | 0.660784 | false |
googleads/google-ads-python | google/ads/googleads/v8/services/types/operating_system_version_constant_service.py | 1 | 1324 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v8.services",
marshal="google.ads.googleads.v8",
manifest={"GetOperatingSystemVersionConstantRequest",},
)
class GetOperatingSystemVersionConstantRequest(proto.Message):
r"""Request message for
[OperatingSystemVersionConstantService.GetOperatingSystemVersionConstant][google.ads.googleads.v8.services.OperatingSystemVersionConstantService.GetOperatingSystemVersionConstant].
Attributes:
resource_name (str):
Required. Resource name of the OS version to
fetch.
"""
resource_name = proto.Field(proto.STRING, number=1,)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | 6,450,602,099,501,945,000 | 32.948718 | 184 | 0.737915 | false |
Kuniwak/vint | test/unit/vint/linting/formatter/test_json_formatter.py | 1 | 2170 | import unittest
from test.asserting.formatter import FormatterAssertion
import json
from pathlib import Path
from vint.linting.formatter.json_formatter import JSONFormatter
from vint.linting.level import Level
class TestJSONFormatter(FormatterAssertion, unittest.TestCase):
def test_format_violations(self):
formatter = JSONFormatter()
violations = [
{
'name': 'ProhibitSomethingEvil',
'level': Level.WARNING,
'description': 'this code is tooooo evil',
'reference': 'me',
'position': {
'line': 1,
'column': 2,
'path': str(Path('path', 'to', 'file1'))
},
},
{
'name': 'ProhibitSomethingDangerous',
'level': Level.WARNING,
'description': 'this code is tooooo dangerous',
'reference': 'you',
'position': {
'line': 11,
'column': 21,
'path': str(Path('path', 'to', 'file2'))
},
},
]
expected_output = [
{
'policy_name': 'ProhibitSomethingEvil',
'severity': 'warning',
'description': 'this code is tooooo evil',
'reference': 'me',
'line_number': 1,
'column_number': 2,
'file_path': str(Path('path', 'to', 'file1')),
},
{
'policy_name': 'ProhibitSomethingDangerous',
'severity': 'warning',
'description': 'this code is tooooo dangerous',
'reference': 'you',
'line_number': 11,
'column_number': 21,
'file_path': str(Path('path', 'to', 'file2')),
},
]
json_output = formatter.format_violations(violations)
parsed_output = json.loads(json_output)
self.maxDiff = 1500
self.assertEqual(parsed_output, expected_output)
if __name__ == '__main__':
unittest.main()
| mit | 8,809,824,793,104,851,000 | 30.911765 | 63 | 0.469585 | false |
sysadminmatmoz/odoo-clearcorp | cash_budget/wizard/cash_budget_program_populate.py | 1 | 4063 | # -*- coding: utf-8 -*-
# © 2016 ClearCorp
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import errno
from openerp.osv import osv, fields
from openerp.tools.translate import _
import base64
import logging
class cash_budget_program_populate(osv.osv_memory):
_name = 'cash.budget.program.populate'
_columns = {
'parent_account': fields.many2one('cash.budget.account', 'Catalog parent', domain=[('account_type','!=','budget'), ('active','=','True')], required=True),
}
def create_prog_line(self, cr, uid, program_id, program_code, parent_account_id=None, parent_line_id=None, previous_program_id=None,context=None ):
prog_obj = self.pool.get('cash.budget.program')
line_obj = self.pool.get('cash.budget.program.line')
account_obj = self.pool.get('cash.budget.account')
for account in account_obj.browse(cr, uid, [parent_account_id], context=context):
# for child in account_obj.browse(cr, uid, account.child_parent_ids, context=context):
if account.child_parent_ids:
for child in account.child_parent_ids:
line_name = program_code + ' - [' + child.code + ']-' + child.name
previous_program_lines = line_obj.search(cr, uid, [('program_id','=',previous_program_id),('account_id','=',child.id),],context=context)
vals = {'parent_id':parent_line_id, 'account_id':child.id, 'program_id':program_id, 'name':line_name}
if previous_program_lines:
vals['previous_year_line_id'] = previous_program_lines[0]
new_line = line_obj.create(cr, uid, vals,context=context )
program = prog_obj.browse(cr,uid,[program_id],context=context)[0]
self.create_prog_line(cr, uid, program_id, program_code, child.id, new_line, previous_program_id=program.previous_program_id.id, context=context )
if account.child_consol_ids:
program = prog_obj.browse(cr,uid,[program_id],context=context)[0]
parent_line = line_obj.browse(cr, uid, [parent_line_id],context=context)[0]
for consol_child in account.child_consol_ids:
prog_lines=line_obj.search(cr, uid, [('account_id','=',consol_child.id)],context=context)
for prg_line in line_obj.browse(cr,uid,prog_lines,context=context):
if program.plan_id.id == prg_line.program_id.plan_id.id:
line_obj.write(cr,uid,[parent_line.id],{'child_consol_ids':[(4,prg_line.id)]})
#line_name = program_code + ' - [' + child.code + ']-' + child.name
#new_line = line_obj.create(cr, uid, {'parent_id':parent_line_id, 'account_id':child.id, 'program_id':program_id, 'name':line_name} )
#self.create_prog_line(cr, uid, program_id, program_code, child.id, new_line, context=context)
return True
def bulk_line_create(self, cr, uid, ids, context=None):
prog_obj = self.pool.get('cash.budget.program')
line_obj = self.pool.get('cash.budget.program.line')
account_obj = self.pool.get('cash.budget.account')
data = self.browse(cr, uid, ids, context=context)[0]
for program in prog_obj.browse(cr, uid, context['active_ids'], context=context):
current_lines = len(program.program_lines)
if current_lines > 0:
raise osv.except_osv(_('Error!'), _('This program already contains program lines'))
line_name = program.code + ' - [' + data.parent_account.code + ']-' + data.parent_account.name
new_line = line_obj.create(cr, uid, {'account_id':data.parent_account.id, 'program_id':program.id, 'name':line_name} )
self.create_prog_line(cr, uid, program.id, program.code, data.parent_account.id, new_line , previous_program_id=program.previous_program_id.id, context=context)
return True
| agpl-3.0 | -6,161,663,397,664,325,000 | 61.492308 | 172 | 0.602659 | false |
forkbong/passata | tests/test_show.py | 1 | 3697 | # Copyright 2017 Panagiotis Ktistakis <[email protected]>
#
# This file is part of passata.
#
# passata is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# passata is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with passata. If not, see <http://www.gnu.org/licenses/>.
"""Tests for passata show."""
from tests.helpers import clipboard, run
def test_show(db):
result = run(['show', 'internet/github'])
assert result.output == (
'password: gh\n'
'username: takis\n'
)
def test_show_nonexistent_entry(db):
result = run(['show', 'internet/nonexistent'])
assert isinstance(result.exception, SystemExit)
assert result.output == "internet/nonexistent not found\n"
def test_show_entry_three_levels_deep(db):
result = run(['show', 'one/two/three'])
assert isinstance(result.exception, SystemExit)
assert result.output == "one/two/three is nested too deeply\n"
def test_show_clipboard(db):
result = run(['show', 'internet/github', '--clip'])
assert result.output == ''
assert clipboard() == 'gh'
def test_show_clipboard_whole_database(db):
result = run(['show', '--clip'])
assert isinstance(result.exception, SystemExit)
assert result.output == "Can't put the entire database to clipboard\n"
def test_show_clipboard_whole_group(db):
result = run(['show', 'internet', '--clip'])
assert isinstance(result.exception, SystemExit)
assert result.output == "Can't put the entire group to clipboard\n"
def test_show_group(db):
result = run(['show', 'internet'])
assert result.output == (
'github:\n'
' password: gh\n'
' username: takis\n'
'reddit:\n'
' password: rdt\n'
' username: sakis\n'
)
def test_show_group_with_trailing_slash(db):
result = run(['show', 'internet/'])
assert result.output == (
'github:\n'
' password: gh\n'
' username: takis\n'
'reddit:\n'
' password: rdt\n'
' username: sakis\n'
)
def test_show_color(db, editor):
# Insert a new entry with a list
editor(updated=(
'username: user\n'
'password: pass\n'
'autotype: <username> Return !1.5 <password> Return\n'
'keywords:\n'
'- youtube\n'
'- gmail\n'
))
run(['edit', 'group/google'])
# Test show without color
expected = (
'google:\n'
' username: user\n'
' password: pass\n'
' autotype: <username> Return !1.5 <password> Return\n'
' keywords:\n'
' - youtube\n'
' - gmail\n'
)
result = run(['--no-color', 'show', 'group'])
assert result.output == expected
# Test show with color
expected = (
'\033[38;5;12mgoogle\033[38;5;11m:\033[0m\n'
'\033[38;5;12m username\033[38;5;11m:\033[0m user\n'
'\033[38;5;12m password\033[38;5;11m:\033[0m pass\n'
'\033[38;5;12m autotype\033[38;5;11m:\033[0m '
'<username> Return !1.5 <password> Return\n'
'\033[38;5;12m keywords\033[38;5;11m:\033[0m\n'
'\033[38;5;9m - \033[0myoutube\n'
'\033[38;5;9m - \033[0mgmail\n'
)
result = run(['--color', 'show', 'group'])
assert result.output == expected
| gpl-3.0 | -2,492,659,238,193,900,000 | 29.303279 | 74 | 0.614011 | false |
fracpete/python-weka-wrapper3 | python/weka/clusterers.py | 1 | 20196 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# clusterers.py
# Copyright (C) 2014-2021 Fracpete (pythonwekawrapper at gmail dot com)
import javabridge
import logging
import os
import sys
import argparse
import traceback
import weka.core.jvm as jvm
import weka.core.classes as classes
from weka.core.classes import serialization_write, serialization_read_all, serialization_write_all
from weka.core.classes import JavaObject, join_options
from weka.core.classes import OptionHandler
from weka.core.classes import Random
from weka.core.capabilities import Capabilities
from weka.core.dataset import Instances
from weka.core.distances import DistanceFunction
from weka.filters import Filter
# logging setup
logger = logging.getLogger("weka.clusterers")
class Clusterer(OptionHandler):
"""
Wrapper class for clusterers.
"""
def __init__(self, classname="weka.clusterers.SimpleKMeans", jobject=None, options=None):
"""
Initializes the specified clusterer using either the classname or the supplied JB_Object.
:param classname: the classname of the clusterer
:type classname: str
:param jobject: the JB_Object to use
:type jobject: JB_Object
:param options: the list of commandline options to use
:type options: list
"""
if jobject is None:
jobject = Clusterer.new_instance(classname)
self.is_updateable = self.check_type(jobject, "weka.clusterers.UpdateableClusterer")
self.is_drawable = self.check_type(jobject, "weka.core.Drawable")
self.enforce_type(jobject, "weka.clusterers.Clusterer")
self._header = None
super(Clusterer, self).__init__(jobject=jobject, options=options)
def _make_calls(self):
"""
Method for generating instances using javabridge.make_call.
Members must start with "_mc_"
"""
super(Clusterer, self)._make_calls()
self._mc_cluster = javabridge.make_call(self.jobject, "clusterInstance", "(Lweka/core/Instance;)I")
self._mc_distribution = javabridge.make_call(self.jobject, "distributionForInstance", "(Lweka/core/Instance;)[D")
@property
def capabilities(self):
"""
Returns the capabilities of the clusterer.
:return: the capabilities
:rtype: Capabilities
"""
return Capabilities(javabridge.call(self.jobject, "getCapabilities", "()Lweka/core/Capabilities;"))
@property
def header(self):
"""
Returns the header of the training data.
:return: the structure of the training data, None if not available
:rtype: Instances
"""
return self._header
def build_clusterer(self, data):
"""
Builds the clusterer with the data.
:param data: the data to use for training the clusterer
:type data: Instances
"""
self._header = data.copy_structure()
javabridge.call(self.jobject, "buildClusterer", "(Lweka/core/Instances;)V", data.jobject)
def update_clusterer(self, inst):
"""
Updates the clusterer with the instance.
:param inst: the Instance to update the clusterer with
:type inst: Instance
"""
if self.is_updateable:
javabridge.call(self.jobject, "updateClusterer", "(Lweka/core/Instance;)V", inst.jobject)
else:
logger.critical(classes.get_classname(self.jobject) + " is not updateable!")
def update_finished(self):
"""
Signals the clusterer that updating with new data has finished.
"""
if self.is_updateable:
javabridge.call(self.jobject, "updateFinished", "()V")
else:
logger.critical(classes.get_classname(self.jobject) + " is not updateable!")
def cluster_instance(self, inst):
"""
        Performs a prediction.
:param inst: the instance to determine the cluster for
:type inst: Instance
:return: the clustering result
:rtype: float
"""
return self._mc_cluster(inst.jobject)
def distribution_for_instance(self, inst):
"""
        Performs a prediction, returning the cluster distribution.
:param inst: the Instance to get the cluster distribution for
:type inst: Instance
:return: the cluster distribution
:rtype: float[]
"""
pred = self._mc_distribution(inst.jobject)
return javabridge.get_env().get_double_array_elements(pred)
@property
def number_of_clusters(self):
"""
Returns the number of clusters found.
        :return: the number of clusters
:rtype: int
"""
return javabridge.call(self.jobject, "numberOfClusters", "()I")
@property
def graph_type(self):
"""
Returns the graph type if classifier implements weka.core.Drawable, otherwise -1.
:return: the type
:rtype: int
"""
if self.is_drawable:
return javabridge.call(self.jobject, "graphType", "()I")
else:
return -1
@property
def graph(self):
"""
Returns the graph if classifier implements weka.core.Drawable, otherwise None.
:return: the graph or None if not available
:rtype: str
"""
if self.is_drawable:
return javabridge.call(self.jobject, "graph", "()Ljava/lang/String;")
else:
return None
@classmethod
def make_copy(cls, clusterer):
"""
Creates a copy of the clusterer.
        :param clusterer: the clusterer to copy
:type clusterer: Clusterer
:return: the copy of the clusterer
:rtype: Clusterer
"""
return Clusterer(
jobject=javabridge.static_call(
"weka/clusterers/AbstractClusterer", "makeCopy",
"(Lweka/clusterers/Clusterer;)Lweka/clusterers/Clusterer;", clusterer.jobject))
@classmethod
def deserialize(cls, ser_file):
"""
Deserializes a clusterer from a file.
:param ser_file: the model file to deserialize
:type ser_file: str
:return: model and, if available, the dataset header
:rtype: tuple
"""
objs = serialization_read_all(ser_file)
if len(objs) == 1:
return Clusterer(jobject=objs[0]), None
elif len(objs) == 2:
return Clusterer(jobject=objs[0]), Instances(jobject=objs[1])
else:
raise Exception(
"Excepted one or two objects in the model file (%s), but encountered: %d" % (ser_file, len(objs)))
def serialize(self, ser_file, header=None):
"""
Serializes the clusterer to the specified file.
:param ser_file: the file to save the model to
:type ser_file: str
:param header: the (optional) dataset header to store alongside; recommended
:type header: Instances
"""
if (header is not None) and header.num_instances > 0:
header = Instances.template_instances(header)
if header is not None:
serialization_write_all(ser_file, [self, header])
else:
serialization_write(ser_file, self)
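# The function below is an illustrative usage sketch only, not part of the
# original module: it assumes the JVM has already been started via jvm.start(),
# that `arff_path` is a hypothetical path to an existing ARFF file, and that
# the dataset's last attribute is a class attribute that must be removed
# before clustering.
def _example_build_and_cluster(arff_path):
    from weka.core.converters import Loader  # assumed loader API from weka.core
    loader = Loader(classname="weka.core.converters.ArffLoader")
    data = loader.load_file(arff_path)
    data.delete_last_attribute()  # clusterers cannot handle a class attribute
    kmeans = Clusterer(classname="weka.clusterers.SimpleKMeans", options=["-N", "3"])
    kmeans.build_clusterer(data)
    # cluster each training instance and collect the cluster assignments
    return [kmeans.cluster_instance(inst) for inst in data]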
class SingleClustererEnhancer(Clusterer):
"""
Wrapper class for clusterers that use a single base clusterer.
"""
def __init__(self, classname=None, jobject=None, options=None):
"""
Initializes the specified clusterer using either the classname or the supplied JB_Object.
:param classname: the classname of the clusterer
:type classname: str
:param jobject: the JB_Object to use
:type jobject: JB_Object
:param options: the list of commandline options to use
:type options: list
"""
if jobject is None:
jobject = Clusterer.new_instance(classname)
self.enforce_type(jobject, "weka.clusterers.SingleClustererEnhancer")
super(SingleClustererEnhancer, self).__init__(classname=classname, jobject=jobject, options=options)
@property
def clusterer(self):
"""
Returns the base clusterer.
:return: the clusterer
:rtype: Clusterer
"""
return Clusterer(jobject=javabridge.call(self.jobject, "getClusterer", "()Lweka/clusterers/Clusterer;"))
@clusterer.setter
def clusterer(self, clusterer):
"""
Sets the base clusterer.
:param clusterer: the base clusterer to use
:type clusterer: Clusterer
"""
javabridge.call(self.jobject, "setClusterer", "(Lweka/clusterers/Clusterer;)V", clusterer.jobject)
class FilteredClusterer(SingleClustererEnhancer):
"""
Wrapper class for the filtered clusterer.
"""
def __init__(self, jobject=None, options=None):
"""
Initializes the specified clusterer using either the classname or the supplied JB_Object.
:param jobject: the JB_Object to use
:type jobject: JB_Object
:param options: the list of commandline options to use
:type options: list
"""
classname = "weka.clusterers.FilteredClusterer"
if jobject is None:
jobject = Clusterer.new_instance(classname)
self.enforce_type(jobject, classname)
super(FilteredClusterer, self).__init__(classname=classname, jobject=jobject, options=options)
@property
def filter(self):
"""
Returns the filter.
:return: the filter
:rtype: Filter
"""
return Filter(jobject=javabridge.call(self.jobject, "getFilter", "()Lweka/filters/Filter;"))
@filter.setter
def filter(self, filtr):
"""
Sets the filter.
:param filtr: the filter to use
:type filtr: Filter
"""
javabridge.call(self.jobject, "setFilter", "(Lweka/filters/Filter;)V", filtr.jobject)
class ClusterEvaluation(JavaObject):
"""
Evaluation class for clusterers.
"""
def __init__(self):
"""
Initializes a ClusterEvaluation object.
"""
super(ClusterEvaluation, self).__init__(ClusterEvaluation.new_instance("weka.clusterers.ClusterEvaluation"))
def set_model(self, clusterer):
"""
Sets the built clusterer to evaluate.
:param clusterer: the clusterer to evaluate
:type clusterer: Clusterer
"""
javabridge.call(self.jobject, "setClusterer", "(Lweka/clusterers/Clusterer;)V", clusterer.jobject)
def test_model(self, test):
"""
Evaluates the currently set clusterer on the test set.
:param test: the test set to use for evaluating
:type test: Instances
"""
javabridge.call(self.jobject, "evaluateClusterer", "(Lweka/core/Instances;)V", test.jobject)
@property
def cluster_results(self):
"""
The cluster results as string.
:return: the results string
:rtype: str
"""
return javabridge.call(self.jobject, "clusterResultsToString", "()Ljava/lang/String;")
@property
def cluster_assignments(self):
"""
Return an array of cluster assignments corresponding to the most recent set of instances clustered.
:return: the cluster assignments
:rtype: ndarray
"""
array = javabridge.call(self.jobject, "getClusterAssignments", "()[D")
if array is None:
return None
else:
return javabridge.get_env().get_double_array_elements(array)
@property
def num_clusters(self):
"""
Returns the number of clusters.
:return: the number of clusters
:rtype: int
"""
return javabridge.call(self.jobject, "getNumClusters", "()I")
@property
def log_likelihood(self):
"""
Returns the log likelihood.
:return: the log likelihood
:rtype: float
"""
return javabridge.call(self.jobject, "getLogLikelihood", "()D")
@property
def classes_to_clusters(self):
"""
Return the array (ordered by cluster number) of minimum error class to cluster mappings.
:return: the mappings
:rtype: ndarray
"""
array = javabridge.call(self.jobject, "getClassesToClusters", "()[I")
if array is None:
return None
else:
return javabridge.get_env().get_int_array_elements(array)
@classmethod
def evaluate_clusterer(cls, clusterer, args):
"""
Evaluates the clusterer with the given options.
:param clusterer: the clusterer instance to evaluate
:type clusterer: Clusterer
:param args: the command-line arguments
:type args: list
:return: the evaluation result
:rtype: str
"""
return javabridge.static_call(
"Lweka/clusterers/ClusterEvaluation;", "evaluateClusterer",
"(Lweka/clusterers/Clusterer;[Ljava/lang/String;)Ljava/lang/String;",
clusterer.jobject, args)
@classmethod
def crossvalidate_model(cls, clusterer, data, num_folds, rnd):
"""
Cross-validates the clusterer and returns the loglikelihood.
:param clusterer: the clusterer instance to evaluate
:type clusterer: Clusterer
:param data: the data to evaluate on
:type data: Instances
:param num_folds: the number of folds
:type num_folds: int
:param rnd: the random number generator to use
:type rnd: Random
:return: the cross-validated loglikelihood
:rtype: float
"""
return javabridge.static_call(
"Lweka/clusterers/ClusterEvaluation;", "crossValidateModel",
"(Lweka/clusterers/DensityBasedClusterer;Lweka/core/Instances;ILjava/util/Random;)D",
clusterer.jobject, data.jobject, num_folds, rnd.jobject)
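# Illustrative sketch only, not part of the original module: evaluates a
# density-based clusterer on the data it was built from. It assumes a running
# JVM; EM is used here purely as an example algorithm.
def _example_cluster_evaluation(data):
    clusterer = Clusterer(classname="weka.clusterers.EM")
    clusterer.build_clusterer(data)
    evaluation = ClusterEvaluation()
    evaluation.set_model(clusterer)
    evaluation.test_model(data)
    return evaluation.cluster_results, evaluation.num_clusters, evaluation.log_likelihood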
def avg_silhouette_coefficient(clusterer, dist_func, data):
"""
Computes the average silhouette coefficient for a clusterer.
Based on Eibe Frank's Groovy code:
https://weka.8497.n7.nabble.com/Silhouette-Measures-and-Dunn-Index-DI-in-Weka-td44072.html
:param clusterer: the trained clusterer model to evaluate
:type clusterer: Clusterer
:param dist_func: the distance function to use; if Euclidean, make sure that normalization is turned off
:type dist_func: DistanceFunction
    :param data: the standardized data
    :type data: Instances
    :return: the average silhouette coefficient
    :rtype: float
"""
# ensure that distance function is initialized with data
dist_func.instances = data
cluster_index_of_inst = []
for i in range(data.num_instances):
cluster_index_of_inst.append(int(clusterer.cluster_instance(data.get_instance(i))))
sum_silhouette_coefficients = 0.0
for i in range(data.num_instances):
# Compute average distance of current instance to each cluster, including its own cluster
average_distance_per_cluster = [0 for x in range(clusterer.number_of_clusters)]
num_instances_per_cluster = [0 for x in range(clusterer.number_of_clusters)]
for j in range(data.num_instances):
average_distance_per_cluster[cluster_index_of_inst[j]] += dist_func.distance(data.get_instance(i), data.get_instance(j))
num_instances_per_cluster[cluster_index_of_inst[j]] += 1 # Should the current instance be skipped though?
for k in range(len(average_distance_per_cluster)):
average_distance_per_cluster[k] /= num_instances_per_cluster[k]
# Average distance to instance's own cluster
a = average_distance_per_cluster[cluster_index_of_inst[i]]
# Find the distance of the "closest" other cluster
average_distance_per_cluster[cluster_index_of_inst[i]] = sys.float_info.max
b = min(average_distance_per_cluster)
# Compute silhouette coefficient for current instance
sum_silhouette_coefficients += ((b - a) / max(a, b)) if (clusterer.number_of_clusters > 1) else 0
return sum_silhouette_coefficients / data.num_instances
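# Illustrative sketch only, not part of the original module: computes the
# average silhouette coefficient for a SimpleKMeans model. It assumes a
# running JVM and standardized data without a class attribute; the "-D" flag
# on EuclideanDistance is assumed to disable attribute normalization, as the
# docstring above requires.
def _example_avg_silhouette(data):
    clusterer = Clusterer(classname="weka.clusterers.SimpleKMeans", options=["-N", "3"])
    clusterer.build_clusterer(data)
    dist = DistanceFunction(classname="weka.core.EuclideanDistance", options=["-D"])
    return avg_silhouette_coefficient(clusterer, dist, data)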
def main(args=None):
"""
Runs a clusterer from the command-line. Calls JVM start/stop automatically.
Use -h to see all options.
:param args: the command-line arguments to use, uses sys.argv if None
:type args: list
"""
parser = argparse.ArgumentParser(
description='Performs clustering from the command-line. Calls JVM start/stop automatically.')
parser.add_argument("-j", metavar="classpath", dest="classpath", help="additional classpath, jars/directories")
parser.add_argument("-X", metavar="heap", dest="heap", help="max heap size for jvm, e.g., 512m")
parser.add_argument("-t", metavar="train", dest="train", required=True, help="training set file")
parser.add_argument("-T", metavar="test", dest="test", help="test set file")
parser.add_argument("-d", metavar="outmodel", dest="outmodel", help="model output file name")
parser.add_argument("-l", metavar="inmodel", dest="inmodel", help="model input file name")
parser.add_argument("-p", metavar="attributes", dest="attributes", help="attribute range")
parser.add_argument("-x", metavar="num folds", dest="numfolds", help="number of folds")
parser.add_argument("-s", metavar="seed", dest="seed", help="seed value for randomization")
parser.add_argument("-c", metavar="class index", dest="classindex", help="1-based class attribute index")
parser.add_argument("-g", metavar="graph", dest="graph", help="graph output file (if supported)")
parser.add_argument("clusterer", help="clusterer classname, e.g., weka.clusterers.SimpleKMeans")
parser.add_argument("option", nargs=argparse.REMAINDER, help="additional clusterer options")
parsed = parser.parse_args(args=args)
jars = []
if parsed.classpath is not None:
jars = parsed.classpath.split(os.pathsep)
params = []
if parsed.train is not None:
params.extend(["-t", parsed.train])
if parsed.test is not None:
params.extend(["-T", parsed.test])
if parsed.outmodel is not None:
params.extend(["-d", parsed.outmodel])
if parsed.inmodel is not None:
params.extend(["-l", parsed.inmodel])
if parsed.attributes is not None:
params.extend(["-p", parsed.attributes])
if parsed.numfolds is not None:
params.extend(["-x", parsed.numfolds])
if parsed.seed is not None:
params.extend(["-s", parsed.seed])
if parsed.classindex is not None:
params.extend(["-c", parsed.classindex])
if parsed.graph is not None:
params.extend(["-g", parsed.graph])
jvm.start(jars, max_heap_size=parsed.heap, packages=True)
logger.debug("Commandline: " + join_options(sys.argv[1:]))
try:
clusterer = Clusterer(classname=parsed.clusterer)
if len(parsed.option) > 0:
clusterer.options = parsed.option
print(ClusterEvaluation.evaluate_clusterer(clusterer, params))
except Exception:
print(traceback.format_exc())
finally:
jvm.stop()
def sys_main():
"""
Runs the main function using the system cli arguments, and
returns a system error code.
:return: 0 for success, 1 for failure.
:rtype: int
"""
try:
main()
return 0
except Exception:
print(traceback.format_exc())
return 1
if __name__ == "__main__":
try:
main()
except Exception:
print(traceback.format_exc())
| gpl-3.0 | 7,037,974,789,696,343,000 | 34.184669 | 132 | 0.640969 | false |
johnwlockwood/stream_tap | stream_tap/__init__.py | 1 | 1389 | from . import _meta
from collections import deque
__version__ = _meta.version
__version_info__ = _meta.version_info
class Bucket(object):
"""
Encloses a function that produces results from
an item of an iterator, accumulating any results
in a deque.
"""
def __init__(self, func):
self.func = func
self._contents = deque()
def __call__(self, *args, **kwargs):
result = self.func(*args, **kwargs)
if result is not None:
self._contents.append(result)
def contents(self):
"""
:returns: contents
"""
return self._contents
def drain_contents(self):
"""
Starts a new collection to accumulate future contents
        and returns all of the existing contents.
"""
existing_contents = self._contents
self._contents = deque()
return existing_contents
def stream_tap(callables, stream):
"""
Calls each callable with each item in the stream.
Use with Buckets. Make a Bucket with a callable
and then pass a tuple of those buckets
in as the callables. After iterating over
    this generator, get contents from each Bucket.
:param callables: collection of callable.
    :param stream: Iterator of values.
"""
for item in stream:
for caller in callables:
caller(item)
yield item
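# Illustrative usage sketch only (hypothetical example, not part of the public
# API): two Buckets tap a stream of integers while the stream itself passes
# through unchanged.
def _example_usage():
    evens = Bucket(lambda x: x if x % 2 == 0 else None)
    odds = Bucket(lambda x: x if x % 2 == 1 else None)
    passed_through = list(stream_tap((evens, odds), range(10)))
    return passed_through, list(evens.contents()), list(odds.drain_contents())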
| apache-2.0 | -7,339,902,638,887,691,000 | 24.722222 | 61 | 0.614831 | false |
dilynfullerton/tr-A_dependence_plots | unused/xl.py | 1 | 1861 | """Put data into an excel workbook. Currently unused.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from openpyxl import load_workbook, Workbook
from ImsrgDataMap import ImsrgDataMapInt
from Exp import ExpInt
def print_single_particle_energy_data_to_excel(e, hw, datadir, savepath,
startrow=2):
all_data_map = ImsrgDataMapInt(parent_directory=datadir)
data_maps = all_data_map.map[ExpInt(e, hw)]
index_orbital_map = data_maps.index_orbital_map
ime_map = data_maps.index_mass_energy_map()
try:
wb = load_workbook(savepath)
except IOError:
wb = Workbook()
ws = wb.active
ws.title = 'e={e} hw={hw}'.format(e=e, hw=hw)
row = startrow
col = 1
ws.cell(row=row, column=col).value = 'KEY'
row += 1
for i, s in zip(range(5), ['Index', 'n', 'l', 'j', 'tz']):
ws.cell(row=row, column=col + i).value = s
row += 1
for oindex in sorted(index_orbital_map.keys()):
ws.cell(row=row, column=col).value = int(oindex)
qnums = index_orbital_map[oindex]
for i, qn in zip(range(1, 5), qnums):
ws.cell(row=row, column=col + i).value = qn
row += 1
row += 1
ws.cell(row=row, column=col).value = 'DATA'
row += 1
ws.cell(row=row, column=col).value = 'Index'
ws.cell(row=row, column=col + 1).value = 'A'
ws.cell(row=row, column=col + 2).value = 'energy (MeV)'
row += 1
for oindex in sorted(ime_map.keys()):
me_map = ime_map[oindex]
for m in me_map.keys():
ws.cell(row=row, column=col).value = int(oindex)
ws.cell(row=row, column=col + 1).value = int(m)
ws.cell(row=row, column=col + 2).value = me_map[m]
row += 1
wb.save(savepath)
| cc0-1.0 | -8,680,902,879,900,898,000 | 28.078125 | 72 | 0.584095 | false |
srznew/heat | doc/source/ext/resources.py | 1 | 15178 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
import itertools
from docutils import core
from docutils import nodes
import pydoc
import six
from sphinx.util import compat
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import plugin_manager
from heat.engine import properties
from heat.engine import support
_CODE_NAMES = {'2013.1': 'Grizzly',
'2013.2': 'Havana',
'2014.1': 'Icehouse',
'2014.2': 'Juno',
'2015.1': 'Kilo',
'5.0.0': 'Liberty'}
all_resources = {}
class integratedrespages(nodes.General, nodes.Element):
pass
class unsupportedrespages(nodes.General, nodes.Element):
pass
class contribresourcepages(nodes.General, nodes.Element):
pass
class ResourcePages(compat.Directive):
has_content = False
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = False
option_spec = {}
def path(self):
return None
def statuses(self):
return support.SUPPORT_STATUSES
def run(self):
prefix = self.arguments and self.arguments.pop() or None
content = []
for resource_type, resource_classes in _filter_resources(
prefix, self.path(), self.statuses()):
for resource_class in resource_classes:
self.resource_type = resource_type
self.resource_class = resource_class
section = self._section(content, resource_type, '%s')
self.props_schemata = properties.schemata(
self.resource_class.properties_schema)
self.attrs_schemata = attributes.schemata(
self.resource_class.attributes_schema)
# NOTE(prazumovsky): Adding base_attributes_schema dict to
# Resource class should means adding new attributes from this
# dict to documentation of each resource, else there is no
# chance to learn about base attributes.
self.attrs_schemata.update(
self.resource_class.base_attributes_schema)
self.update_policy_schemata = properties.schemata(
self.resource_class.update_policy_schema)
self._status_str(resource_class.support_status, section)
cls_doc = pydoc.getdoc(resource_class)
if cls_doc:
# allow for rst in the class comments
cls_nodes = core.publish_doctree(cls_doc).children
section.extend(cls_nodes)
self.contribute_properties(section)
self.contribute_attributes(section)
self.contribute_update_policy(section)
self.contribute_hot_syntax(section)
return content
def _version_str(self, version):
if version in _CODE_NAMES:
return _("%(version)s (%(code)s)") % {'version': version,
'code': _CODE_NAMES[version]}
else:
return version
def _status_str(self, support_status, section):
while support_status is not None:
sstatus = support_status.to_dict()
if sstatus['status'] is support.SUPPORTED:
msg = _('Available')
else:
msg = sstatus['status']
if sstatus['version'] is not None:
msg = _('%s since %s') % (msg,
self._version_str(
sstatus['version']))
if sstatus['message'] is not None:
msg = _('%s - %s') % (msg, sstatus['message'])
if not (sstatus['status'] == support.SUPPORTED and
sstatus['version'] is None):
para = nodes.paragraph(_(''), msg)
note = nodes.note(_(''), para)
section.append(note)
support_status = support_status.previous_status
def _section(self, parent, title, id_pattern):
id = id_pattern % self.resource_type
section = nodes.section(ids=[id])
parent.append(section)
title = nodes.title('', title)
section.append(title)
return section
def _prop_syntax_example(self, prop):
if not prop:
return 'Value'
if prop.type == properties.Schema.LIST:
schema = lambda i: prop.schema[i] if prop.schema else None
sub_type = [self._prop_syntax_example(schema(i))
for i in range(2)]
return '[%s, %s, ...]' % tuple(sub_type)
elif prop.type == properties.Schema.MAP:
def sub_props():
for sub_key, sub_value in prop.schema.items():
if sub_value.implemented:
yield '"%s": %s' % (
sub_key, self._prop_syntax_example(sub_value))
return '{%s}' % (', '.join(sub_props()) if prop.schema else '...')
else:
return prop.type
def contribute_hot_syntax(self, parent):
section = self._section(parent, _('HOT Syntax'), '%s-hot')
props = []
for prop_key in sorted(six.iterkeys(self.props_schemata)):
prop = self.props_schemata[prop_key]
if (prop.implemented
and prop.support_status.status == support.SUPPORTED):
props.append('%s: %s' % (prop_key,
self._prop_syntax_example(prop)))
props_str = ''
if props:
props_str = '''\n properties:
%s''' % ('\n '.join(props))
template = '''heat_template_version: 2013-05-23
...
resources:
...
the_resource:
type: %s%s''' % (self.resource_type, props_str)
block = nodes.literal_block('', template, language="hot")
section.append(block)
@staticmethod
def cmp_prop(x, y):
x_key, x_prop = x
y_key, y_prop = y
if x_prop.support_status.status == y_prop.support_status.status:
return cmp(x_key, y_key)
if x_prop.support_status.status == support.SUPPORTED:
return -1
if x_prop.support_status.status == support.DEPRECATED:
return 1
return cmp(x_prop.support_status.status,
y_prop.support_status.status)
def contribute_property(self, prop_list, prop_key, prop, upd_para=None):
prop_item = nodes.definition_list_item(
'', nodes.term('', prop_key))
prop_list.append(prop_item)
prop_item.append(nodes.classifier('', prop.type))
definition = nodes.definition()
prop_item.append(definition)
self._status_str(prop.support_status, definition)
if not prop.implemented:
para = nodes.paragraph('', _('Not implemented.'))
note = nodes.note('', para)
definition.append(note)
return
if prop.description:
para = nodes.paragraph('', prop.description)
definition.append(para)
if upd_para is not None:
definition.append(upd_para)
else:
if prop.update_allowed:
upd_para = nodes.paragraph(
'', _('Can be updated without replacement.'))
definition.append(upd_para)
elif prop.immutable:
upd_para = nodes.paragraph('', _('Updates are not supported. '
'Resource update will fail on'
' any attempt to update this '
'property.'))
definition.append(upd_para)
else:
upd_para = nodes.paragraph('', _('Updates cause replacement.'))
definition.append(upd_para)
if prop.default is not None:
para = nodes.paragraph('', _('Defaults to "%s".') % prop.default)
definition.append(para)
for constraint in prop.constraints:
para = nodes.paragraph('', str(constraint))
definition.append(para)
sub_schema = None
if prop.schema and prop.type == properties.Schema.MAP:
para = nodes.paragraph()
emph = nodes.emphasis('', _('Map properties:'))
para.append(emph)
definition.append(para)
sub_schema = prop.schema
elif prop.schema and prop.type == properties.Schema.LIST:
para = nodes.paragraph()
emph = nodes.emphasis('', _('List contents:'))
para.append(emph)
definition.append(para)
sub_schema = prop.schema
if sub_schema:
sub_prop_list = nodes.definition_list()
definition.append(sub_prop_list)
for sub_prop_key, sub_prop in sorted(sub_schema.items(),
self.cmp_prop):
if sub_prop.support_status.status != support.HIDDEN:
self.contribute_property(
sub_prop_list, sub_prop_key, sub_prop, upd_para)
def contribute_properties(self, parent):
if not self.props_schemata:
return
section = self._section(parent, _('Properties'), '%s-props')
prop_list_required = nodes.definition_list()
subsection_required = self._section(section, _('required'),
'%s-props-req')
subsection_required.append(prop_list_required)
prop_list_optional = nodes.definition_list()
subsection_optional = self._section(section, _('optional'),
'%s-props-opt')
subsection_optional.append(prop_list_optional)
for prop_key, prop in sorted(self.props_schemata.items(),
self.cmp_prop):
if prop.support_status.status != support.HIDDEN:
if prop.required:
prop_list = prop_list_required
else:
prop_list = prop_list_optional
self.contribute_property(prop_list, prop_key, prop)
def contribute_attributes(self, parent):
if not self.attrs_schemata:
return
section = self._section(parent, _('Attributes'), '%s-attrs')
prop_list = nodes.definition_list()
section.append(prop_list)
for prop_key, prop in sorted(self.attrs_schemata.items()):
if prop.support_status.status != support.HIDDEN:
description = prop.description
prop_item = nodes.definition_list_item(
'', nodes.term('', prop_key))
prop_list.append(prop_item)
definition = nodes.definition()
prop_item.append(definition)
self._status_str(prop.support_status, definition)
if description:
def_para = nodes.paragraph('', description)
definition.append(def_para)
def contribute_update_policy(self, parent):
if not self.update_policy_schemata:
return
section = self._section(parent, _('UpdatePolicy'), '%s-updpolicy')
prop_list = nodes.definition_list()
section.append(prop_list)
for prop_key, prop in sorted(self.update_policy_schemata.items(),
self.cmp_prop):
self.contribute_property(prop_list, prop_key, prop)
class IntegrateResourcePages(ResourcePages):
def path(self):
return 'heat.engine.resources'
def statuses(self):
return [support.SUPPORTED]
class UnsupportedResourcePages(ResourcePages):
def path(self):
return 'heat.engine.resources'
def statuses(self):
return [s for s in support.SUPPORT_STATUSES if s != support.SUPPORTED]
class ContribResourcePages(ResourcePages):
def path(self):
return 'heat.engine.plugins'
def _filter_resources(prefix=None, path=None, statuses=[]):
def not_hidden_match(cls):
return cls.support_status.status != support.HIDDEN
def prefix_match(name):
return prefix is None or name.startswith(prefix)
def path_match(cls):
return path is None or cls.__module__.startswith(path)
def status_match(cls):
return cls.support_status.status in statuses
filtered_resources = {}
for name in sorted(six.iterkeys(all_resources)):
if prefix_match(name):
for cls in all_resources.get(name):
if (path_match(cls) and status_match(cls) and
not_hidden_match(cls)):
if filtered_resources.get(name) is not None:
filtered_resources[name].append(cls)
else:
filtered_resources[name] = [cls]
return sorted(six.iteritems(filtered_resources))
def _load_all_resources():
manager = plugin_manager.PluginManager('heat.engine.resources')
resource_mapping = plugin_manager.PluginMapping('resource')
res_plugin_mappings = resource_mapping.load_all(manager)
for mapping in res_plugin_mappings:
name, cls = mapping
if all_resources.get(name) is not None:
all_resources[name].append(cls)
else:
all_resources[name] = [cls]
def link_resource(app, env, node, contnode):
reftarget = node.attributes['reftarget']
for resource_name in all_resources:
if resource_name.lower() == reftarget.lower():
resource = all_resources[resource_name]
refnode = nodes.reference('', '', internal=True)
refnode['reftitle'] = resource_name
if resource_name.startswith('AWS'):
source = 'template_guide/cfn'
else:
source = 'template_guide/openstack'
uri = app.builder.get_relative_uri(
node.attributes['refdoc'], source)
refnode['refuri'] = '%s#%s' % (uri, resource_name)
refnode.append(contnode)
return refnode
def setup(app):
_load_all_resources()
app.add_node(integratedrespages)
app.add_directive('integratedrespages', IntegrateResourcePages)
app.add_node(unsupportedrespages)
app.add_directive('unsupportedrespages', UnsupportedResourcePages)
app.add_node(contribresourcepages)
app.add_directive('contribrespages', ContribResourcePages)
app.connect('missing-reference', link_resource)
| apache-2.0 | 401,516,824,030,433,600 | 35.224344 | 79 | 0.564238 | false |
ngtrhieu/outline_alignment | autumn_utils/feature_mappings.py | 1 | 1570 | import cv
import cv2
import numpy as np
import math
def get_features (cnt, approx = 5):
return cv2.approxPolyDP (cnt, approx, False)
def simplify_feature (feature):
simple = []
prev = None
for v in feature:
dist = 5000
if prev is not None:
dist = np.linalg.norm (v - prev)
if dist > 2:
simple.append (v)
prev = v
return simple
def map_feature (feature1, feature2):
f1 = []
f2 = []
for u in feature1:
min = 20
m = None
index = None
for i, v in enumerate (feature2):
dist = np.linalg.norm (u - v)
if dist < min:
min = dist
m = v
index = i
if m is not None:
f1.append (u)
f2.append (m)
feature2.pop (index)
else:
f1.append (u)
f2.append (u)
f1 = np.array (f1).squeeze ()
f2 = np.array (f2).squeeze ()
return f1, f2
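# --- Illustrative usage (added example; not part of the original module) ---
# The synthetic square contour below is an assumption for demonstration:
# get_features() wraps cv2.approxPolyDP, which expects an OpenCV-style
# (N, 1, 2) int32 contour.
def _example_feature_pipeline ():
	square = np.array ([[[0, 0]], [[100, 0]], [[100, 100]], [[0, 100]]], dtype=np.int32)
	feature = get_features (square)
	simple = simplify_feature (feature)
	f1, f2 = map_feature (list (simple), list (simple))
	return f1, f2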
def segmentFeatures (fineFeatures, courseFeatures):
controlPoints = []
for u in courseFeatures:
ux, uy = u[0]
min_dst = 10000
m = None
for v in fineFeatures:
vx, vy = v[0]
dst = math.pow (ux - vx, 2) + math.pow (uy - vy, 2)
if dst < min_dst:
min_dst = dst
m = v
if m is not None:
controlPoints.append (m)
i = 0
currentSegment = []
allSegments = []
for u in fineFeatures:
if np.array_equal (u, controlPoints[i + 1]):
currentSegment.append (u)
allSegments.append (currentSegment)
currentSegment = [u]
i += 1
			if i + 1 >= len (controlPoints):  # avoid indexing past the last control point
break
else:
currentSegment.append (u)
if len (currentSegment) > 0:
currentSegment.append (fineFeatures[0])
allSegments.append (currentSegment)
return allSegments, controlPoints | mit | 3,110,566,440,364,656,000 | 19.402597 | 54 | 0.63121 | false |
nddsg/SimpleDBMS | simple_dbms/create_statement.py | 1 | 2985 | from sql_statement import SQLStatement
from catalog import Catalog
from operation_status import OperationStatus
import simple_dbms
try:
from bsddb import db
except ImportError:
from bsddb3 import db
class CreateStatement(SQLStatement, object):
def __init__(self, table, column_def_list):
"""
Constructs a CreateStatement object that will create the specified
table with the specified column information.
:param table:
:param column_def_list:
"""
super(CreateStatement, self).__init__(tables=[table], columns=column_def_list)
def execute(self):
table = self.get_table(0)
try:
# Add the column information to the Table object, and
# make sure there is only one primary-key column and no
# duplicate column names.
has_pk = False
for i in range(0, self.num_columns()):
col = self.get_column(i)
table.add_column(col)
if col.is_primary_key():
if has_pk:
                        raise AttributeError(table.get_name() + ": may not have more than one primary key column")
has_pk = True
for j in range(i + 1, self.num_columns()):
if col.get_name() == self.get_column(j).get_name():
                        raise AttributeError(table.get_name() + ": may not have two columns named " + col.get_name())
# Add the metadata for the table to the catalog, printing
# an error message if the table name is already in use.
if Catalog.put_metadata(table) == OperationStatus.KEYEXIST:
raise AttributeError(table.name + ": a table with this name already exists")
# create the BDB database for the table
d = db.DB(simple_dbms.SimpleDBMS.get_env())
txn = simple_dbms.SimpleDBMS.get_env().txn_begin()
simple_dbms.SimpleDBMS.push_txn(txn)
d.set_flags(0)
if has_pk:
d.open(filename=table.name, txn=txn, dbtype=db.DB_BTREE,
flags=db.DB_CREATE | db.DB_THREAD, mode=0666)
else:
d.open(filename=table.name, txn=txn, dbtype=db.DB_RECNO,
flags=db.DB_CREATE | db.DB_THREAD, mode=0666)
txn.commit()
simple_dbms.SimpleDBMS.pop_txn()
table.set_db(d)
print "Created table " + table.name + "."
except Exception as e:
print e
print "Could not create table " + table.name + "."
# Because we could not use Table.open() above, the table is not
# in the table cache. Thus, we need to close its DB handle here,
# or else it will never get closed!
#
# In the other commands, we will use Table.open(), so this
# step will not be necessary.
if table.get_db() is not None:
table.get_db().close()
| gpl-3.0 | -5,155,393,903,485,593,000 | 39.337838 | 101 | 0.558124 | false |
pytlakp/intranetref | src/intranet3/models/times.py | 1 | 1043 | import datetime
from sqlalchemy import Column, ForeignKey
from sqlalchemy.types import DateTime, Date, String, Integer, Float, Boolean
from intranet3.models import Base
class TimeEntry(Base):
__tablename__ = 'time_entry'
id = Column(Integer, primary_key=True, index=True)
user_id = Column(Integer, ForeignKey('user.id'), nullable=False, index=True)
date = Column(Date, nullable=False, index=True)
time = Column(Float, nullable=False)
description = Column(String, nullable=False)
added_ts = Column(DateTime, nullable=False, default=datetime.datetime.now)
modified_ts = Column(DateTime, nullable=False, default=datetime.datetime.now)
timer_ts = Column(DateTime)
ticket_id = Column(Integer, nullable=True, index=True)
project_id = Column(Integer, ForeignKey('project.id'), nullable=False, index=True)
# TODO: task
deleted = Column(Boolean, nullable=False, default=False, index=True)
frozen = Column(Boolean, nullable=False, default=False, index=True)
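# --- Illustrative usage (added example; not part of the original model) ---
# A minimal sketch of recording 1.5 hours of work; the SQLAlchemy session
# and the user/project ids are assumptions for demonstration only.
def _example_add_entry(session):
    entry = TimeEntry(user_id=1, project_id=2, date=datetime.date(2013, 1, 7),
                      time=1.5, description=u'Code review')
    session.add(entry)  # the caller is expected to commit()
    return entry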
| mit | 8,433,913,652,926,332,000 | 33.766667 | 86 | 0.705657 | false |
frew/simpleproto | scons-local-1.1.0/SCons/Tool/swig.py | 1 | 4609 | """SCons.Tool.swig
Tool-specific initialization for swig.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/swig.py 3603 2008/10/10 05:46:45 scons"
import os.path
import re
import SCons.Action
import SCons.Defaults
import SCons.Scanner
import SCons.Tool
import SCons.Util
SwigAction = SCons.Action.Action('$SWIGCOM', '$SWIGCOMSTR')
def swigSuffixEmitter(env, source):
if '-c++' in SCons.Util.CLVar(env.subst("$SWIGFLAGS", source=source)):
return '$SWIGCXXFILESUFFIX'
else:
return '$SWIGCFILESUFFIX'
# Match '%module test', as well as '%module(directors="1") test'
_reModule = re.compile(r'%module(?:\s*\(.*\))?\s+(.+)')
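# --- Illustrative note (added example; not part of the original tool) ---
# The regex pulls the module name out of a SWIG interface file, e.g.:
#   _reModule.findall('%module test')                 -> ['test']
#   _reModule.findall('%module(directors="1") test')  -> ['test']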
def _swigEmitter(target, source, env):
swigflags = env.subst("$SWIGFLAGS", target=target, source=source)
flags = SCons.Util.CLVar(swigflags)
for src in source:
src = str(src.rfile())
mnames = None
if "-python" in flags and "-noproxy" not in flags:
if mnames is None:
mnames = _reModule.findall(open(src).read())
target.extend(map(lambda m, d=target[0].dir:
d.File(m + ".py"), mnames))
if "-java" in flags:
if mnames is None:
mnames = _reModule.findall(open(src).read())
java_files = map(lambda m: [m + ".java", m + "JNI.java"], mnames)
java_files = SCons.Util.flatten(java_files)
outdir = env.subst('$SWIGOUTDIR', target=target, source=source)
if outdir:
java_files = map(lambda j, o=outdir: os.path.join(o, j), java_files)
java_files = map(env.fs.File, java_files)
for jf in java_files:
t_from_s = lambda t, p, s, x: t.dir
SCons.Util.AddMethod(jf, t_from_s, 'target_from_source')
target.extend(java_files)
return (target, source)
def generate(env):
"""Add Builders and construction variables for swig to an Environment."""
c_file, cxx_file = SCons.Tool.createCFileBuilders(env)
c_file.suffix['.i'] = swigSuffixEmitter
cxx_file.suffix['.i'] = swigSuffixEmitter
c_file.add_action('.i', SwigAction)
c_file.add_emitter('.i', _swigEmitter)
cxx_file.add_action('.i', SwigAction)
cxx_file.add_emitter('.i', _swigEmitter)
java_file = SCons.Tool.CreateJavaFileBuilder(env)
java_file.suffix['.i'] = swigSuffixEmitter
java_file.add_action('.i', SwigAction)
java_file.add_emitter('.i', _swigEmitter)
env['SWIG'] = 'swig'
env['SWIGFLAGS'] = SCons.Util.CLVar('')
env['SWIGCFILESUFFIX'] = '_wrap$CFILESUFFIX'
env['SWIGCXXFILESUFFIX'] = '_wrap$CXXFILESUFFIX'
env['_SWIGOUTDIR'] = '${"-outdir " + str(SWIGOUTDIR)}'
env['SWIGPATH'] = []
env['SWIGINCPREFIX'] = '-I'
env['SWIGINCSUFFIX'] = ''
env['_SWIGINCFLAGS'] = '$( ${_concat(SWIGINCPREFIX, SWIGPATH, SWIGINCSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)'
env['SWIGCOM'] = '$SWIG -o $TARGET ${_SWIGOUTDIR} ${_SWIGINCFLAGS} $SWIGFLAGS $SOURCES'
expr = '^[ \t]*%[ \t]*(?:include|import|extern)[ \t]*(<|"?)([^>\s"]+)(?:>|"?)'
scanner = SCons.Scanner.ClassicCPP("SWIGScan", ".i", "SWIGPATH", expr)
env.Append(SCANNERS = scanner)
def exists(env):
return env.Detect(['swig'])
| bsd-2-clause | -4,627,851,629,485,158,000 | 38.059322 | 121 | 0.644174 | false |
eirmag/weboob | contrib/downloadboob/downloadboob.py | 1 | 7890 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright(C) 2012 Alexandre Flament
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import subprocess
import os
import re
import ConfigParser
from weboob.core import Weboob
from weboob.capabilities.video import ICapVideo
# hack to workaround bash redirection and encoding problem
import sys, codecs, locale
if sys.stdout.encoding is None:
(lang, enc) = locale.getdefaultlocale()
if enc is not None:
(e, d, sr, sw) = codecs.lookup(enc)
# sw will encode Unicode data to the locale-specific character set.
sys.stdout = sw(sys.stdout)
# end of hack
def removeNonAscii(s): return "".join(i for i in s if ord(i)<128)
rx = re.compile(u'[ \\/\\?\\:\\>\\<\\!\\\\\\*]+', re.UNICODE)
def removeSpecial(s):
return rx.sub(u' ', u'%s' % s)
DOWNLOAD_DIRECTORY=".files"
class Downloadboob:
def __init__(self, backend_name, download_directory, links_directory):
self.download_directory = download_directory
self.links_directory = links_directory
self.backend_name = backend_name
self.backend = None
self.weboob = Weboob()
self.weboob.load_backends(modules=[self.backend_name])
self.backend=self.weboob.get_backend(self.backend_name)
def purge(self):
if not os.path.isdir(self.links_directory):
return
dirList=os.listdir(self.links_directory)
for local_link_name in dirList:
link_name = self.links_directory + "/" + local_link_name
if not self.check_link(link_name):
print u"Remove %s" % link_name
os.remove(link_name)
else:
print u"Keep %s" % link_name
def check_link(self, link_name):
if os.path.islink(link_name):
file_name = os.readlink(link_name)
absolute_file_name = os.path.join(self.links_directory, file_name)
if os.path.isfile(absolute_file_name):
return True
return False
else:
return True
def download(self, pattern=None, sortby=ICapVideo.SEARCH_RELEVANCE, nsfw=False, max_results=None, title_exclude=[]):
print "For backend %s, search for '%s'" % (backend_name, pattern)
# create directory for links
print " create link to %s" % self.links_directory
if not os.path.isdir(self.links_directory):
os.makedirs(self.links_directory)
# search for videos
count = 0
videos = []
l = list(self.backend.search_videos(pattern, sortby, nsfw, max_results))
for video in l:
if not self.is_downloaded(video):
self.backend.fill_video(video, ('url','title', 'url', 'duration'))
if not(self.is_excluded(video.title, title_exclude)):
print " %s\n Id:%s\n Duration:%s" % (video.title, video.id, video.duration)
videos.append(video)
else:
print "Already downloaded, check %s" % video.id
self.backend.fill_video(video, ('url','title', 'url', 'duration'))
self.set_linkname(video)
count=count+1
if count == max_results:
break
# download videos
print "Downloading..."
for video in videos:
self.do_download(video)
def is_excluded(self, title, title_exclude):
for exclude in title_exclude:
if title.find(exclude) > -1:
return True
return False
def get_filename(self, video, relative=False):
if relative:
directory = os.path.join("..", DOWNLOAD_DIRECTORY, self.backend_name)
else:
directory = os.path.join(self.download_directory, self.backend_name)
if not os.path.exists(directory):
os.makedirs(directory)
ext = video.ext
if not ext:
ext = 'avi'
return u"%s/%s.%s" % (directory, removeNonAscii(video.id), ext)
def get_linkname(self, video):
if not os.path.exists(self.links_directory):
os.makedirs(self.links_directory)
ext = video.ext
if not ext:
ext = 'avi'
misc = video.date
if not misc:
misc = video.id
return u"%s/%s (%s).%s" % (self.links_directory, removeSpecial(video.title), removeSpecial(misc), ext)
def is_downloaded(self, video):
# check if the file is 0 byte
return os.path.isfile(self.get_filename(video))
def set_linkname(self, video):
linkname = self.get_linkname(video)
idname = self.get_filename(video, relative=True)
absolute_idname = self.get_filename(video, relative=False)
if not os.path.islink(linkname) and os.path.isfile(absolute_idname):
print "%s -> %s" % (linkname, idname)
os.symlink(idname, linkname)
def do_download(self, video):
if not video:
print >>sys.stderr, 'Video not found: %s' % video
return 3
if not video.url:
print >>sys.stderr, 'Error: the direct URL is not available.'
return 4
def check_exec(executable):
with open('/dev/null', 'w') as devnull:
process = subprocess.Popen(['which', executable], stdout=devnull)
if process.wait() != 0:
print >>sys.stderr, 'Please install "%s"' % executable
return False
return True
dest = self.get_filename(video)
if video.url.startswith('rtmp'):
if not check_exec('rtmpdump'):
return 1
args = ('rtmpdump', '-e', '-r', video.url, '-o', dest)
elif video.url.startswith('mms'):
if not check_exec('mimms'):
return 1
args = ('mimms', video.url, dest)
else:
if not check_exec('wget'):
return 1
args = ('wget', video.url, '-O', dest)
os.spawnlp(os.P_WAIT, args[0], *args)
config = ConfigParser.ConfigParser()
config.read(['/etc/downloadboob.conf', os.path.expanduser('~/downloadboob.conf'), 'downloadboob.conf'])
links_directory=os.path.expanduser(config.get('main','directory', '.'))
links_directory=links_directory.decode('utf-8')
download_directory=os.path.join(links_directory, DOWNLOAD_DIRECTORY)
print "Downloading to %s" % (links_directory)
for section in config.sections():
if section != "main":
backend_name=config.get(section, "backend")
pattern=config.get(section, "pattern")
if config.has_option(section, "title_exclude"):
title_exclude=config.get(section, "title_exclude").split('|')
else:
title_exclude=[]
max_result=config.getint(section, "max_results")
section_sublinks_directory=config.get(section,"directory")
section_links_directory=os.path.join(links_directory, section_sublinks_directory)
downloadboob = Downloadboob(backend_name, download_directory, section_links_directory)
downloadboob.purge()
# FIXME sortBy, title.match
downloadboob.download(pattern, ICapVideo.SEARCH_DATE, False, max_result, title_exclude)
| agpl-3.0 | -1,746,346,105,877,346,800 | 33.757709 | 120 | 0.604689 | false |
jakemalley/training-log | traininglog/admin/views.py | 1 | 4565 | # admin/views.py
# Jake Malley
# 19/02/15
"""
Define the routes for the admin blueprint.
"""
# Imports
from flask import redirect, render_template, \
request, url_for, Blueprint, abort, flash
from flask.ext.login import fresh_login_required, current_user
from traininglog import db
from traininglog.models import Member, Exercise
from forms import AdminEditDetailsForm
from functools import wraps
from datetime import datetime
# Setup the admin blueprint.
admin_blueprint = Blueprint(
'admin',__name__,
template_folder='templates'
)
# Admin Required - Only allows members with is_admin = 1 to access these views.
# Allows me to use the decorator @admin_required on different routes.
def admin_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
# If they are not an admin take them home.
if not bool(current_user.is_admin):
return redirect(url_for('home.index'))
return f(*args, **kwargs)
return decorated_function
# Define the routes
@admin_blueprint.route('/')
@fresh_login_required
@admin_required
def dashboard():
"""
The dashboard for the admin blueprint.
"""
# Get a list of all the members.
members = Member.query.all()
# Get a list of all the active members. (Members who have logged in today.)
active_members = Member.query.filter(Member.last_login_date>datetime.utcnow().date()).all()
# Create a dictionary of the stats.
stats = {
"total_members":len(members), # Total number of members.
"active_members":len(active_members) # Total number of active members.
}
# Render the admin index page passing in the members and stats.
return render_template('admin_index.html', members=members,stats=stats)
@admin_blueprint.route('/view/<member_id>', methods=['POST','GET'])
@fresh_login_required
@admin_required
def view(member_id):
"""
Method for viewing individual users.
"""
# Create the form to edit the members data.
admin_edit_form = AdminEditDetailsForm()
# If the method was post and the form was valid.
if request.method == 'POST' and admin_edit_form.validate_on_submit():
# Change the data.
# Get the member with that ID.
member = Member.query.filter_by(id=admin_edit_form.member_id.data).first()
        # See if the account was marked to be deleted, as then we don't need to update the details as well.
if bool(admin_edit_form.delete_user.data) == True:
# Delete the user.
# Get their exercise data.
exercise_data = Exercise.query.filter_by(member=member).all()
# For each piece of data.
for data in exercise_data:
# Delete the data.
db.session.delete(data)
# Finally delete the user.
db.session.delete(member)
# And commit the changes
db.session.commit()
# Flash a message.
            flash('Account has been deleted!')
            # Redirect to the admin dashboard since that user doesn't exist anymore.
return redirect(url_for('admin.dashboard'))
else:
# User was not marked as deleted,
# update their details with the details from the form.
member.firstname = admin_edit_form.firstname.data
member.surname = admin_edit_form.surname.data
member.email = admin_edit_form.email.data
member.set_active_status(int(admin_edit_form.set_active.data))
member.is_admin = int(admin_edit_form.set_admin.data)
# If the password was changed.
if admin_edit_form.password.data:
# Update the password.
member.update_password(admin_edit_form.password.data)
# Flash a success message.
flash("Details have been updated. Please inform the member of the changes.")
# Commit the changes.
db.session.commit()
# Refresh the page
return render_template('admin_view.html', member=member, admin_edit_form=admin_edit_form)
else:
# Get the member with that ID.
member = Member.query.filter_by(id=member_id).first()
# If that member exists.
if member is not None:
# Render the template passing in the member and form.
return render_template('admin_view.html', member=member, admin_edit_form=admin_edit_form)
else:
# Raise a HTTP 404 (Page not found) error.
abort(404) | mit | -4,846,714,409,154,546,000 | 33.854962 | 105 | 0.635487 | false |
jiansenzheng/oanda_trading | oanda_trading/forex_trading_general_171005.py | 1 | 27162 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 06 20:00:30 2016
@author: Jiansen
"""
import requests
import threading
import copy
import logging
import os
#import urllib3
import json
from scipy import stats
#from decimal import Decimal, getcontext, ROUND_HALF_DOWN
#from event00 import TickEvent,TickEvent2
#import time
import oandapy
import httplib
import pandas as pd
import math
import numpy as np
import pywt
import time
from settings import STREAM_DOMAIN, API_DOMAIN, ACCESS_TOKEN, ACCOUNT_ID
from trading_events import Event,TickEvent2,LiqEvent,OrderEvent,CloseEvent
from trading_global_functions import *
from trading_log import log_dict
import Queue
#for writing data
import datetime
from bson.objectid import ObjectId
import pymongo as pm
from pymongo import MongoClient
import statsmodels.tsa.stattools as ts
#requests.adapters.DEFAULT_RETRIES = 5
from warningOps import warning
from seriesADF import getADF
corpid= ''
secret=''
warn = warning(corpid,secret)
#------the only line we need to change is about the instruments----#
pairs = "EUR_USD"
#-----------------------------------------------------------------------#
client = MongoClient('localhost',27017)
db = client.test_database
#---------------Initialize the parameters and database connections-------#
if pairs == "EUR_USD":
try:
from param_EUR_USD import MA_dict, threshold_dict,sltp_dict
except ImportError:
raise ValueError("cannot find parameters for {0}!".format(pairs))
collection = db.tick_test
index_collect = db.index_EUR_USD
elif pairs == "USD_CNH":
try:
from param_USD_CNH import MA_dict, threshold_dict,sltp_dict
except ImportError:
raise ValueError("cannot find parameters for {0}!".format(pairs))
collection = db.tick_USD_CNH
index_collect = db.index_USD_CNH
elif pairs == "AUD_USD":
try:
from param_AUD_USD import MA_dict, threshold_dict,sltp_dict
except ImportError:
raise ValueError("cannot find parameters for {0}!".format(pairs))
collection = db.tick_AUD_USD
index_collect = db.index_AUD_USD
else:
raise ValueError('Invalid <pairs>, CANNOT FIND THE INSTRUMENTS!')
#-----------------------------------------------------------------------#
#--------------------------Liquidity Index------------------------------#
#-----------------------------------------------------------------------#
class LiqForex(object):
def __init__(
self, domain, access_token,
account_id, instruments,ct, gran, dd, events_queue
):
self.domain = domain
self.access_token = access_token
self.account_id = account_id
self.instruments = instruments
self.ct = ct
self.gran=gran
self.dd= dd
self.events_queue = events_queue
def getLiq(self):
try:
requests.packages.urllib3.disable_warnings()
s = requests.Session()
#s.keep_alive = False
url = "https://" + self.domain + "/v1/candles"
headers = {'Authorization' : 'Bearer ' + self.access_token}
params = {'instrument':self.instruments, 'accountId' : self.account_id,
'count':self.ct,'candleFormat':'midpoint','granularity':self.gran}
req = requests.Request('GET', url, headers=headers, params=params)
pre = req.prepare()
logging.info( pre)
resp = s.send(pre, stream=False, verify=False)
try:
msg=json.loads(resp.text)
except Exception as e:
logging.warning( "Caught exception when converting message into json\n" + str(e))
return
if msg.has_key("candles"):
time0=msg.get("candles")[-1]["time"]
lis = ohlcv_lis(msg.get("candles"))
liqS = pd.Series()
for i in range(0, len(lis)- (self.dd+1) ,1):
s2 = liq15min(lis[i:i+self.dd])
liqS = np.append(liqS,s2)
liq=liqS[-1]
logging.info( "liq=".format(liq))
tev = LiqEvent(self.instruments,time0,liq)
self.events_queue.put(tev,False)
post_metric = get_indicator(self.instruments,None,None,self.gran,liq,None,None)
index_collect.insert_one(post_metric)
except Exception as e:
s.close()
content0 = "Caught exception when connecting to history\n" + str(e)
logging.warning(content0)
#warn.tradingWarning(content0)
def activeLiq(self,period):
while True:
self.getLiq()
time.sleep(period)
#--------------------------------------------------------------------#
class StreamingForexPrices(object):
def __init__(
self, domain, access_token,
account_id, instruments,ct, gran, dd, events_queue
):
self.domain = domain
self.access_token = access_token
self.account_id = account_id
self.instruments = instruments
self.ct = ct
self.gran=gran
self.dd= dd
self.events_queue = events_queue
def connect_to_stream(self):
try:
requests.packages.urllib3.disable_warnings()
s = requests.Session() # socket
url = "https://" + self.domain + "/v1/prices"
headers = {'Authorization' : 'Bearer ' + self.access_token}
params = {'instruments' : self.instruments, 'accountId' : self.account_id}
time.sleep(0.8) # sleep some seconds
req = requests.Request('GET', url, headers=headers, params=params)
pre = req.prepare()
resp = s.send(pre, stream=True, verify=False)
return resp
except Exception as e:
#global s
s.close()
content0 = "Caught exception when connecting to stream\n" + str(e)
logging.warning(content0)
#warn.tradingWarning(content0)
def stream_to_queue_old(self,collection):
response = self.connect_to_stream()
if response.status_code != 200:
return
try:
for line in response.iter_lines(1):
if line:
try:
msg = json.loads(line)
except Exception as e:
content0 = "Caught exception when converting message into json\n" + str(e)
logging.warning(content0)
return
if msg.has_key("instrument") or msg.has_key("tick"):
logging.info(msg)
instrument = msg["tick"]["instrument"]
time0 = msg["tick"]["time"]
bid = msg["tick"]["bid"]
ask = msg["tick"]["ask"]
tev = TickEvent2(instrument, time0, bid, ask)
self.events_queue.put(tev,False)
post= getDoc(msg)
collection.insert_one(post)
except Exception as e:
logging.warning('Caught ChunkedEncodingError in stream_to_queue_old()!'+str(time.ctime()))
return
#--------------
#------
# new strategy
class LiqMAStrategy(object):
"""
"""
def __init__(
self, access_token, account_id, pairs, units, events, stopLoss1, takeProfit1,stopLoss2, takeProfit2,
short_window1, long_window1,short_window2, long_window2, idxU, lam, thres1, thres2,thres3, thres4, adf_thres
):
self.access_token = access_token
self.account_id = account_id
self.pairs = pairs
self.units = units
self.stopLoss1 = stopLoss1
self.takeProfit1 = takeProfit1
self.stopLoss2 = stopLoss2
self.takeProfit2 = takeProfit2
self.pairs_dict = self.create_pairs_dict()
self.events = events
self.short_window1 = short_window1
self.long_window1 = long_window1
self.short_window2 = short_window2
self.long_window2 = long_window2
self.idxU = idxU
self.lam = lam
self.priceLis1 = pd.Series() #for trends
self.priceLis2 = pd.Series() #for reversion
self.thres1 = thres1
self.thres2 = thres2
self.thres3 = thres3
self.thres4 = thres4
self.adf_thres = adf_thres
#---intermediates---#
self.SL_TP = {"trends":[self.stopLoss1,self.takeProfit1],
"reversion":[self.stopLoss2,self.takeProfit2]}
self.s_l_window = {"trends":[self.short_window1,self.long_window1],
"reversion":[self.short_window2,self.long_window2]}
self.thres_tre_rev = {"trends":[self.thres1, self.thres2],
"reversion":[self.thres3,self.thres4]}
def create_pairs_dict(self):
attr_dict = {
"ticks": 0,
"tick0": 0,
"priceLS":0.0,
"invested": False,
"short_sma": None,
"long_sma": None,
"longShort": None,
"short_slope":None,
"long_slope":None, # False denotes sell, while True denotes buy
"check": False,
"orlis":[0,0,0,0],
"stra": 0,
"fixed": False
}
#pairs_dict = {}
pairs_dict = copy.deepcopy(attr_dict)
return pairs_dict
def check_order(self,check):
if check== True:
oanda0 = oandapy.API(environment="practice", access_token=self.access_token)
try:
responseTrades = oanda0.get_trades(self.account_id,instrument=self.pairs)
except Exception as e:
logging.warning('Caught exception in get_trades() of check_order()!\n'+str(time.ctime()))
return
if responseTrades.get("trades")==[]:
pd = self.pairs_dict
pd["orlis"].pop(0)
logging.info(" orlis: "+str(pd["orlis"]))
pd["orlis"].append(0)
logging.info(" orlis: "+str(pd["orlis"]))
if pd["orlis"][0:4]==[1,1,0,0]:
logging.warning( "Stop Loss Order Executed!")
#warn.tradingWarning(" Stop Loss Order Executed!")
pd["invested"]= False
pd["fixed"] = False #position closed, the stra type is free
pd["check"] = False
else:
pass
else:
pd = self.pairs_dict
#pd["orlis"][0] = copy.copy(pd["orlis"][1])
pd["orlis"].pop(0)
pd["orlis"].append(1)
logging.info("not empty- orlis: "+str(pd["orlis"]))
pd["invested"]= True
pd["fixed"] = True #position closed, the stra type is free
pd["check"] = True
else:
pass
def compute_slope(self,price_lis,window_length,k):
'''[summary]
compute the slope ratio for a short time series
Arguments:
price_lis {np.ndarray} -- the filtered time series to compute the slope ratio
for both SMA and LMA
default: newPriceLis
window_length {[type]} -- a parameter for the SMA
k: an parameter for performing average, default->0.5
default: self.short_window2
Returns:
[float] -- [the slope ratio]
'''
amp = lambda lis: (lis-lis[0])*10000.0
pShort = amp(price_lis[-window_length:])
pLong = amp(price_lis)
#compute the slope ratio
aveSlope = k*getSlope(pShort)+ (1-k)*getSlope(pLong)
return aveSlope
def set_invested_check_fixed(self,pair_dict,invested_bool,check_bool,fixed_bool):
pair_dict["invested"] = invested_bool
pair_dict["check"] = check_bool
pair_dict["fixed"] = fixed_bool
time.sleep(0.0)
def get_sl_tp(self,TreRev):
return self.SL_TP[TreRev]
def insert_metric(self,collection,pair_dict):
'''
default collection: index_USD_CNH
'''
short_window,long_window = self.s_l_window[pair_dict["stra"]]
post_metric = get_indicator(self.pairs,short_window,long_window,
None,None,pair_dict["short_slope"],pair_dict["long_slope"])
collection.insert_one(post_metric)
#----------------#
def buy_send_order(self,pd,side,price0,price1,TreRev):
logging.info("price02={0}".format(price0))
self.set_invested_check_fixed(pd,True,True,True)
fixSL, fixeTP = self.get_sl_tp(TreRev)
sl_b, tp_b= round(price0 - fixSL,5),round(price1 + fixeTP,5)
order = OrderEvent(self.pairs, self.units, "market", side, sl_b, tp_b,"Trends")
self.events.put(order)
pd["longShort"] = True
pd["tick0"]= pd["ticks"]
pd["priceLS"]= price0
def sell_send_order(self,pd,side,price0,price1,TreRev):
logging.info("price01={0}".format(price1))
self.set_invested_check_fixed(pd,True,True,True)
fixSL, fixeTP = self.get_sl_tp(TreRev)
sl_s,tp_s = round(price1 + fixSL,5),round(price0 - fixeTP,5)
order = OrderEvent(self.pairs, self.units, "market", side, sl_s, tp_s,"Trends")
self.events.put(order)
pd["longShort"] = False
pd["tick0"]= pd["ticks"]
pd["priceLS"]= price1
def logging_invested(self,priceLis,pd,sign):
TreRev = pd["stra"]
logging.info(TreRev+" position!")
#??? TODO 23:38 Oct 5, 2017
short_window = self.s_l_window[TreRev][0]
newPriceLis = get_new_price_lis(priceLis, pd, short_window)
basePrice=pd["priceLS"]+sign*self.lam*np.std(priceLis)*np.sqrt(pd["ticks"]-pd["tick0"])
logging.info( "basePrice="+str(basePrice))
logging.info( "short_sma"+str(pd["short_sma"]))
logging.info( "long_sma"+str(pd["long_sma"]))
aveSlope = self.compute_slope(newPriceLis,short_window, 0.5)
logging.info( "aveSlope="+str(aveSlope))
return aveSlope
def put_close_order(self,pairs,num):
'''
pairs,num = self.pairs,0
'''
order_closed = CloseEvent(pairs,num)
self.events.put(order_closed)
#--------------------------------------#
def open_trends_buy(self,pd,aveSlope):
thres = self.thres_tre_rev[pd["stra"]][0]
return (pd["short_sma"] > pd["long_sma"] and aveSlope > thres)
def open_trends_sell(self,pd,aveSlope):
thres = self.thres_tre_rev[pd["stra"]][0]
return (pd["short_sma"] < pd["long_sma"] and aveSlope < -thres)
def open_reversion_buy(self,pd,aveSlope):
thres = self.thres_tre_rev[pd["stra"]][0]
return (pd["short_sma"] < pd["long_sma"] and aveSlope< -thres)
def open_reversion_sell(self,pd,aveSlope):
thres = self.thres_tre_rev[pd["stra"]][0]
return (pd["short_sma"] > pd["long_sma"] and aveSlope> thres)
#-----------------------------------------------#
def close_trends_buy(self,pd,aveSlope):
thres = self.thres_tre_rev[pd["stra"]][1]
return (pd["longShort"] and aveSlope < thres)
def close_trends_sell(self,pd,aveSlope):
thres = self.thres_tre_rev[pd["stra"]][1]
return (not pd["longShort"] and aveSlope > -thres)
def close_reversion_buy(self,pd,aveSlope):
thres = self.thres_tre_rev[pd["stra"]][1]
return (pd["short_sma"] > pd["long_sma"]*(1+thres/100.0) and pd["longShort"])
def close_reversion_sell(self,pd,aveSlope):
thres = self.thres_tre_rev[pd["stra"]][1]
return (pd["short_sma"] < pd["long_sma"]*(1-thres/100.0) and not pd["longShort"])
#--------------------------------------#
def calculate_signals(self, event):
#if True:
global liqIndex
global newPriceLis
if event.type == 'TICK':
price = (event.bid+event.ask)/2.000
self.priceLis1 = np.append(self.priceLis1,price)
self.priceLis2 = np.append(self.priceLis2,price)
if len(self.priceLis1)>max([self.long_window1,self.long_window2]):
self.priceLis1=self.priceLis1[-self.long_window1:]
self.priceLis2=self.priceLis2[-self.long_window2:]
else:
pass
#liqIndex= event.liq
logging.info("liqIndex= "+str(liqIndex)+"\n")
logging.info("price= "+str(price))
pd = self.pairs_dict
logging.info("check"+str(pd["check"]))
self.check_order(pd["check"]) #check whether the SLTP order is triggered..
# Only start the strategy when we have created an accurate short window
logging.info("INVESTED= "+str(pd["invested"]))
if not pd["invested"]:
#global price0
if pd["ticks"]>max([self.long_window1, self.long_window2])+1 and liqIndex > self.idxU:
if not pd["fixed"]:
critAdf = getADF(collection).priceADF(200,1)
if critAdf > self.adf_thres:
pd["stra"] = "reversion"
newPriceLis = get_new_price_lis(self.priceLis2, pd, self.short_window2)
aveSlope = self.compute_slope(newPriceLis,self.short_window2, 0.5)
logging.info( "REVERSION+aveSlope="+str(aveSlope))
self.insert_metric(index_collect,pd)
else:
pd["stra"] = "trends"
newPriceLis = get_new_price_lis(self.priceLis1, pd, self.short_window1)
aveSlope = self.compute_slope(newPriceLis,self.short_window1, 0.5)
logging.info("TRENDS+aveSlope="+str(aveSlope))
self.insert_metric(index_collect,pd)
else:
raise ValueError("pd[fixed] should be False!")
price0, price1 = event.bid, event.ask
if pd["stra"] =="trends":
if self.open_trends_buy(pd,aveSlope):
side = "buy"
self.buy_send_order(pd,side,price0,price1,pd["stra"])
elif self.open_trends_sell(pd,aveSlope):
side = "sell"
self.sell_send_order(pd,side,price0,price1,pd["stra"])
else:
pd["fixed"] = False
elif pd["stra"] =="reversion":
if self.open_reversion_sell(pd,aveSlope):
side = "sell"
self.sell_send_order(pd,side,price0,price1,pd["stra"])
elif self.open_reversion_buy(pd,aveSlope):
side = "buy"
self.buy_send_order(pd,side,price0,price1,pd["stra"])
else:
pd["fixed"] = False
else:
pass
else:
pass
elif pd["invested"]:
sign= 1 if pd["longShort"] == True else -1
if pd["stra"] =="trends":
aveSlope = self.logging_invested(self.priceLis1,pd,sign)
self.insert_metric(index_collect,pd)
if self.close_trends_sell(pd,aveSlope):
#side = "sell"
self.set_invested_check_fixed(pd,False,False,False)
#warn.tradingWarning(" check->False Executed!")
self.put_close_order(self.pairs,0)
elif self.close_trends_buy(pd,aveSlope):
#side = "buy"
self.set_invested_check_fixed(pd,False,False,False)
#warn.tradingWarning(" check->False Executed!")
self.put_close_order(self.pairs,0)
else: #not closing positions, just keep the pd["fixed"] as True.
pd["fixed"] = True #should we add pd["invested"]
elif pd["stra"] =="reversion":
aveSlope=self.logging_invested(self.priceLis2,pd,sign)
self.insert_metric(index_collect,pd)
if self.close_reversion_sell(pd,aveSlope):
#side = "sell"
self.set_invested_check_fixed(pd,False,False,False)
#warn.tradingWarning(" check->False Executed!")
self.put_close_order(self.pairs,0)
elif self.close_reversion_buy(pd,aveSlope):
#side = "buy"
self.set_invested_check_fixed(pd,False,False,False)
#warn.tradingWarning(" check->False Executed!")
self.put_close_order(self.pairs,0)
else:
pd["fixed"] = True #should we add pd["invested"]
else:
pass
pd["ticks"] += 1
logging.info("current Tick "+str(pd["ticks"])+"\n"+str(time.ctime()))
#--------------------------------------------------------------------#
class Execution(object):
def __init__(self, domain, access_token, account_id):
self.domain = domain
self.access_token = access_token
self.account_id = account_id
self.conn = self.obtain_connection()
def obtain_connection(self):
return httplib.HTTPSConnection(self.domain)
def execute_order(self, event):
oanda0 = oandapy.API(environment="practice", access_token=self.access_token)
try:
responseX = oanda0.create_order(self.account_id,
instrument=event.instrument,
units= event.units,
side= event.side,
type= event.order_type,
stopLoss = event.stopLoss,
takeProfit = event.takeProfit
)
except Exception as e:
content0 = "Caught OnadaError when sending the orders\n" + str(e)
logging.warning(content0)
return
logging.info( "Execute Order ! \n {0}".format(responseX))
content0 = str(event.stra)+"Execute Order ! "+" "+str(event.side)+" "+ str(event.units)+" units of "+str(event.instrument)
#warn.tradingWarning(content0)
logging.info(content0)
def close_order(self, event):
oanda0 = oandapy.API(environment="practice", access_token=self.access_token)
response1= oanda0.get_trades(self.account_id,instrument=event.instrument)
order_lis= response1["trades"]
if order_lis !=[]:
for order in order_lis: #close all trades
responseX = oanda0.close_trade(self.account_id,trade_id= order['id'])
logging.info( "Close Order ! \n {0}".format(responseX))
content0 = "Close Order !" + "profit: "+str(responseX['profit'])+" CLOSE "+str(responseX['instrument'])
content0 = content0 + " "+str(responseX['side'])+" at "+ str(responseX['price'])
#warn.tradingWarning(content0)
else:
logging.warning("No trade to be closed! :{0}".format(time.ctime()))
#--------------------------------------------------------------------#
def trade(events, strategy,execution,heartbeat):
"""
"""
global liqIndex
while True:
try:
event = events.get(False)
except Queue.Empty:
pass
else:
if event is not None:
if event.type =='LIQ':
liqIndex= event.liq
#print "current index ="+str(liqIndex)
elif event.type == 'TICK':
strategy.calculate_signals(event)
logging.info( "Tick!")
elif event.type == 'ORDER':
logging.info( "Executing order!")
execution.execute_order(event)
elif event.type == "CLOSE":
logging.info( "Close trading!")
execution.close_order(event)
time.sleep(heartbeat)
#--------------------------------------------------------------------#
if __name__ == "__main__":
logPath,logName = log_dict[pairs]["path"],log_dict[pairs]["name"]
logging.basicConfig(filename= os.path.join(logPath,logName),
format='%(levelname)s:%(message)s',level=logging.DEBUG)
global liqIndex
liqIndex=0
ct = 20
gran ='M15'
time_dict = {
"S5": 5,
"S10": 10,
"S15": 15,
"S30": 30,
"M1": 60,
"M2": 120 }
dd = 11
lam= 0.1 #0.5 basePrice tuning
units = 100 #100
#----------Parameters----------------
short_window1= MA_dict['short_window1']
long_window1 = MA_dict['long_window1']
short_window2= MA_dict['short_window2']
long_window2 = MA_dict['long_window2']
idxu = threshold_dict['idxu']
thres1= threshold_dict['thres1']
thres2= threshold_dict['thres2']
thres3 = threshold_dict['thres3']
thres4= threshold_dict['thres4']
adf_thres = threshold_dict['adf_thres']
sl1 = sltp_dict['sl1'] #10
tp1 = sltp_dict['tp1'] #10
sl2 = sltp_dict['sl2'] #10
tp2 = sltp_dict['tp2'] #10
#--------------------------------------
heartbeat= 0.2
period= 600
print 'initial'
print('MA:\n sw1 {0} lw1 {1} sw2 {2} lw2 {3}'.format(short_window1, long_window1, short_window2, long_window2))
print('parameters:\n thres1 {0} thres2 {1} thres3 {2} thres4 {3}'.format(thres1,thres2,thres3,thres4))
print('sltp_parameters:\n {0} {1} {2} {3}'.format(sl1,tp1,sl2,tp2))
events = Queue.Queue()
# initial the threads
prices = StreamingForexPrices(STREAM_DOMAIN, ACCESS_TOKEN, ACCOUNT_ID, pairs, ct, gran, dd, events)
liquidity = LiqForex(API_DOMAIN, ACCESS_TOKEN, ACCOUNT_ID, pairs, ct, gran, dd, events)
execution = Execution(API_DOMAIN, ACCESS_TOKEN, ACCOUNT_ID)
#strategy = MovingAverageCrossStrategy(pairs, units, events, sl, tp, short_window,long_window)
strategy = LiqMAStrategy(ACCESS_TOKEN, ACCOUNT_ID, pairs, units, events, sl1, tp1, sl2, tp2, short_window1,long_window1,
short_window2,long_window2,idxu,lam,thres1,thres2,thres3,thres4,adf_thres)
# construct the thread
price_thread = threading.Thread(target=prices.stream_to_queue_old, args=[collection])
liq_thread = threading.Thread(target= liquidity.activeLiq, args=[period])
trade_thread = threading.Thread(target=trade, args=(events, strategy,execution,heartbeat))
print "Full?:",events.full()
trade_thread.start()
price_thread.start()
liq_thread.start()
| gpl-3.0 | -6,703,210,643,960,976,000 | 40.723502 | 130 | 0.533024 | false |
catapult-project/catapult | dashboard/dashboard/pinpoint/models/cas.py | 3 | 2757 | # Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Model for storing information to look up CAS from RBE.
A CASReference instance contains metadata that allows us to use RBE-CAS
digests when starting Swarming tasks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
from google.appengine.ext import ndb
CAS_EXPIRY_DURATION = datetime.timedelta(days=88)
def Get(builder_name, change, target):
"""Retrieve an cas reference from the Datastore.
Args:
builder_name: The name of the builder that produced the cas reference.
change: The Change the cas reference was built at.
target: The compile target the cas reference is for.
Returns:
A tuple containing the cas_instance and cas_digest as strings.
"""
entity = ndb.Key(CASReference, _Key(builder_name, change, target)).get()
if not entity:
raise KeyError('No cas reference with builder %s, '
'change %s, and target %s.' %
(builder_name, change, target))
if entity.created + CAS_EXPIRY_DURATION < datetime.datetime.utcnow():
raise KeyError('Cas reference with builder %s, '
'change %s, and target %s was '
'found, but is expired.' % (builder_name, change, target))
return entity.cas_instance, entity.cas_digest
def Put(cas_references):
"""Add CASReference to the Datastore.
  This function takes multiple entries to do a batched Datastore put.
Args:
cas_references: An iterable of tuples. Each tuple is of the form
(builder_name, change, target, cas_instance, cas_digest).
"""
ndb.put_multi(
CASReference(
cas_instance=cas_instance,
cas_digest=cas_digest,
id=_Key(builder_name, change, target),
) for builder_name, change, target, cas_instance, cas_digest
in cas_references)
class CASReference(ndb.Model):
cas_instance = ndb.StringProperty(indexed=False, required=True)
cas_digest = ndb.StringProperty(indexed=False, required=True)
created = ndb.DateTimeProperty(auto_now_add=True)
# We can afford to look directly in Datastore here since we don't expect to
# make multiple calls to this at a high rate to benefit from being in
# memcache. This lets us clear out the cache in Datastore and not have to
# clear out memcache as well.
_use_memcache = False
_use_datastore = True
_use_cache = False
def _Key(builder_name, change, target):
# The key must be stable across machines, platforms,
# Python versions, and Python invocations.
return '\n'.join((builder_name, change.id_string, target))
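# --- Illustrative usage (added example; not part of the original module) ---
# A minimal sketch of a Put()/Get() round trip inside a Datastore (ndb)
# context.  The builder/target names and digest value are made up, and
# `change` only needs an id_string attribute (see _Key above).
def _example_round_trip(change):
  Put([('linux-builder-perf', change, 'performance_test_suite',
        'projects/chromium-swarm/instances/default_instance',
        'deadbeef123/456')])
  return Get('linux-builder-perf', change, 'performance_test_suite')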
| bsd-3-clause | 5,540,963,464,776,257,000 | 33.037037 | 77 | 0.700762 | false |
googleinterns/server-side-identity | tests/crypt/test__python_rsa.py | 1 | 7625 | # Copyright 2016 Google LLC
# Modifications: Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import unittest.mock as mock
from pyasn1_modules import pem
import pytest
import rsa
import six
from gsi.verification import _helpers
from gsi.verification.crypt import _python_rsa
from gsi.verification.crypt import base
DATA_DIR = os.path.join(os.path.dirname(__file__), "..", "data")
# To generate privatekey.pem, privatekey.pub, and public_cert.pem:
# $ openssl req -new -newkey rsa:1024 -x509 -nodes -out public_cert.pem \
# > -keyout privatekey.pem
# $ openssl rsa -in privatekey.pem -pubout -out privatekey.pub
with open(os.path.join(DATA_DIR, "privatekey.pem"), "rb") as fh:
PRIVATE_KEY_BYTES = fh.read()
PKCS1_KEY_BYTES = PRIVATE_KEY_BYTES
with open(os.path.join(DATA_DIR, "privatekey.pub"), "rb") as fh:
PUBLIC_KEY_BYTES = fh.read()
with open(os.path.join(DATA_DIR, "public_cert.pem"), "rb") as fh:
PUBLIC_CERT_BYTES = fh.read()
# To generate pem_from_pkcs12.pem and privatekey.p12:
# $ openssl pkcs12 -export -out privatekey.p12 -inkey privatekey.pem \
# > -in public_cert.pem
# $ openssl pkcs12 -in privatekey.p12 -nocerts -nodes \
# > -out pem_from_pkcs12.pem
with open(os.path.join(DATA_DIR, "pem_from_pkcs12.pem"), "rb") as fh:
PKCS8_KEY_BYTES = fh.read()
with open(os.path.join(DATA_DIR, "privatekey.p12"), "rb") as fh:
PKCS12_KEY_BYTES = fh.read()
# The service account JSON file can be generated from the Google Cloud Console.
SERVICE_ACCOUNT_JSON_FILE = os.path.join(DATA_DIR, "service_account.json")
with open(SERVICE_ACCOUNT_JSON_FILE, "r") as fh:
SERVICE_ACCOUNT_INFO = json.load(fh)
class TestRSAVerifier(object):
def test_verify_success(self):
to_sign = b"foo"
signer = _python_rsa.RSASigner.from_string(PRIVATE_KEY_BYTES)
actual_signature = signer.sign(to_sign)
verifier = _python_rsa.RSAVerifier.from_string(PUBLIC_KEY_BYTES)
assert verifier.verify(to_sign, actual_signature)
def test_verify_unicode_success(self):
to_sign = u"foo"
signer = _python_rsa.RSASigner.from_string(PRIVATE_KEY_BYTES)
actual_signature = signer.sign(to_sign)
verifier = _python_rsa.RSAVerifier.from_string(PUBLIC_KEY_BYTES)
assert verifier.verify(to_sign, actual_signature)
def test_verify_failure(self):
verifier = _python_rsa.RSAVerifier.from_string(PUBLIC_KEY_BYTES)
bad_signature1 = b""
assert not verifier.verify(b"foo", bad_signature1)
bad_signature2 = b"a"
assert not verifier.verify(b"foo", bad_signature2)
def test_from_string_pub_key(self):
verifier = _python_rsa.RSAVerifier.from_string(PUBLIC_KEY_BYTES)
assert isinstance(verifier, _python_rsa.RSAVerifier)
assert isinstance(verifier._pubkey, rsa.key.PublicKey)
def test_from_string_pub_key_unicode(self):
public_key = _helpers.from_bytes(PUBLIC_KEY_BYTES)
verifier = _python_rsa.RSAVerifier.from_string(public_key)
assert isinstance(verifier, _python_rsa.RSAVerifier)
assert isinstance(verifier._pubkey, rsa.key.PublicKey)
def test_from_string_pub_cert(self):
verifier = _python_rsa.RSAVerifier.from_string(PUBLIC_CERT_BYTES)
assert isinstance(verifier, _python_rsa.RSAVerifier)
assert isinstance(verifier._pubkey, rsa.key.PublicKey)
def test_from_string_pub_cert_unicode(self):
public_cert = _helpers.from_bytes(PUBLIC_CERT_BYTES)
verifier = _python_rsa.RSAVerifier.from_string(public_cert)
assert isinstance(verifier, _python_rsa.RSAVerifier)
assert isinstance(verifier._pubkey, rsa.key.PublicKey)
def test_from_string_pub_cert_failure(self):
cert_bytes = PUBLIC_CERT_BYTES
true_der = rsa.pem.load_pem(cert_bytes, "CERTIFICATE")
load_pem_patch = mock.patch(
"rsa.pem.load_pem", return_value=true_der + b"extra", autospec=True
)
with load_pem_patch as load_pem:
with pytest.raises(ValueError):
_python_rsa.RSAVerifier.from_string(cert_bytes)
load_pem.assert_called_once_with(cert_bytes, "CERTIFICATE")
class TestRSASigner(object):
def test_from_string_pkcs1(self):
signer = _python_rsa.RSASigner.from_string(PKCS1_KEY_BYTES)
assert isinstance(signer, _python_rsa.RSASigner)
assert isinstance(signer._key, rsa.key.PrivateKey)
def test_from_string_pkcs1_unicode(self):
key_bytes = _helpers.from_bytes(PKCS1_KEY_BYTES)
signer = _python_rsa.RSASigner.from_string(key_bytes)
assert isinstance(signer, _python_rsa.RSASigner)
assert isinstance(signer._key, rsa.key.PrivateKey)
def test_from_string_pkcs8(self):
signer = _python_rsa.RSASigner.from_string(PKCS8_KEY_BYTES)
assert isinstance(signer, _python_rsa.RSASigner)
assert isinstance(signer._key, rsa.key.PrivateKey)
def test_from_string_pkcs8_extra_bytes(self):
key_bytes = PKCS8_KEY_BYTES
_, pem_bytes = pem.readPemBlocksFromFile(
six.StringIO(_helpers.from_bytes(key_bytes)), _python_rsa._PKCS8_MARKER
)
key_info, remaining = None, "extra"
decode_patch = mock.patch(
"pyasn1.codec.der.decoder.decode",
return_value=(key_info, remaining),
autospec=True,
)
with decode_patch as decode:
with pytest.raises(ValueError):
_python_rsa.RSASigner.from_string(key_bytes)
# Verify mock was called.
decode.assert_called_once_with(pem_bytes, asn1Spec=_python_rsa._PKCS8_SPEC)
def test_from_string_pkcs8_unicode(self):
key_bytes = _helpers.from_bytes(PKCS8_KEY_BYTES)
signer = _python_rsa.RSASigner.from_string(key_bytes)
assert isinstance(signer, _python_rsa.RSASigner)
assert isinstance(signer._key, rsa.key.PrivateKey)
def test_from_string_pkcs12(self):
with pytest.raises(ValueError):
_python_rsa.RSASigner.from_string(PKCS12_KEY_BYTES)
def test_from_string_bogus_key(self):
key_bytes = "bogus-key"
with pytest.raises(ValueError):
_python_rsa.RSASigner.from_string(key_bytes)
def test_from_service_account_info(self):
signer = _python_rsa.RSASigner.from_service_account_info(SERVICE_ACCOUNT_INFO)
assert signer.key_id == SERVICE_ACCOUNT_INFO[base._JSON_FILE_PRIVATE_KEY_ID]
assert isinstance(signer._key, rsa.key.PrivateKey)
def test_from_service_account_info_missing_key(self):
with pytest.raises(ValueError) as excinfo:
_python_rsa.RSASigner.from_service_account_info({})
assert excinfo.match(base._JSON_FILE_PRIVATE_KEY)
def test_from_service_account_file(self):
signer = _python_rsa.RSASigner.from_service_account_file(
SERVICE_ACCOUNT_JSON_FILE
)
assert signer.key_id == SERVICE_ACCOUNT_INFO[base._JSON_FILE_PRIVATE_KEY_ID]
assert isinstance(signer._key, rsa.key.PrivateKey)
| apache-2.0 | -869,925,340,974,221,600 | 38.102564 | 87 | 0.679475 | false |
Boris-Barboris/rsoi | lab03/local_library/app_local_library/models.py | 1 | 2941 | from django.db import models
import json
import logging
from .clients import *
log = logging.getLogger('app_logging')
# Create your models here.
class Book(models.Model):
isbn = models.CharField(max_length=20, blank=False)
BORROWED = 'brwed'
FREE = 'free'
state_choices = (
(BORROWED, 'borrowed'),
(FREE, 'free'),
)
state = models.CharField(max_length=20, choices=state_choices, default=FREE)
borrow_id = models.IntegerField(null=True, default=None)
def to_dict(self):
return {
'id': self.id,
'isbn': self.isbn,
'state': self.state,
'borrow_id': self.borrow_id,
}
def to_json(self):
return json.dumps(self.to_dict())
# exceptions
class BookAlreadyExists(Exception):
def __init__(self, id):
Exception.__init__(self, 'Book id={} already exists'.format(id))
self.id = id
class PrintDoesNotExist(Exception):
def __init__(self, isbn):
Exception.__init__(self, 'Print isbn={} does not exists'.format(isbn))
self.isbn = isbn
class AlreadyBorrowed(Exception):
def __init__(self, id):
Exception.__init__(self, 'Book id={} is already borrowed'.format(id))
self.id = id
class AlreadyFree(Exception):
def __init__(self, id):
Exception.__init__(self, 'Book id={} is already free'.format(id))
self.id = id
# model operations
def get_status(isbn):
free_books = Book.objects.filter(isbn=isbn).filter(state=Book.FREE)
log.debug('free_books = ' + str(free_books))
log.debug('free_books len = ' + str(len(free_books)))
if len(free_books) > 0:
return True
else:
return False
def create_book(id, isbn, me):
try:
book = Book.objects.get(id=id)
raise BookAlreadyExists(id)
except Book.DoesNotExist:
pass
# validate isbn
token = me['token']
br = book_registry_client(token)
p = br.list_prints(isbn=isbn)
if p['total'] == 0:
raise PrintDoesNotExist(isbn)
book = Book(id=id, isbn=isbn)
book.save()
return book
def delete_book(id):
book = Book.objects.get(id=id)
book.delete()
def get_book(id):
book = Book.objects.get(id=id)
return book
def get_books_isbn(isbn):
books = Book.objects.filter(isbn=isbn)
return books
def get_books():
return Book.objects.all()
def borrow_book(id, borrow_id):
book = Book.objects.get(id=id)
if book.state == Book.FREE:
book.borrow_id = borrow_id
book.state = Book.BORROWED
book.save()
else:
raise AlreadyBorrowed(id)
def return_book(id):
book = Book.objects.get(id=id)
if book.state == Book.BORROWED:
book.borrow_id = None
book.state = Book.FREE
book.save()
else:
raise AlreadyFree(id) | mit | 1,966,678,895,610,190,300 | 25.035398 | 80 | 0.584495 | false |
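# Example usage (added, not part of the original module): a minimal sketch of the
# borrow/return flow built from the model operations above; ids are placeholders.
#
#   book = get_book(1)
#   try:
#       borrow_book(book.id, borrow_id=42)
#   except AlreadyBorrowed:
#       pass  # another borrow already holds this copy
#   return_book(book.id)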
EderSantana/seya | seya/parzen.py | 1 | 2543 | """
This file was copied from pylearn2.distributions.parzen.py
Their license is the 3-clause BSD: https://github.com/lisa-lab/pylearn2/
"""
import numpy
import theano
T = theano.tensor
def log_mean_exp(a):
"""
    Numerically stable log-mean-exp: returns the logarithm of the mean of
    exponentials along axis 1, as needed for the Parzen window log-likelihood
"""
max_ = a.max(1)
return max_ + T.log(T.exp(a - max_.dimshuffle(0, 'x')).mean(1))
def make_lpdf(mu, sigma):
"""
    Makes a Theano function that allows the evaluation of a Parzen windows
estimator (aka kernel density estimator) where the Kernel is a normal
distribution with stddev sigma and with points at mu.
Parameters
-----------
mu : numpy matrix
Contains the data points over which this distribution is based.
sigma : scalar
The standard deviation of the normal distribution around each data \
point.
Returns
-------
lpdf : callable
Estimator of the log of the probability density under a point.
"""
x = T.matrix()
mu = theano.shared(mu)
a = (x.dimshuffle(0, 'x', 1) - mu.dimshuffle('x', 0, 1)) / sigma
E = log_mean_exp(-0.5*(a**2).sum(2))
Z = mu.shape[1] * T.log(sigma * numpy.sqrt(numpy.pi * 2))
return theano.function([x], E - Z)
class ParzenWindows(object):
"""
Parzen Window estimation and log-likelihood calculator.
This is usually used to test generative models as follows:
1 - Get 10k samples from the generative model
2 - Contruct a ParzenWindows object with the samples from 1
3 - Test the log-likelihood on the test set
Parameters
----------
samples : numpy matrix
See description for make_lpdf
sigma : scalar
See description for make_lpdf
"""
def __init__(self, samples, sigma):
# just keeping these for debugging/examination, not needed
self._samples = samples
self._sigma = sigma
self.lpdf = make_lpdf(samples, sigma)
def get_ll(self, x, batch_size=10):
"""
Evaluates the log likelihood of a set of datapoints with respect to the
probability distribution.
Parameters
----------
x : numpy matrix
The set of points for which you want to evaluate the log \
likelihood.
"""
inds = range(x.shape[0])
n_batches = int(numpy.ceil(float(len(inds)) / batch_size))
lls = []
for i in range(n_batches):
lls.extend(self.lpdf(x[inds[i::n_batches]]))
return numpy.array(lls).mean()
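# Example usage (added, not part of the original module): a minimal sketch of the
# three-step workflow described in the ParzenWindows docstring. The arrays below are
# placeholders for generated samples and a held-out test set; sigma would normally be
# tuned on a validation split.
if __name__ == "__main__":
    rng = numpy.random.RandomState(0)
    samples = rng.randn(100, 5).astype(theano.config.floatX)   # stand-in for model samples
    test_set = rng.randn(20, 5).astype(theano.config.floatX)   # stand-in for held-out data
    pw = ParzenWindows(samples, sigma=0.2)
    print(pw.get_ll(test_set))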
| bsd-3-clause | -7,313,181,990,569,429,000 | 27.573034 | 79 | 0.616201 | false |
cyphactor/lifecyclemanager | testenv/trac-0.10.4/trac/web/modpython_frontend.py | 1 | 2761 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2005 Edgewall Software
# Copyright (C) 2004-2005 Christopher Lenz <[email protected]>
# Copyright (C) 2005 Matthew Good <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <[email protected]>
# Matthew Good <[email protected]>
from mod_python import apache
from trac.web.main import dispatch_request
from trac.web.wsgi import WSGIGateway, _ErrorsWrapper
class InputWrapper(object):
def __init__(self, req):
self.req = req
def close(self):
pass
def read(self, size=-1):
return self.req.read(size)
def readline(self, size=-1):
return self.req.readline(size)
def readlines(self, hint=-1):
return self.req.readlines(hint)
class ModPythonGateway(WSGIGateway):
wsgi_multithread = apache.mpm_query(apache.AP_MPMQ_IS_THREADED) > 0
wsgi_multiprocess = apache.mpm_query(apache.AP_MPMQ_IS_FORKED) > 0
def __init__(self, req, options):
environ = {}
environ.update(apache.build_cgi_env(req))
environ['mod_python.options'] = options
environ['mod_python.subprocess_env'] = req.subprocess_env
WSGIGateway.__init__(self, environ, InputWrapper(req),
_ErrorsWrapper(lambda x: req.log_error(x)))
self.req = req
def _send_headers(self):
assert self.headers_set, 'Response not started'
if not self.headers_sent:
status, headers = self.headers_sent = self.headers_set
self.req.status = int(status[:3])
for name, value in headers:
if name.lower() == 'content-length':
self.req.set_content_length(int(value))
elif name.lower() == 'content-type':
self.req.content_type = value
else:
self.req.headers_out.add(name, value)
def _sendfile(self, fileobj):
self._send_headers()
self.req.sendfile(fileobj.name)
def _write(self, data):
self._send_headers()
try:
self.req.write(data)
except IOError, e:
if 'client closed connection' not in str(e):
raise
def handler(req):
options = req.get_options()
gateway = ModPythonGateway(req, options)
gateway.run(dispatch_request)
return apache.OK
| gpl-3.0 | -3,080,754,147,963,335,000 | 30.375 | 72 | 0.62912 | false |
mhbashari/machine-learning-snippets | Basic/01-linear_regression_tensorflow.py | 1 | 2015 | import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from numpy.core.multiarray import ndarray
__author__ = "mhbashari"
class LinearRegression:
def __init__(self, train_X: ndarray, train_Y: ndarray, learning_rate=0.001, training_epochs=100):
self.train_X = train_X
self.train_Y = train_Y
self.learning_rate = learning_rate
self.training_epochs = training_epochs
def fit(self):
x = tf.placeholder("float")
y = tf.placeholder("float")
a = tf.Variable(1.0, name="weight")
b = tf.Variable(1.0, name="bias")
pred = tf.multiply(x, a) + b
cost = tf.reduce_mean(tf.abs(pred - y))
optimizer = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(cost)
init = tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init)
for epoch in range(self.training_epochs):
for i, out in zip(self.train_X, self.train_Y):
sess.run(optimizer, feed_dict={x: i, y: out})
print("Epoch:", '%04d' % (epoch + 1), "cost=", "W=", sess.run(a), "b=", sess.run(b))
print("Optimization Finished!")
training_cost = sess.run(cost, feed_dict={x: self.train_X, y: self.train_Y})
print("Training cost=", training_cost, "a=", sess.run(a), "b=", sess.run(b), '\n')
return sess.run(a), sess.run(b)
def visualize(a, b, train_X: ndarray, train_Y: ndarray):
plt.plot(train_X, train_Y, 'ro', label='Original data')
plt.plot(train_X, train_Y)
plt.plot(train_X, a * train_X + b, label='Fitted line')
plt.scatter(train_X, train_Y)
plt.legend()
plt.show()
def data_maker(num=80):
X = np.arange(0, num, dtype=np.float32)
Y = np.float32(np.ceil(5 * (np.sin(X) + X / 5)))
return X, Y
if __name__ == "__main__":
data = data_maker(5)
regression = LinearRegression(*data_maker())
visualize(*(regression.fit() + data_maker()))
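# Sanity check (added, not part of the original script): the gradient-descent fit above
# minimises mean absolute error, so it can be compared against NumPy's closed-form
# least-squares line on the same data; the two need not match exactly.
#
#   X, Y = data_maker()
#   a_ls, b_ls = np.polyfit(X, Y, 1)   # slope and intercept from ordinary least squares
#   print(a_ls, b_ls)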
| mit | -6,478,785,350,685,866,000 | 32.583333 | 104 | 0.585112 | false |
QKaiser/pynessus | pynessus/nessus.py | 1 | 26073 | """
Copyright 2014 Quentin Kaiser
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from httplib import HTTPSConnection, CannotSendRequest, ImproperConnectionState
import os
import json
import socket
import ssl
import errno
from xml.dom.minidom import parseString
from models.scan import Scan
from models.policy import Policy
from models.plugin import Plugin, PluginFamily, PluginRule
from models.user import User
from models.folder import Folder
from models.template import Template
from models.host import Host
from models.scanner import Scanner
from models.agent import Agent
from models.agentgroup import AgentGroup
from models.mail import Mail
from models.permission import Permission
from models.proxy import Proxy
from models.group import Group
from models.vulnerability import Vulnerability
class NessusAPIError(Exception):
pass
class Nessus(object):
"""
A Nessus Server instance.
"""
def __init__(self, url="", port=8834, verify=True):
"""
Constructor.
Params:
url(string): nessus server's url
port(int): nessus server's port
verify(bool): verify server's SSL cert if True
Returns:
"""
self._url = url
self._port = port
self._verify = verify
self._uuid = 0
self._connection = None
self._product = None
self._engine = None
self._web_ui = None
self._misc_settings = []
self._loaded_plugin_set = None
self._scanner_boottime = 0
self._idle_timeout = 0
self._plugin_set = None
self._plugins_lastupdated = 0
self._plugins_expiration = 0
self._web_server_version = None
self._expiration = None
self._nessus_ui_version = None
self._ec2 = None
self._nessus_type = None
self._capabilities = None
self._plugin_set = None
self._idle_timeout = None
self._scanner_boottime = None
self._server_version = None
self._feed = None
self._mail = None
self._proxy = None
# managing multiple user sessions
self._user = None
self._agents = []
self._agentgroups = []
self._schedules = []
self._policies = []
self._templates = []
self._scans = []
self._tags = []
self._folders = []
self._users = []
self._notifications = []
self._scanners = []
self._permissions = []
self._groups = []
self._plugin_families =[]
self._plugin_rules = []
self._plugins = []
self._headers = {
"Content-type": "application/json",
"Accept": "application/json"
}
def Agent(self):
return Agent(self)
def AgentGroup(self):
return AgentGroup(self)
def Scan(self):
return Scan(self)
def Host(self):
return Host(self)
def Policy(self):
return Policy(self)
def Plugin(self):
return Plugin(self)
def PluginFamily(self):
return PluginFamily(self)
def PluginRule(self):
return PluginRule(self)
def Schedule(self):
return Schedule(self)
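        # Note (added): Schedule is not imported at the top of this module; this
        # factory assumes a Schedule model (e.g. models.schedule.Schedule) is available.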
def Scanner(self):
return Scanner(self)
def User(self, username=None, password=None):
return User(self, username, password)
def Folder(self):
return Folder(self)
def Template(self):
return Template(self)
def Mail(self):
return Mail(self)
def Permission(self):
return Permission(self)
def Proxy(self):
return Proxy(self)
def Group(self):
return Group(self)
def Vulnerability(self):
return Vulnerability(self)
def _request(self, method, target, params, headers=None):
"""
Send an HTTP request.
Params:
method(string): HTTP method (i.e. GET, POST, PUT, DELETE, HEAD)
target(string): target path (i.e. /schedule/new)
params(string): HTTP parameters
headers(array): HTTP headers
Returns:
Response body if successful, None otherwise.
"""
try:
if self._connection is None:
if not self._verify:
ctx = ssl._create_unverified_context()
self._connection = HTTPSConnection(self._url, self._port, context=ctx)
else:
self._connection = HTTPSConnection(self._url, self._port)
self._connection.request(method, target, params, self._headers if headers is None else headers)
except CannotSendRequest:
self._connection = HTTPSConnection(self._url, self._port)
self.login(self._user)
self._request(method, target, params, self._headers)
except ImproperConnectionState:
self._connection = HTTPSConnection(self._url, self._port)
self.login(self._user)
self._request(method, target, params, self._headers)
except socket.error as serr:
if serr.errno != errno.ECONNREFUSED:
# Not the error we are looking for, re-raise
raise serr
else:
raise Exception("Can't connect to Nessus at https://%s:%s" % (self._url, self._port))
response = self._connection.getresponse()
if response.status == 200:
return response.read()
else:
raise Exception(response.read())
def _api_request(self, method, target, params=None):
"""
Send a request to the Nessus REST API.
Params:
method(string): HTTP method (i.e. GET, PUT, POST, DELETE, HEAD)
target(string): target path (i.e. /schedule/new)
params(dict): HTTP parameters
Returns:
dict: parsed dict from json answer, None if no content.
"""
if not params:
params = {}
raw_response = self._request(method, target, json.dumps(params))
if raw_response is not None and len(raw_response):
response = json.loads(raw_response)
if response is not None and "error" in response:
raise NessusAPIError(response["error"])
return response
return None
@staticmethod
def _encode(filename):
"""
Encode filename content into a multipart/form-data data string.
Params:
filename(string): filename of the file that will be encoded.
Returns:
string: multipart/form-data data string
"""
boundary = '----------bundary------'
crlf = '\r\n'
body = []
with open(filename, "rb") as f:
body.extend(
[
'--' + boundary,
'Content-Disposition: form-data; name="Filedata"; filename="%s"' % (os.path.basename(filename)),
'Content-Type: text/xml',
'',
f.read(),
]
)
body.extend(['--' + boundary + '--', ''])
return 'multipart/form-data; boundary=%s' % boundary, crlf.join(body)
def login(self, user):
"""
Log into Nessus server with provided user profile.
Args:
user (User): user instance
Returns:
bool: True if successful login, False otherwise.
Raises:
"""
if self.server_version[0] != "6":
raise Exception("This version of Nessus is not supported by pynessus. \nIf you absolutely need to use "
"pynessus with Nessus 5.x, please follow the instructions"
"available on the git repository (https://github.com/qkaiser/pynessus)")
params = {'username': user.username, 'password': user.password}
response = self._api_request("POST", "/session", params)
if response is not None:
if "status" in response:
raise Exception(response["status"])
self._user = user
self._user.token = response['token']
# Persist token value for subsequent requests
self._headers["X-Cookie"] = 'token=%s' % (response['token'])
return True
else:
return False
def logout(self):
"""
Log out of the Nessus server, invalidating the current token value.
Returns:
bool: True if successful login, False otherwise.
"""
self._request("DELETE", "/session", [])
return True
@property
def status(self):
"""
Return the Nessus server status.
Params:
Returns
"""
response = self._api_request("GET", "/server/status", "")
if response is not None:
return response["status"]
else:
return "unknown"
def load(self):
"""
Load Nessus.
Returns:
bool: True if successful login, False otherwise.
"""
success = True
success &= self.load_properties()
success &= self.load_mail()
success &= self.load_proxy()
success &= self.load_scanners()
success &= self.load_agents()
success &= self.load_agentgroups()
success &= self.load_policies()
success &= self.load_scans()
success &= self.load_folders()
success &= self.load_templates()
success &= self.load_users()
#success &= self.load_groups()
#success &= self.load_plugin_families()
#success &= self.load_plugin_rules()
return success
def load_plugin_families(self):
"""
:return:
"""
response = self._api_request("GET", "/plugins/families", "")
if response is not None and "families" in response:
for family in response["families"]:
p = self.PluginFamily()
p.id = family["id"]
p.name = family["name"]
p.plugin_count = family["count"]
p.load_plugins()
self._plugin_families.append(p)
return True
def load_plugin_rules(self):
"""
:return:
"""
response = self._api_request("GET", "/plugin-rules", "")
if "plugin_rules" in response and response["plugin_rules"] is not None:
for p in response["plugin_rules"]:
plugin_rule = self.PluginRule()
plugin_rule.id = p["id"]
plugin_rule.plugin_id = p["plugin_id"]
plugin_rule.date = p["date"]
plugin_rule.host = p["host"]
plugin_rule.type = p["type"]
plugin_rule.owner = p["owner"]
plugin_rule.owner_id = p["owner_id"]
self._plugin_rules.append(plugin_rule)
return True
def load_groups(self):
"""
:return:
"""
response = self._api_request("GET", "/groups")
if "groups" in response and response["groups"] is not None:
for g in response["groups"]:
group = self.Group()
group.id = g["id"]
group.name = g["name"]
group.user_count = g["user_count"]
group.permissions = g["permissions"]
self._groups.append(group)
return True
def load_agents(self):
"""
:return:
"""
for scanner in self._scanners:
response = self._api_request("GET", "/scanners/%d/agents" % scanner.id)
if "agents" in response and response["agents"] is not None:
for a in response["agents"]:
agent = self.Agent()
agent.distros = a["distros"]
agent.id = a["id"]
agent.ip = a["ip"]
agent.last_scanned = a["last_scanned"]
agent.name = a["name"]
agent.platform = a["platform"]
agent.token = a["token"]
agent.uuid = a["uuid"]
agent.scanner_id = scanner.id
self._agents.append(agent)
return True
def load_agentgroups(self):
"""
:return:
"""
for scanner in self._scanners:
response = self._api_request("GET", "/scanners/%d/agent-groups" % scanner.id)
if "groups" in response and response["groups"] is not None:
for g in response["groups"]:
group = self.AgentGroup()
group.id = g["id"]
group.name = g["name"]
group.owner_id = g["owner_id"]
group.owner = g["owner"]
group.shared = g["shared"]
group.user_permissions = g["user_permissions"]
group.creation_date = g["creation_date"]
group.last_modification_date = g["last_modification_date"]
self._agentgroups.append(group)
return True
def load_properties(self):
"""
Load Nessus server properties.
Returns:
bool: True if successful login, False otherwise.
"""
response = self._api_request("GET", "/server/properties?json=1", {})
if response is not None:
self._loaded_plugin_set = response["loaded_plugin_set"]
self._uuid = response["server_uuid"]
self._expiration = response["expiration"]
self._nessus_ui_version = response["nessus_ui_version"]
self._nessus_type = response["nessus_type"]
self._notifications = []
for notification in response["notifications"]:
self._notifications.append(notification)
self._capabilities = response["capabilities"]
self._plugin_set = response["plugin_set"]
self._idle_timeout = response["idle_timeout"]
self._scanner_boottime = response["scanner_boottime"]
self._server_version = response["server_version"]
return True
else:
return False
def load_mail(self):
self._mail = self.Mail()
return self._mail.load()
def load_proxy(self):
self._proxy = self.Proxy()
return self._proxy.load()
def load_templates(self):
"""
Load Nessus server's scan templates.
Params:
Returns:
bool: True if successful login, False otherwise.
"""
response = self._api_request("GET", "/editor/scan/templates", "")
self._templates = []
if "templates" in response:
for t in response["templates"]:
template = self.Template()
template.uuid = t["uuid"]
template.title = t["title"]
template.name = t["name"]
template.description = t["desc"]
template.more_info = t["more_info"] if "more_info" in t else None
template.cloud_only = t["cloud_only"]
template.subscription_only = t["subscription_only"]
self._templates.append(template)
return True
def load_scanners(self):
"""
:return:
"""
response = self._api_request("GET", "/scanners")
if "scanners" in response:
for s in response["scanners"]:
scanner = self.Scanner()
scanner.id = s["id"]
scanner.uuid = s["uuid"]
scanner.name = s["name"]
scanner.type = s["type"]
scanner.status = s["status"]
scanner.scan_count = s["scan_count"]
scanner.engine_version = s["engine_version"]
scanner.platform = s["platform"]
scanner.loaded_plugin_set = s["loaded_plugin_set"]
scanner.registration_code = s["registration_code"]
scanner.owner = s["owner"]
self._scanners.append(scanner)
return True
def load_scans(self, tag_id=None):
"""
Load Nessus server's scans. Load scans from a specific tag if tag_id is provided.
Params:
tag_id(int, optional): Tag's identification number.
Returns:
bool: True if successful login, False otherwise.
"""
response = self._api_request("GET", "/scans", "")
self._scans = []
if "scans" in response and response["scans"] is not None:
for s in response["scans"]:
scan = self.Scan()
scan.status = s["status"]
scan.name = s["name"]
scan.read = s["read"]
scan.last_modification_date = s["last_modification_date"]
scan.creation_date = s["creation_date"]
scan.user_permissions = s["user_permissions"]
scan.shared = s["shared"]
scan.id = s["id"]
scan.template = self.Template()
scan.template.uuid = s["uuid"]
scan.folder = self.Folder()
scan.folder.id = s["folder_id"]
for user in self.users:
if user.id == s["owner_id"]:
scan.owner = user
self._scans.append(scan)
return True
def load_folders(self):
"""
Params:
Returns:
"""
response = self._api_request("GET", "/folders")
if "folders" in response:
self._folders = []
for result in response["folders"]:
f = self.Folder()
f.id = result["id"]
f.type = result["type"] if "type" in result else "local"
f.custom = result["custom"]
f.default_tag = result["default_tag"]
f.name = result["name"]
f.unread_count = result["unread_count"] if "unread_count" in result else 0
self._folders.append(f)
return True
else:
return False
def load_policies(self):
"""
Load Nessus server's policies.
Params:
Returns:
bool: True if successful login, False otherwise.
"""
response = self._api_request("GET", "/policies")
if "policies" in response and response["policies"] is not None:
self._policies = []
for result in response['policies']:
policy = self.Policy()
policy.id = result["id"]
policy.template_uuid = result["template_uuid"]
policy.name = result["name"]
policy.owner = result["owner"]
policy.creation_date = result["creation_date"]
policy.no_target = result["no_target"] if "no_target" in result else False
policy.visibility = result["visibility"]
policy.shared = result["shared"]
policy.user_permissions = result["user_permissions"]
policy.last_modification_date = result["last_modification_date"]
policy.creation_date = result["creation_date"]
self._policies.append(policy)
return True
def load_users(self):
"""
Load Nessus server's users.
Params:
Returns:
bool: True if successful login, False otherwise.
"""
response = self._api_request("GET", "/users")
if "users" in response:
users = []
for result in response["users"]:
user = self.User()
user.last_login = result["lastlogin"]
user.permissions = result["permissions"]
user.type = result["type"]
user.name = result["name"]
user.username = result["username"]
user.id = result["id"]
users.append(user)
self._users = users
return True
else:
return False
def upload_file(self, filename):
"""
Upload the file identified by filename to the server.
Params:
filename(string): file path
Returns:
bool: True if successful, False otherwise.
"""
if not os.path.isfile(filename):
raise Exception("This file does not exist.")
else:
content_type, body = self._encode(filename)
headers = self._headers
headers["Content-type"] = content_type
response = json.loads(self._request("POST", "/file/upload", body, self._headers))
if "fileuploaded" in response:
return response["fileuploaded"]
else:
return False
def import_policy(self, filename):
"""
Import an existing policy uploaded using Nessus.file (.nessus format only).
Params:
Returns:
"""
uploaded_file = self.upload_file(filename)
if uploaded_file:
response = self._api_request(
"POST",
"/policies/import",
{"file": uploaded_file}
)
return True if response is None else False
else:
raise Exception("An error occured while uploading %s." % filename)
def import_scan(self, filename, folder_id=None, password=None):
"""
Import an existing policy uploaded using Nessus.file (.nessus format only).
Params:
filename(str):
folder_id(int):
password(str):
Returns:
"""
uploaded_file = self.upload_file(filename)
if uploaded_file:
params = {"file": uploaded_file}
if folder_id is not None:
params["folder_id"] = folder_id
if password is not None:
params["password"] = password
response = self._api_request(
"POST",
"/scans/import",
params
)
return True if response is None else False
@property
def server_version(self):
if self._server_version is None:
if "404 File not found" not in self._request("GET", "/nessus6.html", ""):
self._server_version = "6.x"
elif self._request("GET", "/html5.html", "") is not None:
self._server_version = "5.x"
else:
self._server_version = "unknown"
return self._server_version
@property
def agents(self):
if self._agents is None:
self.load_agents()
return self._agents
@property
def agentgroups(self):
if self._agentgroups is None:
            self.load_agentgroups()
return self._agentgroups
@property
def scanners(self):
if not len(self._scanners):
self.load_scanners()
return self._scanners
@property
def scans(self):
if self._scans is None:
self.load_scans()
return self._scans
@property
def policies(self):
if self._policies is None:
self.load_policies()
return self._policies
@property
def users(self):
if self._users is None:
self.load_users()
return self._users
@property
def tags(self):
if self._tags is None:
self.load_tags()
return self._tags
@property
def templates(self):
if not len(self._templates):
self.load_templates()
return self._templates
@property
def mail(self):
return self._mail
@property
def proxy(self):
return self._proxy
@property
def folders(self):
if not len(self._folders):
self.load_folders()
return self._folders
@property
def groups(self):
return self._groups
@property
def user(self):
return self._user
@property
def plugin_families(self):
return self._plugin_families
@property
def plugin_rules(self):
return self._plugin_rules
@policies.setter
def policies(self, value):
self._policies = value
@scans.setter
def scans(self, value):
self._scans = value
@tags.setter
def tags(self, value):
self._tags = value
@users.setter
def users(self, value):
self._users = value
@templates.setter
def templates(self, value):
self._templates = value
@scanners.setter
def scanners(self, value):
self._scanners = value
@agents.setter
def agents(self, value):
self._agents = value
@agentgroups.setter
def agentgroups(self, value):
self._agentgroups = value
@mail.setter
def mail(self, value):
if isinstance(value, Mail):
self._mail = value
else:
raise Exception("Not a Mail instance")
@proxy.setter
def proxy(self, value):
if isinstance(value, Proxy):
self._proxy = value
else:
raise Exception("Not a Proxy instance")
@folders.setter
def folders(self, value):
self._folders = value
@groups.setter
def groups(self, value):
self._groups = value
@user.setter
def user(self, value):
if isinstance(value, User):
self._user = value
else:
raise Exception("Not a User instance")
| apache-2.0 | -6,025,734,581,301,239,000 | 31.149199 | 116 | 0.536225 | false |
madgik/exareme | Exareme-Docker/src/exareme/exareme-tools/madis/src/functions/vtable/flow.py | 1 | 3257 | """
.. function:: flow(query:None)
Translates the input query results into sql statements if possible.
:Returned table schema:
- *query* text
A complete sql query statement with the semicolon at the end
.. note::
Input query results must be sql statements separated with semicolons in the first place. Using in the input query the :func:`~functions.vtable.file.file` operator any file with sql statements can be divided in sql query statements. Multiline comments are considered as statements.
Examples:
.. doctest::
>>> sql("select * from (flow file 'testing/testflow.sql') limit 1") # doctest: +NORMALIZE_WHITESPACE
query
-----------------------------------------------------------------------------------------------------------------------------------------------------------
/*====== countries: table of Country ISO codes , country names ===========*/
CREATE TABLE countries (
country2 PRIMARY KEY UNIQUE,
country_name
);
>>> sql("select * from (flow file 'testing/colpref.csv' limit 5) ") #doctest:+ELLIPSIS +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
OperatorError: Madis SQLError:
Operator FLOW: Incomplete statement found : userid colid pr ... 41 416900.0 agr
Test files:
- :download:`testflow.sql <../../functions/vtable/testing/testflow.sql>`
- :download:`colpref.csv <../../functions/vtable/testing/colpref.csv>`
"""
import apsw
import functions
import re
import vtbase
registered = True
def filterlinecomment(s):
if re.match(r'\s*--', s, re.DOTALL | re.UNICODE):
return ''
else:
return s
class FlowVT(vtbase.VT):
def VTiter(self, *parsedArgs, **envars):
largs, dictargs = self.full_parse(parsedArgs)
if 'query' not in dictargs:
raise functions.OperatorError(__name__.rsplit('.')[-1], "No query argument ")
query = dictargs['query']
connection = envars['db']
yield (('query', 'text'),)
cur = connection.cursor()
execit = cur.execute(query, parse=False)
st = ''
for row in execit:
strow = filterlinecomment(' '.join(row))
if strow == '':
continue
if st != '':
st += '\n' + strow
else:
st += strow
if apsw.complete(st):
yield [st]
st = ''
if len(st) > 0 and not re.match(r'\s+$', st, re.DOTALL | re.UNICODE):
if len(st) > 35:
raise functions.OperatorError(__name__.rsplit('.')[-1],
"Incomplete statement found : %s ... %s" % (st[:15], st[-15:]))
else:
raise functions.OperatorError(__name__.rsplit('.')[-1], "Incomplete statement found : %s" % (st))
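# Note (added): statement boundaries above are detected with apsw.complete(), which
# returns True only once the accumulated text forms a complete SQL statement, e.g.
#   apsw.complete("select 1")   -> False  (no terminating semicolon yet)
#   apsw.complete("select 1;")  -> True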
def Source():
return vtbase.VTGenerator(FlowVT)
if not ('.' in __name__):
"""
This is needed to be able to test the function, put it at the end of every
new function you create
"""
import sys
from functions import *
testfunction()
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding('utf-8')
import doctest
doctest.testmod()
| mit | -3,962,485,522,345,674,000 | 28.609091 | 284 | 0.55327 | false |
sergeneren/anima | anima/env/mayaEnv/relax_vertices.py | 1 | 1428 | # -*- coding: utf-8 -*-
# Copyright (c) 2012-2015, Anima Istanbul
#
# This module is part of anima-tools and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
"""Relax Vertices by Erkan Ozgur Yilmaz
Relaxes vertices without shrinking/expanding the geometry.
Version History
---------------
v0.1.1
- script works with all kind of components
v0.1.0
- initial working version
"""
import pymel.core as pm
__version__ = "0.1.1"
def relax():
# check the selection
selection = pm.ls(sl=1)
if not selection:
return
# convert the selection to vertices
verts = pm.ls(pm.polyListComponentConversion(tv=1))
if not verts:
return
shape = verts[0].node()
# duplicate the geometry
dup = shape.duplicate()[0]
dup_shape = dup.getShape()
# now relax the selected vertices of the original shape
pm.polyAverageVertex(verts, i=1, ch=0)
# now transfer point positions using transferAttributes
ta_node = pm.transferAttributes(
dup,
verts,
transferPositions=True,
transferNormals=False,
transferUVs=False,
transferColors=False,
sampleSpace=0,
searchMethod=0,
flipUVs=False,
colorBorders=1,
)
# delete history
pm.delete(shape, ch=1)
# delete the duplicate surface
pm.delete(dup)
# reselect selection
pm.select(selection)
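# Example usage (added, not part of the original script): run inside Maya with some
# mesh components selected, e.g. from the script editor:
#
#   from anima.env.mayaEnv import relax_vertices
#   relax_vertices.relax()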
| bsd-2-clause | 2,163,826,030,786,291,200 | 20.636364 | 68 | 0.64916 | false |
corbt/pypeline | setup.py | 1 | 1413 | import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
with open(os.path.join(os.path.dirname(__file__), 'pypeline', '_version.py')) as f:
exec(f.read(), globals(), locals())
long_description = ''
if os.path.exists('README.txt'):
long_description = open('README.txt').read()
setup(
name = "pypeline-db",
version = __version__,
author = "Kyle Corbitt",
author_email = "[email protected]",
description = "A database for processing and storing datasets based on LevelDB",
license = "MIT",
keywords = "levelDB big data data science",
url = "https://github.com/kcorbitt/pypeline",
packages=['pypeline'],
long_description=long_description,
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Intended Audience :: Science/Research",
"Operating System :: POSIX",
"Topic :: Utilities",
"Topic :: Database",
"Topic :: Scientific/Engineering",
],
install_requires=['plyvel']
)
| mit | -2,353,462,996,388,564,500 | 32.642857 | 84 | 0.635527 | false |
facelessuser/sublime-markdown-popups | st3/mdpopups/pygments/lexers/pawn.py | 1 | 8073 | # -*- coding: utf-8 -*-
"""
pygments.lexers.pawn
~~~~~~~~~~~~~~~~~~~~
Lexers for the Pawn languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from ..lexer import RegexLexer
from ..token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
from ..util import get_bool_opt
__all__ = ['SourcePawnLexer', 'PawnLexer']
class SourcePawnLexer(RegexLexer):
"""
For SourcePawn source code with preprocessor directives.
.. versionadded:: 1.6
"""
name = 'SourcePawn'
aliases = ['sp']
filenames = ['*.sp']
mimetypes = ['text/x-sourcepawn']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/\*.*?\*/)+'
#: only one /* */ style comment
_ws1 = r'\s*(?:/[*].*?[*]/\s*)*'
tokens = {
'root': [
# preprocessor directives: without whitespace
('^#if\s+0', Comment.Preproc, 'if0'),
('^#', Comment.Preproc, 'macro'),
# or with whitespace
('^' + _ws1 + r'#if\s+0', Comment.Preproc, 'if0'),
('^' + _ws1 + '#', Comment.Preproc, 'macro'),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
(r'[{}]', Punctuation),
(r'L?"', String, 'string'),
(r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
(r'0[0-7]+[LlUu]*', Number.Oct),
(r'\d+[LlUu]*', Number.Integer),
(r'\*/', Error),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.;]', Punctuation),
(r'(case|const|continue|native|'
r'default|else|enum|for|if|new|operator|'
r'public|return|sizeof|static|decl|struct|switch)\b', Keyword),
(r'(bool|Float)\b', Keyword.Type),
(r'(true|false)\b', Keyword.Constant),
('[a-zA-Z_]\w*', Name),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/\*(.|\n)*?\*/', Comment.Multiline),
(r'//.*?\n', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'if0': [
(r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
(r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
(r'.*?\n', Comment),
]
}
SM_TYPES = set(('Action', 'bool', 'Float', 'Plugin', 'String', 'any',
'AdminFlag', 'OverrideType', 'OverrideRule', 'ImmunityType',
'GroupId', 'AdminId', 'AdmAccessMode', 'AdminCachePart',
'CookieAccess', 'CookieMenu', 'CookieMenuAction', 'NetFlow',
'ConVarBounds', 'QueryCookie', 'ReplySource',
'ConVarQueryResult', 'ConVarQueryFinished', 'Function',
'Action', 'Identity', 'PluginStatus', 'PluginInfo', 'DBResult',
'DBBindType', 'DBPriority', 'PropType', 'PropFieldType',
'MoveType', 'RenderMode', 'RenderFx', 'EventHookMode',
'EventHook', 'FileType', 'FileTimeMode', 'PathType',
'ParamType', 'ExecType', 'DialogType', 'Handle', 'KvDataTypes',
'NominateResult', 'MapChange', 'MenuStyle', 'MenuAction',
'MenuSource', 'RegexError', 'SDKCallType', 'SDKLibrary',
'SDKFuncConfSource', 'SDKType', 'SDKPassMethod', 'RayType',
'TraceEntityFilter', 'ListenOverride', 'SortOrder', 'SortType',
'SortFunc2D', 'APLRes', 'FeatureType', 'FeatureStatus',
'SMCResult', 'SMCError', 'TFClassType', 'TFTeam', 'TFCond',
'TFResourceType', 'Timer', 'TopMenuAction', 'TopMenuObjectType',
'TopMenuPosition', 'TopMenuObject', 'UserMsg'))
def __init__(self, **options):
self.smhighlighting = get_bool_opt(options,
'sourcemod', True)
self._functions = set()
if self.smhighlighting:
from pygments.lexers._sourcemod_builtins import FUNCTIONS
self._functions.update(FUNCTIONS)
RegexLexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
if token is Name:
if self.smhighlighting:
if value in self.SM_TYPES:
token = Keyword.Type
elif value in self._functions:
token = Name.Builtin
yield index, token, value
class PawnLexer(RegexLexer):
"""
For Pawn source code.
.. versionadded:: 2.0
"""
name = 'Pawn'
aliases = ['pawn']
filenames = ['*.p', '*.pwn', '*.inc']
mimetypes = ['text/x-pawn']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*][\w\W]*?[*]/)+'
#: only one /* */ style comment
_ws1 = r'\s*(?:/[*].*?[*]/\s*)*'
tokens = {
'root': [
# preprocessor directives: without whitespace
('^#if\s+0', Comment.Preproc, 'if0'),
('^#', Comment.Preproc, 'macro'),
# or with whitespace
('^' + _ws1 + r'#if\s+0', Comment.Preproc, 'if0'),
('^' + _ws1 + '#', Comment.Preproc, 'macro'),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?\*[\w\W]*?\*(\\\n)?/', Comment.Multiline),
(r'[{}]', Punctuation),
(r'L?"', String, 'string'),
(r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
(r'0[0-7]+[LlUu]*', Number.Oct),
(r'\d+[LlUu]*', Number.Integer),
(r'\*/', Error),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.;]', Punctuation),
(r'(switch|case|default|const|new|static|char|continue|break|'
r'if|else|for|while|do|operator|enum|'
r'public|return|sizeof|tagof|state|goto)\b', Keyword),
(r'(bool|Float)\b', Keyword.Type),
(r'(true|false)\b', Keyword.Constant),
('[a-zA-Z_]\w*', Name),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/\*(.|\n)*?\*/', Comment.Multiline),
(r'//.*?\n', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'if0': [
(r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
(r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
(r'.*?\n', Comment),
]
}
| mit | -358,244,456,042,769,800 | 39.567839 | 84 | 0.445559 | false |
wooga/airflow | airflow/utils/log/stackdriver_task_handler.py | 1 | 11730 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Handler that integrates with Stackdriver
"""
import logging
from typing import Dict, List, Optional, Tuple, Type
from cached_property import cached_property
from google.api_core.gapic_v1.client_info import ClientInfo
from google.cloud import logging as gcp_logging
from google.cloud.logging.handlers.transports import BackgroundThreadTransport, Transport
from google.cloud.logging.resource import Resource
from airflow import version
from airflow.models import TaskInstance
DEFAULT_LOGGER_NAME = "airflow"
_GLOBAL_RESOURCE = Resource(type="global", labels={})
class StackdriverTaskHandler(logging.Handler):
"""Handler that directly makes Stackdriver logging API calls.
This is a Python standard ``logging`` handler using that can be used to
route Python standard logging messages directly to the Stackdriver
Logging API.
    It can also be used to save logs for executing tasks. To do this, set it as the handler
    named "tasks"; it will then also be used to read back task logs for display in the web UI.
This handler supports both an asynchronous and synchronous transport.
:param gcp_conn_id: Connection ID that will be used for authorization to the Google Cloud Platform.
If omitted, authorization based on `the Application Default Credentials
<https://cloud.google.com/docs/authentication/production#finding_credentials_automatically>`__ will
be used.
:type gcp_conn_id: str
:param name: the name of the custom log in Stackdriver Logging. Defaults
to 'airflow'. The name of the Python logger will be represented
in the ``python_logger`` field.
:type name: str
:param transport: Class for creating new transport objects. It should
extend from the base :class:`google.cloud.logging.handlers.Transport` type and
implement :meth`google.cloud.logging.handlers.Transport.send`. Defaults to
:class:`google.cloud.logging.handlers.BackgroundThreadTransport`. The other
option is :class:`google.cloud.logging.handlers.SyncTransport`.
:type transport: :class:`type`
:param resource: (Optional) Monitored resource of the entry, defaults
to the global resource type.
:type resource: :class:`~google.cloud.logging.resource.Resource`
:param labels: (Optional) Mapping of labels for the entry.
:type labels: dict
"""
LABEL_TASK_ID = "task_id"
LABEL_DAG_ID = "dag_id"
LABEL_EXECUTION_DATE = "execution_date"
LABEL_TRY_NUMBER = "try_number"
def __init__(
self,
gcp_conn_id: Optional[str] = None,
name: str = DEFAULT_LOGGER_NAME,
transport: Type[Transport] = BackgroundThreadTransport,
resource: Resource = _GLOBAL_RESOURCE,
labels: Optional[Dict[str, str]] = None,
):
super().__init__()
self.gcp_conn_id = gcp_conn_id
self.name: str = name
self.transport_type: Type[Transport] = transport
self.resource: Resource = resource
self.labels: Optional[Dict[str, str]] = labels
self.task_instance_labels: Optional[Dict[str, str]] = {}
@cached_property
def _client(self) -> gcp_logging.Client:
"""Google Cloud Library API client"""
if self.gcp_conn_id:
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
hook = GoogleBaseHook(gcp_conn_id=self.gcp_conn_id)
credentials = hook._get_credentials() # pylint: disable=protected-access
else:
# Use Application Default Credentials
credentials = None
client = gcp_logging.Client(
credentials=credentials,
client_info=ClientInfo(client_library_version='airflow_v' + version.version)
)
return client
@cached_property
def _transport(self) -> Transport:
"""Object responsible for sending data to Stackdriver"""
return self.transport_type(self._client, self.name)
def emit(self, record: logging.LogRecord) -> None:
"""Actually log the specified logging record.
:param record: The record to be logged.
:type record: logging.LogRecord
"""
message = self.format(record)
labels: Optional[Dict[str, str]]
if self.labels and self.task_instance_labels:
labels = {}
labels.update(self.labels)
labels.update(self.task_instance_labels)
elif self.labels:
labels = self.labels
elif self.task_instance_labels:
labels = self.task_instance_labels
else:
labels = None
self._transport.send(record, message, resource=self.resource, labels=labels)
def set_context(self, task_instance: TaskInstance) -> None:
"""
Configures the logger to add information with information about the current task
:param task_instance: Currently executed task
:type task_instance: TaskInstance
"""
self.task_instance_labels = self._task_instance_to_labels(task_instance)
def read(
self, task_instance: TaskInstance, try_number: Optional[int] = None, metadata: Optional[Dict] = None
) -> Tuple[List[str], List[Dict]]:
"""
Read logs of given task instance from Stackdriver logging.
:param task_instance: task instance object
:type: task_instance: TaskInstance
:param try_number: task instance try_number to read logs from. If None
it returns all logs
:type try_number: Optional[int]
:param metadata: log metadata. It is used for steaming log reading and auto-tailing.
:type metadata: Dict
:return: a tuple of list of logs and list of metadata
:rtype: Tuple[List[str], List[Dict]]
"""
if try_number is not None and try_number < 1:
logs = ["Error fetching the logs. Try number {} is invalid.".format(try_number)]
return logs, [{"end_of_log": "true"}]
if not metadata:
metadata = {}
ti_labels = self._task_instance_to_labels(task_instance)
if try_number is not None:
ti_labels[self.LABEL_TRY_NUMBER] = str(try_number)
else:
del ti_labels[self.LABEL_TRY_NUMBER]
log_filter = self._prepare_log_filter(ti_labels)
next_page_token = metadata.get("next_page_token", None)
all_pages = 'download_logs' in metadata and metadata['download_logs']
messages, end_of_log, next_page_token = self._read_logs(log_filter, next_page_token, all_pages)
new_metadata = {"end_of_log": end_of_log}
if next_page_token:
new_metadata['next_page_token'] = next_page_token
return [messages], [new_metadata]
def _prepare_log_filter(self, ti_labels: Dict[str, str]) -> str:
"""
Prepares the filter that chooses which log entries to fetch.
More information:
https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/list#body.request_body.FIELDS.filter
https://cloud.google.com/logging/docs/view/advanced-queries
:param ti_labels: Task Instance's labels that will be used to search for logs
:type: Dict[str, str]
:return: logs filter
"""
def escape_label_key(key: str) -> str:
return f'"{key}"' if "." in key else key
def escale_label_value(value: str) -> str:
escaped_value = value.replace("\\", "\\\\").replace('"', '\\"')
return f'"{escaped_value}"'
log_filters = [
f'resource.type={escale_label_value(self.resource.type)}',
f'logName="projects/{self._client.project}/logs/{self.name}"'
]
for key, value in self.resource.labels.items():
log_filters.append(f'resource.labels.{escape_label_key(key)}={escale_label_value(value)}')
for key, value in ti_labels.items():
log_filters.append(f'labels.{escape_label_key(key)}={escale_label_value(value)}')
return "\n".join(log_filters)
def _read_logs(
self,
log_filter: str,
next_page_token: Optional[str],
all_pages: bool
) -> Tuple[str, bool, Optional[str]]:
"""
Sends requests to the Stackdriver service and downloads logs.
:param log_filter: Filter specifying the logs to be downloaded.
:type log_filter: str
:param next_page_token: The token of the page from which the log download will start.
If None is passed, it will start from the first page.
:param all_pages: If True is passed, all subpages will be downloaded. Otherwise, only the first
page will be downloaded
:return: A token that contains the following items:
* string with logs
* Boolean value describing whether there are more logs,
* token of the next page
:rtype: Tuple[str, bool, str]
"""
messages = []
new_messages, next_page_token = self._read_single_logs_page(
log_filter=log_filter,
page_token=next_page_token,
)
messages.append(new_messages)
if all_pages:
while next_page_token:
new_messages, next_page_token = self._read_single_logs_page(
log_filter=log_filter,
page_token=next_page_token
)
messages.append(new_messages)
end_of_log = True
next_page_token = None
else:
end_of_log = not bool(next_page_token)
return "\n".join(messages), end_of_log, next_page_token
def _read_single_logs_page(self, log_filter: str, page_token: Optional[str] = None) -> Tuple[str, str]:
"""
Sends requests to the Stackdriver service and downloads single pages with logs.
:param log_filter: Filter specifying the logs to be downloaded.
:type log_filter: str
:param page_token: The token of the page to be downloaded. If None is passed, the first page will be
downloaded.
:type page_token: str
:return: Downloaded logs and next page token
:rtype: Tuple[str, str]
"""
entries = self._client.list_entries(filter_=log_filter, page_token=page_token)
page = next(entries.pages)
next_page_token = entries.next_page_token
messages = []
for entry in page:
if "message" in entry.payload:
messages.append(entry.payload["message"])
return "\n".join(messages), next_page_token
@classmethod
def _task_instance_to_labels(cls, ti: TaskInstance) -> Dict[str, str]:
return {
cls.LABEL_TASK_ID: ti.task_id,
cls.LABEL_DAG_ID: ti.dag_id,
cls.LABEL_EXECUTION_DATE: str(ti.execution_date.isoformat()),
cls.LABEL_TRY_NUMBER: str(ti.try_number),
}
| apache-2.0 | 2,753,684,676,931,630,600 | 40.013986 | 111 | 0.641347 | false |
kagklis/Frequent-Itemset-Hiding-Toolbox-x86 | Apriori.py | 1 | 2242 | #-------------------------------------------------------------------------------
# Name: Apriori.py
# Purpose: Mining Frequent Itemsets
# Author: Vasileios Kagklis
# Created: 10/02/2014
# Copyright: (c) Vasileios Kagklis
#-------------------------------------------------------------------------------
from __future__ import division, print_function
import os
from time import clock
from fim import apriori
from myiolib import readDataset
def printResults(fname, sup, Time, F, out_fname):
result_file=open(out_fname,'w')
visible_file=open('Apriori_visible.txt','w')
print('Apriori Execution',file=visible_file)
print('=================',file=visible_file)
print('Data Set from File:',fname,file=visible_file)
print('Support= ',sup,file=visible_file)
print('Frequent Itemsets ==> Support:',file=visible_file)
print('',file=visible_file)
print('Results:','\n',file=visible_file)
data_line=''
itemset_and_sup=''
Vis_itemset_and_sup=''
for itemset, support in F.items():
ItemSet=list(itemset)
ItemSet.sort()
for item in ItemSet:
data_line=data_line+item+' '
itemset_and_sup=data_line+(str(support))
Vis_itemset_and_sup=data_line+'==>'+(str(round(support,5)))
print(itemset_and_sup,file=result_file)
print(Vis_itemset_and_sup,file=visible_file)
data_line=''
itemset_and_sup=''
Vis_itemset_and_sup=''
print('Execution time= ',Time,file=visible_file)
visible_file.close()
result_file.close()
def convert2dic(F, N):
freq = {}
for itemset in F:
freq[frozenset(itemset[0])] = float(itemset[1][0]/N)
return freq
def convert2frozen_m(f):
result = []
for itemset in f:
result.append(frozenset(itemset[0]))
return(result)
def Apriori_main(data_fname, minSupport, out_fname='Apriori_results.txt'):
lines,tid = readDataset(data_fname)
t1=clock()
temp_freq = apriori(tid, target='s', supp=float(minSupport*100), conf=100)
CPU_time=clock()-t1
freq_items = convert2dic(temp_freq,lines)
printResults(data_fname,minSupport,CPU_time,freq_items,out_fname)
return(freq_items,CPU_time)
| mit | -4,553,797,885,954,154,000 | 34.03125 | 80 | 0.591436 | false |
hbiyik/tribler | src/tribler-core/tribler_core/utilities/unicode.py | 1 | 2666 | """
Conversions to unicode.
Author(s): Arno Bakker
"""
import binascii
import chardet
def ensure_unicode(s, encoding, errors='strict'):
"""Similar to six.ensure_text() except that the encoding parameter is *not* optional
"""
if isinstance(s, bytes):
return s.decode(encoding, errors)
elif isinstance(s, str):
return s
else:
raise TypeError("not expecting type '%s'" % type(s))
def ensure_unicode_detect_encoding(s):
"""Similar to ensure_unicode() but use chardet to detect the encoding
"""
if isinstance(s, bytes):
try:
return s.decode('utf-8') # Try converting bytes --> Unicode utf-8
except UnicodeDecodeError:
charenc = chardet.detect(s)['encoding']
return s.decode(charenc) if charenc else s # Hope for the best
elif isinstance(s, str):
return s
else:
raise TypeError("not expecting type '%s'" % type(s))
def recursive_unicode(obj, ignore_errors=False):
"""
Converts any bytes within a data structure to unicode strings. Bytes are assumed to be UTF-8 encoded text.
:param obj: object comprised of lists/dicts/strings/bytes
:return: obj: object comprised of lists/dicts/strings
"""
if isinstance(obj, dict):
return {recursive_unicode(k, ignore_errors):recursive_unicode(v, ignore_errors) for k, v in obj.items()}
elif isinstance(obj, list):
return [recursive_unicode(i, ignore_errors) for i in obj]
elif isinstance(obj, bytes):
try:
return obj.decode('utf8')
except UnicodeDecodeError:
if ignore_errors:
return "".join((chr(c) for c in obj))
raise
return obj
def recursive_ungarble_metainfo(obj):
if isinstance(obj, dict):
return {k:recursive_ungarble_metainfo(v) for k, v in obj.items()}
elif isinstance(obj, list):
return [recursive_ungarble_metainfo(i) for i in obj]
elif isinstance(obj, str):
return bytes(ord(c) for c in obj)
return obj
def recursive_bytes(obj):
"""
Converts any unicode strings within a Python data structure to bytes. Strings will be encoded using UTF-8.
:param obj: object comprised of lists/dicts/strings/bytes
:return: obj: object comprised of lists/dicts/bytes
"""
if isinstance(obj, dict):
return {recursive_bytes(k):recursive_bytes(v) for k, v in obj.items()}
elif isinstance(obj, list):
return [recursive_bytes(i) for i in obj]
elif isinstance(obj, str):
return obj.encode('utf8')
return obj
def hexlify(binary):
return binascii.hexlify(binary).decode('utf-8')
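# Illustrative round-trip sketch (added; not part of the original module):
# shows recursive_bytes and recursive_unicode undoing each other, plus hexlify.
def _roundtrip_example():
    payload = {'name': 'tribler', 'tags': ['p2p', 'torrent']}
    encoded = recursive_bytes(payload)    # every str becomes UTF-8 bytes
    decoded = recursive_unicode(encoded)  # and back to str again
    assert decoded == payload
    return hexlify(b'\xde\xad\xbe\xef')   # -> 'deadbeef'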
| lgpl-3.0 | 2,811,927,565,161,218,600 | 31.512195 | 112 | 0.645536 | false |
commtrack/temp-aquatest | apps/buildmanager/models.py | 1 | 25166 | import os, sys
import logging
import traceback
from django.conf import settings
from datetime import datetime
import time
# make things easier so people don't have to install pygments
try:
from pygments import highlight
from pygments.lexers import HtmlLexer
from pygments.formatters import HtmlFormatter
pygments_found=True
except ImportError:
pygments_found=False
from zipstream import ZipStream
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
from django.db import models
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from domain.models import Domain
from django.contrib.auth.models import User
from hq.utils import build_url
from requestlogger.models import RequestLog
from xformmanager.models import FormDefModel
from xformmanager.manager import XFormManager
from buildmanager import xformvalidator
from buildmanager.jar import validate_jar, extract_xforms
from buildmanager.exceptions import BuildError
BUILDFILES_PATH = settings.RAPIDSMS_APPS['buildmanager']['buildpath']
class Project (models.Model):
"""
    A project is a high-level container for a build effort. A project
    holds the history of builds produced for it.
"""
domain = models.ForeignKey(Domain)
name = models.CharField(max_length=255)
description = models.CharField(max_length=512, null=True, blank=True)
# the optional project id in a different server (e.g. the build server)
project_id = models.CharField(max_length=20, null=True, blank=True)
@property
def downloads(self):
'''Get all the downloads associated with this project, across
builds.'''
return BuildDownload.objects.filter(build__project=self)
def get_non_released_builds(self):
'''Get all non-released builds for this project'''
return self.builds.exclude(status="release").order_by('-package_created')
def get_released_builds(self):
'''Get all released builds for a project'''
return self.builds.filter(status="release").order_by('-released')
def get_latest_released_build(self):
'''Gets the latest released build for a project, based on the
released date.'''
releases = self.get_released_builds()
if releases:
return releases[0]
def get_latest_jar_url(self):
'''Get the URL for the latest released jar file, empty if no builds
have been released'''
build = self.get_latest_released_build()
if build:
return reverse('get_latest_buildfile',
args=(self.id,
build.get_jar_filename()))
return None
def get_latest_jad_url(self):
'''Get the URL for the latest released jad file, empty if no builds
have been released'''
build = self.get_latest_released_build()
if build:
return reverse('get_latest_buildfile',
args=(self.id,
build.get_jad_filename()))
return None
def get_buildURL(self):
"""Hard coded build url for our build server"""
return 'http://build.dimagi.com:250/viewType.html?buildTypeId=bt%s' % self.project_id
def num_builds(self):
'''Get the number of builds associated with this project'''
return self.builds.all().count()
def __unicode__(self):
return unicode(self.name)
UNKNOWN_IP = "0.0.0.0"
BUILD_STATUS = (
('build', 'Standard Build'),
('release', 'Release'),
)
class ProjectBuild(models.Model):
    '''When a jad/jar is built, it should correspond to a unique ReleasePackage,
    with all of the release and build metadata needed to trace it back to a
    URL/build in source control.'''
project = models.ForeignKey(Project, related_name="builds")
uploaded_by = models.ForeignKey(User, related_name="builds_uploaded")
status = models.CharField(max_length=64, choices=BUILD_STATUS, default="build")
build_number = models.PositiveIntegerField(help_text="the teamcity build number")
revision_number = models.CharField(max_length=255, null=True, blank=True,
help_text="the source control revision number")
version = models.CharField(max_length=20, null=True, blank=True,
help_text = 'the "release" version. e.g. 2.0.1')
package_created = models.DateTimeField()
jar_file = models.FilePathField(_('JAR File Location'),
match='.*\.jar$',
recursive=True,
path=BUILDFILES_PATH,
max_length=255)
jad_file = models.FilePathField(_('JAD File Location'),
match='.*\.jad$',
recursive=True,
path=BUILDFILES_PATH,
max_length=255)
description = models.CharField(max_length=512, null=True, blank=True)
# release info
released = models.DateTimeField(null=True, blank=True)
released_by = models.ForeignKey(User, null=True, blank=True, related_name="builds_released")
def __unicode__(self):
return "%s build: %s. jad: %s, jar: %s" %\
(self.project, self.build_number, self.jad_file, self.jar_file)
def __str__(self):
return unicode(self).encode('utf-8')
def get_display_string(self):
'''Like calling str() but with a url attached'''
return "%s\nurl on server: %s" % (str(self),
build_url(reverse('show_build',
args=(self.id,))))
def get_jar_download_count(self):
return len(self.downloads.filter(type="jar"))
def get_jad_download_count(self):
return len(self.downloads.filter(type="jad"))
@property
def upload_information(self):
'''Get the upload request information associated with this,
if it is present.'''
try:
return BuildUpload.objects.get(build=self).log
except BuildUpload.DoesNotExist:
return None
def save(self):
"""Override save to provide some simple enforcement of uniqueness to the build numbers
generated by the submission of the build"""
if ProjectBuild.objects.filter(project=self.project).filter(build_number=self.build_number).count() > 0 and self.id == None:
raise Exception ("Error, the build number must be unique for this project build: " + str(self.build_number) + " project: " + str(self.project.id))
else:
super(ProjectBuild, self).save()
def get_jar_size(self):
return os.path.getsize(self.jar_file)
def get_jad_size(self):
return os.path.getsize(self.jad_file)
def get_jar_filename(self):
'''Returns the name (no paths) of the jar file'''
return os.path.basename(self.jar_file)
def get_jad_filename(self):
'''Returns the name (no paths) of the jad file'''
return os.path.basename(self.jad_file)
def get_zip_filename(self):
        '''Returns the name (no paths) of the zip file, which will include the version number information'''
fname = os.path.basename(self.jar_file)
basename = os.path.splitext(fname)[0]
zipfilename = basename + "-build" + str(self.build_number) + ".zip"
return zipfilename
def get_jar_filestream(self):
try:
fin = open(self.jar_file,'r')
return fin
except Exception, e:
logging.error("Unable to open jarfile", extra={"exception": e,
"jar_file": self.jar_file,
"build_number": self.build_number,
"project_id": self.project.id})
def get_jad_filestream(self, mode='r'):
try:
fin = open(self.jad_file, mode)
return fin
except Exception, e:
logging.error("Unable to open jadfile", extra={"exception": e,
"jad_file": self.jad_file,
"build_number": self.build_number,
"project_id": self.project.id})
def get_zip_filestream(self):
try:
zpath = str(os.path.dirname(self.jar_file) + "/")
buf = StringIO()
zp = ZipStream(zpath)
for data in zp:
buf.write(data)
#print data
buf.flush()
buf.seek(0)
return buf.read()
except Exception, e:
logging.error("Unable to open create ZipStream", extra={"exception": e,
"build_number": self.build_number,
"project_id": self.project.id})
def get_jad_contents(self):
'''Returns the contents of the jad as text.'''
file = self.get_jad_filestream()
lines = []
for line in file:
lines.append(line.strip())
return "<br>".join(lines)
def get_jad_properties(self):
'''Reads the properties of the jad file and returns a dict'''
file = self.get_jad_filestream()
sep = ': '
proplines = [line.strip() for line in file.readlines() if line.strip()]
jad_properties = {}
for propln in proplines:
i = propln.find(sep)
if i == -1:
pass #log error?
(propname, propvalue) = (propln[:i], propln[i+len(sep):])
jad_properties[propname] = propvalue
return jad_properties
def write_jad(self, properties):
'''Write a property dictionary back to the jad file'''
ordered = ['MIDlet-Name', 'MIDlet-Version', 'MIDlet-Vendor', 'MIDlet-Jar-URL',
'MIDlet-Jar-Size', 'MIDlet-Info-URL', 'MIDlet-1']
for po in ordered:
if po not in properties.keys():
pass #log error -- required property is missing?
unordered = [propname for propname in properties.keys() if propname not in ordered]
ordered.extend(sorted(unordered))
proplines = ['%s: %s\n' % (propname, properties[propname]) for propname in ordered]
file = self.get_jad_filestream('w')
file.write(''.join(proplines))
file.close()
def add_jad_properties(self, propdict):
'''Add properties to the jad file'''
props = self.get_jad_properties()
props.update(propdict)
self.write_jad(props)
def get_xform_html_summary(self):
'''This is used by the view. It is pretty cool, but perhaps misplaced.'''
to_return = []
for form in self.xforms.all():
try:
to_return.append(form.get_link())
except Exception, e:
# we don't care about this
pass
if to_return:
return "<br>".join(to_return)
else:
return "No X-Forms found"
def get_zip_downloadurl(self):
"""do a reverse to get the urls for the given project/buildnumber for the direct zipfile download"""
return reverse('get_buildfile',
args=(self.project.id,
self.build_number,
self.get_zip_filename()))
def get_jar_downloadurl(self):
"""do a reverse to get the urls for the given project/buildnumber for the direct download"""
return reverse('get_buildfile',
args=(self.project.id,
self.build_number,
os.path.basename(self.jar_file)))
def get_jad_downloadurl(self):
"""do a reverse to get the urls for the given project/buildnumber for the direct download"""
return reverse('get_buildfile',
args=(self.project.id,
self.build_number,
os.path.basename(self.jad_file)))
def get_buildURL(self):
"""Hard coded build url for our build server"""
return 'http://build.dimagi.com:250/viewLog.html?buildTypeId=bt%s&buildNumber=%s' % \
(self.project.project_id, self.build_number)
def set_jadfile(self, filename, filestream):
"""Simple utility function to save the uploaded file to the right location and set the property of the model"""
try:
new_file_name = os.path.join(self._get_destination(), filename)
fout = open(new_file_name, 'w')
fout.write( filestream.read() )
fout.close()
self.jad_file = new_file_name
except Exception, e:
logging.error("Error, saving jadfile failed", extra={"exception":e, "jad_filename":filename})
def set_jarfile(self, filename, filestream):
"""Simple utility function to save the uploaded file to the right location and set the property of the model"""
try:
new_file_name = os.path.join(self._get_destination(), filename)
fout = open(new_file_name, 'wb')
fout.write( filestream.read() )
fout.close()
self.jar_file = new_file_name
except Exception, e:
logging.error("Error, saving jarfile failed", extra={"exception":e, "jar_filename":filename})
def _get_destination(self):
"""The directory this build saves its data to. Defined in
the config and then /xforms/<project_id>/<build_id>/ is
appended. If it doesn't exist, the directory is
created by this method."""
destinationpath = os.path.join(BUILDFILES_PATH,
str(self.project.id),
str(self.build_number))
if not os.path.exists(destinationpath):
os.makedirs(destinationpath)
return destinationpath
def validate_jar(self, include_xforms=False):
'''Validates this build's jar file. By default, does NOT validate
the jar's xforms.'''
validate_jar(self.jar_file, include_xforms)
def validate_xforms(self):
'''Validates this build's xforms.'''
errors = []
for form in self.xforms.all():
try:
xformvalidator.validate(form.file_location)
except Exception, e:
errors.append(e)
if errors:
raise BuildError("Problem validating xforms for %s!" % self, errors)
def check_and_release_xforms(self):
'''Checks this build's xforms against the xformmanager and releases
them, if they pass compatibility tests'''
errors = []
to_skip = []
to_register = []
for form in self.xforms.all():
try:
formdef = xformvalidator.validate(form.file_location)
modelform = FormDefModel.get_model(formdef.target_namespace,
self.project.domain,
formdef.version)
if modelform:
# if the model form exists we must ensure it is compatible
# with the version we are trying to release
existing_formdef = modelform.to_formdef()
differences = existing_formdef.get_differences(formdef)
if differences.is_empty():
# this is all good
to_skip.append(form)
else:
raise BuildError("""Schema %s is not compatible with %s.
Because of the following differences:
%s
You must update your version number!"""
% (existing_formdef, formdef, differences))
else:
# this must be registered
to_register.append(form)
except Exception, e:
errors.append(e)
if errors:
raise BuildError("Problem validating xforms for %s!" % self, errors)
# finally register
manager = XFormManager()
# TODO: we need transaction management
for form in to_register:
try:
formdefmodel = manager.add_schema(form.get_file_name(),
form.as_filestream(),
self.project.domain)
upload_info = self.upload_information
if upload_info:
formdefmodel.submit_ip = upload_info.ip
user = upload_info.user
else:
formdefmodel.submit_ip = UNKNOWN_IP
user = self.uploaded_by
formdefmodel.uploaded_by = user
formdefmodel.bytes_received = form.size
formdefmodel.form_display_name = form.get_file_name()
formdefmodel.save()
except Exception, e:
# log the error with the stack, otherwise this is hard to track down
info = sys.exc_info()
logging.error("Error registering form in build manager: %s\n%s" % \
(e, traceback.print_tb(info[2])))
errors.append(e)
if errors:
raise BuildError("Problem registering xforms for %s!" % self, errors)
def set_jad_released(self):
'''Set the appropriate 'release' properties in the jad'''
self.add_jad_properties({
'Build-Number': '*' + str(self.get_release_number()), #remove * once we get a real build number
'Released-on': time.strftime('%Y-%b-%d %H:%M', time.gmtime())
})
#FIXME!
def get_release_number(self):
'''return an incrementing build number per released build, unique across all builds for a given commcare project'''
import random
return random.randint(1000, 9999) #return a high random number until we get the incrementing plugged in
def release(self, user):
'''Release a build. This does a number of things:
1. Validates the Jar. The specifics of this are still in flux but at the very
least it should be extractable, and there should be at least one xform.
2. Ensures all the xforms have valid xmlns, version, and uiversion attributes
3. Checks if xforms with the same xmlns and version are registered already
If so: ensures the current forms are compatible with the registered forms
If not: registers the forms
4. Updates the build status to be released, sets the released and
released_by properties
This method will raise an exception if, for any reason above, the build cannot
be released.'''
if self.status == "release":
raise BuildError("Tried to release an already released build!")
else:
# TODO: we need transaction management. Any of these steps can raise exceptions
self.validate_jar()
self.validate_xforms()
self.check_and_release_xforms()
self.set_jad_released()
self.status = "release"
self.released = datetime.now()
self.released_by = user
self.save()
logging.error("%s just released build %s! We just thought you might want to be keeping tabs..." %
(user, self.get_display_string()))
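# Illustrative release flow (hedged sketch, not executed): assumes an existing
# ProjectBuild row and a Django User; the lookup values below are hypothetical.
#   build = ProjectBuild.objects.get(project__name="demo", build_number=42)
#   releaser = User.objects.get(username="admin")
#   build.release(releaser)  # validates the jar and xforms, registers schemas,
#                            # stamps the jad and marks the build as released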
def extract_and_link_xforms(sender, instance, created, **kwargs):
'''Extracts all xforms from this build's jar and creates
references on disk and model objects for them.'''
# only do this the first time we save, not on updates
if not created:
return
try:
xforms = extract_xforms(instance.jar_file, instance._get_destination())
for form in xforms:
form_model = BuildForm.objects.create(build=instance, file_location=form)
num_created = len(instance.xforms.all())
if num_created == 0:
logging.warn("Build %s didn't have any linked xforms! Why not?!" % instance)
except Exception, e:
logging.error("Problem extracting xforms for build: %s, the error is: %s" %\
(instance, e))
post_save.connect(extract_and_link_xforms, sender=ProjectBuild)
class BuildForm(models.Model):
"""Class representing the location of a single build's xform on
the file system."""
build = models.ForeignKey(ProjectBuild, related_name="xforms")
file_location = models.FilePathField(_('Xform Location'),
recursive=True,
path=BUILDFILES_PATH,
max_length=255)
def get_file_name(self):
'''Get a readable file name for this xform'''
return os.path.basename(self.file_location)
@property
def size(self):
return os.path.getsize(self.file_location)
def get_url(self):
'''Get the url where you can view this form'''
return reverse('get_build_xform', args=(self.id,))
def as_filestream(self):
'''Gets a raw handle to the form as a file stream'''
try:
fin = open(self.file_location,'r')
return fin
except Exception, e:
logging.error("Unable to open xform: %s" % self,
extra={"exception": e })
def get_text(self):
'''Gets the body of the xform, as text'''
try:
file = self.as_filestream()
text = file.read()
file.close()
return text
except Exception, e:
logging.error("Unable to open xform: %s" % self,
extra={"exception": e })
def to_html(self):
'''Gets the body of the xform, as pretty printed text'''
raw_body = self.get_text()
if pygments_found:
return highlight(raw_body, HtmlLexer(), HtmlFormatter())
return raw_body
def get_link(self):
'''A clickable html displayable version of this for use in templates'''
return '<a href=%s target=_blank>%s</a>' % (self.get_url(), self.get_file_name())
def __unicode__(self):
return "%s: %s" % (self.build, self.get_file_name())
BUILD_FILE_TYPE = (
('jad', '.jad file'),
('jar', '.jar file'),
)
class BuildUpload(models.Model):
"""Represents an instance of the upload of a build."""
build = models.ForeignKey(ProjectBuild, unique=True)
log = models.ForeignKey(RequestLog, unique=True)
class BuildDownload(models.Model):
"""Represents an instance of a download of a build file. Included are the
type of file, the build id, and the request log."""
type = models.CharField(max_length=3, choices=BUILD_FILE_TYPE)
build = models.ForeignKey(ProjectBuild, related_name="downloads")
log = models.ForeignKey(RequestLog, unique=True)
def __unicode__(self):
return "%s download for build %s. Request: %s" %\
(self.type, self.build, self.log)
| bsd-3-clause | 623,411,994,008,804,400 | 40.657627 | 162 | 0.539339 | false |
chiahaoliu/2016_summer_XPD | XPD_view/XPD_view_1.py | 1 | 6464 | """
This file will contain the code to create the XPD view GUI
"""
from xray_vision.qt_widgets import CrossSectionMainWindow
from PyQt4 import QtGui, QtCore
import os
import sys
import numpy as np
from Tif_File_Finder import TifFileFinder
from plot_analysis import reducedRepPlot
def data_gen(length):
x, y = [_ * 2 * np.pi / 200 for _ in np.ogrid[-200:200, -200:200]]
rep = int(np.sqrt(length))
data = []
for idx in range(length):
kx = idx // rep + 1
ky = idx % rep
data.append(np.sin(kx * x) * np.cos(ky * y) + 1.05)
return data
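# Quick shape check for data_gen (illustrative sketch added for clarity):
# each generated frame is a 400x400 sin/cos test pattern.
def _data_gen_example():
    frames = data_gen(4)
    return len(frames), frames[0].shape  # -> (4, (400, 400))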
class Display(QtGui.QMainWindow):
def __init__(self):
QtGui.QMainWindow.__init__(self)
self.setWindowTitle('XPD View')
self.analysis_type = None
self.file_path = None
self.key_list = ['Home']
self.data_list = data_gen(1)
self.Tif = TifFileFinder()
self._main_window = CrossSectionMainWindow(data_list=self.data_list,
key_list=self.key_list,
cmap='RdBu')
self._main_window.setFocus()
self.setCentralWidget(self._main_window)
# set path option
setpath = QtGui.QAction("&Set Directory", self)
setpath.setShortcut("Ctrl+O")
setpath.setStatusTip("Set image directory")
setpath.triggered.connect(self.set_path)
# sets up refresh button
refresh = QtGui.QAction("&Refresh Files", self)
refresh.triggered.connect(self.refresh)
# set analysis type options
select_mean = QtGui.QAction("&mean", self)
select_mean.triggered.connect(self.set_type_mean)
select_std_dev = QtGui.QAction("&standard deviation", self)
select_std_dev.triggered.connect(self.set_type_stddev)
select_min = QtGui.QAction("&min", self)
select_min.triggered.connect(self.set_type_min)
select_max = QtGui.QAction("&max", self)
select_max.triggered.connect(self.set_type_max)
select_total_intensity = QtGui.QAction("&total intensity", self)
select_total_intensity.triggered.connect(self.set_type_total)
plt_action = QtGui.QAction("&Plot", self)
plt_action.triggered.connect(self.plot_analysis)
self.statusBar()
# This sets up all of the menu widgets that are used in the GUI
mainmenu = self.menuBar()
filemenu = mainmenu.addMenu("&File")
        graph_menu = mainmenu.addMenu('&Reduced Representation')
analysis_submenu = QtGui.QMenu("analysis settings", graph_menu)
filemenu.addAction(setpath)
filemenu.addAction(refresh)
analysis_submenu.addAction(select_max)
analysis_submenu.addAction(select_min)
analysis_submenu.addAction(select_mean)
analysis_submenu.addAction(select_std_dev)
analysis_submenu.addAction(select_total_intensity)
graph_menu.addMenu(analysis_submenu)
graph_menu.addAction(plt_action)
self._main_window._messenger._ctrl_widget._spin_img.valueChanged.connect(self.thingy)
self.show()
def set_path(self):
popup = QtGui.QFileDialog()
self.file_path = str(popup.getExistingDirectory())
self.Tif._directory_name = self.file_path
self.Tif.get_file_list()
self.update_data(self.Tif.pic_list, self.Tif.file_list)
def set_type_mean(self):
self.analysis_type = "mean"
print("mean")
def set_type_min(self):
self.analysis_type = "min"
print("min")
def set_type_stddev(self):
self.analysis_type = "sigma"
print("sigma")
def set_type_max(self):
self.analysis_type = "max"
print("max")
def set_type_total(self):
self.analysis_type = "total intensity"
print("total intensity")
def plot_analysis(self):
try:
rpp = reducedRepPlot(self.data_list, 0, 400, 0, 400, self.analysis_type)
rpp.plot()
except NotADirectoryError:
print("exception excepted")
err_msg_file = QtGui.QMessageBox()
err_msg_file.setIcon(QtGui.QMessageBox.Critical)
err_msg_file.setWindowTitle("Error")
err_msg_file.setText("You did not specify a file path.")
err_msg_file.setInformativeText("click open to set the file path")
err_msg_file.setStandardButtons(QtGui.QMessageBox.Open)
err_msg_file.buttonClicked.connect(self.set_path)
err_msg_file.exec_()
except AssertionError:
err_msg_analysis = QtGui.QMessageBox()
err_msg_analysis.setIcon(QtGui.QMessageBox.Critical)
err_msg_analysis.setWindowTitle("Error")
err_msg_analysis.setText("You did not specify an analysis type")
err_msg_analysis.setInformativeText("please go to the menu and select an analysis type before proceeding")
err_msg_analysis.setStandardButtons(QtGui.QMessageBox.Close)
# err_msg_analysis.buttonClicked.connect(self.set_path)
err_msg_analysis.exec_()
def refresh(self):
new_file_names, new_data = self.Tif.get_new_files()
if len(new_file_names) == 0:
print("No new .tif files found")
else:
self.update_data(new_data, new_file_names)
def update_data(self, data_list, file_list):
        # Update the image displayer with a new list of data arrays and a
        # matching list of keys (normally the file names).
old_length = len(self.key_list)
for file in file_list:
self.key_list.append(file)
for data in data_list:
self.data_list.append(data)
for i in range(old_length, len(self.key_list)):
self._main_window._messenger._view._data_dict[self.key_list[i]] = self.data_list[i]
self._main_window._messenger._ctrl_widget._slider_img.setMaximum(len(self.key_list) - 1)
self._main_window._messenger._ctrl_widget._spin_img.setMaximum(len(self.key_list) - 1)
    def thingy(self, val):
        # Debug slot wired to the image spin box; just echoes the new value.
        print(val)
def main():
app = QtGui.QApplication(sys.argv)
viewer = Display()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| bsd-2-clause | -7,495,527,753,662,878,000 | 35.149425 | 118 | 0.603651 | false |
hacklabr/geodjango-boundaries | boundaries/models.py | 1 | 1543 | # -*- coding: utf-8 -*-
from django.utils.encoding import python_2_unicode_compatible
from django.contrib.gis.db import models
@python_2_unicode_compatible
class NamedModel(models.Model):
name = models.CharField(max_length=255)
geometry = models.MultiPolygonField() # Multipolygon in NAD83
objects = models.GeoManager()
class Meta:
abstract = True
def __str__(self):
return self.name
class Country(NamedModel):
iso_code = models.CharField(max_length=4, blank=True)
class State(NamedModel):
label = models.CharField(max_length=255)
acronym = models.CharField(max_length=64, blank=True, null=True)
region = models.CharField(max_length=255, blank=True, null=True)
country = models.ForeignKey(Country)
class City(NamedModel):
label = models.CharField(max_length=255)
region = models.CharField(max_length=255, blank=True, null=True)
state = models.ForeignKey(State, blank=True, null=True)
country = models.ForeignKey(Country)
cities_ibge_mapping = {
'name': 'NOME_MUNIC',
'region': 'REGIão',
'state': {'acronym': 'SIGLA'},
'geometry': 'POLYGON',
}
# Mapping dictionaries for the models above.
state_ibge_mapping = {
'acronym': 'SIGLA',
'country': {'name': 'pais'},
# 'geometry': 'MULTIPOLYGON', # Will convert POLYGON features into MULTIPOLYGONS,
'geometry': 'POLYGON',
}
country_ibge_mapping = {
'name': 'name',
'geometry': 'POLYGON',
}
argentinian_cities_mapping = {
'name': 'FNA',
'geometry': 'POLYGON'
}
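# Hedged usage sketch (added): these dicts are shaped for Django's LayerMapping
# loader; the shapefile path and encoding below are placeholders.
#   from django.contrib.gis.utils import LayerMapping
#   lm = LayerMapping(State, '/path/to/ibge_states.shp', state_ibge_mapping,
#                     transform=False, encoding='latin-1')
#   lm.save(strict=True, verbose=True)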
| bsd-3-clause | 5,712,905,196,875,916,000 | 23.870968 | 86 | 0.669909 | false |
dangra/scrapy | tests/test_crawler.py | 2 | 16388 | import logging
import os
import platform
import subprocess
import sys
import warnings
from unittest import skipIf
from pytest import raises, mark
from testfixtures import LogCapture
from twisted.internet import defer
from twisted.trial import unittest
import scrapy
from scrapy.crawler import Crawler, CrawlerRunner, CrawlerProcess
from scrapy.settings import Settings, default_settings
from scrapy.spiderloader import SpiderLoader
from scrapy.utils.log import configure_logging, get_scrapy_root_handler
from scrapy.utils.spider import DefaultSpider
from scrapy.utils.misc import load_object
from scrapy.extensions.throttle import AutoThrottle
from scrapy.extensions import telnet
from scrapy.utils.test import get_testenv
from tests.mockserver import MockServer
class BaseCrawlerTest(unittest.TestCase):
def assertOptionIsDefault(self, settings, key):
self.assertIsInstance(settings, Settings)
self.assertEqual(settings[key], getattr(default_settings, key))
class CrawlerTestCase(BaseCrawlerTest):
def setUp(self):
self.crawler = Crawler(DefaultSpider, Settings())
def test_populate_spidercls_settings(self):
spider_settings = {'TEST1': 'spider', 'TEST2': 'spider'}
project_settings = {'TEST1': 'project', 'TEST3': 'project'}
class CustomSettingsSpider(DefaultSpider):
custom_settings = spider_settings
settings = Settings()
settings.setdict(project_settings, priority='project')
crawler = Crawler(CustomSettingsSpider, settings)
self.assertEqual(crawler.settings.get('TEST1'), 'spider')
self.assertEqual(crawler.settings.get('TEST2'), 'spider')
self.assertEqual(crawler.settings.get('TEST3'), 'project')
self.assertFalse(settings.frozen)
self.assertTrue(crawler.settings.frozen)
def test_crawler_accepts_dict(self):
crawler = Crawler(DefaultSpider, {'foo': 'bar'})
self.assertEqual(crawler.settings['foo'], 'bar')
self.assertOptionIsDefault(crawler.settings, 'RETRY_ENABLED')
def test_crawler_accepts_None(self):
crawler = Crawler(DefaultSpider)
self.assertOptionIsDefault(crawler.settings, 'RETRY_ENABLED')
def test_crawler_rejects_spider_objects(self):
with raises(ValueError):
Crawler(DefaultSpider())
class SpiderSettingsTestCase(unittest.TestCase):
def test_spider_custom_settings(self):
class MySpider(scrapy.Spider):
name = 'spider'
custom_settings = {
'AUTOTHROTTLE_ENABLED': True
}
crawler = Crawler(MySpider, {})
enabled_exts = [e.__class__ for e in crawler.extensions.middlewares]
self.assertIn(AutoThrottle, enabled_exts)
class CrawlerLoggingTestCase(unittest.TestCase):
def test_no_root_handler_installed(self):
handler = get_scrapy_root_handler()
if handler is not None:
logging.root.removeHandler(handler)
class MySpider(scrapy.Spider):
name = 'spider'
Crawler(MySpider, {})
assert get_scrapy_root_handler() is None
def test_spider_custom_settings_log_level(self):
log_file = self.mktemp()
class MySpider(scrapy.Spider):
name = 'spider'
custom_settings = {
'LOG_LEVEL': 'INFO',
'LOG_FILE': log_file,
# disable telnet if not available to avoid an extra warning
'TELNETCONSOLE_ENABLED': telnet.TWISTED_CONCH_AVAILABLE,
}
configure_logging()
self.assertEqual(get_scrapy_root_handler().level, logging.DEBUG)
crawler = Crawler(MySpider, {})
self.assertEqual(get_scrapy_root_handler().level, logging.INFO)
info_count = crawler.stats.get_value('log_count/INFO')
logging.debug('debug message')
logging.info('info message')
logging.warning('warning message')
logging.error('error message')
with open(log_file, 'rb') as fo:
logged = fo.read().decode('utf8')
self.assertNotIn('debug message', logged)
self.assertIn('info message', logged)
self.assertIn('warning message', logged)
self.assertIn('error message', logged)
self.assertEqual(crawler.stats.get_value('log_count/ERROR'), 1)
self.assertEqual(crawler.stats.get_value('log_count/WARNING'), 1)
self.assertEqual(
crawler.stats.get_value('log_count/INFO') - info_count, 1)
self.assertEqual(crawler.stats.get_value('log_count/DEBUG', 0), 0)
class SpiderLoaderWithWrongInterface:
def unneeded_method(self):
pass
class CustomSpiderLoader(SpiderLoader):
pass
class CrawlerRunnerTestCase(BaseCrawlerTest):
def test_spider_manager_verify_interface(self):
settings = Settings({
'SPIDER_LOADER_CLASS': SpiderLoaderWithWrongInterface,
})
with warnings.catch_warnings(record=True) as w:
self.assertRaises(AttributeError, CrawlerRunner, settings)
self.assertEqual(len(w), 1)
self.assertIn("SPIDER_LOADER_CLASS", str(w[0].message))
self.assertIn("scrapy.interfaces.ISpiderLoader", str(w[0].message))
def test_crawler_runner_accepts_dict(self):
runner = CrawlerRunner({'foo': 'bar'})
self.assertEqual(runner.settings['foo'], 'bar')
self.assertOptionIsDefault(runner.settings, 'RETRY_ENABLED')
def test_crawler_runner_accepts_None(self):
runner = CrawlerRunner()
self.assertOptionIsDefault(runner.settings, 'RETRY_ENABLED')
def test_deprecated_attribute_spiders(self):
with warnings.catch_warnings(record=True) as w:
runner = CrawlerRunner(Settings())
spiders = runner.spiders
self.assertEqual(len(w), 1)
self.assertIn("CrawlerRunner.spiders", str(w[0].message))
self.assertIn("CrawlerRunner.spider_loader", str(w[0].message))
sl_cls = load_object(runner.settings['SPIDER_LOADER_CLASS'])
self.assertIsInstance(spiders, sl_cls)
class CrawlerProcessTest(BaseCrawlerTest):
def test_crawler_process_accepts_dict(self):
runner = CrawlerProcess({'foo': 'bar'})
self.assertEqual(runner.settings['foo'], 'bar')
self.assertOptionIsDefault(runner.settings, 'RETRY_ENABLED')
def test_crawler_process_accepts_None(self):
runner = CrawlerProcess()
self.assertOptionIsDefault(runner.settings, 'RETRY_ENABLED')
class ExceptionSpider(scrapy.Spider):
name = 'exception'
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
raise ValueError('Exception in from_crawler method')
class NoRequestsSpider(scrapy.Spider):
name = 'no_request'
def start_requests(self):
return []
@mark.usefixtures('reactor_pytest')
class CrawlerRunnerHasSpider(unittest.TestCase):
@defer.inlineCallbacks
def test_crawler_runner_bootstrap_successful(self):
runner = CrawlerRunner()
yield runner.crawl(NoRequestsSpider)
self.assertEqual(runner.bootstrap_failed, False)
@defer.inlineCallbacks
def test_crawler_runner_bootstrap_successful_for_several(self):
runner = CrawlerRunner()
yield runner.crawl(NoRequestsSpider)
yield runner.crawl(NoRequestsSpider)
self.assertEqual(runner.bootstrap_failed, False)
@defer.inlineCallbacks
def test_crawler_runner_bootstrap_failed(self):
runner = CrawlerRunner()
try:
yield runner.crawl(ExceptionSpider)
except ValueError:
pass
else:
self.fail('Exception should be raised from spider')
self.assertEqual(runner.bootstrap_failed, True)
@defer.inlineCallbacks
def test_crawler_runner_bootstrap_failed_for_several(self):
runner = CrawlerRunner()
try:
yield runner.crawl(ExceptionSpider)
except ValueError:
pass
else:
self.fail('Exception should be raised from spider')
yield runner.crawl(NoRequestsSpider)
self.assertEqual(runner.bootstrap_failed, True)
def test_crawler_runner_asyncio_enabled_true(self):
if self.reactor_pytest == 'asyncio':
CrawlerRunner(settings={
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
})
else:
msg = r"The installed reactor \(.*?\) does not match the requested one \(.*?\)"
with self.assertRaisesRegex(Exception, msg):
CrawlerRunner(settings={
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
})
@defer.inlineCallbacks
# https://twistedmatrix.com/trac/ticket/9766
@skipIf(platform.system() == 'Windows' and sys.version_info >= (3, 8),
"the asyncio reactor is broken on Windows when running Python ≥ 3.8")
def test_crawler_process_asyncio_enabled_true(self):
with LogCapture(level=logging.DEBUG) as log:
if self.reactor_pytest == 'asyncio':
runner = CrawlerProcess(settings={
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
})
yield runner.crawl(NoRequestsSpider)
self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", str(log))
else:
msg = r"The installed reactor \(.*?\) does not match the requested one \(.*?\)"
with self.assertRaisesRegex(Exception, msg):
runner = CrawlerProcess(settings={
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
})
@defer.inlineCallbacks
def test_crawler_process_asyncio_enabled_false(self):
runner = CrawlerProcess(settings={"TWISTED_REACTOR": None})
with LogCapture(level=logging.DEBUG) as log:
yield runner.crawl(NoRequestsSpider)
self.assertNotIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", str(log))
class ScriptRunnerMixin:
def run_script(self, script_name, *script_args):
script_path = os.path.join(self.script_dir, script_name)
args = [sys.executable, script_path] + list(script_args)
p = subprocess.Popen(args, env=get_testenv(),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
return stderr.decode('utf-8')
class CrawlerProcessSubprocess(ScriptRunnerMixin, unittest.TestCase):
script_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'CrawlerProcess')
def test_simple(self):
log = self.run_script('simple.py')
self.assertIn('Spider closed (finished)', log)
self.assertNotIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
# https://twistedmatrix.com/trac/ticket/9766
@skipIf(platform.system() == 'Windows' and sys.version_info >= (3, 8),
"the asyncio reactor is broken on Windows when running Python ≥ 3.8")
def test_asyncio_enabled_no_reactor(self):
log = self.run_script('asyncio_enabled_no_reactor.py')
self.assertIn('Spider closed (finished)', log)
self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
# https://twistedmatrix.com/trac/ticket/9766
@skipIf(platform.system() == 'Windows' and sys.version_info >= (3, 8),
"the asyncio reactor is broken on Windows when running Python ≥ 3.8")
def test_asyncio_enabled_reactor(self):
log = self.run_script('asyncio_enabled_reactor.py')
self.assertIn('Spider closed (finished)', log)
self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
def test_ipv6_default_name_resolver(self):
log = self.run_script('default_name_resolver.py')
self.assertIn('Spider closed (finished)', log)
self.assertIn("'downloader/exception_type_count/twisted.internet.error.DNSLookupError': 1,", log)
self.assertIn(
"twisted.internet.error.DNSLookupError: DNS lookup failed: no results for hostname lookup: ::1.",
log)
def test_caching_hostname_resolver_ipv6(self):
log = self.run_script("caching_hostname_resolver_ipv6.py")
self.assertIn("Spider closed (finished)", log)
self.assertNotIn("twisted.internet.error.DNSLookupError", log)
def test_caching_hostname_resolver_finite_execution(self):
with MockServer() as mock_server:
http_address = mock_server.http_address.replace("0.0.0.0", "127.0.0.1")
log = self.run_script("caching_hostname_resolver.py", http_address)
self.assertIn("Spider closed (finished)", log)
self.assertNotIn("ERROR: Error downloading", log)
self.assertNotIn("TimeoutError", log)
self.assertNotIn("twisted.internet.error.DNSLookupError", log)
def test_reactor_select(self):
log = self.run_script("twisted_reactor_select.py")
self.assertIn("Spider closed (finished)", log)
self.assertIn("Using reactor: twisted.internet.selectreactor.SelectReactor", log)
@mark.skipif(platform.system() == 'Windows', reason="PollReactor is not supported on Windows")
def test_reactor_poll(self):
log = self.run_script("twisted_reactor_poll.py")
self.assertIn("Spider closed (finished)", log)
self.assertIn("Using reactor: twisted.internet.pollreactor.PollReactor", log)
# https://twistedmatrix.com/trac/ticket/9766
@skipIf(platform.system() == 'Windows' and sys.version_info >= (3, 8),
"the asyncio reactor is broken on Windows when running Python ≥ 3.8")
def test_reactor_asyncio(self):
log = self.run_script("twisted_reactor_asyncio.py")
self.assertIn("Spider closed (finished)", log)
self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
@mark.skipif(sys.implementation.name == 'pypy', reason='uvloop does not support pypy properly')
@mark.skipif(platform.system() == 'Windows', reason='uvloop does not support Windows')
def test_custom_loop_asyncio(self):
log = self.run_script("asyncio_custom_loop.py")
self.assertIn("Spider closed (finished)", log)
self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
self.assertIn("Using asyncio event loop: uvloop.Loop", log)
@mark.skipif(sys.implementation.name == "pypy", reason="uvloop does not support pypy properly")
@mark.skipif(platform.system() == "Windows", reason="uvloop does not support Windows")
def test_custom_loop_asyncio_deferred_signal(self):
log = self.run_script("asyncio_deferred_signal.py", "uvloop.Loop")
self.assertIn("Spider closed (finished)", log)
self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
self.assertIn("Using asyncio event loop: uvloop.Loop", log)
self.assertIn("async pipeline opened!", log)
# https://twistedmatrix.com/trac/ticket/9766
@skipIf(platform.system() == 'Windows' and sys.version_info >= (3, 8),
"the asyncio reactor is broken on Windows when running Python ≥ 3.8")
def test_default_loop_asyncio_deferred_signal(self):
log = self.run_script("asyncio_deferred_signal.py")
self.assertIn("Spider closed (finished)", log)
self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
self.assertNotIn("Using asyncio event loop: uvloop.Loop", log)
self.assertIn("async pipeline opened!", log)
class CrawlerRunnerSubprocess(ScriptRunnerMixin, unittest.TestCase):
script_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'CrawlerRunner')
def test_response_ip_address(self):
log = self.run_script("ip_address.py")
self.assertIn("INFO: Spider closed (finished)", log)
self.assertIn("INFO: Host: not.a.real.domain", log)
self.assertIn("INFO: Type: <class 'ipaddress.IPv4Address'>", log)
self.assertIn("INFO: IP address: 127.0.0.1", log)
| bsd-3-clause | 6,069,620,214,822,966,000 | 40.463291 | 112 | 0.666992 | false |
metacloud/python-cinderclient | cinderclient/v1/volume_types.py | 1 | 3496 | # Copyright (c) 2011 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Volume Type interface.
"""
from cinderclient import base
class VolumeType(base.Resource):
"""
A Volume Type is the type of volume to be created
"""
def __repr__(self):
return "<VolumeType: %s>" % self.name
def get_keys(self):
"""
Get extra specs from a volume type.
:param vol_type: The :class:`VolumeType` to get extra specs from
"""
_resp, body = self.manager.api.client.get(
"/types/%s/extra_specs" %
base.getid(self))
return body["extra_specs"]
def set_keys(self, metadata):
"""
Set extra specs on a volume type.
:param type : The :class:`VolumeType` to set extra spec on
:param metadata: A dict of key/value pairs to be set
"""
body = {'extra_specs': metadata}
return self.manager._create(
"/types/%s/extra_specs" % base.getid(self),
body,
"extra_specs",
return_raw=True)
def unset_keys(self, keys):
"""
Unset extra specs on a volume type.
:param type_id: The :class:`VolumeType` to unset extra spec on
:param keys: A list of keys to be unset
"""
# NOTE(jdg): This wasn't actually doing all of the keys before
        # the return in the loop resulted in only ONE key being unset.
# since on success the return was NONE, we'll only interrupt the loop
# and return if there's an error
resp = None
for k in keys:
resp = self.manager._delete(
"/types/%s/extra_specs/%s" % (
base.getid(self), k))
if resp is not None:
return resp
class VolumeTypeManager(base.ManagerWithFind):
"""
Manage :class:`VolumeType` resources.
"""
resource_class = VolumeType
def list(self, search_opts=None):
"""
Get a list of all volume types.
:rtype: list of :class:`VolumeType`.
"""
return self._list("/types", "volume_types")
def get(self, volume_type):
"""
Get a specific volume type.
:param volume_type: The ID of the :class:`VolumeType` to get.
:rtype: :class:`VolumeType`
"""
return self._get("/types/%s" % base.getid(volume_type), "volume_type")
def delete(self, volume_type):
"""
Delete a specific volume_type.
:param volume_type: The name or ID of the :class:`VolumeType` to get.
"""
self._delete("/types/%s" % base.getid(volume_type))
def create(self, name):
"""
Create a volume type.
:param name: Descriptive name of the volume type
:rtype: :class:`VolumeType`
"""
body = {
"volume_type": {
"name": name,
}
}
return self._create("/types", body, "volume_type")
| apache-2.0 | -7,710,020,607,655,474,000 | 27.655738 | 78 | 0.578661 | false |
mefly2012/platform | test/guozibeijing/deal_repeat.py | 1 | 1867 | # coding=utf-8
import json
import codecs
import os
import sys
import re
if __name__ == '__main__':
    fr1 = codecs.open(sys.argv[1], 'r', encoding='utf-8')  # records compiled by me
    fr2 = codecs.open(sys.argv[2], 'r', encoding='utf-8')  # records compiled by Jun
dics1 = []
dics2 = []
for i in fr1:
js = json.loads(i)
js["bbd_dotime"] = js.get("bbd_dotime").replace(u'年', '-').replace(u'月', '-').replace(u'日', '')
dics1.append(js)
for i in fr2:
js = json.loads(i)
match = re.compile(u'^(\d+).(\d+).(\d+)$').match(js["bbd_dotime"])
js["bbd_dotime"] = "%4d-%2d-%2d" % (int(match.group(1)), int(match.group(2)), int(match.group(3)))
dics2.append(js)
dics1.sort(key=lambda x: x['company_name'])
dics2.sort(key=lambda x: x['company_name'])
# sorted(dics1, key=lambda x: x['company_name'])
# sorted(dics2, key=lambda x: x['company_name'])
first = True
company = ''
myout = []
current = []
dics1.append({})
for dc in dics1:
if company != dc.get('company_name', ''):
            # pick the record with the latest time (max bbd_uptime)
if first:
first = False
company = dc.get('company_name', '')
current.append(dc)
continue
company = dc.get('company_name', '')
max_dc = max(current, key=lambda x: x['bbd_uptime'])
myout.append(max_dc)
current = []
current.append(dc)
pass
else:
current.append(dc)
print len(myout)
# for i in myout:
# find = False
# for j in dics2:
# if i == j:
# find = True
# if find:
# print json.dumps(i, ensure_ascii=False)
for i in myout:
if i not in dics2:
print json.dumps(i, ensure_ascii=False)
| apache-2.0 | 2,724,095,015,985,590,300 | 25.536232 | 106 | 0.495358 | false |
klose911/klose911.github.io | src/python/src/lisp/lispy.py | 1 | 12433 | ################ Scheme Interpreter in Python
## (c) Peter Norvig, 2010; See http://norvig.com/lispy2.html
################ Symbol, Procedure, classes
from __future__ import division
from __future__ import print_function
import io, re, sys
from io import StringIO
class Symbol(str): pass
def Sym(s, symbol_table={}):
"Find or create unique Symbol entry for str s in symbol table."
if s not in symbol_table: symbol_table[s] = Symbol(s)
return symbol_table[s]
_quote, _if, _set, _define, _lambda, _begin, _definemacro, = map(Sym,
"quote if set! define lambda begin define-macro".split())
_quasiquote, _unquote, _unquotesplicing = map(Sym,
"quasiquote unquote unquote-splicing".split())
class Procedure(object):
"A user-defined Scheme procedure."
def __init__(self, parms, exp, env):
self.parms, self.exp, self.env = parms, exp, env
def __call__(self, *args):
return eval(self.exp, Env(self.parms, args, self.env))
################ parse, read, and user interaction
def parse(inport):
"Parse a program: read and expand/error-check it."
# Backwards compatibility: given a str, convert it to an InPort
if isinstance(inport, str): inport = InPort(StringIO(inport))
return expand(read(inport), toplevel=True)
eof_object = Symbol('#<eof-object>') # Note: uninterned; can't be read
class InPort(object):
"An input port. Retains a line of chars."
tokenizer = r"""\s*(,@|[('`,)]|"(?:[\\].|[^\\"])*"|;.*|[^\s('"`,;)]*)(.*)"""
def __init__(self, file):
self.file = file; self.line = ''
def next_token(self):
"Return the next token, reading new text into line buffer if needed."
while True:
if self.line == '': self.line = self.file.readline()
if self.line == '': return eof_object
token, self.line = re.match(InPort.tokenizer, self.line).groups()
if token != '' and not token.startswith(';'):
return token
def readchar(inport):
"Read the next character from an input port."
if inport.line != '':
ch, inport.line = inport.line[0], inport.line[1:]
return ch
else:
return inport.file.read(1) or eof_object
def read(inport):
"Read a Scheme expression from an input port."
def read_ahead(token):
if '(' == token:
L = []
while True:
token = inport.next_token()
if token == ')': return L
else: L.append(read_ahead(token))
elif ')' == token: raise SyntaxError('unexpected )')
elif token in quotes: return [quotes[token], read(inport)]
elif token is eof_object: raise SyntaxError('unexpected EOF in list')
else: return atom(token)
# body of read:
token1 = inport.next_token()
return eof_object if token1 is eof_object else read_ahead(token1)
quotes = {"'":_quote, "`":_quasiquote, ",":_unquote, ",@":_unquotesplicing}
def atom(token):
'Numbers become numbers; #t and #f are booleans; "..." string; otherwise Symbol.'
if token == '#t': return True
elif token == '#f': return False
elif token[0] == '"': return token[1:-1].decode('string_escape')
try: return int(token)
except ValueError:
try: return float(token)
except ValueError:
try: return complex(token.replace('i', 'j', 1))
except ValueError:
return Sym(token)
def to_string(x):
"Convert a Python object back into a Lisp-readable string."
if x is True: return "#t"
elif x is False: return "#f"
elif isa(x, Symbol): return x
elif isa(x, str): return '"%s"' % x.encode('string_escape').replace('"',r'\"')
elif isa(x, list): return '('+' '.join(map(to_string, x))+')'
elif isa(x, complex): return str(x).replace('j', 'i')
else: return str(x)
def load(filename):
"Eval every expression from a file."
repl(None, InPort(open(filename)), None)
def repl(prompt='lispy> ', inport=InPort(sys.stdin), out=sys.stdout):
"A prompt-read-eval-print loop."
sys.stderr.write("Lispy version 2.0\n")
while True:
try:
if prompt: sys.stderr.write(prompt)
x = parse(inport)
if x is eof_object: return
val = eval(x)
if val is not None and out: print(to_string(val), file=out)
except Exception as e:
print('%s: %s' % (type(e).__name__, e))
################ Environment class
class Env(dict):
"An environment: a dict of {'var':val} pairs, with an outer Env."
def __init__(self, parms=(), args=(), outer=None):
# Bind parm list to corresponding args, or single parm to list of args
self.outer = outer
if isa(parms, Symbol):
self.update({parms:list(args)})
else:
if len(args) != len(parms):
raise TypeError('expected %s, given %s, '
% (to_string(parms), to_string(args)))
self.update(zip(parms,args))
def find(self, var):
"Find the innermost Env where var appears."
if var in self: return self
elif self.outer is None: raise LookupError(var)
else: return self.outer.find(var)
def is_pair(x): return x != [] and isa(x, list)
def cons(x, y): return [x]+y
def callcc(proc):
"Call proc with current continuation; escape only"
ball = RuntimeWarning("Sorry, can't continue this continuation any longer.")
def throw(retval): ball.retval = retval; raise ball
try:
return proc(throw)
except RuntimeWarning as w:
if w is ball: return ball.retval
else: raise w
def add_globals(self):
"Add some Scheme standard procedures."
import math, cmath, operator as op
self.update(vars(math))
self.update(vars(cmath))
self.update({
'+':op.add, '-':op.sub, '*':op.mul, '/':op.truediv, 'not':op.not_,
'>':op.gt, '<':op.lt, '>=':op.ge, '<=':op.le, '=':op.eq,
'equal?':op.eq, 'eq?':op.is_, 'length':len, 'cons':cons,
'car':lambda x:x[0], 'cdr':lambda x:x[1:], 'append':op.add,
'list':lambda *x:list(x), 'list?': lambda x:isa(x,list),
'null?':lambda x:x==[], 'symbol?':lambda x: isa(x, Symbol),
'boolean?':lambda x: isa(x, bool), 'pair?':is_pair,
        'port?': lambda x: isa(x, io.IOBase), 'apply':lambda proc,l: proc(*l),
'eval':lambda x: eval(expand(x)), 'load':lambda fn: load(fn), 'call/cc':callcc,
'open-input-file':open,'close-input-port':lambda p: p.file.close(),
'open-output-file':lambda f:open(f,'w'), 'close-output-port':lambda p: p.close(),
'eof-object?':lambda x:x is eof_object, 'read-char':readchar,
'read':read, 'write':lambda x,port=sys.stdout:port.write(to_string(x)),
'display':lambda x,port=sys.stdout:port.write(x if isa(x,str) else to_string(x))})
return self
isa = isinstance
global_env = add_globals(Env())
################ eval (tail recursive)
def eval(x, env=global_env):
"Evaluate an expression in an environment."
while True:
if isa(x, Symbol): # variable reference
return env.find(x)[x]
elif not isa(x, list): # constant literal
return x
elif x[0] is _quote: # (quote exp)
(_, exp) = x
return exp
elif x[0] is _if: # (if test conseq alt)
(_, test, conseq, alt) = x
x = (conseq if eval(test, env) else alt)
elif x[0] is _set: # (set! var exp)
(_, var, exp) = x
env.find(var)[var] = eval(exp, env)
return None
elif x[0] is _define: # (define var exp)
(_, var, exp) = x
env[var] = eval(exp, env)
return None
elif x[0] is _lambda: # (lambda (var*) exp)
(_, vars, exp) = x
return Procedure(vars, exp, env)
elif x[0] is _begin: # (begin exp+)
for exp in x[1:-1]:
eval(exp, env)
x = x[-1]
else: # (proc exp*)
exps = [eval(exp, env) for exp in x]
proc = exps.pop(0)
if isa(proc, Procedure):
x = proc.exp
env = Env(proc.parms, exps, proc.env)
else:
return proc(*exps)
################ expand
def expand(x, toplevel=False):
"Walk tree of x, making optimizations/fixes, and signaling SyntaxError."
require(x, x!=[]) # () => Error
if not isa(x, list): # constant => unchanged
return x
elif x[0] is _quote: # (quote exp)
require(x, len(x)==2)
return x
elif x[0] is _if:
if len(x)==3: x = x + [None] # (if t c) => (if t c None)
require(x, len(x)==4)
        return [expand(xi) for xi in x]
elif x[0] is _set:
require(x, len(x)==3);
var = x[1] # (set! non-var exp) => Error
require(x, isa(var, Symbol), "can set! only a symbol")
return [_set, var, expand(x[2])]
elif x[0] is _define or x[0] is _definemacro:
require(x, len(x)>=3)
_def, v, body = x[0], x[1], x[2:]
if isa(v, list) and v: # (define (f args) body)
f, args = v[0], v[1:] # => (define f (lambda (args) body))
return expand([_def, f, [_lambda, args]+body])
else:
require(x, len(x)==3) # (define non-var/list exp) => Error
require(x, isa(v, Symbol), "can define only a symbol")
exp = expand(x[2])
if _def is _definemacro:
require(x, toplevel, "define-macro only allowed at top level")
proc = eval(exp)
require(x, callable(proc), "macro must be a procedure")
macro_table[v] = proc # (define-macro v proc)
return None # => None; add v:proc to macro_table
return [_define, v, exp]
elif x[0] is _begin:
if len(x)==1: return None # (begin) => None
else: return [expand(xi, toplevel) for xi in x]
elif x[0] is _lambda: # (lambda (x) e1 e2)
require(x, len(x)>=3) # => (lambda (x) (begin e1 e2))
vars, body = x[1], x[2:]
require(x, (isa(vars, list) and all(isa(v, Symbol) for v in vars))
or isa(vars, Symbol), "illegal lambda argument list")
exp = body[0] if len(body) == 1 else [_begin] + body
return [_lambda, vars, expand(exp)]
elif x[0] is _quasiquote: # `x => expand_quasiquote(x)
require(x, len(x)==2)
return expand_quasiquote(x[1])
elif isa(x[0], Symbol) and x[0] in macro_table:
return expand(macro_table[x[0]](*x[1:]), toplevel) # (m arg...)
else: # => macroexpand if m isa macro
        return [expand(xi) for xi in x]      # (f arg...) => expand each
def require(x, predicate, msg="wrong length"):
"Signal a syntax error if predicate is false."
if not predicate: raise SyntaxError(to_string(x)+': '+msg)
_append, _cons, _let = map(Sym, "append cons let".split())
def expand_quasiquote(x):
"""Expand `x => 'x; `,x => x; `(,@x y) => (append x y) """
if not is_pair(x):
return [_quote, x]
require(x, x[0] is not _unquotesplicing, "can't splice here")
if x[0] is _unquote:
require(x, len(x)==2)
return x[1]
elif is_pair(x[0]) and x[0][0] is _unquotesplicing:
require(x[0], len(x[0])==2)
return [_append, x[0][1], expand_quasiquote(x[1:])]
else:
return [_cons, expand_quasiquote(x[0]), expand_quasiquote(x[1:])]
def let(*args):
args = list(args)
x = cons(_let, args)
require(x, len(args)>1)
bindings, body = args[0], args[1:]
require(x, all(isa(b, list) and len(b)==2 and isa(b[0], Symbol)
for b in bindings), "illegal binding list")
vars, vals = zip(*bindings)
    return [[_lambda, list(vars)] + [expand(e) for e in body]] + [expand(v) for v in vals]
macro_table = {_let:let} ## More macros can go here
eval(parse("""(begin
(define-macro and (lambda args
(if (null? args) #t
(if (= (length args) 1) (car args)
`(if ,(car args) (and ,@(cdr args)) #f)))))
;; More macros can also go here
)"""))
if __name__ == '__main__':
repl()
| apache-2.0 | 21,476,832,186,802,524 | 37.974922 | 87 | 0.541945 | false |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/BioSQL/BioSeqDatabase.py | 1 | 30416 | # Copyright 2002 by Andrew Dalke. All rights reserved.
# Revisions 2007-2014 copyright by Peter Cock. All rights reserved.
# Revisions 2009 copyright by Cymon J. Cox. All rights reserved.
# Revisions 2013-2014 copyright by Tiago Antao. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Note that BioSQL (including the database schema and scripts) is
# available and licensed separately. Please consult www.biosql.org
"""Connect with a BioSQL database and load Biopython like objects from it.
This provides interfaces for loading biological objects from a relational
database, and is compatible with the BioSQL standards.
"""
import os
import sys
from Bio._py3k import _universal_read_mode
from Bio._py3k import _bytes_bytearray_to_str as bytearray_to_str
from Bio import BiopythonDeprecationWarning
from . import BioSeq
from . import Loader
from . import DBUtils
_POSTGRES_RULES_PRESENT = False # Hack for BioSQL Bug 2839
def open_database(driver="MySQLdb", **kwargs):
"""Main interface for loading a existing BioSQL-style database.
This function is the easiest way to retrieve a connection to a
database, doing something like:
        >>> from BioSQL import BioSeqDatabase
        >>> server = BioSeqDatabase.open_database(user="root", db="minidb")
Arguments:
- driver - The name of the database driver to use for connecting. The
driver should implement the python DB API. By default, the MySQLdb
driver is used.
     - user - the username to connect to the database with.
- password, passwd - the password to connect with
- host - the hostname of the database
- database or db - the name of the database
"""
if driver == "psycopg":
raise ValueError("Using BioSQL with psycopg (version one) is no "
"longer supported. Use psycopg2 instead.")
if os.name == "java":
from com.ziclix.python.sql import zxJDBC
module = zxJDBC
if driver in ["MySQLdb"]:
jdbc_driver = "com.mysql.jdbc.Driver"
url_pref = "jdbc:mysql://" + kwargs["host"] + "/"
elif driver in ["psycopg2"]:
jdbc_driver = "org.postgresql.Driver"
url_pref = "jdbc:postgresql://" + kwargs["host"] + "/"
else:
module = __import__(driver, fromlist=["connect"])
connect = module.connect
# Different drivers use different keywords...
kw = kwargs.copy()
if driver in ["MySQLdb", "mysql.connector"] and os.name != "java":
if "database" in kw:
kw["db"] = kw["database"]
del kw["database"]
if "password" in kw:
kw["passwd"] = kw["password"]
del kw["password"]
# kw["charset"] = "utf8"
# kw["use_unicode"] = True
else:
# DB-API recommendations
if "db" in kw:
kw["database"] = kw["db"]
del kw["db"]
if "passwd" in kw:
kw["password"] = kw["passwd"]
del kw["passwd"]
if driver in ["psycopg2", "pgdb"] and not kw.get("database"):
kw["database"] = "template1"
# SQLite connect takes the database name as input
if os.name == "java":
if driver in ["MySQLdb"]:
conn = connect(url_pref + kw.get("database", "mysql"),
kw["user"], kw["password"], jdbc_driver)
elif driver in ["psycopg2"]:
conn = connect(url_pref + kw.get("database", "postgresql") +
"?stringtype=unspecified",
kw["user"], kw["password"], jdbc_driver)
elif driver in ["sqlite3"]:
conn = connect(kw["database"])
else:
conn = connect(**kw)
if os.name == "java":
server = DBServer(conn, module, driver)
else:
server = DBServer(conn, module)
# TODO - Remove the following once BioSQL Bug 2839 is fixed.
# Test for RULES in PostgreSQL schema, see also Bug 2833.
if driver in ["psycopg2", "pgdb"]:
sql = "SELECT ev_class FROM pg_rewrite WHERE " + \
"rulename='rule_bioentry_i1' OR " + \
"rulename='rule_bioentry_i2';"
if server.adaptor.execute_and_fetchall(sql):
import warnings
from Bio import BiopythonWarning
warnings.warn("Your BioSQL PostgreSQL schema includes some "
"rules currently required for bioperl-db but "
"which may cause problems loading data using "
"Biopython (see BioSQL Bug 2839). If you do not "
"use BioPerl, please remove these rules. "
"Biopython should cope with the rules present, "
"but with a performance penalty when loading "
"new records.", BiopythonWarning)
global _POSTGRES_RULES_PRESENT
_POSTGRES_RULES_PRESENT = True
return server
class DBServer(object):
"""Represents a BioSQL database continaing namespaces (sub-databases).
This acts like a Python dictionary, giving access to each namespace
(defined by a row in the biodatabase table) as a BioSeqDatabase object.
"""
def __init__(self, conn, module, module_name=None):
self.module = module
if module_name is None:
module_name = module.__name__
if module_name == "mysql.connector" and sys.version_info[0] == 3:
wrap_cursor = True
else:
wrap_cursor = False
# Get module specific Adaptor or the base (general) Adaptor
Adapt = _interface_specific_adaptors.get(module_name, Adaptor)
self.adaptor = Adapt(conn, DBUtils.get_dbutils(module_name),
wrap_cursor=wrap_cursor)
self.module_name = module_name
def __repr__(self):
return self.__class__.__name__ + "(%r)" % self.adaptor.conn
def __getitem__(self, name):
return BioSeqDatabase(self.adaptor, name)
def __len__(self):
"""Number of namespaces (sub-databases) in this database."""
sql = "SELECT COUNT(name) FROM biodatabase;"
return int(self.adaptor.execute_and_fetch_col0(sql)[0])
def __contains__(self, value):
"""Check if a namespace (sub-database) in this database."""
sql = "SELECT COUNT(name) FROM biodatabase WHERE name=%s;"
return bool(self.adaptor.execute_and_fetch_col0(sql, (value,))[0])
def __iter__(self):
"""Iterate over namespaces (sub-databases) in the database."""
# TODO - Iterate over the cursor, much more efficient
return iter(self.adaptor.list_biodatabase_names())
if hasattr(dict, "iteritems"):
# Python 2, use iteritems etc
def keys(self):
"""List of namespaces (sub-databases) in the database."""
return self.adaptor.list_biodatabase_names()
def values(self):
"""List of BioSeqDatabase objects in the database."""
return [self[key] for key in self]
def items(self):
"""List of (namespace, BioSeqDatabase) for entries in the database."""
return [(key, self[key]) for key in self]
def iterkeys(self):
"""Iterate over namespaces (sub-databases) in the database."""
return iter(self)
def itervalues(self):
"""Iterate over BioSeqDatabase objects in the database."""
for key in self:
yield self[key]
def iteritems(self):
"""Iterate over (namespace, BioSeqDatabase) in the database."""
for key in self:
yield key, self[key]
else:
# Python 3, items etc are all iterators
def keys(self):
"""Iterate over namespaces (sub-databases) in the database."""
return iter(self)
def values(self):
"""Iterate over BioSeqDatabase objects in the database."""
for key in self:
yield self[key]
def items(self):
"""Iterate over (namespace, BioSeqDatabase) in the database."""
for key in self:
yield key, self[key]
def __delitem__(self, name):
"""Remove a namespace and all its entries."""
if name not in self:
raise KeyError(name)
db_id = self.adaptor.fetch_dbid_by_dbname(name)
remover = Loader.DatabaseRemover(self.adaptor, db_id)
remover.remove()
def remove_database(self, db_name):
"""Remove a namespace and all its entries (OBSOLETE).
Try to remove all references to items in a database.
server.remove_database(name)
In keeping with the dictionary interface, you can now do this:
del server[name]
"""
import warnings
warnings.warn("This method is deprecated. In keeping with the "
"dictionary interface, you can now use 'del "
"server[name]' instead", BiopythonDeprecationWarning)
self.__delitem__(db_name)
def new_database(self, db_name, authority=None, description=None):
"""Add a new database to the server and return it.
"""
# make the database
sql = r"INSERT INTO biodatabase (name, authority, description)" \
r" VALUES (%s, %s, %s)"
self.adaptor.execute(sql, (db_name, authority, description))
return BioSeqDatabase(self.adaptor, db_name)
def load_database_sql(self, sql_file):
"""Load a database schema into the given database.
This is used to create tables, etc when a database is first created.
sql_file should specify the complete path to a file containing
SQL entries for building the tables.
"""
# Not sophisticated enough for PG schema. Is it needed by MySQL?
# Looks like we need this more complicated way for both. Leaving it
# the default and removing the simple-minded approach.
# read the file with all comment lines removed
sql = ""
with open(sql_file, _universal_read_mode) as sql_handle:
for line in sql_handle:
if line.startswith("--"): # don't include comment lines
pass
elif line.startswith("#"): # ditto for MySQL comments
pass
elif line.strip(): # only include non-blank lines
sql += line.strip() + " "
# two ways to load the SQL
# 1. PostgreSQL can load it all at once and actually needs to
# due to FUNCTION defines at the end of the SQL which mess up
# the splitting by semicolons
if self.module_name in ["psycopg2", "pgdb"]:
self.adaptor.cursor.execute(sql)
# 2. MySQL needs the database loading split up into single lines of
# SQL executed one at a time
elif self.module_name in ["mysql.connector", "MySQLdb", "sqlite3"]:
sql_parts = sql.split(";") # one line per sql command
# don't use the last item, it's blank
for sql_line in sql_parts[:-1]:
self.adaptor.cursor.execute(sql_line)
else:
raise ValueError("Module %s not supported by the loader." %
(self.module_name))
def commit(self):
"""Commits the current transaction to the database."""
return self.adaptor.commit()
def rollback(self):
"""Rolls backs the current transaction."""
return self.adaptor.rollback()
def close(self):
"""Close the connection. No further activity possible."""
return self.adaptor.close()
class _CursorWrapper(object):
"""A wraper for mysql.connector resolving bytestring representations."""
def __init__(self, real_cursor):
self.real_cursor = real_cursor
def execute(self, operation, params=None, multi=False):
self.real_cursor.execute(operation, params, multi)
def _convert_tuple(self, tuple_):
tuple_list = list(tuple_)
for i, elem in enumerate(tuple_list):
if type(elem) is bytes:
tuple_list[i] = elem.decode("utf-8")
return tuple(tuple_list)
def _convert_list(self, lst):
ret_lst = []
for tuple_ in lst:
new_tuple = self._convert_tuple(tuple_)
ret_lst.append(new_tuple)
return ret_lst
def fetchall(self):
rv = self.real_cursor.fetchall()
return self._convert_list(rv)
def fetchone(self):
tuple_ = self.real_cursor.fetchone()
return self._convert_tuple(tuple_)
class Adaptor(object):
"""High level wrapper for a database connection and cursor
Most database calls in BioSQL are done indirectly though this adaptor
class. This provides helper methods for fetching data and executing
sql.
"""
def __init__(self, conn, dbutils, wrap_cursor=False):
self.conn = conn
if wrap_cursor:
self.cursor = _CursorWrapper(conn.cursor())
else:
self.cursor = conn.cursor()
self.dbutils = dbutils
def last_id(self, table):
return self.dbutils.last_id(self.cursor, table)
def autocommit(self, y=True):
"""Set the autocommit mode. True values enable; False value disable."""
return self.dbutils.autocommit(self.conn, y)
def commit(self):
"""Commits the current transaction."""
return self.conn.commit()
def rollback(self):
"""Rolls backs the current transaction."""
return self.conn.rollback()
def close(self):
"""Close the connection. No further activity possible."""
return self.conn.close()
def fetch_dbid_by_dbname(self, dbname):
self.execute(
r"select biodatabase_id from biodatabase where name = %s",
(dbname,))
rv = self.cursor.fetchall()
if not rv:
raise KeyError("Cannot find biodatabase with name %r" % dbname)
return rv[0][0]
def fetch_seqid_by_display_id(self, dbid, name):
sql = r"select bioentry_id from bioentry where name = %s"
fields = [name]
if dbid:
sql += " and biodatabase_id = %s"
fields.append(dbid)
self.execute(sql, fields)
rv = self.cursor.fetchall()
if not rv:
raise IndexError("Cannot find display id %r" % name)
if len(rv) > 1:
raise IndexError("More than one entry with display id %r" % name)
return rv[0][0]
def fetch_seqid_by_accession(self, dbid, name):
sql = r"select bioentry_id from bioentry where accession = %s"
fields = [name]
if dbid:
sql += " and biodatabase_id = %s"
fields.append(dbid)
self.execute(sql, fields)
rv = self.cursor.fetchall()
if not rv:
raise IndexError("Cannot find accession %r" % name)
if len(rv) > 1:
raise IndexError("More than one entry with accession %r" % name)
return rv[0][0]
def fetch_seqids_by_accession(self, dbid, name):
sql = r"select bioentry_id from bioentry where accession = %s"
fields = [name]
if dbid:
sql += " and biodatabase_id = %s"
fields.append(dbid)
return self.execute_and_fetch_col0(sql, fields)
def fetch_seqid_by_version(self, dbid, name):
acc_version = name.split(".")
if len(acc_version) > 2:
raise IndexError("Bad version %r" % name)
acc = acc_version[0]
if len(acc_version) == 2:
version = acc_version[1]
else:
version = "0"
sql = r"SELECT bioentry_id FROM bioentry WHERE accession = %s" \
r" AND version = %s"
fields = [acc, version]
if dbid:
sql += " and biodatabase_id = %s"
fields.append(dbid)
self.execute(sql, fields)
rv = self.cursor.fetchall()
if not rv:
raise IndexError("Cannot find version %r" % name)
if len(rv) > 1:
raise IndexError("More than one entry with version %r" % name)
return rv[0][0]
def fetch_seqid_by_identifier(self, dbid, identifier):
# YB: was fetch_seqid_by_seqid
sql = "SELECT bioentry_id FROM bioentry WHERE identifier = %s"
fields = [identifier]
if dbid:
sql += " and biodatabase_id = %s"
fields.append(dbid)
self.execute(sql, fields)
rv = self.cursor.fetchall()
if not rv:
raise IndexError("Cannot find display id %r" % identifier)
return rv[0][0]
def list_biodatabase_names(self):
return self.execute_and_fetch_col0(
"SELECT name FROM biodatabase")
def list_bioentry_ids(self, dbid):
return self.execute_and_fetch_col0(
"SELECT bioentry_id FROM bioentry WHERE biodatabase_id = %s",
(dbid,))
def list_bioentry_display_ids(self, dbid):
return self.execute_and_fetch_col0(
"SELECT name FROM bioentry WHERE biodatabase_id = %s",
(dbid,))
def list_any_ids(self, sql, args):
"""Return ids given a SQL statement to select for them.
This assumes that the given SQL does a SELECT statement that
returns a list of items. This parses them out of the 2D list
they come as and just returns them in a list.
"""
return self.execute_and_fetch_col0(sql, args)
def execute_one(self, sql, args=None):
"""Execute sql that returns 1 record, and return the record"""
self.execute(sql, args or ())
rv = self.cursor.fetchall()
assert len(rv) == 1, "Expected 1 response, got %d" % len(rv)
return rv[0]
def execute(self, sql, args=None):
"""Just execute an sql command.
"""
if os.name == "java":
sql = sql.replace("%s", "?")
self.dbutils.execute(self.cursor, sql, args)
def get_subseq_as_string(self, seqid, start, end):
length = end - start
# XXX Check this on MySQL and PostgreSQL. substr should be general,
# does it need dbutils?
# return self.execute_one(
# """select SUBSTRING(seq FROM %s FOR %s)
# from biosequence where bioentry_id = %s""",
# (start+1, length, seqid))[0]
#
# Convert to a string on returning for databases that give back
# unicode. Shouldn't need unicode for sequences so this seems safe.
return str(self.execute_one(
"""select SUBSTR(seq, %s, %s)
from biosequence where bioentry_id = %s""",
(start + 1, length, seqid))[0])
def execute_and_fetch_col0(self, sql, args=None):
self.execute(sql, args or ())
return [field[0] for field in self.cursor.fetchall()]
def execute_and_fetchall(self, sql, args=None):
self.execute(sql, args or ())
return self.cursor.fetchall()
class MysqlConnectorAdaptor(Adaptor):
"""A BioSQL Adaptor class with fixes for the MySQL interface
BioSQL was failing due to returns of bytearray objects from
the mysql-connector-python database connector. This adaptor
class scrubs returns of bytearrays and of byte strings converting
them to string objects instead. This adaptor class was made in
response to backwards incompatible changes added to
mysql-connector-python in release 2.0.0 of the package.
"""
def execute_one(self, sql, args=None):
out = super(MysqlConnectorAdaptor, self).execute_one(sql, args)
return tuple(bytearray_to_str(v) for v in out)
def execute_and_fetch_col0(self, sql, args=None):
out = super(MysqlConnectorAdaptor, self).execute_and_fetch_col0(sql, args)
return [bytearray_to_str(column) for column in out]
def execute_and_fetchall(self, sql, args=None):
out = super(MysqlConnectorAdaptor, self).execute_and_fetchall(sql, args)
return [tuple(bytearray_to_str(v) for v in o) for o in out]
_interface_specific_adaptors = {
# If SQL interfaces require a specific adaptor, use this to map the adaptor
"mysql.connector": MysqlConnectorAdaptor
}
_allowed_lookups = {
# Lookup name / function name to get id, function to list all ids
'primary_id': "fetch_seqid_by_identifier",
'gi': "fetch_seqid_by_identifier",
'display_id': "fetch_seqid_by_display_id",
'name': "fetch_seqid_by_display_id",
'accession': "fetch_seqid_by_accession",
'version': "fetch_seqid_by_version",
}
class BioSeqDatabase(object):
"""Represents a namespace (sub-database) within the BioSQL database.
    i.e. one row in the biodatabase table, and all rows in the bioentry
table associated with it.
"""
def __init__(self, adaptor, name):
self.adaptor = adaptor
self.name = name
self.dbid = self.adaptor.fetch_dbid_by_dbname(name)
def __repr__(self):
return "BioSeqDatabase(%r, %r)" % (self.adaptor, self.name)
def get_Seq_by_id(self, name):
"""Gets a DBSeqRecord object by its name
Example: seq_rec = db.get_Seq_by_id('ROA1_HUMAN')
The name of this method is misleading since it returns a DBSeqRecord
        rather than a DBSeq object, and presumably was to mirror BioPerl.
"""
seqid = self.adaptor.fetch_seqid_by_display_id(self.dbid, name)
return BioSeq.DBSeqRecord(self.adaptor, seqid)
def get_Seq_by_acc(self, name):
"""Gets a DBSeqRecord object by accession number
Example: seq_rec = db.get_Seq_by_acc('X77802')
The name of this method is misleading since it returns a DBSeqRecord
        rather than a DBSeq object, and presumably was to mirror BioPerl.
"""
seqid = self.adaptor.fetch_seqid_by_accession(self.dbid, name)
return BioSeq.DBSeqRecord(self.adaptor, seqid)
def get_Seq_by_ver(self, name):
"""Gets a DBSeqRecord object by version number
Example: seq_rec = db.get_Seq_by_ver('X77802.1')
The name of this method is misleading since it returns a DBSeqRecord
        rather than a DBSeq object, and presumably was to mirror BioPerl.
"""
seqid = self.adaptor.fetch_seqid_by_version(self.dbid, name)
return BioSeq.DBSeqRecord(self.adaptor, seqid)
def get_Seqs_by_acc(self, name):
"""Gets a list of DBSeqRecord objects by accession number
        Example: seq_recs = db.get_Seqs_by_acc('X77802')
The name of this method is misleading since it returns a list of
        DBSeqRecord objects rather than a list of DBSeq objects, and presumably
was to mirror BioPerl.
"""
seqids = self.adaptor.fetch_seqids_by_accession(self.dbid, name)
return [BioSeq.DBSeqRecord(self.adaptor, seqid) for seqid in seqids]
def get_all_primary_ids(self):
"""All the primary_ids of the sequences in the database (OBSOLETE).
        These may be ids (display style) or accession numbers or
something else completely different - they *are not*
meaningful outside of this database implementation.
Please use .keys() instead of .get_all_primary_ids()
"""
import warnings
warnings.warn("Use bio_seq_database.keys() instead of "
"bio_seq_database.get_all_primary_ids()",
BiopythonDeprecationWarning)
return list(self.keys())
def __getitem__(self, key):
return BioSeq.DBSeqRecord(self.adaptor, key)
def __delitem__(self, key):
"""Remove an entry and all its annotation."""
if key not in self:
raise KeyError(key)
# Assuming this will automatically cascade to the other tables...
sql = "DELETE FROM bioentry " + \
"WHERE biodatabase_id=%s AND bioentry_id=%s;"
self.adaptor.execute(sql, (self.dbid, key))
def __len__(self):
"""Number of records in this namespace (sub database)."""
sql = "SELECT COUNT(bioentry_id) FROM bioentry " + \
"WHERE biodatabase_id=%s;"
return int(self.adaptor.execute_and_fetch_col0(sql, (self.dbid, ))[0])
def __contains__(self, value):
"""Check if a primary (internal) id is this namespace (sub database)."""
sql = "SELECT COUNT(bioentry_id) FROM bioentry " + \
"WHERE biodatabase_id=%s AND bioentry_id=%s;"
# The bioentry_id field is an integer in the schema.
# PostgreSQL will throw an error if we use a non integer in the query.
try:
bioentry_id = int(value)
except ValueError:
return False
return bool(self.adaptor.execute_and_fetch_col0(sql,
(self.dbid, bioentry_id))[0])
def __iter__(self):
"""Iterate over ids (which may not be meaningful outside this database)."""
# TODO - Iterate over the cursor, much more efficient
return iter(self.adaptor.list_bioentry_ids(self.dbid))
if hasattr(dict, "iteritems"):
# Python 2, use iteritems etc
def keys(self):
"""List of ids which may not be meaningful outside this database."""
return self.adaptor.list_bioentry_ids(self.dbid)
def values(self):
"""List of DBSeqRecord objects in the namespace (sub database)."""
return [self[key] for key in self]
def items(self):
"""List of (id, DBSeqRecord) for the namespace (sub database)."""
return [(key, self[key]) for key in self]
def iterkeys(self):
"""Iterate over ids (which may not be meaningful outside this database)."""
return iter(self)
def itervalues(self):
"""Iterate over DBSeqRecord objects in the namespace (sub database)."""
for key in self:
yield self[key]
def iteritems(self):
"""Iterate over (id, DBSeqRecord) for the namespace (sub database)."""
for key in self:
yield key, self[key]
else:
# Python 3, items etc are all iterators
def keys(self):
"""Iterate over ids (which may not be meaningful outside this database)."""
return iter(self)
def values(self):
"""Iterate over DBSeqRecord objects in the namespace (sub database)."""
for key in self:
yield self[key]
def items(self):
"""Iterate over (id, DBSeqRecord) for the namespace (sub database)."""
for key in self:
yield key, self[key]
def lookup(self, **kwargs):
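        """Return a DBSeqRecord given a single lookup keyword.
        Example: seq_rec = db.lookup(accession='X77802')
        Accepted keywords are those in _allowed_lookups (primary_id, gi,
        display_id, name, accession, version).
        """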
if len(kwargs) != 1:
raise TypeError("single key/value parameter expected")
k, v = list(kwargs.items())[0]
if k not in _allowed_lookups:
raise TypeError("lookup() expects one of %r, not %r" %
(list(_allowed_lookups.keys()), k))
lookup_name = _allowed_lookups[k]
lookup_func = getattr(self.adaptor, lookup_name)
seqid = lookup_func(self.dbid, v)
return BioSeq.DBSeqRecord(self.adaptor, seqid)
def get_Seq_by_primary_id(self, seqid):
"""Get a DBSeqRecord by the primary (internal) id (OBSOLETE).
Rather than db.get_Seq_by_primary_id(my_id) use db[my_id]
The name of this method is misleading since it returns a DBSeqRecord
        rather than a DBSeq object, and presumably was to mirror BioPerl.
"""
import warnings
warnings.warn("Use bio_seq_database[my_id] instead of "
"bio_seq_database.get_Seq_by_primary_id(my_id)",
BiopythonDeprecationWarning)
return self[seqid]
def load(self, record_iterator, fetch_NCBI_taxonomy=False):
"""Load a set of SeqRecords into the BioSQL database.
record_iterator is either a list of SeqRecord objects, or an
Iterator object that returns SeqRecord objects (such as the
output from the Bio.SeqIO.parse() function), which will be
used to populate the database.
        fetch_NCBI_taxonomy is a boolean flag allowing or preventing
connection to the taxonomic database on the NCBI server
(via Bio.Entrez) to fetch a detailed taxonomy for each
SeqRecord.
Example:
from Bio import SeqIO
count = db.load(SeqIO.parse(open(filename), format))
Returns the number of records loaded.
"""
db_loader = Loader.DatabaseLoader(self.adaptor, self.dbid,
fetch_NCBI_taxonomy)
num_records = 0
global _POSTGRES_RULES_PRESENT
for cur_record in record_iterator:
num_records += 1
            # Hack to work around BioSQL Bug 2839 - if using PostgreSQL and
            # the RULES are present, check for a duplicate record before loading
if _POSTGRES_RULES_PRESENT:
# Recreate what the Loader's _load_bioentry_table will do:
if cur_record.id.count(".") == 1:
accession, version = cur_record.id.split('.')
try:
version = int(version)
except ValueError:
accession = cur_record.id
version = 0
else:
accession = cur_record.id
version = 0
gi = cur_record.annotations.get("gi", None)
sql = "SELECT bioentry_id FROM bioentry WHERE (identifier " + \
"= '%s' AND biodatabase_id = '%s') OR (accession = " + \
"'%s' AND version = '%s' AND biodatabase_id = '%s')"
self.adaptor.execute(
sql % (gi, self.dbid, accession, version, self.dbid))
if self.adaptor.cursor.fetchone():
raise self.adaptor.conn.IntegrityError("Duplicate record "
"detected: record has not been inserted")
# End of hack
db_loader.load_seqrecord(cur_record)
return num_records
| apache-2.0 | 1,127,389,766,597,076,100 | 38.044929 | 100 | 0.594523 | false |
SanaMobile/sana.protocol_builder | src-django/api/serializer.py | 1 | 7122 | from collections import OrderedDict
from rest_framework import serializers
from rest_framework.fields import SkipField
from django.contrib.auth.models import User
import models
import field
import json
class ElementSerializer(serializers.ModelSerializer):
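    """
    Serializer for Element models; 'choices' and 'answer' are JSON-encoded
    arrays carried as strings (see validate()).
    """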
choices = field.ArrayAsStringField(required=False)
answer = field.ArrayAsStringField(required=False)
class Meta:
model = models.Element
fields = (
'id',
'display_index',
'concept',
'question',
'answer',
'page',
'choices',
'required',
'image',
'audio',
'action',
'mime_type',
'element_type',
'last_modified',
'created'
)
def to_representation(self, instance):
"""
Object instance -> Dict of primitive datatypes.
"""
ret = OrderedDict()
fields = [field for field in self.fields.values() if not field.write_only]
for field in fields:
try:
attribute = field.get_attribute(instance)
except SkipField:
continue
if attribute is not None:
ret[field.field_name] = field.to_representation(attribute)
return ret
def validate(self, data):
if data['element_type'] in models.Element.CHOICE_TYPES:
# Choice-based element needs to have a valid answer
answers = json.loads(data['answer'])
choices = json.loads(data['choices'])
if data['element_type'] != 'MULTI_SELECT':
if len(answers) > 1:
raise serializers.ValidationError('Answer must have at most 1 choice')
for answer in answers:
if answer not in choices:
raise serializers.ValidationError('Answer must be one of the choices')
return data
class AbstractElementSerializer(serializers.ModelSerializer):
choices = field.ArrayAsStringField(required=False)
answer = field.ArrayAsStringField(required=False)
class Meta:
model = models.AbstractElement
fields = (
'id',
'display_index',
'concept',
'subroutine',
'question',
'answer',
'choices',
'required',
'image',
'audio',
'action',
'mime_type',
'element_type',
'last_modified',
'created'
)
def to_representation(self, instance):
"""
Object instance -> Dict of primitive datatypes.
"""
ret = OrderedDict()
fields = [field for field in self.fields.values() if not field.write_only]
for field in fields:
try:
attribute = field.get_attribute(instance)
except SkipField:
continue
if attribute is not None:
ret[field.field_name] = field.to_representation(attribute)
return ret
def validate(self, data):
if data['element_type'] in models.Element.CHOICE_TYPES:
# Choice-based element needs to have a valid answer
answers = json.loads(data['answer'])
choices = json.loads(data['choices'])
if data['element_type'] != 'MULTI_SELECT':
if len(answers) > 1:
raise serializers.ValidationError('Answer must have at most 1 choice')
for answer in answers:
if answer not in choices:
raise serializers.ValidationError('Answer must be one of the choices')
return data
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
class PageListSerializer(serializers.ListSerializer):
class Meta(object):
model = models.Page
def update(self, instance, validated_data):
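        """
        Update pages matched by id; submitted ids with no existing page are ignored.
        """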
current_page_mapping = {page.id: page for page in instance}
new_data_mapping = {item['id']: item for item in validated_data}
result = []
for new_page_id, data in new_data_mapping.items():
page = current_page_mapping.get(new_page_id, None)
if page is not None:
result.append(self.child.update(page, data))
return result
class ShowIfSerializer(serializers.ModelSerializer):
conditions = field.ConditionTreeField(required=True)
class Meta:
model = models.ShowIf
fields = (
'id',
'page',
'last_modified',
'created',
'conditions'
)
class PageSerializer(serializers.ModelSerializer):
elements = ElementSerializer(many=True, read_only=True)
show_if = ShowIfSerializer(many=True, read_only=True)
id = serializers.IntegerField(read_only=False, required=False)
class Meta:
model = models.Page
list_serializer_class = PageListSerializer
fields = (
'id',
'display_index',
'procedure',
'elements',
'last_modified',
'created',
'show_if'
)
class ProcedureSerializer(serializers.ModelSerializer):
owner = serializers.ReadOnlyField(source='owner.id')
class Meta:
model = models.Procedure
fields = (
'id',
'title',
'author',
'uuid',
'owner',
'last_modified',
'created',
'version'
)
class ProcedureDetailSerializer(ProcedureSerializer):
owner = serializers.ReadOnlyField(source='owner.id')
pages = PageSerializer(many=True, read_only=True)
class Meta(ProcedureSerializer.Meta):
model = models.Procedure
depth = 1
fields = ProcedureSerializer.Meta.fields + ('pages',)
class ConceptSerializer(serializers.ModelSerializer):
abstractelement = AbstractElementSerializer(many=True, read_only=True)
id = serializers.IntegerField(read_only=False, required=False)
class Meta:
model = models.Concept
fields = (
'id',
'uuid',
'created',
'last_modified',
'name',
'abstractelement',
'display_name',
'description',
'data_type',
'mime_type',
'constraint'
)
class SubroutineSerializer(serializers.ModelSerializer):
abstractelements = AbstractElementSerializer(many=True, read_only=True)
id = serializers.IntegerField(read_only=False, required=False)
class Meta:
model = models.Subroutine
fields = (
'id',
'uuid',
'created',
'last_modified',
'name',
'abstractelements',
'display_name',
'description',
)
class MDSInstanceSerializer(serializers.ModelSerializer):
class Meta:
model = models.MDSInstance
fields = (
'api_url',
'api_key',
)
| bsd-3-clause | -1,810,374,835,869,102,800 | 26.712062 | 90 | 0.55827 | false |
jedie/django-debug-toolbar-django-info | django_info_panel/panels/database.py | 1 | 1644 | #!/usr/bin/env python
"""
django-info-panel
~~~~~~~~~~~~~~~~~
:copyleft: 2015-2016 by the django-debug-toolbar-django-info team, see AUTHORS for more details.
:created: 2015 by JensDiemer.de
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
from __future__ import absolute_import, unicode_literals
import logging
from django.db import connection
from django.apps import apps
from django.utils.translation import ugettext_lazy as _
from debug_toolbar.panels import Panel
logger = logging.getLogger(__name__)
class DatabaseInfo(Panel):
nav_title = _("Database")
title=_("Database Information")
template = 'django_info/panels/database.html'
def process_response(self, request, response):
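        # Collect each installed app and its model names so the panel
        # template can list them next to the database backend details.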
apps_info = []
app_configs = apps.get_app_configs()
for app_config in app_configs:
models = app_config.get_models()
model_info = []
for model in models:
model_info.append({
"name":model._meta.object_name,
})
apps_info.append({
"app_name": app_config.name,
"app_models": model_info,
})
self.record_stats({
"db_backend_name": connection.Database.__name__,
"db_backend_module": connection.Database.__file__,
"db_backend_version": getattr(connection.Database, "version", "?"),
"apps_info": apps_info,
"db_table_names": sorted(connection.introspection.table_names()),
"django_tables": sorted(connection.introspection.django_table_names()),
})
| gpl-3.0 | 8,547,051,120,517,676,000 | 28.890909 | 100 | 0.599148 | false |
NeilT-UK/python-utilities | engineering_conversions.py | 1 | 15717 | """ The engineering_conversions module
This provides two functions
eng_float() which behaves like float()
eng_str() which behaves like str()
but use engineering powers of 1000, and BIPM text multipliers like k and G
In the spirit of 'talk exact, listen forgiving', eng_float() understands all prefixes
defined by BIPM, both unicode micro characters, and strings like meg and Mega used by SPICE
>>> eng_float('12u6')
1.26e-05
>>> eng_str(1e7)
'10M'
"""
"""
Copyright (c) <2016>, <Neil Thomas>, <NeilT-UK>, <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
"""
version = '2.0.1 Dec 2016'
MICRO_SIGN = '\u00B5'
GREEK_MU = '\u03BC'
# define the multipliers for making floats into strings
# this happens at load time, so the time taken is 'lost' in the overall program load time
ms = {+3:'k', +6:'M', +9:'G', +12:'T', +15:'P', +18:'E', +21:'Z', +24:'Y',
-3:'m', -6:'u', -9:'n', -12:'p', -15:'f', -18:'a', -21:'z', -24:'y'}
# define the weights for decoding strings to floats
# invert the multipliers that we have
weights = {}
for m in ms:
weights[ms[m]] = m
weights[MICRO_SIGN] = -6 # add both micros
weights[GREEK_MU] = -6
longest_weights = {} # add the meg variations
longest_weights['mega'] = 6 # in size order
longest_weights['MEGA'] = 6
longest_weights['Mega'] = 6 # these need to be longer due to the trailing a != atto
long_weights = {}
long_weights['meg'] = 6
long_weights['MEG'] = 6
long_weights['Meg'] = 6
long_weights['da'] = 1 # trailing a != atto, so put it in longer
weights['d'] = -1 # add the non-3 BIPM SI prefixes, because we can
weights['c'] = -2 # and it makes listening more forgiving
weights['h'] = 2
# what *ck-wit at BIPM thought Exa would make a good multiplier
# when 'e' was already in use for exponents???
# this means that '34E3' will be interpreted as 34000, rather than 34.3 Exa
# but 34E will get interpreted as Exa, as float() doesn't like it
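# e.g. eng_float('34E3') returns 34000.0 (float() accepts it directly),
# while eng_float('34E') returns 3.4e+19 (the trailing E read as Exa)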
def eng_str(x, digits=6, limits=(-12,9), micro='u', mega='M',
infix=False, show_plus=False, show_trailing_zeros=False):
"""Return a formatted string, using powers of 1000, and BIPM engineering multipliers
digits integer, defaults to 6
<1 corrected to 1, large values honoured
limits tuple of integers, defaults to (-12,9), the electronics range pico to Giga
restricts the substitution range to more common prefixes
order can be (low, high) or (high, low)
micro string for -6 substitution, defaults to 'u'
unicode MICRO_SIGN and GREEK_MU available as module constants
mega string for +6, defaults to 'M', meg is used by most SPICEs
infix defaults to False, use decimal point and suffix, if True, replace decimal with symbol
show_plus defaults to False, minus is always shown regardless of this switch
show_trailing_zeros defaults to False, zeros truncated as far as possible
check a few simple random numbers with defaults
>>> [eng_str(n) for n in (3, 1.2e3, 30e3, -0.007)]
['3', '1.2k', '30k', '-7m']
check some 'normal' big numbers, and the default limits behaviour
>>> [eng_str(4*pow(10,n)) for n in range(4, 13)]
['40k', '400k', '4M', '40M', '400M', '4G', '40G', '400G', '4e+12']
check all the large multipliers, and excess limit behaviour
>>> [eng_str(3*pow(10,n), limits=(100,-100)) for n in range(3, 28, 3)]
['3k', '3M', '3G', '3T', '3P', '3E', '3Z', '3Y', '3e+27']
check some 'normal' small numbers, and the limits behaviour
>>> [eng_str(4*pow(10,-n)) for n in range(4, 18, 2)]
['400u', '4u', '40n', '400p', '4p', '40e-15', '400e-18']
check all the small multipliers
>>> [eng_str(3*pow(10,n), limits=(100,-100)) for n in range(-3, -28, -3)]
['3m', '3u', '3n', '3p', '3f', '3a', '3z', '3y', '3e-27']
check the digits parameter and trailing zeros, which defaults to false
>>> [eng_str(314159, digits=n) for n in range(8)]
['300k', '300k', '310k', '314k', '314.2k', '314.16k', '314.159k', '314.159k']
check trailing zeros on
>>> [eng_str(314159, digits=8, show_trailing_zeros=stz) for stz in (True, False)]
['314.15900k', '314.159k']
demonstrate infix (ugly, isn't it)
>>> [eng_str(314159, infix=infx) for infx in (True, False)]
['314k159', '314.159k']
check the sign control is working
>>> [eng_str(n, show_plus=sp) for (n, sp) in ((1, False), (1, True), (-1, False), (-1, True))]
['1', '+1', '-1', '-1']
huge numbers of digits?
>>> eng_str(314159, digits=30, show_trailing_zeros=True)
'314.159000000000000000000000000k'
fractional digits?
>>> eng_str(314159, digits=5.5)
'314.16k'
extreme number sizes (within the range of float)
>>> [eng_str(3*pow(10,n)) for n in range(-306, 307, 102)]
['3e-306', '3e-204', '3e-102', '3', '3e+102', '3e+204', '3e+306']
check the e+06 substitutions, normal and bizarre (I can't think of a good reason to trap)
>>> [eng_str(4e8, mega=n) for n in ('M', 'meg', 'Mega', 'foo')]
['400M', '400meg', '400Mega', '400foo']
check the e-06 substitutions, normal and bizarre (I still can't think of a good reason to trap)
>>> [eng_str(4e-5, micro=n) for n in ('u', MICRO_SIGN, GREEK_MU, 'z')]
['40u', '40µ', '40μ', '40z']
"""
# don't be silly
digits = int(digits) # is this defensive? are we going to get a float?
if digits<1:
digits=1
# let the e format do the heavy lifting
# force a + sign to regularise the format
# though we still have to look for the e as the exp field width can vary
e_str = '{:+.{fdigits}e}'.format(x, fdigits=digits-1)
# now pull the fields apart
sign = e_str[0]
ipart = e_str[1]
dp = '.'
fpart = e_str[3:(digits+2)]
exp = int(e_str[e_str.find('e')+1:])
# print('raw e format ', sign, ipart, dp, fpart, exp)
# find whether exp is a factor of 3, and adjust if not
adjustment = exp%3
# beef up length of fpart if it needs it
while len(fpart)<adjustment:
fpart += '0'
# transfer digits from fpart to ipart
ipart += fpart[:adjustment]
fpart = fpart[adjustment:]
# and fix the exponent
exp -= adjustment
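    # e.g. 314159 with digits=6: '+3.14159e+05' -> ipart '314', fpart '159', exp 3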
# print('normed to 3 ', sign, ipart, dp, fpart, exp)
# optionally take off the trailing zeros
if not show_trailing_zeros:
fpart = fpart.rstrip('0')
# and kill the decimal point if the fractional part has gone
if not(fpart):
dp = ''
# print('removed zeros ', sign, ipart, dp, fpart, exp)
# now we have to figure out exactly how to format this puppy
# I am going to try to minimise if then else try except special cases
# and just make it run a standard process
# find the limits that we are going to use, and shield the dict
hilim = min(max(limits), 24)
lolim = max(min(limits), -24)
# deal with the +6 and -6 special cases by putting them into the dict
ms[6] = mega
ms[-6] = micro
# deal with the special case of 0 for infix/postfix use by putting into dict
# print('infix is ', infix)
if infix:
ms[0] = '.'
else:
ms[0] = ''
# is substitution possible?
can_subs = lolim <= exp <= hilim
# print('can we substitute?', can_subs)
if not can_subs:
mult = 'e{:+}'.format(exp)
# remove the plus if we don't need it
if (not show_plus) and (sign=='+'):
sign = ''
# finally
# if we can make an infix substitution
if infix and can_subs:
# print('doing infix subs')
return '{}{}{}{}'.format(sign, ipart, ms[exp], fpart)
# if we can make a postfix substitution
if can_subs:
# print('doing postfix subs')
return '{}{}{}{}{}'.format(sign, ipart, dp, fpart, ms[exp])
# we can't make any substitution, return numeric
# print('doing the default formatting')
return '{}{}{}{}{}'.format(sign, ipart, dp, fpart, mult)
def eng_float(x_org):
"""[eng_f]Return a float, interpretting BIPM engineering prefixes[end_help]
identify and substitute all prefix symbols defined by the BIPM
and various versions of 'meg' used by most SPICE programs
raise ValueError if the string is empty or cannot be interpretted as an engineering float
check the simple >1 multipliers
>>> [eng_float(s) for s in ('1', '1da', '1h', '1k', '1M', '1meg', '1Mega', '1G')]
[1.0, 10.0, 100.0, 1000.0, 1000000.0, 1000000.0, 1000000.0, 1000000000.0]
>>> [eng_float(s) for s in ('1T', '1P', '1E', '1Z', '1Y')]
[1000000000000.0, 1000000000000000.0, 1e+18, 1e+21, 1e+24]
check the simple <1 multipliers
>>> [eng_float(s) for s in ('1', '1d', '1c', '1m', '1u', '1'+MICRO_SIGN, '1'+GREEK_MU)]
[1.0, 0.1, 0.01, 0.001, 1e-06, 1e-06, 1e-06]
>>> [eng_float(s) for s in ('1p', '1f', '1a', '1z', '1y')]
[1e-12, 1e-15, 1e-18, 1e-21, 1e-24]
check infix and suffix forms
>>> [eng_float(s) for s in ('1.3k', '1k3', '1u3', '1.3u')]
[1300.0, 1300.0, 1.3e-06, 1.3e-06]
check negative numbers
>>> [eng_float(s) for s in ('-1.3k', '-1k3', '-1u3', '-1.3u')]
[-1300.0, -1300.0, -1.3e-06, -1.3e-06]
empty input
>>> eng_float('')
Traceback (most recent call last):
...
ValueError: no input, nothing to do
illegal format with infix
>>> eng_float('1.2m3')
Traceback (most recent call last):
...
ValueError: "1.2m3" found infix "m" but "1.2.3e-3" not parsed
illegal format with suffix
>>> eng_float('14.3mm')
Traceback (most recent call last):
...
ValueError: "14.3mm" found suffix "m" but "14.3me-3" not parsed
unrecognised suffix
>>> eng_float('1t')
Traceback (most recent call last):
...
ValueError: could not parse "1t" as float, no multiplier found
bare suffix
>>> eng_float('m')
Traceback (most recent call last):
...
ValueError: "m" found suffix "m" but "e-3" not parsed
we let float() do the heavy lifting
"""
if len(x_org)==0:
raise ValueError('no input, nothing to do')
try:
return float(x_org)
except ValueError:
pass
# so float() couldn't make sense of it
# let's whip off any non-printing characters before we start
x = x_org.strip()
# does it end in any of our pre-defined multipliers, check long to short?
cand = None
for search_in in (longest_weights, long_weights, weights):
if cand:
break
for suffix in search_in:
if cand:
break
if x.endswith(suffix):
cand = suffix
cand_weight = search_in[suffix]
if cand:
# got one! remove it
x = x[:(-len(cand))]
# and replace it with an exponent
x += 'e'+str(cand_weight)
# and now see whether float can make sense of it
# use this two-step process as it delivers clean ValueErrors
try:
thing = float(x)
except ValueError:
thing = None
if thing==None:
raise ValueError('"{}" found suffix "{}" but "{}" not parsed'.format(x_org, cand, x))
else:
return thing
# nope, if we get here, float choked on the substitution
# so does it have an infix embedded in it?
# need to check in the order longest to shortest
    # to avoid exiting prematurely with 'm', when there's a 'mega'
cand = None
for search_in in (longest_weights, long_weights, weights):
if cand:
break
for infix in search_in:
if cand:
break
pos = x.find(infix)
if pos >= 0:
cand = infix
cand_weight = search_in[infix]
if cand:
# got one! remove it
first = x[:pos]
last = x[(pos+len(cand)):]
# replace with decimal point and add a numeric exponent
x = first+'.'+last+'e'+str(cand_weight)
# and now can float() chow down on it?
try:
thing = float(x)
except ValueError:
thing = None
if thing==None:
raise ValueError('"{}" found infix "{}" but "{}" not parsed'.format(x_org, cand, x))
else:
return thing
raise ValueError('could not parse "{}" as float, no multiplier found'.format(x_org))
if __name__ == '__main__':
import doctest
print('running doctest')
print('nothing else below here means everything has passed')
doctest.testmod()
"""
if __name__ == '__main__':
import tkinter as tki
import GUI_IO_widget as giw
def execute_func():
ps = panel.get()
# print(ps)
n = ps['number']
ds = ps['digits']
zeros = ps['trailing zeros']
infix = ps['infix']
mustr = ps['micro_str']
Mstr = ps['mega_str']
rep = eng_str(n, ds, show_trailing_zeros=zeros, infix=infix, micro=mustr, mega=Mstr)
panel.set_data('output', rep)
def exec_float_func():
fs = fanel.get()
num = fs['eng_float func']
fanel.set_data('output', str(num))
root = tki.Tk()
panel = giw.GUI_inputs(root, execute=execute_func, text='Test eng_str()')
panel.pack()
panel.add('number', conv=float)
panel.add('digits', conv=int)
panel.add('trailing zeros', conv=bool)
panel.add('infix', conv=bool)
panel.add('micro_str', data='u')
panel.add('mega_str', data='M')
panel.add('output', output_only=True)
fanel = giw.GUI_inputs(root, execute=exec_float_func, text='Test eng_float()')
fanel.pack()
fanel.add('eng_float func', conv=eng_float, data=1)
fanel.add('output', output_only=True)
root.mainloop()
"""
"""
if __name__ == '__main__':
a = 2
while a:
a = input('test string - ')
try:
print(eng_float(a))
except ValueError as ve:
print('threw value error')
print(ve)
"""
| bsd-2-clause | -5,959,914,873,885,239,000 | 31.603734 | 101 | 0.598855 | false |
Nepochal/wallabag-cli | wallabag/wallabag_show.py | 1 | 3546 | """
Show a wallabag entry
"""
import io
import formatter
import json
import os
from sys import exit
import sys
from bs4 import BeautifulSoup
import api
import conf
import entry
def show(entry_id, colors=True, raw=False, html=False):
"""
Main function for showing an entry.
"""
conf.load()
try:
request = api.api_get_entry(entry_id)
__handle_request_error(request)
entr = entry.Entry(json.loads(request.response))
except api.OAuthException as ex:
print("Error: {0}".format(ex.text))
print()
exit(-1)
title = entr.title
try:
delimiter = "".ljust(os.get_terminal_size().columns, '=')
# piped output to file or other process
except OSError:
delimiter = "\n"
article = entr.content
if not html:
article = html2text(article, colors)
output = "{0}\n{1}\n{2}".format(title, delimiter, article)
if not raw:
output = __format_text(output)
print(output)
def html2text(html, colors=True):
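    """
    Convert article HTML to plain text.
    Colours h1-h3 and bold text with ANSI codes when colors is True, and
    replaces <hr> and <img> tags with textual placeholders.
    """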
soup = BeautifulSoup(html, "html.parser")
# Color h1-h3
if colors:
h1colors = '\033[93m'
h1colore = '\033[0m'
else:
h1colors = h1colore = ""
for h1 in soup.findAll('h1'):
h1.string = "\n{0}{1}{2}".format(h1colors, h1.string, h1colore)
for h2 in soup.findAll('h2'):
h2.string = "\n{0}{1}{2}".format(h1colors, h2.string, h1colore)
for h3 in soup.findAll('h3'):
h3.string = "\n{0}{1}{2}".format(h1colors, h3.string, h1colore)
if colors:
# Color bold texts
bcolors = '\033[92m'
bcolore = '\033[0m'
for bold in soup.findAll('b'):
bold.string = "{0}{1}{2}".format(bcolors, bold.string, bcolore)
for bold in soup.findAll('strong'):
bold.string = "{0}{1}{2}".format(bcolors, bold.string, bcolore)
# Replace hr with visual lines
try:
hrstring = "".ljust(os.get_terminal_size().columns, '-')
# piped output to file or other process
except OSError:
hrstring = "-----"
for hr in soup.findAll('hr'):
replace = soup.new_tag('p')
replace.string = hrstring
hr.insert_after(replace)
hr.unwrap()
# Replace images by information-texts
for img in soup.findAll('img'):
replace = soup.new_tag('p')
try:
alt = " \"{0}\"".format(img['alt'])
except KeyError:
alt = ""
replace.string = "[IMAGE{0}]\n".format(alt)
img.insert_after(replace)
img.unwrap()
return soup.text
def __format_text(text):
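    """
    Re-wrap each line of text to the current terminal width.
    """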
try:
maxcol = os.get_terminal_size().columns
# piped output to file or other process
except OSError:
maxcol = sys.maxsize
ret = ""
for line in text.splitlines():
ios = io.StringIO()
writer = formatter.DumbWriter(ios, maxcol=maxcol)
writer.send_flowing_data(line)
ret = "{0}{1}\n".format(ret, ios.getvalue())
ios.close()
return ret
def __handle_request_error(request):
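    """
    Print an API error and exit; 403/404 are reported as an invalid entry id.
    """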
if request.has_error():
if request.error == api.Error.http_forbidden or request.error == api.Error.http_not_found:
print("Error: Invalid entry id.")
print()
exit(-1)
print("Error: {0} - {1}".format(request.error_text,
request.error_description))
exit(-1)
| mit | 553,081,221,058,167,040 | 25.703125 | 98 | 0.551043 | false |
jmptrader/duktape | tools/genconfig.py | 1 | 56701 | #!/usr/bin/env python2
#
# Process Duktape option metadata and produce various useful outputs:
#
# - duk_config.h with specific or autodetected platform, compiler, and
# architecture, forced options, sanity checks, etc
# - option documentation for Duktape config options (DUK_USE_xxx)
#
# Genconfig tries to build all outputs based on modular metadata, so that
# managing a large number of config options (which is hard to avoid given
# the wide range of targets Duktape supports) remains maintainable.
#
# Genconfig does *not* try to support all exotic platforms out there.
# Instead, the goal is to allow the metadata to be extended, or to provide
# a reasonable starting point for manual duk_config.h tweaking.
#
import logging
import sys
logging.basicConfig(level=logging.INFO, stream=sys.stdout, format='%(name)-21s %(levelname)-7s %(message)s')
logger = logging.getLogger('genconfig.py')
logger.setLevel(logging.INFO)
import os
import re
import json
import yaml
import optparse
import tarfile
import tempfile
import atexit
import shutil
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
#
# Globals holding scanned metadata, helper snippets, etc
#
# Metadata to scan from config files.
use_defs = None
use_defs_list = None
opt_defs = None
opt_defs_list = None
use_tags = None
use_tags_list = None
tags_meta = None
required_use_meta_keys = [
'define',
'introduced',
'default',
'tags',
'description'
]
allowed_use_meta_keys = [
'define',
'introduced',
'deprecated',
'removed',
'unused',
'requires',
'conflicts',
'related',
'default',
'tags',
'description',
'warn_if_missing'
]
required_opt_meta_keys = [
'define',
'introduced',
'tags',
'description'
]
allowed_opt_meta_keys = [
'define',
'introduced',
'deprecated',
'removed',
'unused',
'requires',
'conflicts',
'related',
'tags',
'description'
]
# Preferred tag order for option documentation.
doc_tag_order = [
'portability',
'memory',
'lowmemory',
'ecmascript',
'execution',
'debugger',
'debug',
'development'
]
# Preferred tag order for generated C header files.
header_tag_order = doc_tag_order
# Helper headers snippets.
helper_snippets = None
# Assume these provides come from outside.
assumed_provides = {
'DUK_SINGLE_FILE': True, # compiling Duktape from a single source file (duktape.c) version
'DUK_COMPILING_DUKTAPE': True, # compiling Duktape (not user application)
'DUK_CONFIG_H_INCLUDED': True, # artifact, include guard
}
# Platform files must provide at least these (additional checks
# in validate_platform_file()). Fill-ins provide missing optionals.
platform_required_provides = [
'DUK_USE_OS_STRING' # must be #define'd
]
# Architecture files must provide at least these (additional checks
# in validate_architecture_file()). Fill-ins provide missing optionals.
architecture_required_provides = [
'DUK_USE_ARCH_STRING'
]
# Compiler files must provide at least these (additional checks
# in validate_compiler_file()). Fill-ins provide missing optionals.
compiler_required_provides = [
# Compilers need a lot of defines; missing defines are automatically
# filled in with defaults (which are mostly compiler independent), so
    # the list of required defines is not very large.
'DUK_USE_COMPILER_STRING', # must be #define'd
'DUK_USE_BRANCH_HINTS', # may be #undef'd, as long as provided
'DUK_USE_VARIADIC_MACROS', # may be #undef'd, as long as provided
'DUK_USE_UNION_INITIALIZERS' # may be #undef'd, as long as provided
]
#
# Miscellaneous helpers
#
def get_auto_delete_tempdir():
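    """Create a temporary directory which is deleted automatically at exit."""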
tmpdir = tempfile.mkdtemp(suffix='-genconfig')
def _f(dirname):
logger.debug('Deleting temporary directory: %r' % dirname)
if os.path.isdir(dirname) and '-genconfig' in dirname:
shutil.rmtree(dirname)
atexit.register(_f, tmpdir)
return tmpdir
def strip_comments_from_lines(lines):
# Not exact but close enough. Doesn't handle string literals etc,
# but these are not a concrete issue for scanning preprocessor
# #define references.
#
# Comment contents are stripped of any DUK_ prefixed text to avoid
# incorrect requires/provides detection. Other comment text is kept;
# in particular a "/* redefine */" comment must remain intact here.
# (The 'redefine' hack is not actively needed now.)
#
# Avoid Python 2.6 vs. Python 2.7 argument differences.
def censor(x):
return re.sub(re.compile('DUK_\w+', re.MULTILINE), 'xxx', x.group(0))
tmp = '\n'.join(lines)
tmp = re.sub(re.compile('/\*.*?\*/', re.MULTILINE | re.DOTALL), censor, tmp)
tmp = re.sub(re.compile('//.*?$', re.MULTILINE), censor, tmp)
return tmp.split('\n')
# Header snippet representation: lines, provides defines, requires defines.
re_line_provides = re.compile(r'^#(?:define|undef)\s+(\w+).*$')
re_line_requires = re.compile(r'(DUK_[A-Z0-9_]+)') # uppercase only, don't match DUK_USE_xxx for example
class Snippet:
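    """A block of header lines plus the defines it provides and requires."""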
lines = None # lines of text and/or snippets
provides = None # map from define to 'True' for now
requires = None # map from define to 'True' for now
def __init__(self, lines, provides=None, requires=None, autoscan_requires=True, autoscan_provides=True):
self.lines = []
if not isinstance(lines, list):
raise Exception('Snippet constructor must be a list (not e.g. a string): %s' % repr(lines))
for line in lines:
if isinstance(line, str):
self.lines.append(line)
elif isinstance(line, unicode):
self.lines.append(line.encode('utf-8'))
else:
raise Exception('invalid line: %r' % line)
self.provides = {}
if provides is not None:
for k in provides.keys():
self.provides[k] = True
self.requires = {}
if requires is not None:
for k in requires.keys():
self.requires[k] = True
stripped_lines = strip_comments_from_lines(lines)
#for line in stripped_lines:
# logger.debug(line)
for line in stripped_lines:
# Careful with order, snippet may self-reference its own
# defines in which case there's no outward dependency.
# (This is not 100% because the order of require/provide
# matters and this is not handled now.)
#
# Also, some snippets may #undef/#define another define but
# they don't "provide" the define as such. Such redefinitions
# are marked "/* redefine */" in the snippets. They're best
# avoided (and not currently needed in Duktape 1.4.0).
if autoscan_provides:
m = re_line_provides.match(line)
if m is not None and '/* redefine */' not in line and \
len(m.group(1)) > 0 and m.group(1)[-1] != '_':
# Don't allow e.g. DUK_USE_ which results from matching DUK_USE_xxx
#logger.debug('PROVIDES: %r' % m.group(1))
self.provides[m.group(1)] = True
if autoscan_requires:
matches = re.findall(re_line_requires, line)
for m in matches:
if len(m) > 0 and m[-1] == '_':
# Don't allow e.g. DUK_USE_ which results from matching DUK_USE_xxx
pass
elif m[:7] == 'DUK_OPT':
#logger.warning('Encountered DUK_OPT_xxx in a header snippet: %s' % repr(line))
# DUK_OPT_xxx always come from outside
pass
elif m[:7] == 'DUK_USE':
# DUK_USE_xxx are internal and they should not be 'requirements'
pass
elif self.provides.has_key(m):
# Snippet provides it's own require; omit
pass
else:
#logger.debug('REQUIRES: %r' % m)
self.requires[m] = True
def fromFile(cls, filename):
lines = []
with open(filename, 'rb') as f:
for line in f:
if line[-1] == '\n':
line = line[:-1]
if line[:8] == '#snippet':
m = re.match(r'#snippet\s+"(.*?)"', line)
# XXX: better plumbing for lookup path
sub_fn = os.path.normpath(os.path.join(filename, '..', '..', 'header-snippets', m.group(1)))
logger.debug('#snippet ' + sub_fn)
sn = Snippet.fromFile(sub_fn)
lines += sn.lines
else:
lines.append(line)
return Snippet(lines, autoscan_requires=True, autoscan_provides=True)
fromFile = classmethod(fromFile)
def merge(cls, snippets):
ret = Snippet([], [], [])
for s in snippets:
ret.lines += s.lines
for k in s.provides.keys():
ret.provides[k] = True
for k in s.requires.keys():
ret.requires[k] = True
return ret
merge = classmethod(merge)
# Helper for building a text file from individual lines, injected files, etc.
# Inserted values are converted to Snippets so that their provides/requires
# information can be tracked. When non-C outputs are created, these will be
# bogus but ignored.
class FileBuilder:
vals = None # snippet list
base_dir = None
use_cpp_warning = False
def __init__(self, base_dir=None, use_cpp_warning=False):
self.vals = []
self.base_dir = base_dir
self.use_cpp_warning = use_cpp_warning
def line(self, line):
self.vals.append(Snippet([ line ]))
def lines(self, lines):
if len(lines) > 0 and lines[-1] == '\n':
lines = lines[:-1] # strip last newline to avoid empty line
self.vals.append(Snippet(lines.split('\n')))
def empty(self):
self.vals.append(Snippet([ '' ]))
def rst_heading(self, title, char, doubled=False):
tmp = []
if doubled:
tmp.append(char * len(title))
tmp.append(title)
tmp.append(char * len(title))
self.vals.append(Snippet(tmp))
def snippet_relative(self, fn):
sn = Snippet.fromFile(os.path.join(self.base_dir, fn))
self.vals.append(sn)
return sn
def snippet_absolute(self, fn):
sn = Snippet.fromFile(fn)
self.vals.append(sn)
return sn
def cpp_error(self, msg):
# XXX: assume no newlines etc
self.vals.append(Snippet([ '#error %s' % msg ]))
def cpp_warning(self, msg):
# XXX: assume no newlines etc
# XXX: support compiler specific warning mechanisms
if self.use_cpp_warning:
# C preprocessor '#warning' is often supported
self.vals.append(Snippet([ '#warning %s' % msg ]))
else:
self.vals.append(Snippet([ '/* WARNING: %s */' % msg ]))
def cpp_warning_or_error(self, msg, is_error=True):
if is_error:
self.cpp_error(msg)
else:
self.cpp_warning(msg)
def chdr_comment_line(self, msg):
self.vals.append(Snippet([ '/* %s */' % msg ]))
def chdr_block_heading(self, msg):
lines = []
lines.append('')
lines.append('/*')
lines.append(' * ' + msg)
lines.append(' */')
lines.append('')
self.vals.append(Snippet(lines))
def join(self):
tmp = []
for line in self.vals:
if not isinstance(line, object):
raise Exception('self.vals must be all snippets')
for x in line.lines: # x is a Snippet
tmp.append(x)
return '\n'.join(tmp)
def fill_dependencies_for_snippets(self, idx_deps):
fill_dependencies_for_snippets(self.vals, idx_deps)
# Insert missing define dependencies into index 'idx_deps' repeatedly
# until no unsatisfied dependencies exist. This is used to pull in
# the required DUK_F_xxx helper defines without pulling them all in.
# The resolution mechanism also ensures dependencies are pulled in the
# correct order, i.e. DUK_F_xxx helpers may depend on each other (as
# long as there are no circular dependencies).
#
# XXX: this can be simplified a lot
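# Conceptual example (names are illustrative): if a selected platform snippet
# requires DUK_F_WINDOWS and one of the scanned helper snippets provides it,
# that helper Snippet is inserted at idx_deps before the requiring snippet;
# helpers that in turn require further DUK_F_xxx defines are resolved the
# same way, as long as there is no circular dependency.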
def fill_dependencies_for_snippets(snippets, idx_deps):
# graph[A] = [ B, ... ] <-> B, ... provide something A requires.
graph = {}
snlist = []
resolved = [] # for printing only
def add(sn):
if sn in snlist:
return # already present
snlist.append(sn)
to_add = []
for k in sn.requires.keys():
if assumed_provides.has_key(k):
continue
found = False
for sn2 in snlist:
if sn2.provides.has_key(k):
if not graph.has_key(sn):
graph[sn] = []
graph[sn].append(sn2)
found = True # at least one other node provides 'k'
if not found:
logger.debug('Resolving %r' % k)
resolved.append(k)
# Find a header snippet which provides the missing define.
# Some DUK_F_xxx files provide multiple defines, so we don't
# necessarily know the snippet filename here.
sn_req = None
for sn2 in helper_snippets:
if sn2.provides.has_key(k):
sn_req = sn2
break
if sn_req is None:
logger.debug(repr(sn.lines))
raise Exception('cannot resolve missing require: %r' % k)
# Snippet may have further unresolved provides; add recursively
to_add.append(sn_req)
if not graph.has_key(sn):
graph[sn] = []
graph[sn].append(sn_req)
for sn in to_add:
add(sn)
# Add original snippets. This fills in the required nodes
# recursively.
for sn in snippets:
add(sn)
# Figure out fill-ins by looking for snippets not in original
# list and without any unserialized dependent nodes.
handled = {}
for sn in snippets:
handled[sn] = True
keepgoing = True
while keepgoing:
keepgoing = False
for sn in snlist:
if handled.has_key(sn):
continue
success = True
for dep in graph.get(sn, []):
if not handled.has_key(dep):
success = False
if success:
snippets.insert(idx_deps, sn)
idx_deps += 1
snippets.insert(idx_deps, Snippet([ '' ]))
idx_deps += 1
handled[sn] = True
keepgoing = True
break
# XXX: detect and handle loops cleanly
for sn in snlist:
if handled.has_key(sn):
continue
logger.debug('UNHANDLED KEY')
logger.debug('PROVIDES: %r' % sn.provides)
logger.debug('REQUIRES: %r' % sn.requires)
logger.debug('\n'.join(sn.lines))
#logger.debug(repr(graph))
#logger.debug(repr(snlist))
logger.debug('Resolved helper defines: %r' % resolved)
logger.debug('Resolved %d helper defines' % len(resolved))
def serialize_snippet_list(snippets):
ret = []
emitted_provides = {}
for k in assumed_provides.keys():
emitted_provides[k] = True
for sn in snippets:
ret += sn.lines
for k in sn.provides.keys():
emitted_provides[k] = True
for k in sn.requires.keys():
if not emitted_provides.has_key(k):
# XXX: conditional warning, happens in some normal cases
logger.warning('define %r required, not provided so far' % k)
pass
return '\n'.join(ret)
def remove_duplicate_newlines(x):
ret = []
empty = False
for line in x.split('\n'):
if line == '':
if empty:
pass
else:
ret.append(line)
empty = True
else:
empty = False
ret.append(line)
return '\n'.join(ret)
def scan_use_defs(dirname):
global use_defs, use_defs_list
use_defs = {}
use_defs_list = []
for fn in os.listdir(dirname):
root, ext = os.path.splitext(fn)
if not root.startswith('DUK_USE_') or ext != '.yaml':
continue
with open(os.path.join(dirname, fn), 'rb') as f:
doc = yaml.load(f)
if doc.get('example', False):
continue
if doc.get('unimplemented', False):
logger.warning('unimplemented: %s' % fn)
continue
dockeys = doc.keys()
for k in dockeys:
if not k in allowed_use_meta_keys:
logger.warning('unknown key %s in metadata file %s' % (k, fn))
for k in required_use_meta_keys:
if not k in dockeys:
logger.warning('missing key %s in metadata file %s' % (k, fn))
use_defs[doc['define']] = doc
keys = use_defs.keys()
keys.sort()
for k in keys:
use_defs_list.append(use_defs[k])
def scan_opt_defs(dirname):
global opt_defs, opt_defs_list
opt_defs = {}
opt_defs_list = []
for fn in os.listdir(dirname):
root, ext = os.path.splitext(fn)
if not root.startswith('DUK_OPT_') or ext != '.yaml':
continue
with open(os.path.join(dirname, fn), 'rb') as f:
doc = yaml.load(f)
if doc.get('example', False):
continue
if doc.get('unimplemented', False):
logger.warning('unimplemented: %s' % fn)
continue
dockeys = doc.keys()
for k in dockeys:
if not k in allowed_opt_meta_keys:
logger.warning('unknown key %s in metadata file %s' % (k, fn))
for k in required_opt_meta_keys:
if not k in dockeys:
logger.warning('missing key %s in metadata file %s' % (k, fn))
opt_defs[doc['define']] = doc
keys = opt_defs.keys()
keys.sort()
for k in keys:
opt_defs_list.append(opt_defs[k])
def scan_use_tags():
global use_tags, use_tags_list
use_tags = {}
for doc in use_defs_list:
for tag in doc.get('tags', []):
use_tags[tag] = True
use_tags_list = use_tags.keys()
use_tags_list.sort()
def scan_tags_meta(filename):
global tags_meta
with open(filename, 'rb') as f:
tags_meta = yaml.load(f)
def scan_helper_snippets(dirname): # DUK_F_xxx snippets
global helper_snippets
helper_snippets = []
for fn in os.listdir(dirname):
if (fn[0:6] != 'DUK_F_'):
continue
logger.debug('Autoscanning snippet: %s' % fn)
helper_snippets.append(Snippet.fromFile(os.path.join(dirname, fn)))
def get_opt_defs(removed=True, deprecated=True, unused=True):
ret = []
for doc in opt_defs_list:
# XXX: aware of target version
if removed == False and doc.get('removed', None) is not None:
continue
if deprecated == False and doc.get('deprecated', None) is not None:
continue
if unused == False and doc.get('unused', False) == True:
continue
ret.append(doc)
return ret
def get_use_defs(removed=True, deprecated=True, unused=True):
ret = []
for doc in use_defs_list:
# XXX: aware of target version
if removed == False and doc.get('removed', None) is not None:
continue
if deprecated == False and doc.get('deprecated', None) is not None:
continue
if unused == False and doc.get('unused', False) == True:
continue
ret.append(doc)
return ret
def validate_platform_file(filename):
sn = Snippet.fromFile(filename)
for req in platform_required_provides:
if req not in sn.provides:
raise Exception('Platform %s is missing %s' % (filename, req))
# DUK_SETJMP, DUK_LONGJMP, DUK_JMPBUF_TYPE are optional, fill-in
# provides if none defined.
def validate_architecture_file(filename):
sn = Snippet.fromFile(filename)
for req in architecture_required_provides:
if req not in sn.provides:
raise Exception('Architecture %s is missing %s' % (filename, req))
# Byte order and alignment defines are allowed to be missing,
# a fill-in will handle them. This is necessary because for
# some architecture byte order and/or alignment may vary between
# targets and may be software configurable.
# XXX: require automatic detection to be signaled?
# e.g. define DUK_USE_ALIGN_BY -1
# define DUK_USE_BYTE_ORDER -1
def validate_compiler_file(filename):
sn = Snippet.fromFile(filename)
for req in compiler_required_provides:
if req not in sn.provides:
raise Exception('Compiler %s is missing %s' % (filename, req))
def get_tag_title(tag):
meta = tags_meta.get(tag, None)
if meta is None:
return tag
else:
return meta.get('title', tag)
def get_tag_description(tag):
meta = tags_meta.get(tag, None)
if meta is None:
return None
else:
return meta.get('description', None)
def get_tag_list_with_preferred_order(preferred):
tags = []
# Preferred tags first
for tag in preferred:
if tag not in tags:
tags.append(tag)
# Remaining tags in alphabetic order
for tag in use_tags_list:
if tag not in tags:
tags.append(tag)
logger.debug('Effective tag order: %r' % tags)
return tags
def rst_format(text):
# XXX: placeholder, need to decide on markup conventions for YAML files
ret = []
for para in text.split('\n'):
if para == '':
continue
ret.append(para)
return '\n\n'.join(ret)
def cint_encode(x):
if not isinstance(x, (int, long)):
raise Exception('invalid input: %r' % x)
# XXX: unsigned constants?
if x > 0x7fffffff or x < -0x80000000:
return '%dLL' % x
elif x > 0x7fff or x < -0x8000:
return '%dL' % x
else:
return '%d' % x
def cstr_encode(x):
if isinstance(x, unicode):
x = x.encode('utf-8')
if not isinstance(x, str):
raise Exception('invalid input: %r' % x)
res = '"'
term = False
has_terms = False
for c in x:
if term:
# Avoid ambiguous hex escapes
res += '" "'
term = False
has_terms = True
o = ord(c)
if o < 0x20 or o > 0x7e or c in '"\\':
res += '\\x%02x' % o
term = True
else:
res += c
res += '"'
if has_terms:
res = '(' + res + ')'
return res
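# Encoding examples (illustrative, based on the helpers above):
#   cint_encode(10)       -> '10'
#   cint_encode(100000)   -> '100000L'
#   cint_encode(2 ** 33)  -> '8589934592LL'
#   cstr_encode('foo')    -> '"foo"'
#   cstr_encode('a\nb')   -> '("a\x0a" "b")'  (hex escape plus string splitting)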
#
# Autogeneration of option documentation
#
# Shared helper to generate DUK_USE_xxx documentation.
# XXX: unfinished placeholder
def generate_option_documentation(opts, opt_list=None, rst_title=None, include_default=False):
ret = FileBuilder(use_cpp_warning=opts.use_cpp_warning)
tags = get_tag_list_with_preferred_order(doc_tag_order)
title = rst_title
ret.rst_heading(title, '=', doubled=True)
handled = {}
for tag in tags:
first = True
for doc in opt_list:
if tag != doc['tags'][0]: # sort under primary tag
continue
dname = doc['define']
desc = doc.get('description', None)
if handled.has_key(dname):
raise Exception('define handled twice, should not happen: %r' % dname)
handled[dname] = True
if first: # emit tag heading only if there are subsections
ret.empty()
ret.rst_heading(get_tag_title(tag), '=')
tag_desc = get_tag_description(tag)
if tag_desc is not None:
ret.empty()
ret.line(rst_format(tag_desc))
first = False
ret.empty()
ret.rst_heading(dname, '-')
if desc is not None:
ret.empty()
ret.line(rst_format(desc))
if include_default:
ret.empty()
ret.line('Default: ``' + str(doc['default']) + '``') # XXX: rst or other format
for doc in opt_list:
dname = doc['define']
if not handled.has_key(dname):
raise Exception('unhandled define (maybe missing from tags list?): %r' % dname)
ret.empty()
return ret.join()
def generate_config_option_documentation(opts):
defs = get_use_defs()
return generate_option_documentation(opts, opt_list=defs, rst_title='Duktape config options', include_default=True)
#
# Helpers for duk_config.h generation
#
def get_forced_options(opts):
# Forced options, last occurrence wins (allows a base config file to be
# overridden by a more specific one).
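    # Ordering example (hypothetical file names): with
    #   --option-file base.yaml --option-file low_memory.yaml -DDUK_USE_FASTINT
    # base.yaml is applied first, then low_memory.yaml, then the -D define,
    # so later sources override earlier ones for the same DUK_USE_xxx key.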
forced_opts = {}
for val in opts.force_options_yaml:
doc = yaml.load(StringIO(val))
for k in doc.keys():
if use_defs.has_key(k):
pass # key is known
else:
logger.warning('option override key %s not defined in metadata, ignoring' % k)
forced_opts[k] = doc[k] # shallow copy
if len(forced_opts.keys()) > 0:
logger.debug('Overrides: %s' % json.dumps(forced_opts))
return forced_opts
# Emit a default #define / #undef for an option based on
# a config option metadata node (parsed YAML doc).
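# Rough mapping from metadata 'default' values to emitted lines (sketch,
# DUK_USE_XXX stands for the option name):
#   True             ->  #define DUK_USE_XXX
#   False            ->  #undef DUK_USE_XXX   (skipped if an #undef was already emitted)
#   10               ->  #define DUK_USE_XXX 10        (via cint_encode())
#   "(FOO)"          ->  #define DUK_USE_XXX (FOO)     (verbatim value)
#   {verbatim: ...}  ->  the given line emitted as-is
#   {string: "x"}    ->  #define DUK_USE_XXX "x"       (via cstr_encode())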
def emit_default_from_config_meta(ret, doc, forced_opts, undef_done, active_opts):
defname = doc['define']
defval = forced_opts.get(defname, doc['default'])
# NOTE: careful with Python equality, e.g. "0 == False" is true.
if isinstance(defval, bool) and defval == True:
ret.line('#define ' + defname)
active_opts[defname] = True
elif isinstance(defval, bool) and defval == False:
if not undef_done:
ret.line('#undef ' + defname)
else:
# Default value is false, and caller has emitted
# an unconditional #undef, so don't emit a duplicate
pass
active_opts[defname] = False
elif isinstance(defval, (int, long)):
# integer value
ret.line('#define ' + defname + ' ' + cint_encode(defval))
active_opts[defname] = True
elif isinstance(defval, (str, unicode)):
# verbatim value
ret.line('#define ' + defname + ' ' + defval)
active_opts[defname] = True
elif isinstance(defval, dict):
if defval.has_key('verbatim'):
# verbatim text for the entire line
ret.line(defval['verbatim'])
elif defval.has_key('string'):
# C string value
ret.line('#define ' + defname + ' ' + cstr_encode(defval['string']))
else:
raise Exception('unsupported value for option %s: %r' % (defname, defval))
active_opts[defname] = True
else:
raise Exception('unsupported value for option %s: %r' % (defname, defval))
# Add a header snippet for detecting presence of DUK_OPT_xxx feature
# options and warning/erroring if application defines them. Useful for
# Duktape 2.x migration.
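# The emitted check looks roughly like this for each DUK_OPT_xxx define
# (hypothetical option name; the #error becomes a warning when
# --sanity-warning is given):
#   #if defined(DUK_OPT_EXAMPLE)
#   #error unsupported legacy feature option DUK_OPT_EXAMPLE used
#   #endif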
def add_legacy_feature_option_checks(opts, ret):
ret.chdr_block_heading('Checks for legacy feature options (DUK_OPT_xxx)')
ret.empty()
defs = []
for doc in get_opt_defs():
if doc['define'] not in defs:
defs.append(doc['define'])
defs.sort()
for optname in defs:
ret.line('#if defined(%s)' % optname)
ret.cpp_warning_or_error('unsupported legacy feature option %s used' % optname, opts.sanity_strict)
ret.line('#endif')
ret.empty()
# Add a header snippet for checking consistency of DUK_USE_xxx config
# options, e.g. inconsistent options, invalid option values.
def add_config_option_checks(opts, ret):
ret.chdr_block_heading('Checks for config option consistency (DUK_USE_xxx)')
ret.empty()
defs = []
for doc in get_use_defs():
if doc['define'] not in defs:
defs.append(doc['define'])
defs.sort()
for optname in defs:
doc = use_defs[optname]
dname = doc['define']
# XXX: more checks
if doc.get('removed', None) is not None:
ret.line('#if defined(%s)' % dname)
ret.cpp_warning_or_error('unsupported config option used (option has been removed): %s' % dname, opts.sanity_strict)
ret.line('#endif')
elif doc.get('deprecated', None) is not None:
ret.line('#if defined(%s)' % dname)
ret.cpp_warning_or_error('unsupported config option used (option has been deprecated): %s' % dname, opts.sanity_strict)
ret.line('#endif')
for req in doc.get('requires', []):
ret.line('#if defined(%s) && !defined(%s)' % (dname, req))
ret.cpp_warning_or_error('config option %s requires option %s (which is missing)' % (dname, req), opts.sanity_strict)
ret.line('#endif')
for req in doc.get('conflicts', []):
ret.line('#if defined(%s) && defined(%s)' % (dname, req))
ret.cpp_warning_or_error('config option %s conflicts with option %s (which is also defined)' % (dname, req), opts.sanity_strict)
ret.line('#endif')
ret.empty()
ret.snippet_relative('cpp_exception_sanity.h.in')
ret.empty()
# Add a header snippet for providing a __OVERRIDE_DEFINES__ section.
def add_override_defines_section(opts, ret):
ret.empty()
ret.line('/*')
ret.line(' * You may add overriding #define/#undef directives below for')
ret.line(' * customization. You of course cannot un-#include or un-typedef')
ret.line(' * anything; these require direct changes above.')
ret.line(' */')
ret.empty()
ret.line('/* __OVERRIDE_DEFINES__ */')
ret.empty()
# Development time helper: add DUK_ACTIVE which provides a runtime C string
# indicating what DUK_USE_xxx config options are active at run time. This
# is useful in genconfig development so that one can e.g. diff the active
# run time options of two headers. This is intended just for genconfig
# development and is not available in normal headers.
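# The generated macro expands roughly to (sketch):
#   #define DUK_ACTIVE_DEFINES ("Active: [" DUK_ACTIVE_DEF0 DUK_ACTIVE_DEF1 ... " ]")
# where each DUK_ACTIVE_DEFn is " DUK_USE_xxx" if that option is defined and
# "" otherwise, so the string lists the active options at compile time.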
def add_duk_active_defines_macro(ret):
ret.chdr_block_heading('DUK_ACTIVE_DEFINES macro (development only)')
idx = 0
for doc in get_use_defs():
defname = doc['define']
ret.line('#if defined(%s)' % defname)
ret.line('#define DUK_ACTIVE_DEF%d " %s"' % (idx, defname))
ret.line('#else')
ret.line('#define DUK_ACTIVE_DEF%d ""' % idx)
ret.line('#endif')
idx += 1
tmp = []
for i in xrange(idx):
tmp.append('DUK_ACTIVE_DEF%d' % i)
ret.line('#define DUK_ACTIVE_DEFINES ("Active: ["' + ' '.join(tmp) + ' " ]")')
#
# duk_config.h generation
#
# Generate a duk_config.h where platform, architecture, and compiler are
# all either autodetected or specified by user.
#
# Autodetection is based on a configured list of supported platforms,
# architectures, and compilers. For example, platforms.yaml defines the
# supported platforms and provides a helper define (DUK_F_xxx) to use for
# detecting that platform, and names the header snippet to provide the
# platform-specific definitions. Necessary dependencies (DUK_F_xxx) are
# automatically pulled in.
#
# Automatic "fill ins" are used for mandatory platform, architecture, and
# compiler defines which have a reasonable portable default. This reduces
# e.g. compiler-specific define count because there are a lot compiler
# macros which have a good default.
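# Rough layout of the generated duk_config.h (sketch; exact contents depend
# on command line options and metadata):
#   1. comment banner (git info, supported platforms/architectures/compilers)
#   2. intermediate helpers (DUK_F_DLL_BUILD) and resolved DUK_F_xxx snippets
#   3. platform, architecture, and compiler sections (explicit or autodetected)
#   4. number type definitions and platform/architecture/compiler fill-ins
#   5. forced options, autogenerated defaults, fixups, __OVERRIDE_DEFINES__
#   6. date provider, optional sanity checks, derived byte order defines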
def generate_duk_config_header(opts, meta_dir):
ret = FileBuilder(base_dir=os.path.join(meta_dir, 'header-snippets'), \
use_cpp_warning=opts.use_cpp_warning)
# Parse forced options. Warn about missing forced options when it is
# strongly recommended that the option is provided.
forced_opts = get_forced_options(opts)
for doc in use_defs_list:
if doc.get('warn_if_missing', False) and not forced_opts.has_key(doc['define']):
logger.warning('Recommended config option ' + doc['define'] + ' not provided')
# Gather a map of "active options" for genbuiltins.py. This is used to
# implement proper optional built-ins, e.g. if a certain config option
# (like DUK_USE_ES6_PROXY) is disabled, the corresponding objects and
# properties are dropped entirely. The mechanism is not perfect: it won't
# detect fixup changes for example.
active_opts = {}
platforms = None
with open(os.path.join(meta_dir, 'platforms.yaml'), 'rb') as f:
platforms = yaml.load(f)
architectures = None
with open(os.path.join(meta_dir, 'architectures.yaml'), 'rb') as f:
architectures = yaml.load(f)
compilers = None
with open(os.path.join(meta_dir, 'compilers.yaml'), 'rb') as f:
compilers = yaml.load(f)
# XXX: indicate feature option support, sanity checks enabled, etc
# in general summary of options, perhaps genconfig command line?
ret.line('/*')
ret.line(' * duk_config.h configuration header generated by genconfig.py.')
ret.line(' *')
    ret.line(' * Git commit: %s' % (opts.git_commit or 'n/a'))
    ret.line(' * Git describe: %s' % (opts.git_describe or 'n/a'))
    ret.line(' * Git branch: %s' % (opts.git_branch or 'n/a'))
ret.line(' *')
if opts.platform is not None:
ret.line(' * Platform: ' + opts.platform)
else:
ret.line(' * Supported platforms:')
for platf in platforms['autodetect']:
ret.line(' * - %s' % platf.get('name', platf.get('check')))
ret.line(' *')
if opts.architecture is not None:
ret.line(' * Architecture: ' + opts.architecture)
else:
ret.line(' * Supported architectures:')
for arch in architectures['autodetect']:
ret.line(' * - %s' % arch.get('name', arch.get('check')))
ret.line(' *')
if opts.compiler is not None:
ret.line(' * Compiler: ' + opts.compiler)
else:
ret.line(' * Supported compilers:')
for comp in compilers['autodetect']:
ret.line(' * - %s' % comp.get('name', comp.get('check')))
ret.line(' *')
ret.line(' */')
ret.empty()
ret.line('#if !defined(DUK_CONFIG_H_INCLUDED)')
ret.line('#define DUK_CONFIG_H_INCLUDED')
ret.empty()
ret.chdr_block_heading('Intermediate helper defines')
# DLL build affects visibility attributes on Windows but unfortunately
# cannot be detected automatically from preprocessor defines or such.
    # DLL build status is hidden behind DUK_F_DLL_BUILD.
ret.chdr_comment_line('DLL build detection')
if opts.dll:
ret.line('/* configured for DLL build */')
ret.line('#define DUK_F_DLL_BUILD')
else:
ret.line('/* not configured for DLL build */')
ret.line('#undef DUK_F_DLL_BUILD')
ret.empty()
idx_deps = len(ret.vals) # position where to emit DUK_F_xxx dependencies
# Feature selection, system include, Date provider
# Most #include statements are here
if opts.platform is not None:
ret.chdr_block_heading('Platform: ' + opts.platform)
ret.snippet_relative('platform_cppextras.h.in')
ret.empty()
# XXX: better to lookup platforms metadata
include = 'platform_%s.h.in' % opts.platform
abs_fn = os.path.join(meta_dir, 'platforms', include)
validate_platform_file(abs_fn)
ret.snippet_absolute(abs_fn)
else:
ret.chdr_block_heading('Platform autodetection')
ret.snippet_relative('platform_cppextras.h.in')
ret.empty()
for idx, platf in enumerate(platforms['autodetect']):
check = platf.get('check', None)
include = platf['include']
abs_fn = os.path.join(meta_dir, 'platforms', include)
validate_platform_file(abs_fn)
if idx == 0:
ret.line('#if defined(%s)' % check)
else:
if check is None:
ret.line('#else')
else:
ret.line('#elif defined(%s)' % check)
ret.line('/* --- %s --- */' % platf.get('name', '???'))
ret.snippet_absolute(abs_fn)
ret.line('#endif /* autodetect platform */')
ret.empty()
ret.snippet_relative('platform_sharedincludes.h.in')
ret.empty()
byteorder_provided_by_all = True # byteorder provided by all architecture files
alignment_provided_by_all = True # alignment provided by all architecture files
packedtval_provided_by_all = True # packed tval provided by all architecture files
if opts.architecture is not None:
ret.chdr_block_heading('Architecture: ' + opts.architecture)
# XXX: better to lookup architectures metadata
include = 'architecture_%s.h.in' % opts.architecture
abs_fn = os.path.join(meta_dir, 'architectures', include)
validate_architecture_file(abs_fn)
sn = ret.snippet_absolute(abs_fn)
if not sn.provides.get('DUK_USE_BYTEORDER', False):
byteorder_provided_by_all = False
if not sn.provides.get('DUK_USE_ALIGN_BY', False):
alignment_provided_by_all = False
if sn.provides.get('DUK_USE_PACKED_TVAL', False):
ret.line('#define DUK_F_PACKED_TVAL_PROVIDED') # signal to fillin
else:
packedtval_provided_by_all = False
else:
ret.chdr_block_heading('Architecture autodetection')
for idx, arch in enumerate(architectures['autodetect']):
check = arch.get('check', None)
include = arch['include']
abs_fn = os.path.join(meta_dir, 'architectures', include)
validate_architecture_file(abs_fn)
if idx == 0:
ret.line('#if defined(%s)' % check)
else:
if check is None:
ret.line('#else')
else:
ret.line('#elif defined(%s)' % check)
ret.line('/* --- %s --- */' % arch.get('name', '???'))
sn = ret.snippet_absolute(abs_fn)
if not sn.provides.get('DUK_USE_BYTEORDER', False):
byteorder_provided_by_all = False
if not sn.provides.get('DUK_USE_ALIGN_BY', False):
alignment_provided_by_all = False
if sn.provides.get('DUK_USE_PACKED_TVAL', False):
ret.line('#define DUK_F_PACKED_TVAL_PROVIDED') # signal to fillin
else:
packedtval_provided_by_all = False
ret.line('#endif /* autodetect architecture */')
ret.empty()
if opts.compiler is not None:
ret.chdr_block_heading('Compiler: ' + opts.compiler)
# XXX: better to lookup compilers metadata
include = 'compiler_%s.h.in' % opts.compiler
abs_fn = os.path.join(meta_dir, 'compilers', include)
validate_compiler_file(abs_fn)
sn = ret.snippet_absolute(abs_fn)
else:
ret.chdr_block_heading('Compiler autodetection')
for idx, comp in enumerate(compilers['autodetect']):
check = comp.get('check', None)
include = comp['include']
abs_fn = os.path.join(meta_dir, 'compilers', include)
validate_compiler_file(abs_fn)
if idx == 0:
ret.line('#if defined(%s)' % check)
else:
if check is None:
ret.line('#else')
else:
ret.line('#elif defined(%s)' % check)
ret.line('/* --- %s --- */' % comp.get('name', '???'))
sn = ret.snippet_absolute(abs_fn)
ret.line('#endif /* autodetect compiler */')
ret.empty()
# DUK_F_UCLIBC is special because __UCLIBC__ is provided by an #include
# file, so the check must happen after platform includes. It'd be nice
# for this to be automatic (e.g. DUK_F_UCLIBC.h.in could indicate the
# dependency somehow).
ret.snippet_absolute(os.path.join(meta_dir, 'helper-snippets', 'DUK_F_UCLIBC.h.in'))
ret.empty()
# XXX: platform/compiler could provide types; if so, need some signaling
# defines like DUK_F_TYPEDEFS_DEFINED
# Number types
if opts.c99_types_only:
ret.snippet_relative('types1.h.in')
ret.line('/* C99 types assumed */')
ret.snippet_relative('types_c99.h.in')
ret.empty()
else:
ret.snippet_relative('types1.h.in')
ret.line('#if defined(DUK_F_HAVE_INTTYPES)')
ret.line('/* C99 or compatible */')
ret.empty()
ret.snippet_relative('types_c99.h.in')
ret.empty()
ret.line('#else /* C99 types */')
ret.empty()
ret.snippet_relative('types_legacy.h.in')
ret.empty()
ret.line('#endif /* C99 types */')
ret.empty()
ret.snippet_relative('types2.h.in')
ret.empty()
ret.snippet_relative('64bitops.h.in')
ret.empty()
# Platform, architecture, compiler fillins. These are after all
# detection so that e.g. DUK_SPRINTF() can be provided by platform
# or compiler before trying a fill-in.
ret.chdr_block_heading('Fill-ins for platform, architecture, and compiler')
ret.snippet_relative('platform_fillins.h.in')
ret.empty()
ret.snippet_relative('architecture_fillins.h.in')
if not byteorder_provided_by_all:
ret.empty()
ret.snippet_relative('byteorder_fillin.h.in')
if not alignment_provided_by_all:
ret.empty()
ret.snippet_relative('alignment_fillin.h.in')
ret.empty()
ret.snippet_relative('compiler_fillins.h.in')
ret.empty()
ret.snippet_relative('inline_workaround.h.in')
ret.empty()
if not packedtval_provided_by_all:
ret.empty()
ret.snippet_relative('packed_tval_fillin.h.in')
# Object layout
ret.snippet_relative('object_layout.h.in')
ret.empty()
# Detect and reject 'fast math'
ret.snippet_relative('reject_fast_math.h.in')
ret.empty()
# Emit forced options. If a corresponding option is already defined
# by a snippet above, #undef it first.
tmp = Snippet(ret.join().split('\n'))
first_forced = True
for doc in get_use_defs(removed=not opts.omit_removed_config_options,
deprecated=not opts.omit_deprecated_config_options,
unused=not opts.omit_unused_config_options):
defname = doc['define']
if not forced_opts.has_key(defname):
continue
if not doc.has_key('default'):
raise Exception('config option %s is missing default value' % defname)
if first_forced:
ret.chdr_block_heading('Forced options')
first_forced = False
undef_done = False
if tmp.provides.has_key(defname):
ret.line('#undef ' + defname)
undef_done = True
emit_default_from_config_meta(ret, doc, forced_opts, undef_done, active_opts)
ret.empty()
# If manually-edited snippets don't #define or #undef a certain
# config option, emit a default value here. This is useful to
# fill-in for new config options not covered by manual snippets
# (which is intentional).
tmp = Snippet(ret.join().split('\n'))
need = {}
for doc in get_use_defs(removed=False):
need[doc['define']] = True
for k in tmp.provides.keys():
if need.has_key(k):
del need[k]
need_keys = sorted(need.keys())
if len(need_keys) > 0:
ret.chdr_block_heading('Autogenerated defaults')
for k in need_keys:
logger.debug('config option %s not covered by manual snippets, emitting default automatically' % k)
emit_default_from_config_meta(ret, use_defs[k], {}, False, active_opts)
ret.empty()
if len(opts.fixup_header_lines) > 0:
ret.chdr_block_heading('Fixups')
for line in opts.fixup_header_lines:
ret.line(line)
ret.empty()
add_override_defines_section(opts, ret)
# Date provider snippet is after custom header and overrides, so that
# the user may define e.g. DUK_USE_DATE_NOW_GETTIMEOFDAY in their
# custom header.
ret.snippet_relative('date_provider.h.in')
ret.empty()
ret.fill_dependencies_for_snippets(idx_deps)
if opts.emit_legacy_feature_check:
add_legacy_feature_option_checks(opts, ret)
if opts.emit_config_sanity_check:
add_config_option_checks(opts, ret)
if opts.add_active_defines_macro:
add_duk_active_defines_macro(ret)
# Derived defines (DUK_USE_INTEGER_LE, etc) from DUK_USE_BYTEORDER.
# Duktape internals currently rely on the derived defines. This is
# after sanity checks because the derived defines are marked removed.
ret.snippet_relative('byteorder_derived.h.in')
ret.empty()
ret.line('#endif /* DUK_CONFIG_H_INCLUDED */')
ret.empty() # for trailing newline
return remove_duplicate_newlines(ret.join()), active_opts
#
# Main
#
def add_genconfig_optparse_options(parser, direct=False):
# Forced options from multiple sources are gathered into a shared list
# so that the override order remains the same as on the command line.
force_options_yaml = []
def add_force_option_yaml(option, opt, value, parser):
# XXX: check that YAML parses
force_options_yaml.append(value)
def add_force_option_file(option, opt, value, parser):
# XXX: check that YAML parses
with open(value, 'rb') as f:
force_options_yaml.append(f.read())
def add_force_option_define(option, opt, value, parser):
tmp = value.split('=')
if len(tmp) == 1:
doc = { tmp[0]: True }
elif len(tmp) == 2:
doc = { tmp[0]: tmp[1] }
else:
raise Exception('invalid option value: %r' % value)
force_options_yaml.append(yaml.safe_dump(doc))
def add_force_option_undefine(option, opt, value, parser):
tmp = value.split('=')
if len(tmp) == 1:
doc = { tmp[0]: False }
else:
raise Exception('invalid option value: %r' % value)
force_options_yaml.append(yaml.safe_dump(doc))
fixup_header_lines = []
def add_fixup_header_line(option, opt, value, parser):
fixup_header_lines.append(value)
def add_fixup_header_file(option, opt, value, parser):
with open(value, 'rb') as f:
for line in f:
if line[-1] == '\n':
line = line[:-1]
fixup_header_lines.append(line)
if direct:
parser.add_option('--metadata', dest='config_metadata', default=None, help='metadata directory')
parser.add_option('--output', dest='output', default=None, help='output filename for C header or RST documentation file')
parser.add_option('--output-active-options', dest='output_active_options', default=None, help='output JSON file with active config options information')
else:
# Different option name when called through configure.py,
# also no --output option.
parser.add_option('--config-metadata', dest='config_metadata', default=None, help='metadata directory (defaulted based on configure.py script path)')
parser.add_option('--platform', dest='platform', default=None, help='platform (default is autodetect)')
parser.add_option('--compiler', dest='compiler', default=None, help='compiler (default is autodetect)')
    parser.add_option('--architecture', dest='architecture', default=None, help='architecture (default is autodetect)')
parser.add_option('--c99-types-only', dest='c99_types_only', action='store_true', default=False, help='assume C99 types, no legacy type detection')
parser.add_option('--dll', dest='dll', action='store_true', default=False, help='dll build of Duktape, affects symbol visibility macros especially on Windows')
parser.add_option('--support-feature-options', dest='support_feature_options', action='store_true', default=False, help=optparse.SUPPRESS_HELP)
parser.add_option('--emit-legacy-feature-check', dest='emit_legacy_feature_check', action='store_true', default=False, help='emit preprocessor checks to reject legacy feature options (DUK_OPT_xxx)')
parser.add_option('--emit-config-sanity-check', dest='emit_config_sanity_check', action='store_true', default=False, help='emit preprocessor checks for config option consistency (DUK_USE_xxx)')
parser.add_option('--omit-removed-config-options', dest='omit_removed_config_options', action='store_true', default=False, help='omit removed config options from generated headers')
parser.add_option('--omit-deprecated-config-options', dest='omit_deprecated_config_options', action='store_true', default=False, help='omit deprecated config options from generated headers')
parser.add_option('--omit-unused-config-options', dest='omit_unused_config_options', action='store_true', default=False, help='omit unused config options from generated headers')
parser.add_option('--add-active-defines-macro', dest='add_active_defines_macro', action='store_true', default=False, help='add DUK_ACTIVE_DEFINES macro, for development only')
parser.add_option('--define', type='string', metavar='OPTION', dest='force_options_yaml', action='callback', callback=add_force_option_define, default=force_options_yaml, help='force #define option using a C compiler like syntax, e.g. "--define DUK_USE_DEEP_C_STACK" or "--define DUK_USE_TRACEBACK_DEPTH=10"')
parser.add_option('-D', type='string', metavar='OPTION', dest='force_options_yaml', action='callback', callback=add_force_option_define, default=force_options_yaml, help='synonym for --define, e.g. "-DDUK_USE_DEEP_C_STACK" or "-DDUK_USE_TRACEBACK_DEPTH=10"')
parser.add_option('--undefine', type='string', metavar='OPTION', dest='force_options_yaml', action='callback', callback=add_force_option_undefine, default=force_options_yaml, help='force #undef option using a C compiler like syntax, e.g. "--undefine DUK_USE_DEEP_C_STACK"')
parser.add_option('-U', type='string', metavar='OPTION', dest='force_options_yaml', action='callback', callback=add_force_option_undefine, default=force_options_yaml, help='synonym for --undefine, e.g. "-UDUK_USE_DEEP_C_STACK"')
parser.add_option('--option-yaml', type='string', metavar='YAML', dest='force_options_yaml', action='callback', callback=add_force_option_yaml, default=force_options_yaml, help='force option(s) using inline YAML (e.g. --option-yaml "DUK_USE_DEEP_C_STACK: true")')
parser.add_option('--option-file', type='string', metavar='FILENAME', dest='force_options_yaml', action='callback', callback=add_force_option_file, default=force_options_yaml, help='YAML file(s) providing config option overrides')
parser.add_option('--fixup-file', type='string', metavar='FILENAME', dest='fixup_header_lines', action='callback', callback=add_fixup_header_file, default=fixup_header_lines, help='C header snippet file(s) to be appended to generated header, useful for manual option fixups')
parser.add_option('--fixup-line', type='string', metavar='LINE', dest='fixup_header_lines', action='callback', callback=add_fixup_header_line, default=fixup_header_lines, help='C header fixup line to be appended to generated header (e.g. --fixup-line "#define DUK_USE_FASTINT")')
parser.add_option('--sanity-warning', dest='sanity_strict', action='store_false', default=True, help='emit a warning instead of #error for option sanity check issues')
parser.add_option('--use-cpp-warning', dest='use_cpp_warning', action='store_true', default=False, help='emit a (non-portable) #warning when appropriate')
if direct:
parser.add_option('--git-commit', dest='git_commit', default=None, help='git commit hash to be included in header comments')
parser.add_option('--git-describe', dest='git_describe', default=None, help='git describe string to be included in header comments')
parser.add_option('--git-branch', dest='git_branch', default=None, help='git branch string to be included in header comments')
parser.add_option('--quiet', dest='quiet', action='store_true', default=False, help='Suppress info messages (show warnings)')
parser.add_option('--verbose', dest='verbose', action='store_true', default=False, help='Show verbose debug messages')
def parse_options():
commands = [
'duk-config-header',
'config-documentation'
]
parser = optparse.OptionParser(
usage='Usage: %prog [options] COMMAND',
description='Generate a duk_config.h or config option documentation based on config metadata.',
epilog='COMMAND can be one of: ' + ', '.join(commands) + '.'
)
add_genconfig_optparse_options(parser, direct=True)
(opts, args) = parser.parse_args()
return opts, args
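# Example invocation (illustrative; the metadata and output paths are
# hypothetical):
#   python genconfig.py --metadata config --output /tmp/duk_config.h \
#       -DDUK_USE_FASTINT --fixup-line '#define DUK_USE_DEBUG' duk-config-header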
def genconfig(opts, args):
# Log level.
if opts.quiet:
logger.setLevel(logging.WARNING)
elif opts.verbose:
logger.setLevel(logging.DEBUG)
if opts.support_feature_options:
raise Exception('--support-feature-options and support for DUK_OPT_xxx feature options are obsolete, use DUK_USE_xxx config options instead')
meta_dir = opts.config_metadata
if opts.config_metadata is None:
if os.path.isdir(os.path.join('.', 'config-options')):
opts.config_metadata = '.'
if opts.config_metadata is not None and os.path.isdir(opts.config_metadata):
meta_dir = opts.config_metadata
metadata_src_text = 'Using metadata directory: %r' % meta_dir
else:
raise Exception('metadata argument must be a directory (tar.gz no longer supported)')
scan_helper_snippets(os.path.join(meta_dir, 'helper-snippets'))
scan_use_defs(os.path.join(meta_dir, 'config-options'))
scan_opt_defs(os.path.join(meta_dir, 'feature-options'))
scan_use_tags()
scan_tags_meta(os.path.join(meta_dir, 'tags.yaml'))
    logger.debug('%s, scanned %d DUK_USE_xxx, %d helper snippets' % \
(metadata_src_text, len(use_defs.keys()), len(helper_snippets)))
logger.debug('Tags: %r' % use_tags_list)
if len(args) == 0:
raise Exception('missing command')
cmd = args[0]
if cmd == 'duk-config-header':
# Generate a duk_config.h header with platform, compiler, and
# architecture either autodetected (default) or specified by
# user.
desc = [
'platform=' + ('any', opts.platform)[opts.platform is not None],
'architecture=' + ('any', opts.architecture)[opts.architecture is not None],
'compiler=' + ('any', opts.compiler)[opts.compiler is not None]
]
if opts.dll:
desc.append('dll mode')
logger.info('Creating duk_config.h: ' + ', '.join(desc))
result, active_opts = generate_duk_config_header(opts, meta_dir)
with open(opts.output, 'wb') as f:
f.write(result)
logger.debug('Wrote duk_config.h to ' + str(opts.output))
if opts.output_active_options is not None:
with open(opts.output_active_options, 'wb') as f:
f.write(json.dumps(active_opts, indent=4))
logger.debug('Wrote active options JSON metadata to ' + str(opts.output_active_options))
elif cmd == 'feature-documentation':
raise Exception('The feature-documentation command has been removed along with DUK_OPT_xxx feature option support')
elif cmd == 'config-documentation':
logger.info('Creating config option documentation')
result = generate_config_option_documentation(opts)
with open(opts.output, 'wb') as f:
f.write(result)
logger.debug('Wrote config option documentation to ' + str(opts.output))
else:
raise Exception('invalid command: %r' % cmd)
def main():
opts, args = parse_options()
genconfig(opts, args)
if __name__ == '__main__':
main()
| mit | -6,401,203,405,470,451,000 | 37.105511 | 313 | 0.606021 | false |
noironetworks/group-based-policy | gbpservice/neutron/services/grouppolicy/drivers/extensions/aim_mapping_extension_driver.py | 1 | 4344 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.plugins import directory
from oslo_log import log as logging
import sqlalchemy as sa
from sqlalchemy.ext import baked
from gbpservice.neutron.db.grouppolicy.extensions import (
apic_auto_ptg_db as auto_ptg_db)
from gbpservice.neutron.db.grouppolicy.extensions import (
apic_intra_ptg_db as intra_ptg_db)
from gbpservice.neutron.db.grouppolicy import group_policy_db as gp_db
from gbpservice.neutron.extensions import cisco_apic_gbp
from gbpservice.neutron.extensions import group_policy as gpolicy
from gbpservice.neutron.services.grouppolicy import (
group_policy_driver_api as api)
LOG = logging.getLogger(__name__)
BAKERY = baked.bakery(_size_alert=lambda c: LOG.warning(
"sqlalchemy baked query cache size exceeded in %s", __name__))
class AIMExtensionDriver(api.ExtensionDriver,
intra_ptg_db.ApicIntraPtgDBMixin,
auto_ptg_db.ApicAutoPtgDBMixin):
_supported_extension_alias = cisco_apic_gbp.ALIAS
_extension_dict = cisco_apic_gbp.EXTENDED_ATTRIBUTES_2_0
def __init__(self):
LOG.info("AIM Extension __init__")
self._policy_driver = None
@property
def _pd(self):
if not self._policy_driver:
gbp_plugin = directory.get_plugin("GROUP_POLICY")
policy_mgr = gbp_plugin.policy_driver_manager
self._policy_driver = policy_mgr.policy_drivers['aim_mapping'].obj
return self._policy_driver
def initialize(self):
pass
@property
def extension_alias(self):
return self._supported_extension_alias
def _set_intra_ptg_allow(self, session, data, result):
ptg = data['policy_target_group']
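        # The baked query below is compiled once and cached by BAKERY; only
        # the bound 'id' parameter changes between calls.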
query = BAKERY(lambda s: s.query(
gp_db.PolicyTargetGroup))
query += lambda q: q.filter_by(
id=sa.bindparam('id'))
ptg_db = query(session).params(
id=result['id']).one()
if not ptg_db:
raise gpolicy.PolicyTargetGroupNotFound(
policy_target_group_id=result['id'])
if 'intra_ptg_allow' in ptg:
self.set_intra_ptg_allow(
session, policy_target_group_id=result['id'],
intra_ptg_allow=ptg['intra_ptg_allow'])
result['intra_ptg_allow'] = ptg['intra_ptg_allow']
else:
self._extend_ptg_dict_with_intra_ptg_allow(session, result)
def _extend_ptg_dict_with_intra_ptg_allow(self, session, result):
result['intra_ptg_allow'] = self.get_intra_ptg_allow(
session, policy_target_group_id=result['id'])
def process_create_policy_target_group(self, session, data, result):
self._set_intra_ptg_allow(session, data, result)
result['is_auto_ptg'] = bool(
gpolicy.AUTO_PTG_REGEX.match(result['id']))
self.set_is_auto_ptg(
session, policy_target_group_id=result['id'],
is_auto_ptg=result['is_auto_ptg'])
def process_update_policy_target_group(self, session, data, result):
self._set_intra_ptg_allow(session, data, result)
def extend_policy_target_group_dict(self, session, result):
self._extend_ptg_dict_with_intra_ptg_allow(session, result)
result['is_auto_ptg'] = self.get_is_auto_ptg(
session, policy_target_group_id=result['id'])
self._pd.extend_policy_target_group_dict(session, result)
def extend_application_policy_group_dict(self, session, result):
self._pd.extend_application_policy_group_dict(session, result)
def extend_policy_rule_dict(self, session, result):
self._pd.extend_policy_rule_dict(session, result)
def extend_policy_rule_set_dict(self, session, result):
self._pd.extend_policy_rule_set_dict(session, result)
| apache-2.0 | -5,990,733,584,893,358,000 | 39.222222 | 78 | 0.662063 | false |
JaneliaSciComp/osgpyplusplus | examples/rough_translated1/osgdirectinput.py | 1 | 10576 | #!/bin/env python
# Automatically translated python version of
# OpenSceneGraph example program "osgdirectinput"
# !!! This program will need manual tuning before it will work. !!!
import sys
from osgpypp import osgDB
from osgpypp import osgGA
from osgpypp import osgViewer
# Translated from file 'DirectInputRegistry.cpp'
# OpenSceneGraph example, osgdirectinput.
#*
#* Permission is hereby granted, free of charge, to any person obtaining a copy
#* of this software and associated documentation files (the "Software"), to deal
#* in the Software without restriction, including without limitation the rights
#* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#* copies of the Software, and to permit persons to whom the Software is
#* furnished to do so, subject to the following conditions:
#*
#* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#* THE SOFTWARE.
#
#include <osgGA/EventQueue>
#include <iostream>
#include "DirectInputRegistry"
typedef std.pair<int, int> KeyValue
typedef std.map<int, KeyValue> KeyMap
g_keyMap = KeyMap()
def buildKeyMap():
# TODO: finish the key map as you wish
g_keyMap[DIK_ESCAPE] = KeyValue(osgGA.GUIEventAdapter.KEY_Escape, 0)
g_keyMap[DIK_1] = KeyValue(ord("1"), 0)
g_keyMap[DIK_2] = KeyValue(ord("2"), 0)
g_keyMap[DIK_3] = KeyValue(ord("3"), 0)
g_keyMap[DIK_4] = KeyValue(ord("4"), 0)
g_keyMap[DIK_5] = KeyValue(ord("5"), 0)
g_keyMap[DIK_6] = KeyValue(ord("6"), 0)
g_keyMap[DIK_7] = KeyValue(ord("7"), 0)
g_keyMap[DIK_8] = KeyValue(ord("8"), 0)
g_keyMap[DIK_9] = KeyValue(ord("9"), 0)
g_keyMap[DIK_0] = KeyValue(ord("0"), 0)
g_keyMap[DIK_MINUS] = KeyValue(ord("-"), 0)
g_keyMap[DIK_EQUALS] = KeyValue(ord("="), 0)
g_keyMap[DIK_BACK] = KeyValue(osgGA.GUIEventAdapter.KEY_BackSpace, 0)
g_keyMap[DIK_TAB] = KeyValue(osgGA.GUIEventAdapter.KEY_Tab, 0)
g_keyMap[DIK_SPACE] = KeyValue(osgGA.GUIEventAdapter.KEY_Space, 0)
bool DirectInputRegistry.initKeyboard( HWND handle )
if not _inputDevice : return False
hr = _inputDevice.CreateDevice( GUID_SysKeyboard, _keyboard, NULL )
if FAILED(hr) or _keyboard==NULL :
osg.notify(osg.WARN), "Unable to create keyboard."
return False
buildKeyMap()
initImplementation = return( handle, _keyboard, c_dfDIKeyboard )
bool DirectInputRegistry.initMouse( HWND handle )
if not _inputDevice : return False
hr = _inputDevice.CreateDevice( GUID_SysMouse, _mouse, NULL )
if FAILED(hr) or _mouse==NULL :
osg.notify(osg.WARN), "Unable to create mouse."
return False
initImplementation = return( handle, _mouse, c_dfDIMouse2 )
bool DirectInputRegistry.initJoystick( HWND handle )
if not _inputDevice : return False
hr = _inputDevice.EnumDevices( DI8DEVCLASS_GAMECTRL, EnumJoysticksCallback,
NULL, DIEDFL_ATTACHEDONLY )
if FAILED(hr) or _joystick==NULL :
osg.notify(osg.WARN), "Unable to enumerate joysticks."
return False
initImplementation = return( handle, _joystick, c_dfDIJoystick2 )
void DirectInputRegistry.updateState( osgGA.EventQueue* eventQueue )
hr = HRESULT()
if not _supportDirectInput or not eventQueue : return
if _keyboard :
pollDevice( _keyboard )
char buffer[256] = 0
hr = _keyboard.GetDeviceState( sizeof(buffer), buffer )
if SUCCEEDED(hr) :
for ( KeyMap.iterator itr=g_keyMap.begin() itr not =g_keyMap.end() ++itr )
key = itr.second
value = buffer[itr.first]
if key.second==value : continue
key.second = value
                if value & 0x80 :
eventQueue.keyPress( key.first )
else:
eventQueue.keyRelease( key.first )
if _mouse :
pollDevice( _mouse )
mouseState = DIMOUSESTATE2()
hr = _mouse.GetDeviceState( sizeof(DIMOUSESTATE2), mouseState )
# TODO: add mouse handlers
if _joystick :
pollDevice( _joystick )
event = JoystickEvent()
hr = _joystick.GetDeviceState( sizeof(DIJOYSTATE2), (event._js) )
if SUCCEEDED(hr) : eventQueue.userEvent( event )
DirectInputRegistry.DirectInputRegistry()
: _keyboard(0), _mouse(0), _joystick(0),
_supportDirectInput(True)
hr = DirectInput8Create( GetModuleHandle(NULL), DIRECTINPUT_VERSION,
IID_IDirectInput8, (VOID**)_inputDevice, NULL )
if FAILED(hr) :
osg.notify(osg.WARN), "Unable to create DirectInput object."
_supportDirectInput = False
DirectInputRegistry.~DirectInputRegistry()
releaseDevice( _keyboard )
releaseDevice( _mouse )
releaseDevice( _joystick )
if _inputDevice : _inputDevice.Release()
bool DirectInputRegistry.initImplementation( HWND handle, LPDIRECTINPUTDEVICE8 device, LPCDIDATAFORMAT format )
_supportDirectInput = True
hr = device.SetDataFormat( format )
if FAILED(hr) :
osg.notify(osg.WARN), "Unable to set device data format."
_supportDirectInput = False
hr = device.SetCooperativeLevel( handle, DISCL_EXCLUSIVE|DISCL_FOREGROUND )
if FAILED(hr) :
osg.notify(osg.WARN), "Unable to attach device to window."
_supportDirectInput = False
device.Acquire()
return _supportDirectInput
void DirectInputRegistry.pollDevice( LPDIRECTINPUTDEVICE8 device )
hr = device.Poll()
if FAILED(hr) :
device.Acquire()
if hr==DIERR_INPUTLOST :
osg.notify(osg.WARN), "Device lost."
void DirectInputRegistry.releaseDevice( LPDIRECTINPUTDEVICE8 device )
if device :
device.Unacquire()
device.Release()
BOOL CALLBACK DirectInputRegistry.EnumJoysticksCallback( DIDEVICEINSTANCE* didInstance, VOID* )
hr = HRESULT()
device = DirectInputRegistry.instance().getDevice()
if device :
hr = device.CreateDevice( didInstance.guidInstance,
(DirectInputRegistry.instance().getJoyStick()), NULL )
if FAILED(hr) : return DIENUM_CONTINUE
return DIENUM_STOP
# Translated from file 'osgdirectinput.cpp'
# OpenSceneGraph example, osgdirectinput.
#*
#* Permission is hereby granted, free of charge, to any person obtaining a copy
#* of this software and associated documentation files (the "Software"), to deal
#* in the Software without restriction, including without limitation the rights
#* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#* copies of the Software, and to permit persons to whom the Software is
#* furnished to do so, subject to the following conditions:
#*
#* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#* THE SOFTWARE.
#
#include <osgDB/ReadFile>
#include <osgGA/StateSetManipulator>
#include <osgViewer/api/Win32/GraphicsWindowWin32>
#include <osgViewer/Viewer>
#include <osgViewer/ViewerEventHandlers>
#include <iostream>
#include "DirectInputRegistry"
class CustomViewer (osgViewer.Viewer) :
CustomViewer() : osgViewer.Viewer()
virtual ~CustomViewer()
def eventTraversal():
DirectInputRegistry.instance().updateState( _eventQueue )
osgViewer.Viewer.eventTraversal()
def viewerInit():
windowWin32 = dynamic_cast<osgViewer.GraphicsWindowWin32*>( _camera.getGraphicsContext() )
if windowWin32 :
hwnd = windowWin32.getHWND()
DirectInputRegistry.instance().initKeyboard( hwnd )
#DirectInputRegistry.instance().initMouse( hwnd )
DirectInputRegistry.instance().initJoystick( hwnd )
osgViewer.Viewer.viewerInit()
class JoystickHandler (osgGA.GUIEventHandler) :
JoystickHandler()
def handle(ea, aa):
switch ( ea.getEventType() )
case osgGA.GUIEventAdapter.KEYDOWN:
print "*** Key 0x", std.hex, ea.getKey(), std.dec, " down ***"
break
case osgGA.GUIEventAdapter.KEYUP:
print "*** Key 0x", std.hex, ea.getKey(), std.dec, " up ***"
break
case osgGA.GUIEventAdapter.USER:
event = dynamic_cast< JoystickEvent*>( ea.getUserData() )
if not event : break
js = event._js
for ( unsigned int i=0 i<128 ++i )
if js.rgbButtons[i] :
print "*** Joystick Btn", i, " = ", (int)js.rgbButtons[i]
if js.lX==0x0000 : print "*** Joystick X-"
elif js.lX==0xffff : print "*** Joystick X+"
if js.lY==0 : print "*** Joystick Y-"
elif js.lY==0xffff : print "*** Joystick Y+"
return True
default:
break
return False
def main(argv):
arguments = osg.ArgumentParser( argc, argv )
model = osgDB.readNodeFiles( arguments )
if not model : model = osgDB.readNodeFile( "cow.osgt" )
if not model :
print arguments.getApplicationName(), ": No data loaded"
return 1
viewer = CustomViewer()
viewer.addEventHandler( JoystickHandler )()
viewer.addEventHandler( osgViewer.StatsHandler )()
viewer.addEventHandler( osgViewer.WindowSizeHandler )()
viewer.addEventHandler( osgGA.StateSetManipulator(viewer.getCamera().getOrCreateStateSet()) )
viewer.setSceneData( model )
viewer.setUpViewInWindow( 250, 50, 800, 600 )
return viewer.run()
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause | 779,081,873,386,174,500 | 36.637011 | 111 | 0.647315 | false |
ROS-PSE/arni | arni_gui/src/arni_gui/connection_item.py | 1 | 7933 | import genpy
from rospy.rostime import Time, Duration
from python_qt_binding.QtCore import QTranslator
from abstract_item import AbstractItem
from helper_functions import prepare_number_for_representation, MAXIMUM_OFFLINE_TIME, ROUND_DIGITS
class ConnectionItem(AbstractItem):
"""
    A ConnectionItem represents the connection between a publisher and a subscriber and the topic they are publishing / listening on
"""
def __init__(self, logger, seuid, first_message, parent=None):
"""
Initializes the ConnectionItem.
:param seuid: the seuid of the item
:type seuid: str
:param logger: a logger where to log when special events occur
:type logger: ModelLogger
:param type: the type of the item
:type type: str
:param parent: the parent-item
:type parent: AbstractItem
"""
AbstractItem.__init__(self, logger, seuid, parent)
self.__parent = parent
self._type = "connection"
self.add_keys=["dropped_msgs", "traffic"]
self.avg_keys=["period_mean", "period_stddev", "stamp_age_mean", "stamp_age_stddev", "bandwidth", "frequency"]
self.max_keys=["period_max", "stamp_age_max"]
self._attributes = []
self._attributes.extend(["dropped_msgs", "traffic",
"period_mean", "period_stddev", "period_max", "stamp_age_mean",
"stamp_age_stddev", "stamp_age_max", "bandwidth", "frequency"])
for item in self._attributes:
self._add_data_list(item)
for item in self._attributes:
self._rated_attributes.append(item + ".actual_value")
self._rated_attributes.append(item + ".expected_value")
self._rated_attributes.append(item + ".state")
for item in self._rated_attributes:
self._add_rated_data_list(item)
self._logger.log("info", Time.now(), seuid, "Created a new ConnectionItem")
self.show_as_subscriber = False
self.tree_item1 = None
self.tree_item2 = None
def aggregate_data(self, period):
"""
:param period: The amount in seconds over which the data should be aggregated.
:return:
"""
values = {}
for key in self._attributes:
values[key] = 0
entries = self.get_items_younger_than(Time.now() - (Duration(secs=period) if int(Duration(secs=period).to_sec()) <= int(Time.now().to_sec()) else Time(0) ))
length = len(entries["window_stop"]) if entries["window_stop"] else 0
if length > 0:
for key in self.add_keys:
for i in range(0, length):
values[key] += entries[key][i]
for key in self.max_keys:
if type(entries[key][-1]) == genpy.rostime.Time or type(entries[key][-1]) == genpy.rostime.Duration:
for i in range(0, length):
if entries[key][i].to_sec() > values[key]:
values[key] = entries[key][i].to_sec()
else:
for i in range(0, length):
if entries[key][i] > values[key]:
values[key] = entries[key][i]
for key in self.avg_keys:
if type(entries[key][0]) is genpy.rostime.Time or type(entries[key][0]) is genpy.rostime.Duration:
for i in range(0, length):
values[key] += entries[key][i].to_sec()
else:
for i in range(0, length):
values[key] += entries[key][i]
values[key] = values[key] / length
return values
def execute_action(self, action):
"""
Not senseful, Connection cannot execute actions.
:param action: action to be executed
:type action: RemoteAction
"""
pass
def get_detailed_data(self):
"""
Returns the detailed data of the ConnectionItem.
:returns: str
"""
data_dict = self.get_latest_data()
if Time.now() - data_dict["window_stop"] > Duration(secs=5):
return "No recent data"
content = "<p class=\"detailed_data\">"
content += self.get_erroneous_entries()
if "frequency" in self._attributes:
content += self.tr("frequency") + ": " + prepare_number_for_representation(data_dict["frequency"]) \
+ " " + self.tr("frequency_unit") + " <br>"
content += self.tr("dropped_msgs") + ": " + prepare_number_for_representation(data_dict["dropped_msgs"]) + " " \
+ self.tr("dropped_msgs_unit") + " <br>"
content += self.tr("bandwidth") + ": " + prepare_number_for_representation(data_dict["bandwidth"]) + " " \
+ " " + self.tr("bandwidth_unit") + " <br>"
content += self.tr("period_mean") + ": " + prepare_number_for_representation(data_dict["period_mean"]) \
+ " " + self.tr("period_mean_unit") + " <br>"
content += self.tr("period_stddev") + ": " + prepare_number_for_representation(data_dict["period_stddev"]) \
+ " " + self.tr("period_stddev_unit") + " <br>"
content += self.tr("period_max") + ": " + prepare_number_for_representation(data_dict["period_max"]) + " " \
+ self.tr("period_max_unit") + " <br>"
content += self.tr("stamp_age_mean") + ": " + prepare_number_for_representation(data_dict["stamp_age_mean"]) \
+ " " + self.tr("stamp_age_mean_unit") + " <br>"
content += self.tr("stamp_age_stddev") + ": " + prepare_number_for_representation(data_dict["stamp_age_stddev"]) \
+ " " + self.tr("stamp_age_stddev_unit") + " <br>"
content += self.tr("stamp_age_max") + ": " + prepare_number_for_representation(data_dict["stamp_age_max"]) \
+ " " + self.tr("stamp_age_max_unit") + " <br>"
content += "</p>"
return content
def get_plotable_items(self):
"""
Returns items for the plot.
:returns: str[]
"""
return ["dropped_msgs", "bandwidth", "frequency", "period_mean", "period_stddev", "period_max", "stamp_age_mean",
"stamp_age_stddev", "stamp_age_max"]
def get_short_data(self):
"""
        Returns a shortened version of the item data.
:returns: data of the item
:rtype: str
"""
data_dict = self.get_latest_data()
if data_dict["window_stop"] == Time(0):
return "No data yet"
elif (Time.now() - data_dict["window_stop"]) > Duration(MAXIMUM_OFFLINE_TIME):
# last entry was more than MAXIMUM_OFFLINE_TIME ago, it could be offline!
return "No data since " + prepare_number_for_representation(Time.now() - data_dict["window_stop"]) \
+ " seconds"
content = ""
if data_dict["state"] is "error":
content += self.get_erroneous_entries_for_log()
else:
content += self.tr("frequency") + ": " + prepare_number_for_representation(data_dict["frequency"]) \
+ " " + self.tr("frequency_unit") + " - "
content += self.tr("bandwidth") + ": " + prepare_number_for_representation(
data_dict["bandwidth"]) + " " \
+ self.tr("bandwidth_unit") + " - "
content += self.tr("dropped_msgs") + ": " + prepare_number_for_representation(data_dict["dropped_msgs"]) \
+ " " + self.tr("dropped_msgs_unit")
return content
def get_list_items(self):
return []
def get_time_items(self):
return ["period_mean", "period_stddev", "period_max", "stamp_age_mean",
"stamp_age_stddev", "stamp_age_max"]
| bsd-2-clause | 8,285,537,754,295,539,000 | 40.103627 | 164 | 0.541787 | false |
pshchelo/ironic | ironic/objects/conductor.py | 1 | 6820 | # coding=utf-8
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_versionedobjects import base as object_base
from ironic.common.i18n import _
from ironic.db import api as db_api
from ironic.objects import base
from ironic.objects import fields as object_fields
@base.IronicObjectRegistry.register
class Conductor(base.IronicObject, object_base.VersionedObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Add register() and unregister(), make the context parameter
# to touch() optional.
# Version 1.2: Add register_hardware_interfaces() and
# unregister_all_hardware_interfaces()
VERSION = '1.2'
dbapi = db_api.get_instance()
fields = {
'id': object_fields.IntegerField(),
'drivers': object_fields.ListOfStringsField(nullable=True),
'hostname': object_fields.StringField(),
}
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable_classmethod
@classmethod
def get_by_hostname(cls, context, hostname):
"""Get a Conductor record by its hostname.
:param cls: the :class:`Conductor`
:param context: Security context
:param hostname: the hostname on which a Conductor is running
:returns: a :class:`Conductor` object.
"""
db_obj = cls.dbapi.get_conductor(hostname)
conductor = cls._from_db_object(context, cls(), db_obj)
return conductor
def save(self, context):
"""Save is not supported by Conductor objects."""
raise NotImplementedError(
_('Cannot update a conductor record directly.'))
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable
def refresh(self, context=None):
"""Loads and applies updates for this Conductor.
Loads a :class:`Conductor` with the same uuid from the database and
checks for updated attributes. Updates are applied from
the loaded conductor column by column, if there are any updates.
:param context: Security context. NOTE: This should only
be used internally by the indirection_api.
Unfortunately, RPC requires context as the first
argument, even though we don't use it.
A context should be set when instantiating the
object, e.g.: Conductor(context)
"""
current = self.get_by_hostname(self._context, hostname=self.hostname)
self.obj_refresh(current)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable
def touch(self, context=None):
"""Touch this conductor's DB record, marking it as up-to-date."""
self.dbapi.touch_conductor(self.hostname)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable
@classmethod
def register(cls, context, hostname, drivers, update_existing=False):
"""Register an active conductor with the cluster.
:param cls: the :class:`Conductor`
:param context: Security context
:param hostname: the hostname on which the conductor will run
:param drivers: the list of drivers enabled in the conductor
:param update_existing: When false, registration will raise an
exception when a conflicting online record
is found. When true, will overwrite the
existing record. Default: False.
:raises: ConductorAlreadyRegistered
:returns: a :class:`Conductor` object.
"""
db_cond = cls.dbapi.register_conductor(
{'hostname': hostname,
'drivers': drivers,
'version': cls.get_target_version()},
update_existing=update_existing)
return cls._from_db_object(context, cls(), db_cond)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable
def unregister(self, context=None):
"""Remove this conductor from the service registry."""
self.unregister_all_hardware_interfaces()
self.dbapi.unregister_conductor(self.hostname)
def register_hardware_interfaces(self, hardware_type, interface_type,
interfaces, default_interface):
"""Register hardware interfaces with the conductor.
:param hardware_type: Name of hardware type for the interfaces.
:param interface_type: Type of interfaces, e.g. 'deploy' or 'boot'.
:param interfaces: List of interface names to register.
:param default_interface: String, the default interface for this
hardware type and interface type.
"""
self.dbapi.register_conductor_hardware_interfaces(self.id,
hardware_type,
interface_type,
interfaces,
default_interface)
def unregister_all_hardware_interfaces(self):
"""Unregister all hardware interfaces for this conductor."""
self.dbapi.unregister_conductor_hardware_interfaces(self.id)
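# Illustrative sketch (hypothetical helper, not used by the module): one way a
# conductor service host might drive the lifecycle methods above. The hostname,
# driver list and hardware interface names are placeholders; a real deployment
# takes them from configuration.
def _example_conductor_lifecycle(context):
    cond = Conductor.register(context, 'conductor-1.example.org', ['ipmi'],
                              update_existing=True)
    cond.register_hardware_interfaces('ipmi', 'deploy',
                                      ['direct', 'ansible'], 'direct')
    cond.touch()        # heartbeat: mark the DB record as up-to-date
    cond.unregister()   # also drops all registered hardware interfaces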
| apache-2.0 | -1,894,912,130,061,540,000 | 45.394558 | 78 | 0.643548 | false |
ke4roh/RPiNWR | tests/test_Si4707.py | 1 | 12599 | # -*- coding: utf-8 -*-
__author__ = 'ke4roh'
# Copyright © 2016 James E. Scarborough
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from RPiNWR.Si4707 import *
from RPiNWR.Si4707.mock import MockContext
import unittest
import logging
class TestSi4707(unittest.TestCase):
def test_power_up(self):
logging.basicConfig(level=logging.INFO)
events = []
with MockContext() as context:
with Si4707(context) as radio:
radio.register_event_listener(events.append)
result = radio.do_command(PowerUp(function=15)).get(timeout=1)
self.assertEqual("2.0", result.firmware)
time.sleep(.01) # The event will come later, after the command is done.
self.assertEqual(1, len(events))
def test_patch_command(self):
logging.basicConfig(level=logging.INFO)
events = []
with MockContext() as context:
with Si4707(context) as radio:
radio.register_event_listener(events.append)
self.assertFalse(radio.radio_power)
result = radio.do_command(
PatchCommand(DEFAULT_CONFIG["power_on"]["patch"], DEFAULT_CONFIG["power_on"]["patch_id"])).get(
timeout=500)
self.assertTrue(radio.radio_power)
# Power Down is supposed to happen as part of the __exit__ routine
self.assertEquals(PowerDown, type(events[-1]))
def test_exception_in_command(self):
class ExceptionalCommand(Command):
def do_command0(self, r):
raise NotImplemented("Oh no!")
with MockContext() as context:
with Si4707(context) as radio:
future = radio.do_command(ExceptionalCommand())
self.assertRaises(Exception, future.get)
def test_set_property(self):
events = []
with MockContext() as context:
with Si4707(context) as radio:
radio.register_event_listener(events.append)
radio.power_on({"frequency": 162.4, "properties": {}})
radio.do_command(SetProperty("RX_VOLUME", 5)).get()
self.assertTrue(5, context.props[0x4000])
self.assertRaises(ValueError, SetProperty, "RX_VOLUME", 66)
# Wait up to 2 sec for the shutdown to finish (it should go really fast)
timeout = time.time() + 2
while not type(events[-1]) is PowerDown:
if time.time() >= timeout:
raise TimeoutError()
time.sleep(.002)
# There is supposed to be 500 ms between power up and first tuning when using the crystal
# oscillator. We'll check that.
self.assertTrue(type(events[-1]) is PowerDown)
checked_rtt = False
checked_tune = False
pup_time = 0
for event in events:
if not pup_time:
if type(event) is PowerUp or type(event) is PatchCommand:
pup_time = event.time_complete
self.assertTrue(abs((radio.tune_after - pup_time) * 1000 - 500) < 5,
"tune_after - pup_time should be about 500 ms, but it is %d ms" % int(
(radio.tune_after - pup_time) * 1000))
for event in events:
if type(event) is ReadyToTuneEvent:
self.assertTrue(event.time >= radio.tune_after,
"RTT happened %d ms early." % int((radio.tune_after - event.time) * 1000))
checked_rtt = True
if type(event) is TuneFrequency:
self.assertTrue(event.time_complete >= radio.tune_after,
"Tune happened %d ms early." % int((radio.tune_after - event.time_complete) * 1000))
checked_tune = True
self.assertTrue(checked_tune)
self.assertTrue(checked_rtt)
def test_get_property(self):
with MockContext() as context:
with Si4707(context) as radio:
radio.power_on({"frequency": 162.4})
self.assertEqual(63, radio.do_command(GetProperty("RX_VOLUME")).get())
def test_agc_control(self):
with MockContext() as context:
with Si4707(context) as radio:
radio.power_on({"frequency": 162.4})
self.assertTrue(radio.do_command(GetAGCStatus()).get())
radio.do_command(SetAGCStatus(False)).get()
self.assertFalse(radio.do_command(GetAGCStatus()).get())
radio.do_command(SetAGCStatus(True)).get()
self.assertTrue(radio.do_command(GetAGCStatus()).get())
def test_Tune_162_450(self):
# Check for a rounding error
c = TuneFrequency(162.450)
self.assertEqual(64980, c.frequency)
def test_rsq_interrupts(self):
events = []
with MockContext() as context:
with Si4707(context) as radio:
radio.power_on({"frequency": 162.4})
radio.register_event_listener(events.append)
context.interrupts |= 8 # RSQ
timeout = time.time() + 5
while not len(list(filter(lambda x: type(x) is ReceivedSignalQualityCheck, events))):
time.sleep(.1)
self.assertTrue(time.time() < timeout)
rsqe = list(filter(lambda x: type(x) is ReceivedSignalQualityCheck, events))
self.assertEqual(1, len(rsqe))
self.assertEqual(1, rsqe[0].frequency_offset)
def test_alert_tone_detection(self): # WB_ASQ_STATUS
events = []
tone_duration = 0.5
tone_duration_tolerance = 0.1
with MockContext() as context:
with Si4707(context) as radio:
radio.power_on({"frequency": 162.4})
radio.register_event_listener(events.append)
context.alert_tone(True)
time.sleep(tone_duration)
context.alert_tone(False)
time.sleep(0.05)
asqe = list(filter(lambda x: type(x) is AlertToneCheck, events))
self.assertEqual(2, len(asqe))
self.assertTrue(asqe[0].tone_on)
self.assertTrue(asqe[0].tone_start)
self.assertFalse(asqe[0].tone_end)
self.assertFalse(asqe[1].tone_on)
self.assertFalse(asqe[1].tone_start)
self.assertTrue(asqe[1].tone_end)
# Finally, make sure the timing of the tones is measured fairly accurately
self.assertTrue(abs((asqe[1].time_complete - asqe[0].time_complete) - tone_duration) < tone_duration_tolerance,
"Tone duration measured as %f sec - spec called for %f±%f" % (
asqe[1].time_complete - asqe[0].time_complete, tone_duration, tone_duration_tolerance))
self.assertIsNone(asqe[0].duration)
self.assertTrue(abs(asqe[1].time_complete - asqe[0].time_complete - asqe[1].duration) < 0.01)
def __filter_same_events(self, events, interrupt):
return list(filter(lambda x: type(x) is SameInterruptCheck and x.status[interrupt], events))
def __wait_for_eom_events(self, events, n=3, timeout=30):
timeout = time.time() + timeout
while len(self.__filter_same_events(events, "EOMDET")) < n and not time.time() >= timeout:
time.sleep(.02)
def test_send_message(self):
events = []
message = '-WXR-RWT-020103-020209-020091-020121-029047-029165-029095-029037+0030-3031700-KEAX/NWS-'
message2 = '-WXR-TOR-020103+0030-3031701-KEAX/NWS-'
interrupts_cleared = [0]
with MockContext() as context:
def check_interrupts_cleared(event):
try:
if event.intack:
self.assertEqual(0, context.interrupts)
interrupts_cleared[0] += 1
except AttributeError:
pass
with Si4707(context) as radio:
radio.register_event_listener(check_interrupts_cleared)
radio.power_on({"transmitter": "KID77"})
radio.register_event_listener(events.append)
context.send_message(message=message, voice_duration=80, time_factor=0.1)
self.__wait_for_eom_events(events)
same_messages = list(filter(lambda x: type(x) is SAMEMessageReceivedEvent, events))
self.assertEquals(1, len(same_messages))
self.assertEquals(message, same_messages[0].message.get_SAME_message()[0])
for interrupt in ["EOMDET", "HDRRDY", "PREDET"]:
times = len(self.__filter_same_events(events, interrupt))
self.assertEquals(3, times, "Interrupt %s happened %d times" % (interrupt, times))
# Test alert tone check
alert_tones = list(filter(lambda x: type(x) is AlertToneCheck, events))
self.assertEquals(2, len(alert_tones))
self.assertTrue(alert_tones[0].tone_on)
self.assertFalse(alert_tones[1].tone_on)
self.assertTrue(abs(alert_tones[1].duration - .8) < 0.01)
# Test EOM
self.assertEquals(1, len(list(filter(lambda x: type(x) is EndOfMessage, events))))
self.assertEqual(0, sum(context.same_buffer), "Buffer wasn't flushed")
# And the correct number of interrupts needs to have been handled
self.assertTrue(10 < interrupts_cleared[0] < 13, interrupts_cleared[0])
# Send another message to ensure that the last one got cleared out properly
context.send_message(message=message2, voice_duration=0, time_factor=0.1)
self.__wait_for_eom_events(events, 6)
self.assertEqual(0, len(list(filter(lambda x: type(x) is CommandExceptionEvent, events))))
def test_send_message_no_tone_2_headers(self):
# This will hit the timeout.
events = []
message = '-WXR-RWT-020103-020209-020091-020121-029047-029165-029095-029037+0030-3031700-KEAX/NWS-'
with MockContext() as context:
with Si4707(context) as radio:
radio.power_on({"frequency": 162.4})
radio.register_event_listener(events.append)
context.send_message(message=message, voice_duration=50, time_factor=0.1, header_count=2, tone=None)
self.__wait_for_eom_events(events)
same_messages = list(filter(lambda x: type(x) is SAMEMessageReceivedEvent, events))
self.assertEquals(1, len(same_messages))
self.assertEquals(message, same_messages[0].message.get_SAME_message()[0])
for interrupt in ["HDRRDY", "PREDET"]:
times = len(self.__filter_same_events(events, interrupt))
self.assertEquals(2, times, "Interrupt %s happened %d times" % (interrupt, times))
def test_send_invalid_message(self):
# This will hit the timeout.
events = []
message = '-WWF-RWT-020103-020209-020091-020121-029047-029165'
with MockContext() as context:
with Si4707(context) as radio:
radio.power_on({"frequency": 162.4})
radio.register_event_listener(events.append)
context.send_message(message=message, time_factor=0.1, tone=None, invalid_message=True)
self.__wait_for_eom_events(events)
same_messages = list(filter(lambda x: type(x) is SAMEMessageReceivedEvent, events))
self.assertEquals(1, len(same_messages))
self.assertTrue(same_messages[0].message.headers[0][0].startswith(message),
"%s != %s " % (message, same_messages[0].message.headers[0][0]))
for interrupt in ["HDRRDY", "PREDET"]:
times = len(self.__filter_same_events(events, interrupt))
self.assertEquals(3, times, "Interrupt %s happened %d times" % (interrupt, times))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -3,110,605,552,138,210,300 | 45.483395 | 119 | 0.595856 | false |
astrofra/demo-unusual-suspects | Python-toolchain/3D/vector3.py | 1 | 17953 | from math import *
from util import format_number
class Vector3(object):
__slots__ = ('_v',)
def __init__(self, *args):
"""Creates a Vector3 from 3 numeric values or a list-like object
containing at least 3 values. No arguments result in a null vector.
"""
if len(args) == 3:
self._v = map(float, args[:3])
return
if not args:
self._v = [0., 0., 0.]
elif len(args) == 1:
self._v = map(float, args[0][:3])
else:
raise ValueError("Vector3.__init__ takes 0, 1 or 3 parameters")
@classmethod
def from_points(cls, p1, p2):
v = cls.__new__(cls, object)
ax, ay, az = p1
bx, by, bz = p2
v._v = [bx-ax, by-ay, bz-az]
return v
@classmethod
def from_floats(cls, x, y, z):
"""Creates a Vector3 from individual float values.
Warning: There is no checking for efficiency here: x, y, z _must_ be
floats.
"""
v = cls.__new__(cls, object)
v._v = [x, y, z]
return v
@classmethod
def from_iter(cls, iterable):
"""Creates a Vector3 from an iterable containing at least 3 values."""
it = iter(iterable)
next = it.next
v = cls.__new__(cls, object)
v._v = [ float(next()), float(next()), float(next()) ]
return v
def copy(self):
"""Returns a copy of this vector."""
v = self.__new__(self.__class__, object)
v._v = self._v[:]
return v
#return self.from_floats(self._v[0], self._v[1], self._v[2])
__copy__ = copy
def _get_x(self):
return self._v[0]
def _set_x(self, x):
assert isinstance(x, float), "Must be a float"
self._v[0] = x
x = property(_get_x, _set_x, None, "x component.")
def _get_y(self):
return self._v[1]
def _set_y(self, y):
assert isinstance(y, float), "Must be a float"
self._v[1] = y
y = property(_get_y, _set_y, None, "y component.")
def _get_z(self):
return self._v[2]
def _set_z(self, z):
assert isinstance(z, float), "Must be a float"
self._v[2] = z
z = property(_get_z, _set_z, None, "z component.")
def _get_length(self):
x, y, z = self._v
return sqrt(x*x + y*y +z*z)
def _set_length(self, length):
v = self._v
try:
x, y, z = v
l = length / sqrt(x*x + y*y +z*z)
except ZeroDivisionError:
v[0] = 0.
v[1] = 0.
v[2] = 0.
return self
v[0] = x*l
v[1] = y*l
v[2] = z*l
length = property(_get_length, _set_length, None, "Length of the vector")
def unit(self):
"""Returns a unit vector."""
x, y, z = self._v
l = sqrt(x*x + y*y + z*z)
return self.from_floats(x/l, y/l, z/l)
def set(self, x, y, z):
"""Sets the components of this vector.
x -- x component
y -- y component
z -- z component
"""
assert ( isinstance(x, float) and
isinstance(y, float) and
isinstance(z, float) ), "x, y, z must be floats"
v = self._v
v[0] = x
v[1] = y
v[2] = z
return self
def __str__(self):
x, y, z = self._v
return "(%s, %s, %s)" % (format_number(x),
format_number(y),
format_number(z))
def __repr__(self):
x, y, z = self._v
return "Vector3(%s, %s, %s)" % (x, y, z)
def __len__(self):
return 3
def __iter__(self):
"""Iterates the components in x, y, z order."""
return iter(self._v[:])
def __getitem__(self, index):
"""Retrieves a component, given its index.
index -- 0, 1 or 2 for x, y or z
"""
try:
return self._v[index]
except IndexError:
raise IndexError, "There are 3 values in this object, index should be 0, 1 or 2!"
def __setitem__(self, index, value):
"""Sets a component, given its index.
index -- 0, 1 or 2 for x, y or z
value -- New (float) value of component
"""
assert isinstance(value, float), "Must be a float"
try:
self._v[index] = value
except IndexError:
raise IndexError, "There are 3 values in this object, index should be 0, 1 or 2!"
def __eq__(self, rhs):
"""Test for equality
rhs -- Vector or sequence of 3 values
"""
x, y, z = self._v
xx, yy, zz = rhs
return x==xx and y==yy and z==zz
def __ne__(self, rhs):
"""Test of inequality
rhs -- Vector or sequence of 3 values
"""
x, y, z = self._v
xx, yy, zz = rhs
return x!=xx or y!=yy or z!=zz
def __hash__(self):
return hash(tuple(self._v))
def __add__(self, rhs):
"""Returns the result of adding a vector (or collection of 3 numbers)
to this vector.
rhs -- Vector or sequence of 3 values
"""
x, y, z = self._v
ox, oy, oz = rhs
return self.from_floats(x+ox, y+oy, z+oz)
def __iadd__(self, rhs):
"""Adds another vector (or a collection of 3 numbers) to this vector.
rhs -- Vector or sequence of 3 values
"""
ox, oy, oz = rhs
v = self._v
v[0] += ox
v[1] += oy
v[2] += oz
return self
def __radd__(self, lhs):
"""Adds vector to this vector (right version)
lhs -- Left hand side vector or sequence
"""
x, y, z = self._v
ox, oy, oz = lhs
return self.from_floats(x+ox, y+oy, z+oz)
def __sub__(self, rhs):
"""Returns the result of subtracting a vector (or collection of
3 numbers) from this vector.
rhs -- 3 values
"""
x, y, z = self._v
ox, oy, oz = rhs
return self.from_floats(x-ox, y-oy, z-oz)
def __isub__(self, rhs):
"""Subtracts another vector (or a collection of 3 numbers) from this
vector.
rhs -- Vector or sequence of 3 values
"""
ox, oy, oz = rhs
v = self._v
v[0] -= ox
v[1] -= oy
v[2] -= oz
return self
def __rsub__(self, lhs):
"""Subtracts a vector (right version)
lhs -- Left hand side vector or sequence
"""
x, y, z = self._v
ox, oy, oz = lhs
return self.from_floats(ox-x, oy-y, oz-z)
def scalar_mul(self, scalar):
v = self._v
v[0] *= scalar
v[1] *= scalar
v[2] *= scalar
def vector_mul(self, vector):
x, y, z = vector
v= self._v
v[0] *= x
v[1] *= y
v[2] *= z
def get_scalar_mul(self, scalar):
x, y, z = self._v
return self.from_floats(x*scalar, y*scalar, z*scalar)
def get_vector_mul(self, vector):
x, y, z = self._v
xx, yy, zz = vector
return self.from_floats(x * xx, y * yy, z * zz)
def __mul__(self, rhs):
"""Return the result of multiplying this vector by another vector, or
a scalar (single number).
rhs -- Vector, sequence or single value.
"""
x, y, z = self._v
if hasattr(rhs, "__getitem__"):
ox, oy, oz = rhs
return self.from_floats(x*ox, y*oy, z*oz)
else:
return self.from_floats(x*rhs, y*rhs, z*rhs)
def __imul__(self, rhs):
"""Multiply this vector by another vector, or a scalar
(single number).
rhs -- Vector, sequence or single value.
"""
v = self._v
if hasattr(rhs, "__getitem__"):
ox, oy, oz = rhs
v[0] *= ox
v[1] *= oy
v[2] *= oz
else:
v[0] *= rhs
v[1] *= rhs
v[2] *= rhs
return self
def __rmul__(self, lhs):
x, y, z = self._v
if hasattr(lhs, "__getitem__"):
ox, oy, oz = lhs
return self.from_floats(x*ox, y*oy, z*oz)
else:
return self.from_floats(x*lhs, y*lhs, z*lhs)
def __div__(self, rhs):
"""Return the result of dividing this vector by another vector, or a scalar (single number)."""
x, y, z = self._v
if hasattr(rhs, "__getitem__"):
ox, oy, oz = rhs
return self.from_floats(x/ox, y/oy, z/oz)
else:
return self.from_floats(x/rhs, y/rhs, z/rhs)
def __idiv__(self, rhs):
"""Divide this vector by another vector, or a scalar (single number)."""
v = self._v
if hasattr(rhs, "__getitem__"):
ox, oy, oz = rhs
v[0] /= ox
v[1] /= oy
v[2] /= oz
else:
v[0] /= rhs
v[1] /= rhs
v[2] /= rhs
return self
def __rdiv__(self, lhs):
x, y, z = self._v
if hasattr(lhs, "__getitem__"):
ox, oy, oz = lhs
return self.from_floats(ox/x, oy/y, oz/z)
else:
return self.from_floats(lhs/x, lhs/y, lhs/z)
def scalar_div(self, scalar):
v = self._v
v[0] /= scalar
v[1] /= scalar
v[2] /= scalar
def vector_div(self, vector):
x, y, z = vector
v= self._v
v[0] /= x
v[1] /= y
v[2] /= z
def get_scalar_div(self, scalar):
x, y, z = self._v
return self.from_floats(x / scalar, y / scalar, z / scalar)
def get_vector_div(self, vector):
x, y, z = self._v
xx, yy, zz = vector
return self.from_floats(x / xx, y / yy, z / zz)
def __neg__(self):
"""Returns the negation of this vector (a vector pointing in the opposite direction.
eg v1 = Vector3(1,2,3)
print -v1
>>> (-1,-2,-3)
"""
x, y, z = self._v
return self.from_floats(-x, -y, -z)
def __pos__(self):
return self.copy()
def __nonzero__(self):
x, y, z = self._v
# truthy when the vector is not the null vector; __nonzero__ must return a bool/int
return bool(x or y or z)
def __call__(self, keys):
"""Returns a tuple of the values in a vector
keys -- An iterable containing the keys (x, y or z)
eg v = Vector3(1.0, 2.0, 3.0)
v('zyx') -> (3.0, 2.0, 1.0)
"""
ord_x = ord('x')
v = self._v
return tuple( v[ord(c)-ord_x] for c in keys )
def as_tuple(self):
"""Returns a tuple of the x, y, z components. A little quicker than
tuple(vector)."""
return tuple(self._v)
def scale(self, scale):
    """Scales the vector by another vector or a scalar. Same as the
    *= operator.
    scale -- Value to scale the vector by
    """
    v = self._v
    if hasattr(scale, "__getitem__"):
        ox, oy, oz = scale
        v[0] *= ox
        v[1] *= oy
        v[2] *= oz
    else:
        v[0] *= scale
        v[1] *= scale
        v[2] *= scale
return self
def get_length(self):
"""Calculates the length of the vector."""
x, y, z = self._v
return sqrt(x*x + y*y +z*z)
get_magnitude = get_length
def set_length(self, new_length):
"""Sets the length of the vector. (Normalises it then scales it)
new_length -- The new length of the vector.
"""
v = self._v
try:
x, y, z = v
l = new_length / sqrt(x*x + y*y + z*z)
except ZeroDivisionError:
v[0] = 0.0
v[1] = 0.0
v[2] = 0.0
return self
v[0] = x*l
v[1] = y*l
v[2] = z*l
return self
def get_distance_to(self, p):
"""Returns the distance of this vector to a point.
p -- A position as a vector, or collection of 3 values.
"""
ax, ay, az = self._v
bx, by, bz = p
dx = ax-bx
dy = ay-by
dz = az-bz
return sqrt( dx*dx + dy*dy + dz*dz )
def get_distance_to_squared(self, p):
"""Returns the squared distance of this vector to a point.
p -- A position as a vector, or collection of 3 values.
"""
ax, ay, az = self._v
bx, by, bz = p
dx = ax-bx
dy = ay-by
dz = az-bz
return dx*dx + dy*dy + dz*dz
def normalise(self):
"""Scales the vector to be length 1."""
v = self._v
x, y, z = v
l = sqrt(x*x + y*y + z*z)
try:
v[0] /= l
v[1] /= l
v[2] /= l
except ZeroDivisionError:
v[0] = 0.0
v[1] = 0.0
v[2] = 0.0
return self
normalize = normalise
def get_normalised(self):
x, y, z = self._v
l = sqrt(x*x + y*y + z*z)
return self.from_floats(x/l, y/l, z/l)
get_normalized = get_normalised
def in_sphere(self, sphere):
"""Returns true if this vector (treated as a position) is contained in
the given sphere.
"""
return distance3d(sphere.position, self) <= sphere.radius
def dot(self, other):
"""Returns the dot product of this vector with another.
other -- A vector or tuple
"""
x, y, z = self._v
ox, oy, oz = other
return x*ox + y*oy + z*oz
def cross(self, other):
"""Returns the cross product of this vector with another.
other -- A vector or tuple
"""
x, y, z = self._v
bx, by, bz = other
return self.from_floats( y*bz - by*z,
z*bx - bz*x,
x*by - bx*y )
def cross_tuple(self, other):
"""Returns the cross product of this vector with another, as a tuple.
This avoids the Vector3 construction if you don't need it.
other -- A vector or tuple
"""
x, y, z = self._v
bx, by, bz = other
return ( y*bz - by*z,
z*bx - bz*x,
x*by - bx*y )
def distance3d_squared(p1, p2):
x, y, z = p1
xx, yy, zz = p2
dx = x - xx
dy = y - yy
dz = z - zz
return dx*dx + dy*dy +dz*dz
def distance3d(p1, p2):
x, y, z = p1
xx, yy, zz = p2
dx = x - xx
dy = y - yy
dz = z - zz
return sqrt(dx*dx + dy*dy +dz*dz)
def centre_point3d(points):
# start the sum from a null vector so sum() does not attempt 0 + Vector3
return sum( ( Vector3(p) for p in points ), Vector3() ) / len(points)
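# Illustrative sketch (hypothetical helper, not used by the module): a few
# typical operations with Vector3 and the module-level helpers above.
def _example_vector_ops():
    a = Vector3(1.0, 0.0, 0.0)
    b = Vector3(0.0, 1.0, 0.0)
    print a.dot(b)      # 0.0 - perpendicular vectors
    print a.cross(b)    # (0, 0, 1) by the right-hand rule
    print a + b         # component-wise sum
    print distance3d((0.0, 0.0, 0.0), (1.0, 2.0, 2.0))  # 3.0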
if __name__ == "__main__":
v1 = Vector3(2.2323, 3.43242, 1.)
print 3*v1
print (2, 4, 6)*v1
print (1, 2, 3)+v1
print v1('xxxyyyzzz')
print v1[2]
print v1.z
v1[2]=5.
print v1
v2= Vector3(1.2, 5, 10)
print v2
v1 += v2
print v1.get_length()
print repr(v1)
print v1[1]
p1 = Vector3(1,2,3)
print p1
print repr(p1)
for v in p1:
print v
#print p1[6]
ptest = Vector3( [1,2,3] )
print ptest
z = Vector3()
print z
file("test.txt", "w").write( "\n".join(str(float(n)) for n in range(20)) )
f = file("test.txt")
v1 = Vector3.from_iter( f )
v2 = Vector3.from_iter( f )
v3 = Vector3.from_iter( f )
print v1, v2, v3
print "--"
print v1
print v1 + (10,20,30)
print v1('xz')
print -v1
#print tuple(ptest)
#p1.set( (4, 5, 6) )
#print p1
print Vector3(10,10,30)+v1 | mit | -5,893,449,000,307,266,000 | 23.576923 | 117 | 0.408233 | false |
csb-toolbox/CSB | csb/test/cases/bio/io/wwpdb/__init__.py | 1 | 23555 | import os
import sys
import csb.test as test
from csb.bio.io.wwpdb import EntryID, StandardID, DegenerateID, SeqResID, InvalidEntryIDError, HeaderFormatError
from csb.bio.io.wwpdb import RobustResidueMapper, FastResidueMapper, CombinedResidueMapper, ResidueMappingError, SparseChainSequence
from csb.bio.io.wwpdb import StructureParser, RegularStructureParser, LegacyStructureParser, UnknownPDBResidueError
from csb.bio.io.wwpdb import get, find, FileSystemStructureProvider, RemoteStructureProvider, CustomStructureProvider, StructureNotFoundError
from csb.bio.sequence import SequenceAlphabets, ProteinAlphabet, SequenceTypes, RichSequence
from csb.bio.structure import ChemElements, SecStructures, Structure, Chain
@test.regression
class TestBiomoleculeRegressions(test.Case):
def testCommaSplitting(self):
"""
@see: [CSB 0000067]
"""
pdbfile = self.config.getTestFile('3shm_ca.pdb')
parser = LegacyStructureParser(pdbfile)
s1 = parser.parse_biomolecule(1, True)
self.assertEqual(len(s1.chains), 60)
self.assertEqual(s1.first_chain.id, 'A')
@test.regression
class TestSecStructureRegressions(test.Case):
def testSecStructureParsing(self):
"""
@see: [CSB 0000045]
"""
pdbfile = self.config.getTestFile('1d3z.regular.pdb')
structure = StructureParser(pdbfile).parse_structure(1)
self.assertTrue(structure.chains['A'].secondary_structure is not None)
@test.regression
class TestMappingRegressions(test.Case):
def testHetMapping(self):
"""
@see: [CSB 0000031]
"""
pdbfile = self.config.getTestFile('1d3z.regular.pdb')
structure = StructureParser(pdbfile).parse_structure(1)
residue = structure.chains['A'].find(26)
self.assertTrue(residue.has_structure)
self.assertTrue(residue.atoms.length > 0)
for an in residue.atoms:
self.assertTrue(residue[an].vector.tolist())
def testNonStandardResidueMapping(self):
"""
@see: [CSB 0000052]
"""
pdbfile = self.config.getTestFile('3p1u.pdb')
chain = StructureParser(pdbfile).parse_structure().chains['A']
for residue in chain.residues:
if residue.rank < 15:
self.assertFalse(residue.has_structure)
elif residue.rank in (15, 16):
self.assertTrue(residue.has_structure)
self.assertEqual(chain.residues[2].sequence_number, None)
self.assertEqual(chain.residues[15].sequence_number, 39)
self.assertEqual(chain.residues[16].sequence_number, 40)
@test.unit
class TestStructureParser(test.Case):
def setUp(self):
super(TestStructureParser, self).setUp()
regular_file = self.config.getTestFile('1d3z.regular.pdb')
legacy_file = self.config.getTestFile('1d3z.legacy.pdb')
self.rp = StructureParser(regular_file)
self.lp = StructureParser(legacy_file)
def testFactory(self):
self.assertTrue(isinstance(self.rp, RegularStructureParser))
self.assertTrue(isinstance(self.lp, LegacyStructureParser))
@test.unit
class TestLegacyStructureParser(test.Case):
def setUp(self):
super(TestLegacyStructureParser, self).setUp()
self.pdb = self.config.getTestFile('1d3z.legacy.pdb')
self.parser = LegacyStructureParser(self.pdb)
def testParseModels(self):
ensemble = self.parser.parse_models()
self.assertEqual(ensemble.models.length, 10)
self.assertEqual(ensemble[0].model_id, 1)
self.assertEqual(ensemble.models[1].model_id, 1)
def testParseStructure(self):
structure = self.parser.parse(model=1)
self.assertEqual(self.parser.parse_structure().model_id, 1)
self.assertEqual(structure.accession, '1d3z')
self.assertEqual(structure.model_id, 1)
# Chain level
self.assertEqual(structure.chains.length, 1)
self.assertEqual(len(structure.chains), 1)
self.assertEqual(structure.first_chain.molecule_id, '1')
self.assertEqual(structure.chains['A'].sequence, 'MQIFVKTLTGKTITLEVEPSDTIENVKAKIQDKEGIPPDQQRLIFAGKQLEDGRTLSDYNIQKESTLHLVLRLRGG')
self.assertEqual(len(structure.chains['A']), 76)
self.assertEqual(len(structure['A']), 76)
# Residue level
self.assertEqual(len(structure['A'][1:10]), 9)
self.assertEqual(structure['A'][0].type, SequenceAlphabets.Protein.MET)
self.assertEqual(structure['A'][0].label, 'MSE')
self.assertEqual(structure['A'][1].label, 'GLN')
self.assertTrue(structure['A'][0].is_modified)
self.assertFalse(structure['A'][1].is_modified)
# Atom level
self.assertEqual(structure['A'][1].atoms['CA'].element, None)
self.assertNotEqual(structure['A'][2].atoms['CA'].element, None)
self.assertEqual(structure['A'][2].atoms['CA'].element, ChemElements.C)
vector = [51.653, -89.304, 8.833]
self.assertEqual(structure['A'][0]['CA'].vector.tolist(), vector)
def testParseResidue(self):
self.assertEqual(self.parser.parse_residue('AGM'), SequenceAlphabets.Protein.ARG.name) #@UndefinedVariable
self.assertEqual(self.parser.parse_residue('AGM', as_type=SequenceTypes.Protein), SequenceAlphabets.Protein.ARG.name) #@UndefinedVariable
self.assertRaises(UnknownPDBResidueError, self.parser.parse_residue, 'AGM', as_type=SequenceTypes.NucleicAcid)
def testParseResidueSafe(self):
self.assertEqual(self.parser.parse_residue_safe('AGM', as_type=None), SequenceAlphabets.Protein.ARG.name) #@UndefinedVariable
self.assertEqual(self.parser.parse_residue_safe('AGM', as_type=SequenceTypes.Protein), SequenceAlphabets.Protein.ARG.name) #@UndefinedVariable
self.assertEqual(self.parser.parse_residue_safe('AGM', as_type=SequenceTypes.NucleicAcid), SequenceAlphabets.Nucleic.Any.name) #@UndefinedVariable
self.assertEqual(self.parser.parse_residue_safe('junk', as_type=SequenceTypes.Protein), SequenceAlphabets.Unknown.UNK.name) #@UndefinedVariable
def testGuessSequenceType(self):
self.assertEqual(self.parser.guess_sequence_type('AGM'), SequenceTypes.Protein)
self.assertEqual(self.parser.guess_sequence_type('DOC'), SequenceTypes.NucleicAcid)
self.assertRaises(UnknownPDBResidueError, self.parser.guess_sequence_type, 'junk')
def testFileName(self):
self.assertEqual(self.parser.filename, self.pdb)
def testModels(self):
self.assertEqual(self.parser.models(), list(range(1, 11)))
def testParseBiomolecule(self):
pdbfile = self.config.getTestFile('3p1u.pdb')
parser = LegacyStructureParser(pdbfile)
s2 = parser.parse_biomolecule(2)
self.assertEqual(len(s2.chains), 1)
self.assertEqual(s2.first_chain.id, 'B1')
self.assertRaises(KeyError, parser.parse_biomolecule, 3)
def testParseHetMolecules(self):
with self.config.getTempStream() as tmp:
tmp.write('HETATM 1 NA BLM A 1 -14.575 27.241 3.310 1.00 0.00 N ')
tmp.flush()
parser = LegacyStructureParser(tmp.name)
self.assertRaises(HeaderFormatError, parser.parse_structure)
del parser
@test.unit
class TestRegularStructureParser(test.Case):
def setUp(self):
super(TestRegularStructureParser, self).setUp()
self.pdb = self.config.getTestFile('1d3z.regular.pdb')
self.mapping = self.config.getTestFile('mapping.pdb')
self.parser = RegularStructureParser(self.pdb)
def testMapper(self):
p = RegularStructureParser(self.pdb, mapper=None)
self.assertTrue(isinstance(p.mapper, CombinedResidueMapper))
p.mapper = FastResidueMapper()
self.assertTrue(isinstance(p.mapper, FastResidueMapper))
def testCombinedMapping(self):
# default mapper
c = self.parser.parse(self.mapping)['E']
self.assertEqual(c.residues[14].type, ProteinAlphabet.GLU)
self.assertEqual(c.residues[15].type, ProteinAlphabet.GLU)
self.assertEqual(c.residues[16].type, ProteinAlphabet.THR)
self.assertEqual(4, sum([1 for r in c if r.has_structure]))
# explicit combined mapper
self.parser.mapper = CombinedResidueMapper()
c = self.parser.parse(self.mapping)['E']
self.assertEqual(4, sum([1 for r in c if r.has_structure]))
def testFastMapping(self):
self.parser.mapper = FastResidueMapper()
self.assertRaises(ResidueMappingError, self.parser.parse, self.mapping)
mapping2 = self.config.getTestFile('mapping2.pdb')
c = self.parser.parse(mapping2)['E']
self.assertEqual(2, sum([1 for r in c if r.has_structure]))
def testRobustMapping(self):
mapping3 = self.config.getTestFile('mapping3.pdb')
self.parser.mapper = RobustResidueMapper()
self.assertRaises(ResidueMappingError, self.parser.parse, mapping3)
c = self.parser.parse(self.mapping)['E']
self.assertEqual(4, sum([1 for r in c if r.has_structure]))
def testParseModels(self):
ensemble = self.parser.parse_models()
self.assertEqual(ensemble.models.length, 10)
self.assertEqual(ensemble[0].model_id, 1)
self.assertEqual(ensemble.models[1].model_id, 1)
self.assertRaises(ValueError, self.parser.parse_models, (999, 1000))
pdb = self.config.getTestFile('3p1u.pdb')
ensemble = RegularStructureParser(pdb).parse_models()
self.assertEqual(ensemble.models.length, 1)
self.assertEqual(ensemble[0].model_id, 1)
self.assertEqual(ensemble[0].resolution, 2.05)
def testParseStructure(self):
structure = self.parser.parse(model=2)
self.assertEqual(self.parser.parse_structure().model_id, 1)
self.assertEqual(structure.resolution, None)
self.assertEqual(structure.accession, '1d3z')
self.assertEqual(structure.model_id, 2)
# Chain level
self.assertEqual(structure.chains.length, 1)
self.assertEqual(len(structure.chains), 1)
self.assertEqual(structure.chains['A'].sequence, 'MQIFVKTLTGKTITLEVEPSDTIENVKAKIQDKEGIPPDQQRLIFAGKQLEDGRTLSDYNIQKESTLHLVLRLRGG')
self.assertEqual(structure.chains['A'].sequence, ''.join([str(r.type) for r in structure.chains['A'] if r.has_structure]))
ss = structure.chains['A'].secondary_structure
self.assertEqual(ss.to_string(), '-EEEEE-----EEEEE-----HHHHHHHHHHHHHH-HHH-EEEEE--EE------HHHHH-----EEEEEE')
self.assertEqual(len(ss.scan(1, 99, filter=SecStructures.Helix)), 3)
self.assertEqual(ss[1].start, 2)
self.assertEqual(ss[1].end, 6)
self.assertEqual(len(structure.chains['A']), 76)
self.assertEqual(len(structure['A']), 76)
# Residue level
self.assertEqual(len(structure['A'][1:10]), 9)
self.assertEqual(structure['A'][0].type, SequenceAlphabets.Protein.MET)
self.assertEqual(structure['A'][0].label, 'MSE')
self.assertEqual(structure['A'][1].label, 'GLN')
# Atom
vector = [52.647, -87.443, 9.674]
self.assertEqual(structure['A'][0]['CA'].vector.tolist(), vector)
def testParseResidue(self):
self.assertEqual(self.parser.parse_residue('AGM'), SequenceAlphabets.Protein.ARG.name) #@UndefinedVariable
self.assertEqual(self.parser.parse_residue('AGM', as_type=SequenceTypes.Protein), SequenceAlphabets.Protein.ARG.name) #@UndefinedVariable
self.assertRaises(UnknownPDBResidueError, self.parser.parse_residue, 'AGM', as_type=SequenceTypes.NucleicAcid)
def testParseResidueSafe(self):
self.assertEqual(self.parser.parse_residue_safe('AGM', as_type=None), SequenceAlphabets.Protein.ARG.name) #@UndefinedVariable
self.assertEqual(self.parser.parse_residue_safe('AGM', as_type=SequenceTypes.Protein), SequenceAlphabets.Protein.ARG.name) #@UndefinedVariable
self.assertEqual(self.parser.parse_residue_safe('AGM', as_type=SequenceTypes.NucleicAcid), SequenceAlphabets.Nucleic.Any.name) #@UndefinedVariable
self.assertEqual(self.parser.parse_residue_safe('junk', as_type=SequenceTypes.Protein), SequenceAlphabets.Unknown.UNK.name) #@UndefinedVariable
def testGuessSequenceType(self):
self.assertEqual(self.parser.guess_sequence_type('AGM'), SequenceTypes.Protein)
self.assertEqual(self.parser.guess_sequence_type('DOC'), SequenceTypes.NucleicAcid)
self.assertRaises(UnknownPDBResidueError, self.parser.guess_sequence_type, 'junk')
def testFileName(self):
self.assertEqual(self.parser.filename, self.pdb)
def testModels(self):
self.assertEqual(self.parser.models(), list(range(1, 11)))
@test.unit
class TestFastResidueMapper(test.Case):
def setUp(self):
super(TestFastResidueMapper, self).setUp()
self.mapper = FastResidueMapper()
def _build(self, string):
id = str(hash(string))
seq = RichSequence(id, "", string, SequenceTypes.Protein)
return SparseChainSequence.create(Chain.from_sequence(seq))
def testMap(self):
ref = self._build("ZABCD")
sparse = self._build("AC")
self.assertRaises(ResidueMappingError, self.mapper.map, sparse, ref)
sparse.residues[2].id = (22, None)
result = self.mapper.map(sparse, ref)
self.assertEqual(result.sequence, "-A-C-")
def testModifiedResidueMapping(self):
"""
Strictly speaking, this is a regression test. But it is so essential that
we should keep it here.
@see: [csb: 19]
"""
pdb = self.config.getTestFile('modified.pdb')
structure = StructureParser(pdb, mapper=self.mapper).parse_structure()
chain = structure.first_chain
self.assertFalse(chain.residues[1].has_structure)
self.assertEqual(chain.residues[1].label, "MET")
self.assertTrue(chain.residues[19].has_structure)
self.assertEqual(chain.residues[19].label, "MSE")
@test.unit
class TestRobustResidueMapper(TestFastResidueMapper):
def setUp(self):
super(TestRobustResidueMapper, self).setUp()
self.mapper = RobustResidueMapper()
def testMap(self):
ref = self._build("ABCD")
sparse = self._build("EF")
self.assertRaises(ResidueMappingError, self.mapper.map, sparse, ref)
ref = self._build("ZABCD")
sparse = self._build("AC")
result = self.mapper.map(sparse, ref)
self.assertEqual(result.sequence, "-A-C-")
def testModifiedResidueMapping(self):
pdb = self.config.getTestFile('modified2.pdb')
structure = StructureParser(pdb, mapper=self.mapper).parse_structure()
chain = structure.first_chain
self.assertTrue(chain.residues[1].has_structure)
self.assertEqual(chain.residues[1].label, "MSE")
self.assertFalse(chain.residues[19].has_structure)
self.assertEqual(chain.residues[19].label, "MET")
@test.unit
class TestCombinedResidueMapper(TestFastResidueMapper):
def setUp(self):
super(TestCombinedResidueMapper, self).setUp()
self.mapper = CombinedResidueMapper()
def testMap(self):
ref = self._build("ZABCD")
sparse = self._build("AC")
result = self.mapper.map(sparse, ref)
self.assertEqual(result.sequence, "-A-C-")
@test.unit
class TestFileSystemProvider(test.Case):
def setUp(self):
super(TestFileSystemProvider, self).setUp()
self.path = self.config.data
self.provider = FileSystemStructureProvider()
self.provider.add(self.path)
def testAdd(self):
self.assertEqual(len(self.provider.paths), 1)
self.provider.add('.')
self.assertEqual(self.provider.paths[1], '.')
self.assertEqual(len(self.provider.paths), 2)
self.assertRaises(IOError, self.provider.add, 'non-exi$ting path')
def testRemove(self):
self.assertEqual(len(self.provider.paths), 1)
self.provider.remove(self.path)
self.assertEqual(len(self.provider.paths), 0)
self.assertRaises(ValueError, self.provider.remove, 'non-exi$ting path')
def testFind(self):
f1 = self.provider.find('3p1u')
f2 = self.config.getTestFile('3p1u.pdb')
self.assertEqual(os.path.abspath(f1), os.path.abspath(f2))
self.assertEqual(None, self.provider.find('$'))
def testGet(self):
s = self.provider.get('3p1u')
self.assertEqual(s.accession, '3p1u')
self.assertTrue(isinstance(s, Structure))
self.assertRaises(StructureNotFoundError, self.provider.get, '$')
@test.unit
class TestCustomProvider(test.Case):
def setUp(self):
super(TestCustomProvider, self).setUp()
self.path = self.config.getTestFile('3p1u.pdb')
self.provider = CustomStructureProvider({'3p1u': self.path})
def testAdd(self):
self.assertEqual(len(self.provider.paths), 1)
self.assertEqual(self.provider.paths[0], self.path)
self.provider.add('test', self.config.getTestFile('d1nz0a_.pdb'))
self.assertEqual(len(self.provider.paths), 2)
self.assertRaises(IOError, self.provider.add, 'test', 'non-exi$ting path')
def testRemove(self):
self.assertEqual(len(self.provider.paths), 1)
self.provider.remove('3p1u')
self.assertEqual(len(self.provider.paths), 0)
self.assertRaises(ValueError, self.provider.remove, '$')
def testFind(self):
f1 = self.provider.find('3p1u')
f2 = self.config.getTestFile('3p1u.pdb')
self.assertEqual(os.path.abspath(f1), os.path.abspath(f2))
self.assertEqual(None, self.provider.find('$'))
def testGet(self):
s = self.provider.get('3p1u')
self.assertEqual(s.accession, '3p1u')
self.assertTrue(isinstance(s, Structure))
self.assertRaises(StructureNotFoundError, self.provider.get, '$')
@test.unit
class TestRemoteProvider(test.Case):
def setUp(self):
super(TestRemoteProvider, self).setUp()
self.provider = RemoteStructureProvider()
def testGet(self):
s = self.provider.get('3p1u')
self.assertEqual(s.accession, '3p1u')
self.assertTrue(isinstance(s, Structure))
self.provider.prefix = 'http://www.google.com/NotExisting'
self.assertRaises(StructureNotFoundError, self.provider.get, 'NoSuchFile')
@test.functional
class TestGet(test.Case):
def runTest(self):
structure = get('1d3z')
self.assertEqual(structure.accession, '1d3z')
# Chain level
self.assertEqual(structure.chains.length, 1)
self.assertEqual(len(structure.chains), 1)
self.assertEqual(len(structure.chains['A']), 76)
self.assertEqual(len(structure['A']), 76)
# Residue level
self.assertEqual(len(structure['A'][1:10]), 9)
self.assertEqual(structure['A'][0].type,SequenceAlphabets.Protein.MET)
@test.functional
class TestFind(test.Case):
def runTest(self):
f2 = find('3p1u', [self.config.data])
f1 = self.config.getTestFile('3p1u.pdb')
self.assertEqual(os.path.abspath(f1), os.path.abspath(f2))
self.assertEqual(None, find('$', self.config.data))
@test.unit
class TestEntryID(test.Case):
def setUp(self):
super(TestEntryID, self).setUp()
def testFactory(self):
self.assertTrue(isinstance(EntryID.create('abcdE'), StandardID))
self.assertTrue(isinstance(EntryID.create('abcdddE'), DegenerateID))
self.assertTrue(isinstance(EntryID.create('abcd_E'), SeqResID))
@test.unit
class TestStandardID(test.Case):
def setUp(self):
super(TestStandardID, self).setUp()
self.id = StandardID('abCdE')
self.accession = 'abcd'
self.chain = 'E'
def testAccession(self):
self.assertEqual(self.id.accession, self.accession)
def testChain(self):
self.assertEqual(self.id.chain, self.chain)
def testEntryID(self):
self.assertEqual(self.id.entry_id, self.accession + self.chain)
def testFormat(self):
self.assertEqual(self.id.format(), self.accession + self.chain)
def testConstructor(self):
self.assertRaises(InvalidEntryIDError, StandardID, 'aE')
@test.unit
class TestDegenerateID(TestStandardID):
def setUp(self):
super(TestDegenerateID, self).setUp()
self.id = DegenerateID('abCdddE')
self.accession = 'abcddd'
self.chain = 'E'
@test.unit
class TestSeqResID(TestStandardID):
def setUp(self):
super(TestSeqResID, self).setUp()
self.id = SeqResID('abCd_E')
def testFormat(self):
self.assertEqual(self.id.format(), 'abcd_E')
def testConstructor(self):
self.assertRaises(InvalidEntryIDError, SeqResID, 'abcdE')
@test.custom
def TestPDB():
import glob
class PDBTestCase(test.Case):
def runTest(self):
try:
StructureParser(self.entry).parse_structure()
except:
sys.stdout.write("\n{0}\n".format(self.entry))
self.reRaise([self.entry])
var = 'PDBMASK'
suite = test.unittest.TestSuite()
if var in os.environ:
for entry in glob.glob(os.environ[var]):
case = PDBTestCase()
case.entry = entry
suite.addTest(case)
return suite
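# Note: TestPDB only yields cases when the PDBMASK environment variable points to
# a glob of PDB files, e.g. PDBMASK='/data/pdb/*.ent' (placeholder path); each
# matching entry is then parsed by StructureParser as its own test case.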
if __name__ == '__main__':
test.Console()
| mit | -2,096,132,994,395,017,200 | 35.462848 | 170 | 0.613288 | false |
sourcepole/qgis | python/plugins/fTools/tools/doVisual.py | 1 | 21411 | # -*- coding: utf-8 -*-
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from ui_frmVisual import Ui_Dialog
import ftools_utils
import math
class VisualDialog( QDialog, Ui_Dialog ):
def __init__( self, iface, function ):
QDialog.__init__( self )
self.iface = iface
self.setupUi( self )
self.myFunction = function
if self.myFunction == 2 or self.myFunction == 3:
QObject.connect( self.inShape, SIGNAL( "currentIndexChanged(QString)" ), self.update )
self.manageGui()
self.cancel_close = self.buttonBox_2.button( QDialogButtonBox.Close )
self.buttonOk = self.buttonBox_2.button( QDialogButtonBox.Ok )
self.progressBar.setValue( 0 )
self.partProgressBar.setValue( 0 )
self.partProgressBar.setVisible( False )
def keyPressEvent( self, e ):
'''
Reimplemented key press event:
'''
if ( e.modifiers() == Qt.ControlModifier or e.modifiers() == Qt.MetaModifier ) and e.key() == Qt.Key_C:
#selection = self.tblUnique.selectedItems()
items = QString()
if self.myFunction in ( 1, 2 ):
for rec in range( self.tblUnique.rowCount() ):
items.append( self.tblUnique.item( rec, 0 ).text() + "\n" )
else:
for rec in range( self.tblUnique.rowCount() ):
items.append( self.tblUnique.item( rec, 0 ).text() + ":" + self.tblUnique.item( rec, 1 ).text() + "\n" )
if not items.isEmpty():
clip_board = QApplication.clipboard()
clip_board.setText( items )
else:
QDialog.keyPressEvent( self, e )
def update( self ):
self.cmbField.clear()
inputLayer = unicode( self.inShape.currentText() )
if inputLayer != "":
changedLayer = ftools_utils.getVectorLayerByName( inputLayer )
changedField = changedLayer.dataProvider().fields()
# for Basic statistics (with or without selection)
if self.myFunction == 3:
if changedLayer.selectedFeatureCount() != 0:
self.useSelected.setCheckState( Qt.Checked )
else:
self.useSelected.setCheckState( Qt.Unchecked )
# add all fields in combobox because now we can work with text fields too
for i in changedField:
self.cmbField.addItem( unicode( changedField[i].name() ) )
def accept( self ):
if self.inShape.currentText() == "":
QMessageBox.information( self, self.tr("Error!"), self.tr( "Please specify input vector layer" ) )
elif self.cmbField.isVisible() and self.cmbField.currentText() == "":
QMessageBox.information( self, self.tr("Error!"), self.tr( "Please specify input field" ) )
else:
self.visual( self.inShape.currentText(), self.cmbField.currentText(), self.useSelected.checkState() )
def manageGui( self ):
if self.myFunction == 1: # Check geometry validity
self.setWindowTitle( self.tr( "Check geometry validity" ) )
self.cmbField.setVisible( False )
self.label.setVisible( False )
self.useSelected.setVisible( False )
self.label_2.setText( self.tr( "Geometry errors" ) )
self.label_4.setText( self.tr( "Total encountered errors" ) )
elif self.myFunction == 2: # List unique values
self.setWindowTitle( self.tr( "List unique values" ) )
self.label_2.setText( self.tr( "Unique values" ) )
self.label_4.setText(self.tr( "Total unique values" ) )
self.useSelected.setVisible( False )
elif self.myFunction == 3: # Basic statistics
self.setWindowTitle( self.tr( "Basic statistics" ) )
self.label_2.setText( self.tr( "Statistics output" ) )
self.label_4.setVisible( False )
self.lstCount.setVisible( False )
self.resize( 381, 400 )
elif self.myFunction == 4: # Nearest neighbour analysis
self.setWindowTitle( self.tr( "Nearest neighbour analysis" ) )
self.cmbField.setVisible( False )
self.label.setVisible( False )
self.useSelected.setVisible( False )
self.label_2.setText( self.tr( "Nearest neighbour statistics" ) )
self.label_4.setVisible( False )
self.lstCount.setVisible( False )
self.resize( 381, 200 )
self.inShape.clear()
if self.myFunction == 1:
myList = ftools_utils.getLayerNames( [ QGis.Polygon ] )
elif self.myFunction == 4:
myList = ftools_utils.getLayerNames( [ QGis.Point ] )
else:
myList = ftools_utils.getLayerNames( [ QGis.Point, QGis.Line, QGis.Polygon ] )
self.inShape.addItems( myList )
return
#1: Check geometry
#2: List unique values
#3: Basic statistics
#4: Nearest neighbour analysis
def visual( self, myLayer, myField, mySelection ):
vlayer = ftools_utils.getVectorLayerByName( myLayer )
self.tblUnique.clearContents()
self.tblUnique.setRowCount( 0 )
self.lstCount.clear()
self.buttonOk.setEnabled( False )
self.testThread = visualThread( self.iface.mainWindow(), self, self.myFunction, vlayer, myField, mySelection )
QObject.connect( self.testThread, SIGNAL( "runFinished(PyQt_PyObject)" ), self.runFinishedFromThread )
QObject.connect( self.testThread, SIGNAL( "runStatus(PyQt_PyObject)" ), self.runStatusFromThread )
QObject.connect( self.testThread, SIGNAL( "runRange(PyQt_PyObject)" ), self.runRangeFromThread )
QObject.connect( self.testThread, SIGNAL( "runPartRange(PyQt_PyObject)" ), self.runPartRangeFromThread )
QObject.connect( self.testThread, SIGNAL( "runPartStatus(PyQt_PyObject)" ), self.runPartStatusFromThread )
self.cancel_close.setText( self.tr("Cancel") )
QObject.connect( self.cancel_close, SIGNAL( "clicked()" ), self.cancelThread )
self.testThread.start()
return True
def cancelThread( self ):
self.testThread.stop()
self.buttonOk.setEnabled( True )
def runFinishedFromThread( self, output ):
self.testThread.stop()
self.buttonOk.setEnabled( True )
result = output[ 0 ]
numRows = len( result )
self.tblUnique.setRowCount( numRows )
if self.myFunction in ( 1, 2 ):
self.tblUnique.setColumnCount( 1 )
for rec in range( numRows ):
item = QTableWidgetItem( result[ rec ] )
self.tblUnique.setItem( rec, 0, item )
else:
self.tblUnique.setColumnCount( 2 )
for rec in range( numRows ):
tmp = result[ rec ].split( ":" )
item = QTableWidgetItem( tmp[ 0 ] )
self.tblUnique.setItem( rec, 0, item )
item = QTableWidgetItem( tmp[ 1 ] )
self.tblUnique.setItem( rec, 1, item )
self.tblUnique.setHorizontalHeaderLabels( [ self.tr("Parameter"), self.tr("Value") ] )
self.tblUnique.horizontalHeader().setResizeMode( 1, QHeaderView.ResizeToContents )
self.tblUnique.horizontalHeader().show()
self.tblUnique.horizontalHeader().setResizeMode( 0, QHeaderView.Stretch )
self.tblUnique.resizeRowsToContents()
self.lstCount.insert( unicode( output[ 1 ] ) )
self.cancel_close.setText( self.tr( "Close" ) )
QObject.disconnect( self.cancel_close, SIGNAL( "clicked()" ), self.cancelThread )
return True
def runStatusFromThread( self, status ):
self.progressBar.setValue( status )
def runRangeFromThread( self, range_vals ):
self.progressBar.setRange( range_vals[ 0 ], range_vals[ 1 ] )
def runPartStatusFromThread( self, status ):
self.partProgressBar.setValue( status )
if status >= self.part_max:
self.partProgressBar.setVisible( False )
def runPartRangeFromThread( self, range_vals ):
self.part_max = range_vals[ 1 ]
self.partProgressBar.setVisible( True )
self.partProgressBar.setRange( range_vals[ 0 ], range_vals[ 1 ] )
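# Note: visualThread runs the selected analysis off the GUI thread and reports
# progress back through the runStatus/runRange/runFinished (and runPart*) signals
# connected in VisualDialog.visual(). stop() only sets self.running to False; the
# per-feature loops do not re-check it, so a cancel takes effect once the current
# analysis method returns.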
class visualThread( QThread ):
def __init__( self, parentThread, parentObject, function, vlayer, myField, mySelection ):
QThread.__init__( self, parentThread )
self.parent = parentObject
self.running = False
self.myFunction = function
self.vlayer = vlayer
self.myField = myField
self.mySelection = mySelection
# self.total = 0
# self.currentCount = 0
def run( self ):
self.running = True
if self.myFunction == 1: # Check geometry
( lst, cnt ) = self.check_geometry( self.vlayer )
elif self.myFunction == 2: # List unique values
( lst, cnt ) = self.list_unique_values( self.vlayer, self.myField )
elif self.myFunction == 3: # Basic statistics
( lst, cnt ) = self.basic_statistics( self.vlayer, self.myField )
elif self.myFunction == 4: # Nearest neighbour analysis
( lst, cnt ) = self.nearest_neighbour_analysis( self.vlayer )
self.emit( SIGNAL( "runFinished(PyQt_PyObject)" ), ( lst, cnt ) )
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
def stop(self):
self.running = False
def list_unique_values( self, vlayer, myField ):
vprovider = vlayer.dataProvider()
allAttrs = vprovider.attributeIndexes()
vprovider.select( allAttrs )
fields = vprovider.fields()
index = vprovider.fieldNameIndex( myField )
unique = ftools_utils.getUniqueValues( vprovider, int( index ) )
lstUnique = []
nFeat = len( unique )
nElement = 0
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
for item in unique:
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
lstUnique.append(item.toString().trimmed())
lstCount = len( unique )
return ( lstUnique, lstCount )
def basic_statistics( self, vlayer, myField ):
vprovider = vlayer.dataProvider()
allAttrs = vprovider.attributeIndexes()
vprovider.select( allAttrs )
fields = vprovider.fields()
index = vprovider.fieldNameIndex( myField )
feat = QgsFeature()
sumVal = 0.0
meanVal = 0.0
nVal = 0.0
values = []
first = True
nElement = 0
# determine selected field type
if ftools_utils.getFieldType( vlayer, myField ) in (
'String', 'varchar', 'char', 'text'):
fillVal = 0
emptyVal = 0
if self.mySelection: # only selected features
selection = vlayer.selectedFeatures()
nFeat = vlayer.selectedFeatureCount()
if nFeat > 0:
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
for f in selection:
atMap = f.attributeMap()
lenVal = float( len( atMap[ index ].toString() ) )
if first:
minVal = lenVal
maxVal = lenVal
first = False
else:
if lenVal < minVal: minVal = lenVal
if lenVal > maxVal: maxVal = lenVal
if lenVal != 0.00:
fillVal += 1
else:
emptyVal += 1
values.append( lenVal )
sumVal = sumVal + lenVal
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
else: # there is no selection, process the whole layer
nFeat = vprovider.featureCount()
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
vprovider.select( allAttrs )
while vprovider.nextFeature( feat ):
atMap = feat.attributeMap()
lenVal = float( len( atMap[ index ].toString() ) )
if first:
minVal = lenVal
maxVal = lenVal
first = False
else:
if lenVal < minVal: minVal = lenVal
if lenVal > maxVal: maxVal = lenVal
if lenVal != 0.00:
fillVal += 1
else:
emptyVal += 1
values.append( lenVal )
sumVal = sumVal + lenVal
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
nVal= float( len( values ) )
if nVal > 0:
meanVal = sumVal / nVal
lstStats = []
lstStats.append( self.tr( "Max. len:" ) + unicode( maxVal ) )
lstStats.append( self.tr( "Min. len:" ) + unicode( minVal ) )
lstStats.append( self.tr( "Mean. len:" ) + unicode( meanVal ) )
lstStats.append( self.tr( "Filled:" ) + unicode( fillVal ) )
lstStats.append( self.tr( "Empty:" ) + unicode( emptyVal ) )
lstStats.append( self.tr( "N:" ) + unicode( nVal ) )
return ( lstStats, [] )
else:
return ( ["Error:No features selected!"], [] )
else: # numeric field
stdVal = 0.00
cvVal = 0.00
rangeVal = 0.00
medianVal = 0.00
maxVal = 0.00
minVal = 0.00
if self.mySelection: # only selected features
selection = vlayer.selectedFeatures()
nFeat = vlayer.selectedFeatureCount()
uniqueVal = ftools_utils.getUniqueValuesCount( vlayer, index, True )
if nFeat > 0:
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
for f in selection:
atMap = f.attributeMap()
value = float( atMap[ index ].toDouble()[ 0 ] )
if first:
minVal = value
maxVal = value
first = False
else:
if value < minVal: minVal = value
if value > maxVal: maxVal = value
values.append( value )
sumVal = sumVal + value
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
else: # there is no selection, process the whole layer
nFeat = vprovider.featureCount()
uniqueVal = ftools_utils.getUniqueValuesCount( vlayer, index, False )
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
vprovider.select( allAttrs )
while vprovider.nextFeature( feat ):
atMap = feat.attributeMap()
value = float( atMap[ index ].toDouble()[ 0 ] )
if first:
minVal = value
maxVal = value
first = False
else:
if value < minVal: minVal = value
if value > maxVal: maxVal = value
values.append( value )
sumVal = sumVal + value
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
nVal= float( len( values ) )
if nVal > 0.00:
rangeVal = maxVal - minVal
meanVal = sumVal / nVal
if meanVal != 0.00:
for val in values:
stdVal += ( ( val - meanVal ) * ( val - meanVal ) )
stdVal = math.sqrt( stdVal / nVal )
cvVal = stdVal / meanVal
if nVal > 1:
lstVal = values
lstVal.sort()
if ( nVal % 2 ) == 0:
medianVal = 0.5 * ( lstVal[ int( ( nVal - 1 ) / 2 ) ] + lstVal[ int( ( nVal ) / 2 ) ] )
else:
            medianVal = lstVal[ int( ( nVal - 1 ) / 2 ) ]
lstStats = []
lstStats.append( self.tr( "Mean:" ) + unicode( meanVal ) )
lstStats.append( self.tr( "StdDev:" ) + unicode( stdVal ) )
lstStats.append( self.tr( "Sum:" ) + unicode( sumVal) )
lstStats.append( self.tr( "Min:" ) + unicode( minVal ) )
lstStats.append( self.tr( "Max:" ) + unicode( maxVal ) )
lstStats.append( self.tr( "N:" ) + unicode( nVal ) )
lstStats.append( self.tr( "CV:" ) + unicode( cvVal ) )
lstStats.append( self.tr( "Number of unique values:" ) + unicode( uniqueVal ) )
lstStats.append( self.tr( "Range:" ) + unicode( rangeVal ) )
lstStats.append( self.tr( "Median:" ) + unicode( medianVal ) )
return ( lstStats, [] )
else:
return ( ["Error:No features selected!"], [] )
def nearest_neighbour_analysis( self, vlayer ):
vprovider = vlayer.dataProvider()
allAttrs = vprovider.attributeIndexes()
vprovider.select( allAttrs )
feat = QgsFeature()
neighbour = QgsFeature()
sumDist = 0.00
distance = QgsDistanceArea()
A = vlayer.extent()
A = float( A.width() * A.height() )
index = ftools_utils.createIndex( vprovider )
vprovider.rewind()
nFeat = vprovider.featureCount()
nElement = 0
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
while vprovider.nextFeature( feat ):
neighbourID = index.nearestNeighbor( feat.geometry().asPoint(), 2 )[ 1 ]
vprovider.featureAtId( neighbourID, neighbour, True, [] )
nearDist = distance.measureLine( neighbour.geometry().asPoint(), feat.geometry().asPoint() )
sumDist += nearDist
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
nVal = vprovider.featureCount()
do = float( sumDist) / nVal
de = float( 0.5 / math.sqrt( nVal / A ) )
d = float( do / de )
SE = float( 0.26136 / math.sqrt( ( nVal * nVal ) / A ) )
zscore = float( ( do - de ) / SE )
lstStats = []
lstStats.append( self.tr( "Observed mean distance:" ) + unicode( do ) )
lstStats.append( self.tr( "Expected mean distance:" ) + unicode( de ) )
lstStats.append( self.tr( "Nearest neighbour index:" ) + unicode( d ) )
lstStats.append( self.tr( "N:" ) + unicode( nVal ) )
lstStats.append( self.tr( "Z-Score:" ) + unicode( zscore ) )
return ( lstStats, [] )
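  # Note (added for clarity): the figures above follow the Clark-Evans (1954)
  # nearest-neighbour statistic. Under complete spatial randomness the expected
  # mean distance is 1 / (2 * sqrt(N / A)), the index is observed / expected,
  # and the z-score uses the standard error 0.26136 / sqrt(N**2 / A), where N
  # is the feature count and A the area of the layer extent.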
def check_geometry( self, vlayer ):
vprovider = vlayer.dataProvider()
allAttrs = vprovider.attributeIndexes()
vprovider.select( allAttrs )
feat = QgsFeature()
geom = QgsGeometry()
count = 0
lstErrors = []
nFeat = vprovider.featureCount()
nElement = 0
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
while vprovider.nextFeature( feat ):
geom = QgsGeometry( feat.geometry() )
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
if geom.isMultipart():
polygons = geom.asMultiPolygon()
for polygon in polygons:
if not self.isHoleNested( polygon ):
lstErrors.append( self.tr( "Feature %1 contains an unnested hole" ).arg( unicode( feat.id() ) ) )
count += 1
if not self.isPolygonClosed( polygon ):
lstErrors.append( self.tr( "Feature %1 is not closed" ).arg( unicode( feat.id() ) ) )
count += 1
if self.isSelfIntersecting( polygon ):
lstErrors.append( self.tr( "Feature %1 is self intersecting" ).arg( unicode( feat.id() ) ) )
count += 1
if not self.isCorrectOrientation( polygon ):
lstErrors.append( self.tr( "Feature %1 has incorrect node ordering" ).arg( unicode( feat.id() ) ) )
count += 1
else:
geom = geom.asPolygon()
if not self.isHoleNested( geom ):
lstErrors.append( self.tr( "Feature %1 contains an unnested hole" ).arg( unicode( feat.id() ) ) )
count += 1
if not self.isPolygonClosed( geom ):
lstErrors.append( self.tr( "Feature %1 is not closed" ).arg( unicode( feat.id() ) ) )
count += 1
if self.isSelfIntersecting( geom ):
lstErrors.append( self.tr( "Feature %1 is self intersecting" ).arg( unicode( feat.id() ) ) )
count += 1
if not self.isCorrectOrientation( geom ):
lstErrors.append( self.tr( "Feature %1 has incorrect node ordering" ).arg( unicode( feat.id() ) ) )
count += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nFeat )
return ( lstErrors, count )
def isHoleNested( self, polygon ):
if len( polygon ) <= 1:
return True
else:
outer = polygon[ 0 ]
for i in polygon[ 1: len( polygon ) ]:
if not self.arePointsInside( i, outer ):
return False
return True
def arePointsInside( self, inner, outer ):
outer = QgsGeometry().fromPolygon( [ outer ] )
for j in inner:
if not outer.contains(j):
return False
return True
def isPolygonClosed( self, polygon ):
for i in polygon:
first = i[ 0 ]
last = i[ len( i )-1 ]
if not first == last:
return False
return True
def isSelfIntersecting( self, polygon ):
cPart = 0
for h in polygon:
cPart += len(h)
self.emit( SIGNAL( "runPartRange(PyQt_PyObject)" ), ( 0, cPart ) )
nPart = 0
for h in polygon:
for i in range( 0, len(h)-1 ):
self.emit( SIGNAL( "runPartStatus(PyQt_PyObject)" ), nPart )
count = 0
for j in range( i+1, len(h)-1 ):
if QgsGeometry().fromPolyline( [ h[ i ], h[ i + 1 ] ] ).intersects( QgsGeometry().fromPolyline( [ h[ j ], h[ j + 1 ] ] ) ):
count += 1
if count > 2:
self.emit( SIGNAL( "runPartStatus(PyQt_PyObject)" ), cPart )
return True
nPart += 1
self.emit( SIGNAL( "runPartStatus(PyQt_PyObject)" ), cPart )
return False
def isCorrectOrientation( self, polygon ):
outer = True
for h in polygon:
if outer:
outer = False
if not self.isClockwise( h ):
return False
else:
if self.isClockwise(h):
return False
return True
def isClockwise( self, temp ):
area = 0
for pt in range( 0, len( temp ) -1 ):
area += ( temp[ pt ].x() * temp[ pt + 1 ].y() - temp[ pt + 1 ].x() * temp[ pt ].y() )
area = area / 2
if area <= 0:
return True
else:
return False
| gpl-2.0 | -2,410,296,027,648,014,000 | 38.797398 | 133 | 0.603848 | false |
andresriancho/python-aop | aop/aspecttype.py | 1 | 1966 | # -*- coding: utf-8 -*-
"""
python-aop is part of LemonFramework.
python-aop is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
python-aop is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with python-aop. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) 2013 Vicente Ruiz <[email protected]>
"""
class AspectType(type):
"""Metaclase para la construcción de aspectos. Añade el método ``pointcut``
a la clase, de forma que permite vincular un advise a un joinpoint."""
def __new__(mcs, name, bases, classdict):
        # Prepare a function in charge of performing the pointcut for
        # any method or attribute of the class
def pointcut(cls, joinpoint, advise_class, **kwargs):
            # Set up the point where the aspect will be executed
joinpoint_attr = getattr(cls, joinpoint)
            # Get any additional parameters for the aspect
advise_args = () if not 'args' in kwargs else tuple(kwargs['args'])
advise_kwargs = {} if not 'kwargs' in kwargs else dict(kwargs['kwargs'])
            # Create the advise
advise = advise_class(joinpoint_attr, *advise_args, **advise_kwargs)
            # Prepare a wrapper
def wrapper(self, *args, **kwargs):
return advise(self, *args, **kwargs)
setattr(cls, joinpoint, wrapper)
        # Add the ``pointcut`` method to the class
classdict['pointcut'] = classmethod(pointcut)
return type.__new__(mcs, name, bases, classdict)
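# Illustrative usage sketch (not part of the library; the names below are
# hypothetical). An advise class receives the original attribute in its
# constructor and is called with the instance plus the original arguments:
#
#   class TraceAdvise(object):
#       def __init__(self, joinpoint, prefix=''):
#           self.joinpoint = joinpoint
#           self.prefix = prefix
#       def __call__(self, instance, *args, **kwargs):
#           print('%s calling %s' % (self.prefix, self.joinpoint.__name__))
#           return self.joinpoint(instance, *args, **kwargs)
#
#   class Account(object):
#       __metaclass__ = AspectType
#       def deposit(self, amount):
#           return amount
#
#   Account.pointcut('deposit', TraceAdvise, kwargs={'prefix': '[aop]'})
#   Account().deposit(100)  # prints "[aop] calling deposit" and returns 100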
| gpl-3.0 | -6,336,378,035,457,480,000 | 44.488372 | 84 | 0.676892 | false |
efiop/dvc | dvc/_debug.py | 1 | 2310 | from contextlib import ExitStack, contextmanager
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from argparse import Namespace
@contextmanager
def instrument(html_output=False):
"""Run a statistical profiler"""
try:
from pyinstrument import Profiler # pylint: disable=import-error
except ImportError:
print("Failed to run profiler, pyinstrument is not installed")
yield
return
profiler = Profiler()
profiler.start()
yield
profiler.stop()
if html_output:
profiler.open_in_browser()
return
print(profiler.output_text(unicode=True, color=True))
@contextmanager
def profile(dump_path: str = None):
"""Run a cprofile"""
import cProfile
prof = cProfile.Profile()
prof.enable()
yield
prof.disable()
if not dump_path:
prof.print_stats(sort="cumtime")
return
prof.dump_stats(dump_path)
@contextmanager
def debug():
try:
yield
except Exception: # pylint: disable=broad-except
try:
import ipdb as pdb # noqa: T100, pylint: disable=import-error
except ImportError:
import pdb # noqa: T100
pdb.post_mortem()
raise # prevent from jumping ahead
@contextmanager
def debugtools(args: "Namespace" = None, **kwargs):
kw = vars(args) if args else {}
kw.update(kwargs)
with ExitStack() as stack:
if kw.get("pdb"):
stack.enter_context(debug())
if kw.get("cprofile") or kw.get("cprofile_dump"):
stack.enter_context(profile(kw.get("cprofile_dump")))
if kw.get("instrument") or kw.get("instrument_open"):
stack.enter_context(instrument(kw.get("instrument_open", False)))
yield
def add_debugging_flags(parser):
from argparse import SUPPRESS
parser.add_argument(
"--cprofile", action="store_true", default=False, help=SUPPRESS
)
parser.add_argument("--cprofile-dump", help=SUPPRESS)
parser.add_argument(
"--pdb", action="store_true", default=False, help=SUPPRESS
)
parser.add_argument(
"--instrument", action="store_true", default=False, help=SUPPRESS
)
parser.add_argument(
"--instrument-open", action="store_true", default=False, help=SUPPRESS
)
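# Illustrative wiring (a sketch, not part of dvc; ``run_command`` below is a
# hypothetical entry point):
#
#   parser = argparse.ArgumentParser()
#   add_debugging_flags(parser)
#   args = parser.parse_args()
#   with debugtools(args):
#       run_command(args)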
| apache-2.0 | 1,679,155,082,171,922,400 | 24.384615 | 78 | 0.634632 | false |
JarbasAI/JarbasAI | mycroft/messagebus/api.py | 1 | 2458 | from mycroft.messagebus.message import Message
import time
__author__ = "jarbas"
class BusQuery():
def __init__(self, emitter, message_type, message_data=None,
message_context=None):
self.emitter = emitter
self.waiting = False
self.response = Message(None, None, None)
self.query_type = message_type
self.query_data = message_data
self.query_context = message_context
def _end_wait(self, message):
self.response = message
self.waiting = False
def _wait_response(self, timeout):
start = time.time()
elapsed = 0
self.waiting = True
while self.waiting and elapsed < timeout:
elapsed = time.time() - start
time.sleep(0.1)
self.waiting = False
def send(self, response_type=None, timeout=10):
self.response = Message(None, None, None)
if response_type is None:
response_type = self.query_type + ".reply"
self.add_response_type(response_type)
self.emitter.emit(
Message(self.query_type, self.query_data, self.query_context))
self._wait_response(timeout)
return self.response.data
def add_response_type(self, response_type):
self.emitter.once(response_type, self._end_wait)
def get_response_type(self):
return self.response.type
def get_response_data(self):
return self.response.data
def get_response_context(self):
return self.response.context
class BusResponder():
def __init__(self, emitter, response_type, response_data=None,
response_context=None, trigger_messages=None):
self.emitter = emitter
self.response_type = response_type
self.response_data = response_data
self.response_context = response_context
if trigger_messages is None:
trigger_messages = []
for message_type in trigger_messages:
self.listen(message_type)
def listen(self, message_type):
self.emitter.on(message_type, self.respond)
def update_response(self, data=None, context=None):
if data is not None:
self.response_data = data
if context is not None:
self.response_context = context
def respond(self, message):
self.emitter.emit(Message(self.response_type, self.response_data,
self.response_context))
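# Illustrative usage sketch (not part of this module; assumes ``emitter`` is an
# already-connected messagebus client and the message types are made up):
#
#   responder = BusResponder(emitter, "weather.request.reply", {"temp": 21},
#                            trigger_messages=["weather.request"])
#   data = BusQuery(emitter, "weather.request").send()
#   # send() waits (default 10 s) for "<query type>.reply" and returns its data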
| gpl-3.0 | 1,356,580,934,561,058,300 | 31.342105 | 74 | 0.617168 | false |
vlegoff/tsunami | src/primaires/affection/editeurs/__init__.py | 1 | 1618 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant les différents éditeurs"""
| bsd-3-clause | 621,346,735,999,611,600 | 49.5 | 79 | 0.780322 | false |
USGSDenverPychron/pychron | pychron/canvas/canvas2D/stage_canvas.py | 1 | 5114 | # ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.api import Instance, Tuple, Color, Bool, Any, Float, Property
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.canvas.canvas2D.crosshairs_overlay import CrosshairsOverlay, SimpleCrosshairsOverlay
from pychron.canvas.canvas2D.map_canvas import MapCanvas
DIRECTIONS = {'Left': ('x', -1), 'Right': ('x', 1),
'Down': ('y', -1), 'Up': ('y', 1)}
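# Each arrow key maps to an (axis, sign) pair; normal_key_pressed below uses it
# to issue a relative stage move along that axis (5 steps with Shift held,
# otherwise 1).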
class StageCanvas(MapCanvas):
crosshairs_overlay = Instance(SimpleCrosshairsOverlay)
crosshairs_color = Color('black')
stage_position = Property(depends_on='_stage_position')
_stage_position = Tuple(Float, Float)
desired_position = Property(depends_on='_desired_position')
_desired_position = Any
show_current_position = Bool(True)
current_position = Property(depends_on='cur_pos')
cur_pos = Tuple(Float(0), Float(0))
show_desired_position = Bool(True)
desired_position_color = Color('green')
def get_stage_screen_position(self):
return self.map_screen([self._stage_position])[0]
def get_stage_position(self):
return self._stage_position
def set_stage_position(self, x, y):
"""
"""
if x is not None and y is not None:
self._stage_position = (x, y)
self.invalidate_and_redraw()
def clear_desired_position(self):
self._desired_position = None
self.request_redraw()
def set_desired_position(self, x, y):
"""
"""
self._desired_position = (x, y)
self.request_redraw()
# ===============================================================================
# interactor
# ===============================================================================
def normal_mouse_move(self, event):
"""
"""
self.cur_pos = (event.x, event.y)
if self.valid_position(event.x, event.y):
event.window.set_pointer(self.cross_pointer)
else:
event.window.set_pointer(self.normal_pointer)
event.handled = True
# self.request_redraw()
def normal_mouse_enter(self, event):
"""
"""
event.window.set_pointer(self.cross_pointer)
event.handled = True
def normal_mouse_leave(self, event):
"""
"""
event.window.set_pointer(self.normal_pointer)
self.request_redraw()
event.handled = True
def normal_key_pressed(self, event):
c = event.character
if c in ('Left', 'Right', 'Up', 'Down'):
ax_key, direction = DIRECTIONS[c]
direction = self._calc_relative_move_direction(c, direction)
distance = 5 if event.shift_down else 1
self.stage_manager.relative_move(ax_key, direction, distance)
event.handled = True
elif c in ('a', 'A'):
self.stage_manager.accept_point()
def key_released(self, char):
"""
        called from outside by StageComponentEditor
"""
self.stage_manager.key_released()
# ===============================================================================
# private
# ===============================================================================
def _calc_relative_move_direction(self, char, direction):
return direction
def _add_crosshairs(self, klass=None):
if klass is None:
klass = CrosshairsOverlay
ch = klass(component=self)
self.crosshairs_overlay = ch
self.overlays.append(ch)
# ===============================================================================
# property get/set
# ===============================================================================
def _get_current_position(self):
md = self.map_data(self.cur_pos)
return self.cur_pos[0], md[0], self.cur_pos[1], md[1]
def _get_stage_position(self):
"""
"""
return self.map_screen([self._stage_position])[0]
def _get_desired_position(self):
"""
"""
if self._desired_position is not None:
x, y = self.map_screen([self._desired_position])[0]
return x, y
# ============= EOF =============================================
| apache-2.0 | 5,148,437,984,851,985,000 | 33.554054 | 97 | 0.51447 | false |
AndrasKovacs/dawg-gen | dawg_gen.py | 1 | 7469 | #!/usr/bin/env python
import os
import array as ar
import hashlib
from sys import argv
from copy import copy
from collections import defaultdict
from time import clock
######################## Read/check word list ###############################
print
if len(argv) != 2:
print "Usage: dawg_gen.py [word list path]"
exit(1)
filename = argv[1]
time = clock()
print "Checking word list...",
try:
wordlist = open(filename).read().split()
except IOError:
print "File not found."
exit(1)
if not all(all(c.isupper() for c in w) for w in wordlist) or any(b < a for a,b in zip(wordlist, wordlist[1:])):
print
print "Invalid word list; please include alphabetically sorted uppercase words delimited by space or newline."
exit(1)
print "OK".ljust(13),
print "finished in {:.4} seconds.".format(clock()-time)
######################## Build Trie #########################################
class SeqTrie(object):
def __init__(self, init = tuple(), is_end = False, val = "", end_of_list = False):
self.children = []
self.is_end = is_end
self.val = val
self.end_of_list = end_of_list
for x in init:
self.add(x)
def add(self, word):
for c in word:
if not self.children or self.children[-1].val != c: #only works on pre-sorted word lists!
self.children.append(SeqTrie())
self = self.children[-1]
self.val = c
self.is_end = True
def __iter__(self):
for x in self.children:
for y in x.__iter__():
yield y
yield self
t = clock()
print "Building trie...".ljust(35),
trie = SeqTrie(wordlist)
print "finished in {:.4} seconds.".format(clock()-t)
################### Generate hashes/merge nodes, ###########################
t = clock()
print "Merging redundant nodes...".ljust(35),
node_dict = {}
for x in trie:
hash_str = "".join((str(x.is_end), x.val, "".join(y.hash for y in x.children)))
x.hash = hashlib.md5(hash_str).digest()
if x.hash not in node_dict:
node_dict[x.hash] = x
for i,y in enumerate(x.children):
x.children[i] = node_dict[y.hash]
x.children = tuple(sorted(x.children))
clist_dict = {x.children: x.children for x in node_dict.itervalues()}
for x in node_dict.itervalues():
x.children = clist_dict[x.children]
print "finished in {:.4} seconds.".format(clock()-t)
########################## Merge child lists ###############################
t = clock()
print "Merging child lists...".ljust(35),
inverse_dict = defaultdict(list)
compress_dict = {x:[x] for x in clist_dict.itervalues() if x}
for clist in clist_dict.itervalues():
for node in clist:
inverse_dict[node].append(clist)
for x in inverse_dict:
inverse_dict[x].sort( key = lambda x: (len(x), sum(len(inverse_dict[y]) for y in x) ))
for clist in sorted(compress_dict.keys(), key = lambda x:(len(x), -1*sum(len(inverse_dict[y]) for y in x)), reverse=True):
for other in min((inverse_dict[x] for x in clist), key = len):
if compress_dict[other] and set(clist) < set(compress_dict[other][-1]):
compress_dict[other].append(clist)
compress_dict[clist] = False
break
compress_dict = {x:l for x,l in compress_dict.iteritems() if l}
print "finished in {:.4} seconds.".format(clock()-t)
#################### Create compressed trie structure #######################
t = clock()
print "Creating compressed node array...".ljust(35),
end_node = SeqTrie(init = (), is_end = False, val = "", end_of_list = True)
end_node.children = ()
array = [0,]*(sum(len(x[0]) for x in compress_dict.itervalues()) + 1)
clist_indices = {}
array[0] = end_node
clist_indices[()] = 0
pos = 1
for stuff in compress_dict.itervalues():
if len(stuff) > 1:
sort = [0]*26
for i, clist in enumerate(stuff):
for y in clist:
sort[ord(y.val) - ord('A')] = (i, y)
stuff.append([n for i,n in sorted(x for x in sort if x)])
for clist in stuff[:-1]:
clist_indices[clist] = pos + len(stuff[0]) - len(clist)
else:
clist_indices[stuff[0]] = pos
clist = stuff[-1]
array[pos:pos+len(clist)] = map(copy, clist)
pos += len(clist)
array[pos-1].end_of_list = True
for x in array:
x.children = clist_indices[x.children]
root = clist_indices[trie.children]
root_node = SeqTrie(init = (), is_end = False, val = "", end_of_list = True)
root_node.children = root
array.append(root_node)
print "finished in {:.4} seconds.".format(clock()-t)
######################### check trie ###################################
t = clock()
print "Checking output correctness...",
def extract_words(array, i=root, carry = ""):
node = array[i]
if not node.val:
return
while True:
for x in extract_words(array, node.children, carry + node.val):
yield x
if node.is_end:
yield carry + node.val
if node.end_of_list: break
i += 1
node = array[i]
if set(extract_words(array)) == set(wordlist):
print "OK".ljust(4), "finished in {:.4} seconds.".format(clock()-t)
else:
print "INVALID OUTPUT: trie does not match original word list."
exit(1)
print
print "Compression finished in {:.4} seconds.".format(clock()-time)
print "Number of nodes:", len(array)
print
################## export as bitpacked array binaries #########################
def prompt_filename():
while True:
inp = raw_input("Enter export path: ")
if os.path.exists(inp):
while True:
choice = raw_input("File already exists. Overwrite? ")
if choice in ('y', 'Y'): return inp
if choice in ('n', 'N'): break
else:
return inp
def prompt_packing_mode():
ok_3b = len(array) <= 2**17
ok_4b = len(array) <= 2**22
while True:
print
print "Choose node size:"
print " (3) bytes" + " -> UNAVAILABLE: number of nodes above 2**17-1 or 131071"*(not ok_3b)
print " (4) bytes" + " -> UNAVAILABLE: number of nodes above 2**22-1 or 4194303"*(not ok_4b)
print
mode = raw_input("> ")
if mode in ok_3b*"3" + ok_4b*"4":
return mode
inp = prompt_filename()
mode = prompt_packing_mode()
t = clock()
print
print "Exporting as bit-packed array...",
if mode == "4":
output = ar.array('L', [0]*len(array))
for i,x in enumerate(array):
output[i] |= (x.children << 10)
output[i] |= ((ord(x.val) if x.val else 0) << 2)
output[i] |= (x.end_of_list<<1)
output[i] |= (x.is_end)
outfile = open(inp, "wb")
output.tofile(outfile)
outfile.close()
print "finished in {:.4} seconds.".format(clock()-t)
elif mode == "3":
output = ar.array('B', [0]*(len(array)*3))
for i,x in enumerate(array):
i *= 3
a, b, c = i, i+1, i+2
output[a] = (x.children & 0b00000000011111111)
output[b] = (x.children & 0b01111111100000000) >> 8
output[c] = (x.children & 0b10000000000000000) >> 9
output[c] |= ((ord(x.val) - ord('A') + 1 if x.val else 0) << 2) # 0 is reserved for root and end-of-trie nodes
output[c] |= (x.end_of_list<<1)
output[c] |= (x.is_end)
outfile = open(inp, "wb")
output.tofile(outfile)
outfile.close()
print "finished in {:.4} seconds.".format(clock()-t)
| mit | -984,014,338,422,908,400 | 28.405512 | 122 | 0.560718 | false |
ministryofjustice/collectd-ntp | setup.py | 1 | 1397 | import os
from setuptools import setup
def pkg_dir(path):
return os.path.join(os.path.dirname(__file__), path)
with open(pkg_dir('VERSION'), 'r') as f:
version = f.read().strip()
with open(pkg_dir('README.rst'), 'r') as f:
readme = f.read()
setup(
name='collectd-ntp',
version=version,
py_modules=['ntpoffset'],
install_requires=['ntplib>=0.3.3,<1', 'dnspython>=1.12.0,<2'],
author='Andy Driver',
author_email='[email protected]',
maintainer='MOJDS',
url='https://github.com/ministryofjustice/collectd-ntp',
description='NTP offsets plugin for collectd',
long_description=readme,
license='LICENSE',
keywords=['python', 'ministryofjustice', 'collectd', 'ntp'],
test_suite='tests',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Development Status :: 3 - Alpha',
'Environment :: Plugins',
'Intended Audience :: Developers',
'Natural Language :: English',
'Topic :: Internet',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Monitoring',
'Topic :: System :: Networking :: Monitoring',
'Topic :: System :: Networking :: Time Synchronization']
)
| mit | 2,740,606,144,956,291,000 | 30.044444 | 71 | 0.614173 | false |
arruah/ensocoin | qa/rpc-tests/txPerf.py | 1 | 10160 | #!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Unlimited developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test re-org scenarios with a mempool that contains transactions
# that spend (directly or indirectly) coinbase transactions.
#
import pdb
import binascii
import time
import math
import json
import logging
logging.basicConfig(format='%(asctime)s.%(levelname)s: %(message)s', level=logging.INFO)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class TransactionPerformanceTest(BitcoinTestFramework):
def setup_chain(self,bitcoinConfDict=None, wallets=None):
logging.info("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 3, bitcoinConfDict, wallets)
def setup_network(self, split=False):
self.nodes = start_nodes(3, self.options.tmpdir,timewait=60*60)
#connect to a local machine for debugging
#url = "http://bitcoinrpc:DP6DvqZtqXarpeNWyN3LZTFchCCyCUuHwNF7E8pX99x1@%s:%d" % ('127.0.0.1', 17992)
#proxy = AuthServiceProxy(url)
#proxy.url = url # store URL on proxy for info
#self.nodes.append(proxy)
# Connect each node to the other
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def generate_utxos(self,node, count,amt=0.1):
if type(node) == type(0): # Convert a node index to a node object
node = self.nodes[node]
addrs = []
for i in range(0,count):
addr = node.getnewaddress()
addrs.append(addr)
node.sendtoaddress(addr, amt)
node.generate(1)
self.sync_all()
def signingPerformance(self,node, inputs,outputs,skip=100):
fil = open("signPerf.csv","w")
logging.info("tx len, # inputs, # outputs, time")
print ("fieldNames = ['tx len', '# inputs', '# outputs', 'time']", file=fil)
print ("data = [", file=fil)
for i in range(0,len(inputs),skip):
for j in range(0,len(outputs),skip):
try:
if i==0: i=1
if j==0: j=1
(txn,inp,outp,txid) = split_transaction(node, inputs[0:i], outputs[0:j], txfee=DEFAULT_TX_FEE_PER_BYTE*10, sendtx=False)
                except Exception as e:
logging.info("%d, %d, %d, split error" % (txLen,len(inp),len(outp)))
print("[",txLen,",",len(inp),",",len(outp),",",'"split error:"', str(e),'"],', file=fil)
continue
try:
s = str(txn)
#print ("tx len: ", len(s))
start=time.time()
signedtxn = node.signrawtransaction(s)
end=time.time()
txLen = len(binascii.unhexlify(signedtxn["hex"])) # Get the actual transaction size for better tx fee estimation the next time around
logging.info("%d, %d, %d, %f" % (txLen,len(inp),len(outp),end-start))
print("[",txLen,",",len(inp),",",len(outp),",",end-start,"],",file=fil)
except:
logging.info("%d, %d, %d, %s" % (txLen,len(inp),len(outp),'"timeout"'))
print (txLen,",",len(inp),",",len(outp),",","timeout")
print ("[",txLen,",",len(inp),",",len(outp),",",'"timeout"],',file=fil)
fil.flush()
print("]",file=fil)
fil.close()
def validatePerformance(self,node, inputCount,outputs,skip=100):
fil = open("validatePerf.csv","w")
print("tx len, # inputs, # outputs, time")
print ("fieldNames = ['tx len', '# inputs', '# outputs', 'time']",file=fil)
print ("data = [",file=fil)
for i in range(0,inputCount,skip):
for j in range(0,len(outputs),skip):
print("ITER: ", i, " x ", j)
wallet = node.listunspent()
wallet.sort(key=lambda x: x["amount"],reverse=True)
while len(wallet) < i: # Make a bunch more inputs
(txn,inp,outp,txid) = split_transaction(node, [wallet[0]], outputs, txfee=DEFAULT_TX_FEE_PER_BYTE*10)
self.sync_all()
wallet = node.listunspent()
wallet.sort(key=lambda x: x["amount"],reverse=True)
try:
if i==0: i=1
if j==0: j=1
(txn,inp,outp,txid) = split_transaction(node, wallet[0:i], outputs[0:j], txfee=DEFAULT_TX_FEE_PER_BYTE*10, sendtx=True)
                except Exception as e:
logging.info("split error: %s" % str(e))
print("[ 'sign',",0,",",i,",",j,",","'split error:", str(e),"'],",file=fil)
pdb.set_trace()
continue
time.sleep(4) # give the transaction time to propagate so we generate tx validation data separately from block validation data
startTime = time.time()
node.generate(1)
elapsedTime = time.time() - startTime
logging.info("generate time: %f" % elapsedTime)
txLen = len(binascii.unhexlify(txn)) # Get the actual transaction size for better tx fee estimation the next time around
print("[ 'gen',",txLen,",",len(inp),",",len(outp),",",elapsedTime,"],",file=fil)
startTime = time.time()
self.sync_all()
elapsedTime = time.time() - startTime
logging.info("Sync time: %f" % elapsedTime)
print("[ 'sync',",txLen,",",len(inp),",",len(outp),",",elapsedTime,"],",file=fil)
print("]",file=fil)
fil.close()
def largeOutput(self):
"""This times the validation of 1 to many and many to 1 transactions. Its not needed to be run as a daily unit test"""
print("synchronizing")
self.sync_all()
node = self.nodes[0]
start = time.time()
print("generating addresses")
if 1:
addrs = [ node.getnewaddress() for _ in range(20000)]
f = open("addrs.txt","w")
f.write(str(addrs))
f.close()
print("['Benchmark', 'generate 20000 addresses', %f]" % (time.time()-start))
else:
import addrlist
addrs = addrlist.addrlist
wallet = node.listunspent()
wallet.sort(key=lambda x: x["amount"],reverse=True)
(txn,inp,outp,txid) = split_transaction(node, wallet[0], addrs[0:10000], txfee=DEFAULT_TX_FEE_PER_BYTE, sendtx=True)
txLen = len(binascii.unhexlify(txn)) # Get the actual transaction size for better tx fee estimation the next time around
print("[ 'gen',",txLen,",",len(inp),",",len(outp), "],")
startTime = time.time()
node.generate(1)
elapsedTime = time.time() - startTime
print ("Generate time: ", elapsedTime)
startTime = time.time()
print ("synchronizing")
self.sync_all()
elapsedTime = time.time() - startTime
print("Sync time: ", elapsedTime)
# Now join with a tx with a huge number of inputs
wallet = self.nodes[0].listunspent()
wallet.sort(key=lambda x: x["amount"])
(txn,inp,outp,txid) = split_transaction(node, wallet[0:10000], [addrs[0]], txfee=DEFAULT_TX_FEE_PER_BYTE, sendtx=True)
txLen = len(binascii.unhexlify(txn)) # Get the actual transaction size for better tx fee estimation the next time around
print("[ 'gen',",txLen,",",len(inp),",",len(outp), "],")
def run_test(self):
TEST_SIZE=200 # To collect a lot of data points, set the TEST_SIZE to 2000
#prepare some coins for multiple *rawtransaction commands
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(100)
self.sync_all()
self.nodes[2].generate(21) # So we can access 10 txouts from nodes[0]
self.sync_all()
# This times the validation of 1 to many and many to 1 transactions. Its not needed to be run as a unit test
# self.largeOutput()
print("Generating new addresses... will take awhile")
start = time.time()
addrs = [ self.nodes[0].getnewaddress() for _ in range(TEST_SIZE+1)]
print("['Benchmark', 'generate 2001 addresses', %f]" % (time.time()-start))
wallet = self.nodes[0].listunspent()
wallet.sort(key=lambda x: x["amount"],reverse=True)
for w in wallet[0:2]:
split_transaction(self.nodes[0], [w], addrs)
self.nodes[0].generate(1)
self.sync_all()
#tips = self.nodes[0].getchaintips()
#print ("TIPS:\n", tips)
#lastBlock = self.nodes[0].getblock(tips[0]["hash"])
#print ("LAST BLOCK:\n", lastBlock)
#txoutsetinfo = self.nodes[0].gettxoutsetinfo()
#print ("UTXOS:\n", txoutsetinfo)
self.nodes[0].generate(1)
self.sync_all()
wallet = self.nodes[0].listunspent()
wallet.sort(key=lambda x: x["amount"],reverse=True)
logging.info("wallet length: %d" % len(wallet))
logging.info("addrs length: %d" % len(addrs))
# To collect a lot of data points, set the interval to 100 or even 10 and run overnight
interval = 100 # TEST_SIZE/2
# self.signingPerformance(self.nodes[0], wallet[0:TEST_SIZE],addrs[0:TEST_SIZE],interval)
self.validatePerformance(self.nodes[0], TEST_SIZE,addrs,interval)
if __name__ == '__main__':
tpt = TransactionPerformanceTest()
bitcoinConf = {
"debug":["net","blk","thin","lck","mempool","req","bench","evict"],
"blockprioritysize":2000000 # we don't want any transactions rejected due to insufficient fees...
}
tpt.main(["--nocleanup"],bitcoinConf)
def Test():
tpt = TransactionPerformanceTest()
bitcoinConf = {
"debug":["bench"],
"blockprioritysize":2000000 # we don't want any transactions rejected due to insufficient fees...
}
tpt.main(["--nocleanup","--tmpdir=/ramdisk/test"],bitcoinConf)
| mit | -7,180,355,255,265,386,000 | 40.8107 | 148 | 0.584055 | false |
idiles/opendict | lib/extra/html2text.py | 1 | 4084 | """
html2text.py
convert an html doc to text
"""
# system libraries
import os, sys, string, time, getopt
import re
WIDTH = 80
def tag_replace (data,center,indent, use_ansi = 0):
data = re.sub ("\s+", " ", data)
data = re.sub ("(?s)<!--.*?-->", "", data)
data = string.replace (data, "\n", " ")
output = []
# modified 6/17/99 splits on all cases of "img" tags
# imgs = re.split ("(?s)(<img.*?>)", data)
imgs = re.split ("(?si)(<img.*?>)", data)
for img in imgs:
if string.lower(img[:4]) == "<img":
alt = re.search ("(?si)alt\s*=\s*\"([^\"]*)\"", img)
if not alt:
alt = re.search ("(?si)alt\s*=([^\s]*)", img)
if alt:
output.append ("%s" % img[alt.start(1):alt.end(1)])
else:
output.append ("[img]")
else:
output.append (img)
data = string.join (output, "")
data = re.sub ("(?i)<br>", "\n", data)
data = re.sub ("(?i)<hr[^>]*>", "\n" + "-"*50 + "\n", data)
data = re.sub ("(?i)<li>", "\n* ", data)
if use_ansi:
data = re.sub ("(?i)<h[0-9]>", "\n[32m", data)
else:
data = re.sub ("(?i)<h[0-9]>", "\n", data)
if use_ansi:
data = re.sub ("(?i)</h[0-9]>", "[0m\n", data)
else:
data = re.sub ("(?i)</h[0-9]>", "\n", data)
data = re.sub ("(?i)<ul>", "\n<UL>\n", data)
data = re.sub ("(?i)</ul>", "\n</UL>\n", data)
data = re.sub ("(?i)<center>", "\n<CENTER>\n", data)
data = re.sub ("(?i)</center>", "\n</CENTER>\n", data)
data = re.sub ("(?i)</div>", "\n", data)
if use_ansi:
data = re.sub ("(?i)<b>", "[1m", data)
data = re.sub ("(?i)</b>", "[0m", data)
data = re.sub ("(?i)<i>", "[2m", data)
data = re.sub ("(?i)</i>", "[0m", data)
data = re.sub ("(?i)<title>", "\n<CENTER>\n[31m", data)
data = re.sub ("(?i)</title>", "[0m\n</CENTER>\n", data)
else:
data = re.sub ("(?i)<title>", "\n<CENTER>\n", data)
data = re.sub ("(?i)</title>", "\n</CENTER>\n", data)
data = re.sub ("(?i)<p>", "\n", data)
data = re.sub ("(?i)<tr[^>]*>", "\n", data)
data = re.sub ("(?i)</table>", "\n", data)
data = re.sub ("(?i)<td[^>]*>", "\t", data)
data = re.sub ("(?i)<th[^>]*>", "\t", data)
data = re.sub (" *\n", "\n", data)
lines = string.split (data, "\n")
output = []
for line in lines:
if line == "<UL>":
indent = indent + 1
elif line == "</UL>":
indent = indent - 1
if indent < 0: indent = 0
elif line == "<CENTER>":
center = center + 1
elif line == "</CENTER>":
center = center - 1
if center < 0: center = 0
else:
if center:
line = " "*indent + string.strip(line)
nline = re.sub("\[.*?m", "", line)
nline = re.sub ("<[^>]*>", "", nline)
c = WIDTH/2 - (len (nline) / 2)
output.append (" "*c + line)
else:
output.append (" "*indent + line)
data = string.join (output, "\n")
data = re.sub (" *\n", "\n", data)
data = re.sub ("\n\n\n*", "\n\n", data)
data = re.sub ("<[^>]*>", "", data)
return (data, center, indent)
def html2text (data, use_ansi = 0, is_latin1 = 0):
pre = re.split("(?s)(<pre>[^<]*</pre>)", data)
out = []
indent = 0
center = 0
for part in pre:
if part[:5] != "<pre>":
(res, center, indent) = tag_replace (part,center,indent, use_ansi)
out.append (res)
else:
part = re.sub("(?i)</*pre>", "", part)
out.append (part)
data = string.join (out)
data = re.sub (">", ">", data)
data = re.sub ("<", "<", data)
data = re.sub (" ", " ", data)
return data
def usage(progname):
print "usage: %s --help <htmlfile>" % progname
print __doc__
def main(argc, argv):
progname = argv[0]
alist, args = getopt.getopt(argv[1:], "", ["help"])
for (field, val) in alist:
if field == "--help":
usage(progname)
return
if len(args):
file = args[0]
else:
return
progname = argv[0]
fp = open (file)
data = fp.read()
fp.close()
if data:
print (html2text(data))
else:
print "Document contained no data"
if __name__ == "__main__":
main(len(sys.argv), sys.argv)
| gpl-2.0 | 3,743,144,919,791,192,600 | 26.594595 | 72 | 0.481636 | false |
xuru/pyvisdk | pyvisdk/do/host_authentication_manager_info.py | 1 | 1086 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def HostAuthenticationManagerInfo(vim, *args, **kwargs):
'''The HostAuthenticationManagerInfo data object provides access to authentication
information for the ESX host.'''
obj = vim.client.factory.create('ns0:HostAuthenticationManagerInfo')
# do some validation checking...
if (len(args) + len(kwargs)) < 1:
        raise IndexError('Expected at least 1 argument got: %d' % len(args))
required = [ 'authConfig' ]
optional = [ 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
| mit | 8,138,604,160,572,094,000 | 30.970588 | 124 | 0.616943 | false |
timpalpant/calibre | src/calibre/ebooks/metadata/cli.py | 1 | 9135 | from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal [email protected]'
__docformat__ = 'restructuredtext en'
'''
ebook-meta
'''
import sys, os
from calibre.utils.config import StringConfig
from calibre.customize.ui import metadata_readers, metadata_writers, force_identifiers
from calibre.ebooks.metadata.meta import get_metadata, set_metadata
from calibre.ebooks.metadata import string_to_authors, authors_to_sort_string, \
title_sort, MetaInformation
from calibre.ebooks.lrf.meta import LRFMetaFile
from calibre import prints
from calibre.utils.date import parse_date
USAGE=_('%prog ebook_file [options]\n') + \
_('''
Read/Write metadata from/to ebook files.
Supported formats for reading metadata: {0}
Supported formats for writing metadata: {1}
Different file types support different kinds of metadata. If you try to set
some metadata on a file type that does not support it, the metadata will be
silently ignored.
''')
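# Illustrative invocations (file names and metadata values are made up):
#
#   ebook-meta book.epub
#   ebook-meta book.epub -t "New Title" -a "Jane Doe & John Roe"
#   ebook-meta book.epub --identifier isbn:9781234567897 --to-opf metadata.opf
#   ebook-meta book.epub --get-cover cover.jpg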
def config():
c = StringConfig('')
c.add_opt('title', ['-t', '--title'],
help=_('Set the title.'))
c.add_opt('authors', ['-a', '--authors'],
help=_('Set the authors. Multiple authors should be separated '
'by the & character. Author names should be in the order '
'Firstname Lastname.'))
c.add_opt('title_sort', ['--title-sort'],
help=_('The version of the title to be used for sorting. '
'If unspecified, and the title is specified, it will '
'be auto-generated from the title.'))
c.add_opt('author_sort', ['--author-sort'],
help=_('String to be used when sorting by author. '
'If unspecified, and the author(s) are specified, it will '
'be auto-generated from the author(s).'))
c.add_opt('cover', ['--cover'],
help=_('Set the cover to the specified file.'))
c.add_opt('comments', ['-c', '--comments'],
help=_('Set the ebook description.'))
c.add_opt('publisher', ['-p', '--publisher'],
help=_('Set the ebook publisher.'))
c.add_opt('category', ['--category'],
help=_('Set the book category.'))
c.add_opt('series', ['-s', '--series'],
help=_('Set the series this ebook belongs to.'))
c.add_opt('series_index', ['-i', '--index'],
help=_('Set the index of the book in this series.'))
c.add_opt('rating', ['-r', '--rating'],
help=_('Set the rating. Should be a number between 1 and 5.'))
c.add_opt('isbn', ['--isbn'],
help=_('Set the ISBN of the book.'))
c.add_opt('identifiers', ['--identifier'], action='append',
help=_('Set the identifiers for the book, can be specified multiple times.'
' For example: --identifier uri:http://acme.com --identifier isbn:12345'
' To remove an identifier, specify no value, --identifier isbn:'
' Note that for EPUB files, an identifier marked as the package identifier cannot be removed.'))
c.add_opt('tags', ['--tags'],
help=_('Set the tags for the book. Should be a comma separated list.'))
c.add_opt('book_producer', ['-k', '--book-producer'],
help=_('Set the book producer.'))
c.add_opt('language', ['-l', '--language'],
help=_('Set the language.'))
c.add_opt('pubdate', ['-d', '--date'],
help=_('Set the published date.'))
c.add_opt('get_cover', ['--get-cover'],
help=_('Get the cover from the ebook and save it at as the '
'specified file.'))
c.add_opt('to_opf', ['--to-opf'],
help=_('Specify the name of an OPF file. The metadata will '
'be written to the OPF file.'))
c.add_opt('from_opf', ['--from-opf'],
help=_('Read metadata from the specified OPF file and use it to '
'set metadata in the ebook. Metadata specified on the '
'command line will override metadata read from the OPF file'))
c.add_opt('lrf_bookid', ['--lrf-bookid'],
help=_('Set the BookID in LRF files'))
return c
def filetypes():
readers = set([])
for r in metadata_readers():
readers = readers.union(set(r.file_types))
return readers
def option_parser():
writers = set([])
for w in metadata_writers():
writers = writers.union(set(w.file_types))
ft, w = ', '.join(sorted(filetypes())), ', '.join(sorted(writers))
return config().option_parser(USAGE.format(ft, w))
def do_set_metadata(opts, mi, stream, stream_type):
mi = MetaInformation(mi)
for x in ('guide', 'toc', 'manifest', 'spine'):
setattr(mi, x, None)
from_opf = getattr(opts, 'from_opf', None)
if from_opf is not None:
from calibre.ebooks.metadata.opf2 import OPF
opf_mi = OPF(open(from_opf, 'rb')).to_book_metadata()
mi.smart_update(opf_mi)
for pref in config().option_set.preferences:
if pref.name in ('to_opf', 'from_opf', 'authors', 'title_sort',
'author_sort', 'get_cover', 'cover', 'tags',
'lrf_bookid', 'identifiers'):
continue
val = getattr(opts, pref.name, None)
if val is not None:
setattr(mi, pref.name, val)
if getattr(opts, 'authors', None) is not None:
mi.authors = string_to_authors(opts.authors)
mi.author_sort = authors_to_sort_string(mi.authors)
if getattr(opts, 'author_sort', None) is not None:
mi.author_sort = opts.author_sort
if getattr(opts, 'title_sort', None) is not None:
mi.title_sort = opts.title_sort
elif getattr(opts, 'title', None) is not None:
mi.title_sort = title_sort(opts.title)
if getattr(opts, 'tags', None) is not None:
mi.tags = [t.strip() for t in opts.tags.split(',')]
if getattr(opts, 'series', None) is not None:
mi.series = opts.series.strip()
if getattr(opts, 'series_index', None) is not None:
mi.series_index = float(opts.series_index.strip())
if getattr(opts, 'pubdate', None) is not None:
mi.pubdate = parse_date(opts.pubdate, assume_utc=False, as_utc=False)
if getattr(opts, 'identifiers', None):
val = {k.strip():v.strip() for k, v in (x.partition(':')[0::2] for x in opts.identifiers)}
if val:
orig = mi.get_identifiers()
orig.update(val)
val = {k:v for k, v in orig.iteritems() if k and v}
mi.set_identifiers(val)
if getattr(opts, 'cover', None) is not None:
ext = os.path.splitext(opts.cover)[1].replace('.', '').upper()
mi.cover_data = (ext, open(opts.cover, 'rb').read())
with force_identifiers:
set_metadata(stream, mi, stream_type)
def main(args=sys.argv):
parser = option_parser()
opts, args = parser.parse_args(args)
if len(args) < 2:
parser.print_help()
prints(_('No file specified'), file=sys.stderr)
return 1
path = args[1]
stream_type = os.path.splitext(path)[1].replace('.', '').lower()
trying_to_set = False
for pref in config().option_set.preferences:
if pref.name in ('to_opf', 'get_cover'):
continue
if getattr(opts, pref.name) is not None:
trying_to_set = True
break
with open(path, 'rb') as stream:
mi = get_metadata(stream, stream_type, force_read_metadata=True)
if trying_to_set:
prints(_('Original metadata')+'::')
metadata = unicode(mi)
if trying_to_set:
metadata = '\t'+'\n\t'.join(metadata.split('\n'))
prints(metadata, safe_encode=True)
if trying_to_set:
with open(path, 'r+b') as stream:
do_set_metadata(opts, mi, stream, stream_type)
stream.seek(0)
stream.flush()
lrf = None
if stream_type == 'lrf':
if opts.lrf_bookid is not None:
lrf = LRFMetaFile(stream)
lrf.book_id = opts.lrf_bookid
mi = get_metadata(stream, stream_type, force_read_metadata=True)
prints('\n' + _('Changed metadata') + '::')
metadata = unicode(mi)
metadata = '\t'+'\n\t'.join(metadata.split('\n'))
prints(metadata, safe_encode=True)
if lrf is not None:
prints('\tBookID:', lrf.book_id)
if opts.to_opf is not None:
from calibre.ebooks.metadata.opf2 import OPFCreator
opf = OPFCreator(os.getcwdu(), mi)
with open(opts.to_opf, 'wb') as f:
opf.render(f)
prints(_('OPF created in'), opts.to_opf)
if opts.get_cover is not None:
if mi.cover_data and mi.cover_data[1]:
with open(opts.get_cover, 'wb') as f:
f.write(mi.cover_data[1])
prints(_('Cover saved to'), f.name)
else:
prints(_('No cover found'), file=sys.stderr)
return 0
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 | 4,562,069,038,732,612,600 | 40.148649 | 117 | 0.573508 | false |
rs2/pandas | pandas/core/indexers.py | 1 | 14164 | """
Low-dependency indexing utilities.
"""
import warnings
import numpy as np
from pandas._typing import Any, AnyArrayLike
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
is_extension_array_dtype,
is_integer,
is_integer_dtype,
is_list_like,
)
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
# -----------------------------------------------------------
# Indexer Identification
def is_valid_positional_slice(slc: slice) -> bool:
"""
Check if a slice object can be interpreted as a positional indexer.
Parameters
----------
slc : slice
Returns
-------
bool
Notes
-----
A valid positional slice may also be interpreted as a label-based slice
depending on the index being sliced.
"""
def is_int_or_none(val):
return val is None or is_integer(val)
return (
is_int_or_none(slc.start)
and is_int_or_none(slc.stop)
and is_int_or_none(slc.step)
)
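# Illustrative behaviour (not part of the original module): slice(0, 5, 2) and
# slice(None) are valid positional slices, while slice("a", "f") is not.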
def is_list_like_indexer(key) -> bool:
"""
Check if we have a list-like indexer that is *not* a NamedTuple.
Parameters
----------
key : object
Returns
-------
bool
"""
# allow a list_like, but exclude NamedTuples which can be indexers
return is_list_like(key) and not (isinstance(key, tuple) and type(key) is not tuple)
def is_scalar_indexer(indexer, ndim: int) -> bool:
"""
Return True if we are all scalar indexers.
Parameters
----------
indexer : object
ndim : int
Number of dimensions in the object being indexed.
Returns
-------
bool
"""
if isinstance(indexer, tuple):
if len(indexer) == ndim:
return all(
is_integer(x) or (isinstance(x, np.ndarray) and x.ndim == len(x) == 1)
for x in indexer
)
return False
def is_empty_indexer(indexer, arr_value: np.ndarray) -> bool:
"""
Check if we have an empty indexer.
Parameters
----------
indexer : object
arr_value : np.ndarray
Returns
-------
bool
"""
if is_list_like(indexer) and not len(indexer):
return True
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer)
return False
# -----------------------------------------------------------
# Indexer Validation
def check_setitem_lengths(indexer, value, values) -> bool:
"""
Validate that value and indexer are the same length.
An special-case is allowed for when the indexer is a boolean array
and the number of true values equals the length of ``value``. In
this case, no exception is raised.
Parameters
----------
indexer : sequence
Key for the setitem.
value : array-like
Value for the setitem.
values : array-like
Values being set into.
Returns
-------
bool
Whether this is an empty listlike setting which is a no-op.
Raises
------
ValueError
When the indexer is an ndarray or list and the lengths don't match.
"""
no_op = False
if isinstance(indexer, (np.ndarray, list)):
# We can ignore other listlikes because they are either
# a) not necessarily 1-D indexers, e.g. tuple
# b) boolean indexers e.g. BoolArray
if is_list_like(value):
if len(indexer) != len(value):
# boolean with truth values == len of the value is ok too
if not (
isinstance(indexer, np.ndarray)
and indexer.dtype == np.bool_
and len(indexer[indexer]) == len(value)
):
raise ValueError(
"cannot set using a list-like indexer "
"with a different length than the value"
)
if not len(indexer):
no_op = True
elif isinstance(indexer, slice):
if is_list_like(value):
if len(value) != length_of_indexer(indexer, values):
raise ValueError(
"cannot set using a slice indexer with a "
"different length than the value"
)
if not len(value):
no_op = True
return no_op
def validate_indices(indices: np.ndarray, n: int) -> None:
"""
Perform bounds-checking for an indexer.
-1 is allowed for indicating missing values.
Parameters
----------
indices : ndarray
n : int
Length of the array being indexed.
Raises
------
ValueError
Examples
--------
>>> validate_indices([1, 2], 3)
# OK
>>> validate_indices([1, -2], 3)
ValueError
>>> validate_indices([1, 2, 3], 3)
IndexError
>>> validate_indices([-1, -1], 0)
# OK
>>> validate_indices([0, 1], 0)
IndexError
"""
if len(indices):
min_idx = indices.min()
if min_idx < -1:
msg = f"'indices' contains values less than allowed ({min_idx} < -1)"
raise ValueError(msg)
max_idx = indices.max()
if max_idx >= n:
raise IndexError("indices are out-of-bounds")
# -----------------------------------------------------------
# Indexer Conversion
def maybe_convert_indices(indices, n: int):
"""
Attempt to convert indices into valid, positive indices.
If we have negative indices, translate to positive here.
If we have indices that are out-of-bounds, raise an IndexError.
Parameters
----------
indices : array-like
Array of indices that we are to convert.
n : int
Number of elements in the array that we are indexing.
Returns
-------
array-like
An array-like of positive indices that correspond to the ones
that were passed in initially to this function.
Raises
------
IndexError
        One of the converted indices either exceeded the number of
        elements (specified by `n`), or was still negative.
"""
if isinstance(indices, list):
indices = np.array(indices)
if len(indices) == 0:
# If `indices` is empty, np.array will return a float,
# and will cause indexing errors.
return np.empty(0, dtype=np.intp)
mask = indices < 0
if mask.any():
indices = indices.copy()
indices[mask] += n
mask = (indices >= n) | (indices < 0)
if mask.any():
raise IndexError("indices are out-of-bounds")
return indices
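# Illustrative behaviour (not part of the original module):
#   maybe_convert_indices(np.array([-1, 1]), n=3) -> array([2, 1])
#   maybe_convert_indices(np.array([1, 3]), n=3)  -> raises IndexError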
# -----------------------------------------------------------
# Unsorted
def length_of_indexer(indexer, target=None) -> int:
"""
Return the expected length of target[indexer]
Returns
-------
int
"""
if target is not None and isinstance(indexer, slice):
target_len = len(target)
start = indexer.start
stop = indexer.stop
step = indexer.step
if start is None:
start = 0
elif start < 0:
start += target_len
if stop is None or stop > target_len:
stop = target_len
elif stop < 0:
stop += target_len
if step is None:
step = 1
elif step < 0:
start, stop = stop + 1, start + 1
step = -step
return (stop - start + step - 1) // step
elif isinstance(indexer, (ABCSeries, ABCIndexClass, np.ndarray, list)):
if isinstance(indexer, list):
indexer = np.array(indexer)
if indexer.dtype == bool:
# GH#25774
return indexer.sum()
return len(indexer)
elif not is_list_like_indexer(indexer):
return 1
raise AssertionError("cannot find the length of the indexer")
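# Illustrative behaviour (not part of the original module):
#   length_of_indexer(slice(None, None, 2), np.arange(10)) -> 5
#   length_of_indexer(np.array([True, False, True]))       -> 2
#   length_of_indexer([0, 1, 2])                            -> 3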
def deprecate_ndim_indexing(result, stacklevel=3):
"""
Helper function to raise the deprecation warning for multi-dimensional
indexing on 1D Series/Index.
GH#27125 indexer like idx[:, None] expands dim, but we cannot do that
and keep an index, so we currently return ndarray, which is deprecated
(Deprecation GH#30588).
"""
if np.ndim(result) > 1:
warnings.warn(
"Support for multi-dimensional indexing (e.g. `obj[:, None]`) "
"is deprecated and will be removed in a future "
"version. Convert to a numpy array before indexing instead.",
FutureWarning,
stacklevel=stacklevel,
)
def unpack_1tuple(tup):
"""
If we have a length-1 tuple/list that contains a slice, unpack to just
the slice.
Notes
-----
The list case is deprecated.
"""
if len(tup) == 1 and isinstance(tup[0], slice):
# if we don't have a MultiIndex, we may still be able to handle
# a 1-tuple. see test_1tuple_without_multiindex
if isinstance(tup, list):
# GH#31299
warnings.warn(
"Indexing with a single-item list containing a "
"slice is deprecated and will raise in a future "
"version. Pass a tuple instead.",
FutureWarning,
stacklevel=3,
)
return tup[0]
return tup
# -----------------------------------------------------------
# Public indexer validation
def check_array_indexer(array: AnyArrayLike, indexer: Any) -> Any:
"""
Check if `indexer` is a valid array indexer for `array`.
For a boolean mask, `array` and `indexer` are checked to have the same
length. The dtype is validated, and if it is an integer or boolean
ExtensionArray, it is checked if there are missing values present, and
it is converted to the appropriate numpy array. Other dtypes will raise
an error.
Non-array indexers (integer, slice, Ellipsis, tuples, ..) are passed
through as is.
.. versionadded:: 1.0.0
Parameters
----------
array : array-like
The array that is being indexed (only used for the length).
indexer : array-like or list-like
The array-like that's used to index. List-like input that is not yet
a numpy array or an ExtensionArray is converted to one. Other input
types are passed through as is.
Returns
-------
numpy.ndarray
The validated indexer as a numpy array that can be used to index.
Raises
------
IndexError
When the lengths don't match.
ValueError
When `indexer` cannot be converted to a numpy ndarray to index
(e.g. presence of missing values).
See Also
--------
api.types.is_bool_dtype : Check if `key` is of boolean dtype.
Examples
--------
When checking a boolean mask, a boolean ndarray is returned when the
arguments are all valid.
>>> mask = pd.array([True, False])
>>> arr = pd.array([1, 2])
>>> pd.api.indexers.check_array_indexer(arr, mask)
array([ True, False])
An IndexError is raised when the lengths don't match.
>>> mask = pd.array([True, False, True])
>>> pd.api.indexers.check_array_indexer(arr, mask)
Traceback (most recent call last):
...
IndexError: Boolean index has wrong length: 3 instead of 2.
NA values in a boolean array are treated as False.
>>> mask = pd.array([True, pd.NA])
>>> pd.api.indexers.check_array_indexer(arr, mask)
array([ True, False])
A numpy boolean mask will get passed through (if the length is correct):
>>> mask = np.array([True, False])
>>> pd.api.indexers.check_array_indexer(arr, mask)
array([ True, False])
Similarly for integer indexers, an integer ndarray is returned when it is
    a valid indexer, otherwise an error is raised (for integer indexers, a matching
length is not required):
>>> indexer = pd.array([0, 2], dtype="Int64")
>>> arr = pd.array([1, 2, 3])
>>> pd.api.indexers.check_array_indexer(arr, indexer)
array([0, 2])
>>> indexer = pd.array([0, pd.NA], dtype="Int64")
>>> pd.api.indexers.check_array_indexer(arr, indexer)
Traceback (most recent call last):
...
ValueError: Cannot index with an integer indexer containing NA values
For non-integer/boolean dtypes, an appropriate error is raised:
>>> indexer = np.array([0., 2.], dtype="float64")
>>> pd.api.indexers.check_array_indexer(arr, indexer)
Traceback (most recent call last):
...
IndexError: arrays used as indices must be of integer or boolean type
"""
from pandas.core.construction import array as pd_array
# whatever is not an array-like is returned as-is (possible valid array
# indexers that are not array-like: integer, slice, Ellipsis, None)
# In this context, tuples are not considered as array-like, as they have
# a specific meaning in indexing (multi-dimensional indexing)
if is_list_like(indexer):
if isinstance(indexer, tuple):
return indexer
else:
return indexer
# convert list-likes to array
if not is_array_like(indexer):
indexer = pd_array(indexer)
if len(indexer) == 0:
# empty list is converted to float array by pd.array
indexer = np.array([], dtype=np.intp)
dtype = indexer.dtype
if is_bool_dtype(dtype):
if is_extension_array_dtype(dtype):
indexer = indexer.to_numpy(dtype=bool, na_value=False)
else:
indexer = np.asarray(indexer, dtype=bool)
# GH26658
if len(indexer) != len(array):
raise IndexError(
f"Boolean index has wrong length: "
f"{len(indexer)} instead of {len(array)}"
)
elif is_integer_dtype(dtype):
try:
indexer = np.asarray(indexer, dtype=np.intp)
except ValueError as err:
raise ValueError(
"Cannot index with an integer indexer containing NA values"
) from err
else:
raise IndexError("arrays used as indices must be of integer or boolean type")
return indexer
| bsd-3-clause | 7,059,395,294,665,719,000 | 27.556452 | 88 | 0.578791 | false |
Shihta/python-novaclient | novaclient/tests/fixture_data/security_groups.py | 1 | 3693 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from novaclient.openstack.common import jsonutils
from novaclient.tests import fakes
from novaclient.tests.fixture_data import base
class Fixture(base.Fixture):
base_url = 'os-security-groups'
def setUp(self):
super(Fixture, self).setUp()
security_group_1 = {
"name": "test",
"description": "FAKE_SECURITY_GROUP",
"tenant_id": "4ffc664c198e435e9853f2538fbcd7a7",
"id": 1,
"rules": [
{
"id": 11,
"group": {},
"ip_protocol": "TCP",
"from_port": 22,
"to_port": 22,
"parent_group_id": 1,
"ip_range": {"cidr": "10.0.0.0/8"}
},
{
"id": 12,
"group": {
"tenant_id": "272bee4c1e624cd4a72a6b0ea55b4582",
"name": "test2"
},
"ip_protocol": "TCP",
"from_port": 222,
"to_port": 222,
"parent_group_id": 1,
"ip_range": {}
}
]
}
security_group_2 = {
"name": "test2",
"description": "FAKE_SECURITY_GROUP2",
"tenant_id": "272bee4c1e624cd4a72a6b0ea55b4582",
"id": 2,
"rules": []
}
get_groups = {'security_groups': [security_group_1, security_group_2]}
headers = {'Content-Type': 'application/json'}
self.requests.register_uri('GET', self.url(),
json=get_groups,
headers=headers)
get_group_1 = {'security_group': security_group_1}
self.requests.register_uri('GET', self.url(1),
json=get_group_1,
headers=headers)
self.requests.register_uri('DELETE', self.url(1), status_code=202)
def post_os_security_groups(request, context):
body = jsonutils.loads(request.body)
assert list(body) == ['security_group']
fakes.assert_has_keys(body['security_group'],
required=['name', 'description'])
return {'security_group': security_group_1}
self.requests.register_uri('POST', self.url(),
json=post_os_security_groups,
headers=headers,
status_code=202)
def put_os_security_groups_1(request, context):
body = jsonutils.loads(request.body)
assert list(body) == ['security_group']
fakes.assert_has_keys(body['security_group'],
required=['name', 'description'])
return body
self.requests.register_uri('PUT', self.url(1),
json=put_os_security_groups_1,
headers=headers,
status_code=205)
| apache-2.0 | 2,774,355,458,957,496,300 | 36.30303 | 78 | 0.486325 | false |
XENON1T/pax | pax/plugins/io/MongoDB.py | 1 | 42439 | """Interfacing to MongoDB
MongoDB is used as a data backend within the DAQ. For example, 'kodiaq', which
reads out the digitizers, will write data to MongoDB. This data from kodiaq can
either be triggered or untriggered. In the case of untriggered, an event builder
must be run on the data and will result in triggered data. Input and output
classes are provided for MongoDB access. More information is in the docstrings.
"""
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor
from itertools import chain
import datetime
import time
import pytz
import numpy as np
import pymongo
import snappy
import pickle
import monary
from pax.MongoDB_ClientMaker import ClientMaker, parse_passwordless_uri
from pax.datastructure import Event, Pulse, EventProxy
from pax import plugin, trigger, units, exceptions
class MongoBase:
_cached_subcollection_handles = {}
def startup(self):
self.sample_duration = self.config['sample_duration']
self.secret_mode = self.config['secret_mode']
# Connect to the runs db
self.cm = ClientMaker(self.processor.config['MongoDB'])
self.run_client = self.cm.get_client('run', autoreconnect=True)
self.runs_collection = self.run_client['run']['runs_new']
self.refresh_run_doc()
self.split_collections = self.run_doc['reader']['ini'].get('rotating_collections', 0)
if self.split_collections:
self.batch_window = int(self.sample_duration * (2 ** 31))
self.log.debug("Split collection mode: batch window forced to %s sec" % (self.batch_window / units.s))
else:
self.batch_window = self.config['batch_window']
self.input_info, self.hosts, self.dbs, self.input_collections = connect_to_eb_dbs(
clientmaker=self.cm,
run_doc=self.run_doc,
detector=self.config['detector'],
split_collections=self.split_collections)
self.split_hosts = len(self.hosts) != 1
start_datetime = self.run_doc['start'].replace(tzinfo=pytz.utc).timestamp()
self.time_of_run_start = int(start_datetime * units.s)
# Get the database in which the acquisition monitor data resides.
if not self.split_hosts:
# If we haven't split hosts, just take the one host we have.
self.aqm_db = self.dbs[0]
else:
aqm_host = self.config.get('acquisition_monitor_host', 'eb0')
db_i = self.hosts.index(aqm_host)
self.aqm_db = self.dbs[db_i]
def refresh_run_doc(self):
"""Update the internal run doc within this class
(does not change anything in the runs database)
This is useful for example checking if a run has ended.
"""
self.log.debug("Retrieving run doc")
self.run_doc = self.runs_collection.find_one({'_id': self.config['run_doc_id']})
self.log.debug("Run doc retrieved")
self.data_taking_ended = 'end' in self.run_doc
def subcollection_name(self, number):
"""Return name of subcollection number in the run"""
assert self.split_collections
return '%s_%s' % (self.run_doc['name'], number)
def subcollection(self, number, host_i=None):
"""Return pymongo collection object for subcollection number in the run
Caches subcollection handles for you, since it seems to take time to ask for the collection
every event
Actually this turned out to be some other bug... probably we can remove collection caching now.
"""
db = self.dbs[0 if host_i is None else host_i]
assert self.split_collections
cache_key = (number, host_i)
if cache_key in self._cached_subcollection_handles:
return self._cached_subcollection_handles[cache_key]
else:
coll = db.get_collection(self.subcollection_name(number))
self._cached_subcollection_handles[cache_key] = coll
return coll
def subcollection_with_time(self, time):
"""Returns the number of the subcollection which contains pulses which start at time
time: pax units (ns) since start of run
"""
assert self.split_collections
return int(time / self.batch_window)
def time_range_query(self, start=None, stop=None):
"""Returns Mongo query to find pulses that START in [start, stop)
Start and stop are each specified in pax units since start of the run.
"""
return {'time': {'$gte': self._to_mt(start),
'$lt': self._to_mt(stop)}}
def _to_mt(self, x):
"""Converts the time x from pax units to mongo units"""
return int(x // self.sample_duration)
def _from_mt(self, x):
"""Converts the time x from mongo units to pax units"""
return int(x * self.sample_duration)
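# Illustrative sketch (not part of pax): the mongo/pax time conversions above
# are plain integer arithmetic. The 10 ns sample duration used here is an
# assumption for illustration only.
def _demo_time_units(sample_duration=10):
    batch_window = int(sample_duration * (2 ** 31))   # ns per subcollection
    assert int(12345 // sample_duration) == 1234      # as in _to_mt
    assert int(1234 * sample_duration) == 12340       # as in _from_mt
    # a pulse starting 3 full batch windows into the run lands in subcollection 3
    assert int((3 * batch_window + 1) / batch_window) == 3
    return batch_window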
def connect_to_eb_dbs(clientmaker, run_doc, detector='tpc', split_collections=True):
"""Connect to eventbuilder databases. Returns tuple of
- input_info (dictionary with all sorts of info about the connection, e.g. hostnames, collection names, ...)
- hosts: list of MongoDB uris (strings)
- dbs: list of pymongo db handles
- input_collections: None if split_collections, else list of pymongo collection handles
This was split off from the base class to allow re-use in external code (specifically the deleter in event-builder).
"""
for doc in run_doc['data']:
if doc['type'] == 'untriggered':
input_info = doc
break
else:
raise ValueError("Invalid run document: none of the 'data' entries contain untriggered data!")
if ';' in input_info['location']:
split_hosts = True
input_info['location'] = input_info['location'].split(';')[0]
else:
split_hosts = False
input_info['database'] = input_info['location'].split('/')[-1]
if input_info['database'] != 'untriggered' and detector == 'tpc':
raise ValueError("TPC data is expected in the 'untriggered' database,"
" but this run is in %s?!" % input_info['database'])
if split_hosts:
hosts = [parse_passwordless_uri(x)[0]
for x in set(run_doc['reader']['ini']['mongo']['hosts'].values())]
else:
hosts = [parse_passwordless_uri(input_info['location'])[0]]
# Make pymongo db handles for all hosts. Double work if not split_hosts, but avoids double code later
dbs = [clientmaker.get_client(database_name=input_info['database'],
host=host,
uri=input_info['location'],
w=0)[input_info['database']] for host in hosts]
if not split_collections:
input_collections = [db.get_collection(input_info['collection']) for db in dbs]
else:
input_collections = None
return input_info, hosts, dbs, input_collections
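# Illustrative usage sketch (not part of pax); the arguments below are
# placeholders for what MongoBase.startup() already has at hand:
#
#   input_info, hosts, dbs, input_collections = connect_to_eb_dbs(
#       clientmaker=self.cm, run_doc=self.run_doc,
#       detector='tpc', split_collections=True)
#
# With split_collections=True, input_collections is None and per-batch
# subcollections are looked up by name instead (see MongoBase.subcollection).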
class MongoDBReadUntriggered(plugin.InputPlugin, MongoBase):
"""Read pulse times from MongoDB, pass them to the trigger,
and send off EventProxy's for MongoDBReadUntriggeredFiller.
"""
do_output_check = False
latest_subcollection = 0 # Last subcollection that was found to contain some data, last time we checked
def startup(self):
self.log.info("Eventbuilder input starting up")
MongoBase.startup(self)
self.detector = self.config['detector']
self.max_query_workers = self.config['max_query_workers']
self.last_pulse_time = 0 # time (in pax units, i.e. ns) at which the pulse which starts last in the run stops
# It would have been nicer to simply know the last stop time, but pulses are sorted by start time...
# Initialize the trigger
# For now, make a collection in trigger_monitor on the same eb as the untriggered collection
if not self.secret_mode:
self.uri_for_monitor = self.config['trigger_monitor_mongo_uri']
trig_mon_db = self.cm.get_client('trigger_monitor', uri=self.uri_for_monitor)['trigger_monitor']
trig_mon_coll = trig_mon_db.get_collection(self.run_doc['name'])
else:
trig_mon_coll = None
self.uri_for_monitor = 'nowhere, because secret mode was used'
self.log.info("Trigger starting up")
self.trigger = trigger.Trigger(pax_config=self.processor.config,
trigger_monitor_collection=trig_mon_coll)
self.log.info("Trigger startup successful")
# For starting event building in the middle of a run:
self.initial_start_time = self.config.get('start_after_sec', 0) * units.s
if self.initial_start_time:
self.latest_subcollection = self.initial_start_time // self.batch_window
self.log.info("Starting at %0.1f sec, subcollection %d" % (self.initial_start_time,
self.latest_subcollection))
self.pipeline_status_collection = self.run_client['run'][self.config.get('pipeline_status_collection_name',
'pipeline_status')]
self.log.info("Eventbuilder input startup successful")
def refresh_run_info(self):
"""Refreshes the run doc and last pulse time information.
Also updates the pipeline status info with the current queue length
"""
self.refresh_run_doc()
# Find the last collection with data in it
self.log.debug("Finding last collection")
if self.split_collections:
if self.data_taking_ended:
# Get all collection names, find the last subcollection with some data that belongs to the current run.
subcols_with_stuff = [int(x.split('_')[-1]) for x in self.dbs[0].collection_names()
if x.startswith(self.run_doc['name']) and
self.dbs[0].get_collection(x).count()]
if not len(subcols_with_stuff):
self.log.error("Run contains no collection(s) with any pulses!")
self.last_pulse_time = 0
# This should only happen at the beginning of a run, otherwise something is very wrong with the
# collection clearing algorithm!
assert self.latest_subcollection == 0
return
else:
self.latest_subcollection = max(subcols_with_stuff)
check_collection = self.subcollection(self.latest_subcollection)
else:
# While the DAQ is running, we can't use this method, as the reader creates empty collections
# ahead of the insertion point.
if self.config.get('use_run_status_doc'):
# Dan made a doc with the approximate insertion point of each digitizer: the min of these should
# be safe to use (more or less.. a slight delay is still advisable. ask Dan for details)
status_doc = self.dbs[0].get_collection('status').find_one({'collection': self.run_doc['name']})
if status_doc is None:
raise RuntimeError("Missing run status doc!")
safe_col = float('inf')
                    for k, v in status_doc.items():
if isinstance(v, int):
safe_col = min(v, safe_col)
safe_col -= 1
if safe_col < 0 or safe_col == float('inf'):
self.log.info("No subcollection is safe for triggering yet")
self.last_pulse_time = 0
return
self.latest_subcollection = safe_col
self.log.info("First safe subcollection is %d" % self.latest_subcollection)
else:
# Old method: find the last collection with some data, rely on large safety margin
# Keep fingers crossed. Instead, move forward in subcollections until we find one without data.
# If there is a large gap in the data, we won't progress beyond it until the run ends.
while True:
if not self.subcollection(self.latest_subcollection + 1).count():
break
self.latest_subcollection += 1
self.log.info("Last subcollection with data is %d" % self.latest_subcollection)
check_collection = self.subcollection(self.latest_subcollection)
else:
# There is just one collection (well, possibly one per host), just check that one.
check_collection = self.input_collections[0]
# Find the last pulse in the collection
cu = list(check_collection.find().sort('time', direction=pymongo.DESCENDING).limit(1))
if not len(cu):
if self.split_collections:
if not self.latest_subcollection == 0:
self.log.warning("Latest subcollection %d seems empty now, but wasn't before... Race condition/edge"
" case in mongodb, bug in clearing code, or something else weird? Investigate if "
"this occurs often!!" % self.latest_subcollection)
self.last_pulse_time = self.latest_subcollection * self.batch_window
else:
# Apparently the DAQ has not taken any pulses yet?
self.last_pulse_time = 0
else:
self.last_pulse_time = self._from_mt(cu[0]['time'])
if self.data_taking_ended:
self.log.info("The DAQ has stopped, last pulse time is %s" % pax_to_human_time(self.last_pulse_time))
if self.split_collections:
self.log.info("The last subcollection number is %d" % self.latest_subcollection)
# Does this correspond roughly to the run end time? If not, warn, DAQ may have crashed.
end_of_run_t = (self.run_doc['end'].timestamp() - self.run_doc['start'].timestamp()) * units.s
if not (0 <= end_of_run_t - self.last_pulse_time <= 60 * units.s):
self.log.warning("Run is %s long according to run db, but last pulse starts at %s. "
"Did the DAQ crash?" % (pax_to_human_time(end_of_run_t),
pax_to_human_time(self.last_pulse_time)))
# Insert some status info into the pipeline info
if not self.secret_mode:
if hasattr(self, 'last_time_searched'):
lts = self.last_time_searched
else:
lts = 0
self.pipeline_status_collection.insert({'name': 'eventbuilder_info',
'time': datetime.datetime.utcnow(),
'pax_id': self.config.get('pax_id', 'no_pax_id_set'),
'last_pulse_so_far_in_run': self.last_pulse_time,
'latest_subcollection': self.latest_subcollection,
'last_time_searched': lts,
'working_on_run': True})
def get_events(self):
self.log.info("Eventbuilder get_events starting up")
self.refresh_run_info()
self.log.info("Fetched runs db info successfully")
# Last time (ns) searched, exclusive. ie we searched [something, last_time_searched)
self.last_time_searched = self.initial_start_time
self.log.info("self.initial_start_time: %s", pax_to_human_time(self.initial_start_time))
next_event_number = 0
more_data_coming = True
while more_data_coming:
# Refresh the run info, to find out if data taking has ended
if not self.data_taking_ended:
self.refresh_run_info()
# What is the last time we need to search?
if self.data_taking_ended:
end_of_search_for_this_run = self.last_pulse_time + self.batch_window
else:
end_of_search_for_this_run = float('inf')
# What is the earliest time we still need to search?
next_time_to_search = self.last_time_searched
if next_time_to_search != self.initial_start_time:
next_time_to_search += self.batch_window * self.config['skip_ahead']
# How many batch windows can we search now?
if self.data_taking_ended:
batches_to_search = int((end_of_search_for_this_run - next_time_to_search) / self.batch_window) + 1
else:
# Make sure we only query data that is edge_safety_margin away from the last pulse time.
# This is because the readers are inserting the pulse data slightly asynchronously.
# Also make sure we only query once a full batch window of such safe data is available (to avoid
# mini-queries).
duration_of_searchable = self.last_pulse_time - self.config['edge_safety_margin'] - next_time_to_search
batches_to_search = int(duration_of_searchable / self.batch_window)
if batches_to_search < 1:
self.log.info("DAQ has not taken sufficient data to continue. Sleeping 5 sec...")
time.sleep(5)
continue
batches_to_search = min(batches_to_search, self.max_query_workers // len(self.hosts))
# Start new queries in separate processes
with ThreadPoolExecutor(max_workers=self.max_query_workers) as executor:
futures = []
for batch_i in range(batches_to_search):
futures_per_host = []
# Get the query, and collection name needed for it
start = next_time_to_search + batch_i * self.batch_window
if self.split_collections:
subcol_i = self.subcollection_with_time(next_time_to_search) + batch_i
# Prep the query -- not a very difficult one :-)
query = {}
collection_name = self.subcollection_name(subcol_i)
self.log.info("Submitting query for subcollection %d" % subcol_i)
else:
collection_name = self.run_doc['name']
stop = start + self.batch_window
query = self.time_range_query(start, stop)
self.log.info("Submitting query for batch %d, time range [%s, %s)" % (
batch_i, pax_to_human_time(start), pax_to_human_time(stop)))
# Do the query on each host
for host in self.hosts:
future = executor.submit(get_pulses,
client_maker_config=self.cm.config,
query=query,
input_info=self.input_info,
collection_name=collection_name,
host=host,
get_area=self.config['can_get_area'])
futures_per_host.append(future)
futures.append(futures_per_host)
# Record advancement of the batch window
self.last_time_searched = next_time_to_search + batches_to_search * self.batch_window
# Check if there is more data
more_data_coming = (not self.data_taking_ended) or (self.last_time_searched <
end_of_search_for_this_run)
if not more_data_coming:
self.log.info("Searched to %s, which is beyond %s. This is the last batch of data" % (
pax_to_human_time(self.last_time_searched), pax_to_human_time(end_of_search_for_this_run)))
# Check if we've passed the user-specified stop (if so configured)
stop_after_sec = self.config.get('stop_after_sec', None)
if stop_after_sec and 0 < stop_after_sec < float('inf'):
if self.last_time_searched > stop_after_sec * units.s:
self.log.warning("Searched to %s, which is beyond the user-specified stop at %d sec."
"This is the last batch of data" % (self.last_time_searched,
self.config['stop_after_sec']))
more_data_coming = False
# Retrieve results from the queries, then pass everything to the trigger
for i, futures_per_host in enumerate(futures):
if len(futures_per_host) == 1:
assert not self.split_hosts
times, modules, channels, areas = futures_per_host[0].result()
else:
assert self.split_hosts
times = []
modules = []
channels = []
areas = []
for f in futures_per_host:
ts, ms, chs, ars = f.result()
times.append(ts)
modules.append(ms)
channels.append(chs)
areas.append(ars)
times = np.concatenate(times)
modules = np.concatenate(modules)
channels = np.concatenate(channels)
areas = np.concatenate(areas)
times = times * self.sample_duration
if len(times):
self.log.info("Batch %d: acquired pulses in range [%s, %s]" % (
i,
pax_to_human_time(times[0]),
pax_to_human_time(times[-1])))
else:
self.log.info("Batch %d: No pulse data found." % i)
# Send the new data to the trigger, which will build events from it
# Note the data is still unsorted: the trigger will take care of sorting it.
for data in self.trigger.run(last_time_searched=next_time_to_search + (i + 1) * self.batch_window,
start_times=times,
channels=channels,
modules=modules,
areas=areas,
last_data=(not more_data_coming and i == len(futures) - 1)):
yield EventProxy(event_number=next_event_number, data=data, block_id=-1)
next_event_number += 1
# We've built all the events for this run!
# Compile the end of run info for the run doc and for display
trigger_end_info = self.trigger.shutdown()
trigger_end_info.update(dict(ended=True,
status='deleted' if self.config['delete_data'] else 'processed',
trigger_monitor_data_location=self.uri_for_monitor,
mongo_reader_config={k: v for k, v in self.config.items()
if k != 'password' and
k not in self.processor.config['DEFAULT']}))
if not self.secret_mode:
end_of_run_info = {'trigger.%s' % k: v for k, v in trigger_end_info.items()}
self.runs_collection.update_one({'_id': self.config['run_doc_id']},
{'$set': end_of_run_info})
self.log.info("Event building complete. Trigger information: %s" % trigger_end_info)
class MongoDBReadUntriggeredFiller(plugin.TransformPlugin, MongoBase):
"""Read pulse data into event ranges provided by trigger MongoDBReadUntriggered.
This is a separate plugin, since reading the raw pulse data is the expensive operation we want to parallelize.
"""
do_input_check = False
def startup(self):
MongoBase.startup(self)
self.ignored_channels = []
self.max_pulses_per_event = self.config.get('max_pulses_per_event', float('inf'))
self.high_energy_prescale = self.config.get('high_energy_prescale', 0.1)
self.log.info("Software HEV settings: %s max pulses per event, %s prescale" % (self.max_pulses_per_event,
self.high_energy_prescale))
# Load the digitizer channel -> PMT index mapping
self.detector = self.config['detector']
self.pmts = self.config['pmts']
self.pmt_mappings = {(x['digitizer']['module'],
x['digitizer']['channel']): x['pmt_position'] for x in self.pmts}
def _get_cursor_between_times(self, start, stop, subcollection_number=None):
"""Returns count, cursor over all pulses that start in [start, stop) (both pax units since start of run).
Order of pulses is not defined.
count is 0 if max_pulses_per_event is float('inf'), since we don't care about it in that case.
Does NOT deal with time ranges split between subcollections, but does deal with split hosts.
"""
cursors = []
count = 0
for host_i, host in enumerate(self.hosts):
if subcollection_number is None:
assert not self.split_collections
collection = self.input_collections[host_i]
else:
assert self.split_collections
collection = self.subcollection(subcollection_number, host_i)
query = self.time_range_query(start, stop)
cursor = collection.find(query)
# Ask for a large batch size: the default is 101 documents or 1MB. This results in a very small speed
# increase (when I measured it on a normal dataset)
cursor.batch_size(int(1e7))
cursors.append(cursor)
if self.max_pulses_per_event != float('inf'):
count += collection.count(query)
if len(self.hosts) == 1:
return count, cursors[0]
else:
return count, chain(*cursors)
def transform_event(self, event_proxy):
# t0, t1 are the start, stop time of the event in pax units (ns) since the start of the run
(t0, t1), trigger_signals = event_proxy.data
self.log.debug("Fetching data for event with range [%s, %s]",
pax_to_human_time(t0),
pax_to_human_time(t1))
event = Event(n_channels=self.config['n_channels'],
block_id=event_proxy.block_id,
start_time=t0 + self.time_of_run_start,
sample_duration=self.sample_duration,
stop_time=t1 + self.time_of_run_start,
dataset_name=self.run_doc['name'],
event_number=event_proxy.event_number,
trigger_signals=trigger_signals)
# Convert trigger signal times to time since start of event
event.trigger_signals['left_time'] -= t0
event.trigger_signals['right_time'] -= t0
event.trigger_signals['time_mean'] -= t0
if self.split_collections:
start_col = self.subcollection_with_time(t0)
end_col = self.subcollection_with_time(t1)
if start_col == end_col:
count, mongo_iterator = self._get_cursor_between_times(t0, t1, start_col)
if count > self.max_pulses_per_event:
# Software "veto" the event to prevent overloading the event builder
if np.random.rand() > self.high_energy_prescale:
self.log.debug("VETO: %d pulses in event %s" % (len(event.pulses), event.event_number))
event.n_pulses = int(count)
return event
else:
self.log.info("Found event [%s-%s] which straddles subcollection boundary." % (
pax_to_human_time(t0), pax_to_human_time(t1)))
# Ignore the software-HEV in this case
mongo_iterator = chain(self._get_cursor_between_times(t0, t1, start_col)[1],
self._get_cursor_between_times(t0, t1, end_col)[1])
else:
mongo_iterator = self._get_cursor_between_times(t0, t1)
data_is_compressed = self.input_info['compressed']
for i, pulse_doc in enumerate(mongo_iterator):
digitizer_id = (pulse_doc['module'], pulse_doc['channel'])
pmt = self.pmt_mappings.get(digitizer_id)
if pmt is not None:
# Fetch the raw data
data = pulse_doc['data']
if data_is_compressed:
data = snappy.decompress(data)
time_within_event = self._from_mt(pulse_doc['time']) - t0 # ns
event.pulses.append(Pulse(left=self._to_mt(time_within_event),
raw_data=np.fromstring(data,
dtype="<i2"),
channel=pmt,
do_it_fast=True))
elif digitizer_id not in self.ignored_channels:
self.log.warning("Found data from digitizer module %d, channel %d,"
"which doesn't exist according to PMT mapping! Ignoring...",
pulse_doc['module'], pulse_doc['channel'])
self.ignored_channels.append(digitizer_id)
self.log.debug("%d pulses in event %s" % (len(event.pulses), event.event_number))
return event
class MongoDBClearUntriggered(plugin.TransformPlugin, MongoBase):
"""Clears data whose events have been built from MongoDB,
rescuing acquisition monitor pulses to a separate file first.
This must run as part of the output plugin group, so it gets the events in order.
It does not use the events' content, but needs to know which event times have been processed.
If split_collections:
Drop sub collections when events from subsequent collections start arriving.
Drop all remaining subcollections on shutdown.
Else (single collection mode):
Keeps track of which time is safe to delete, then deletes data from the collection in batches.
At shutdown, drop the collection
"""
do_input_check = False
do_output_check = False
last_time_deleted = 0
last_subcollection_not_yet_deleted = 0
def startup(self):
MongoBase.startup(self)
self.executor = ThreadPoolExecutor(max_workers=self.config['max_query_workers'])
# Should we actually delete data, or just rescue the acquisition monitor pulses?
self.actually_delete = self.config.get('delete_data', False)
if self.actually_delete:
self.log.info("Data will be DELETED from the Mongo database after it is acquired!")
else:
self.log.info("Data will REMAIN in the Mongo database (until delete permission is acquired).")
aqm_file_path = self.config.get('acquisition_monitor_file_path')
if aqm_file_path is None:
self.log.info("No acquisition monitor data file path given -- will NOT rescue acquisition monitor pulses!")
self.aqm_output_handle = None
elif 'sum_wv' not in self.config['channels_in_detector']:
self.log.warning("Acquisition monitor path specified, "
"but your detector doesn't have an acquisition monitor?")
self.aqm_output_handle = None
else:
# Get the acquisition monitor module number from the pmts dictionary in the config
# It's a bit bad we've hardcoded 'sum_wv' as detector name here...
some_ch_from_aqm = self.config['channels_in_detector']['sum_wv'][0]
self.aqm_module = self.config['pmts'][some_ch_from_aqm]['digitizer']['module']
self.log.info("Acquisition monitor (module %d) pulses will be saved to %s" % (
self.aqm_module, aqm_file_path))
self.aqm_output_handle = open(aqm_file_path, mode='wb')
# Add some random content to make Boris and ruciax happy
# (ensure a unique checksum even if there are no pulses or the DAQ crashes)
self.aqm_output_handle.write(pickle.dumps("Pax rules! Random numbers of the day: %s" % np.random.randn(3)))
self.already_rescued_collections = []
def transform_event(self, event_proxy):
time_since_start = event_proxy.data['stop_time'] - self.time_of_run_start
if self.split_collections:
coll_number = self.subcollection_with_time(time_since_start)
while coll_number > self.last_subcollection_not_yet_deleted:
self.log.info("Seen event at subcollection %d, clearing subcollection %d" % (
coll_number, self.last_subcollection_not_yet_deleted))
self.drop_collection_named(self.subcollection_name(self.last_subcollection_not_yet_deleted),
self.executor)
self.last_subcollection_not_yet_deleted += 1
else:
if self.input_collections is None:
raise RuntimeError("Wtf??")
if time_since_start > self.last_time_deleted + self.config['batch_window']:
self.log.info("Seen event at %s, clearing all data until then." % pax_to_human_time(time_since_start))
for coll in self.input_collections:
self.executor.submit(self.delete_pulses,
coll,
start_mongo_time=self._to_mt(self.last_time_deleted),
stop_mongo_time=self._to_mt(time_since_start))
self.last_time_deleted = time_since_start
return event_proxy
def shutdown(self):
# Wait for any slow drops to complete
self.log.info("Waiting for slow collection drops/rescues to complete...")
self.executor.shutdown()
self.log.info("Collection drops/rescues should be complete. Checking for remaining collections.")
pulses_in_remaining_collections = defaultdict(int)
for db in self.dbs:
for coll_name in db.collection_names():
if not coll_name.startswith(self.run_doc['name']):
continue
if coll_name in self.already_rescued_collections and not self.actually_delete:
# Of course these collections are still there, don't count them as 'remaining'
continue
pulses_in_remaining_collections[coll_name] += db[coll_name].count()
if len(pulses_in_remaining_collections):
self.log.info("Leftover collections with pulse counts: %s. Clearing/rescuing these now." % (
str(pulses_in_remaining_collections)))
for colname in pulses_in_remaining_collections.keys():
self.drop_collection_named(colname)
self.log.info("Completed.")
else:
self.log.info("All collections have already been cleaned, great.")
if self.actually_delete:
# Update the run doc to remove the 'untriggered' entry
# since we just deleted the last of the untriggered data
self.refresh_run_doc()
self.runs_collection.update_one({'_id': self.run_doc['_id']},
{'$set': {'data': [d for d in self.run_doc['data']
if d['type'] != 'untriggered']}})
if hasattr(self, 'aqm_output_handle') and self.aqm_output_handle is not None:
self.aqm_output_handle.close()
def rescue_acquisition_monitor_pulses(self, collection, query=None):
"""Saves all acquisition monitor pulses from collection the acquisition monitor data file.
- collection: pymongo object (not collection name!)
- query: optional query inside the collection (e.g. for a specific time range).
The condition to select pulses from the acquistion monitor module will be added to this.
"""
if self.aqm_output_handle is None:
return
if query is None:
query = {}
query['module'] = self.aqm_module
# Count first, in case something is badly wrong and we end up saving bazillions of docs we'll at least have
# a fair warning...
n_to_rescue = collection.count(query)
self.log.info("Saving %d acquisition monitor pulses" % n_to_rescue)
for doc in collection.find(query):
self.aqm_output_handle.write(pickle.dumps(doc))
# Flush explicitly: we want to save the data even if the event builder crashes before properly closing the file
self.aqm_output_handle.flush()
def delete_pulses(self, collection, start_mongo_time, stop_mongo_time):
"""Deletes all pulses in collection between start_mongo_time (inclusive) and stop_mongo_time (exclusive),
both in mongo time units (not pax units!). Rescues acquisition monitor pulses just before deleting.
"""
query = {'time': {'$gte': start_mongo_time,
'$lt': stop_mongo_time}}
self.rescue_acquisition_monitor_pulses(collection, query)
if self.actually_delete:
collection.delete_many(query)
def drop_collection_named(self, collection_name, executor=None):
"""Drop the collection named collection_name from db, rescueing acquisition monitor pulses first.
if executor is passed, will execute the drop command via the pool it represents.
This function is NOT parallelizable itself, don't pass it to an executor!
We need to block while rescuing acquisition monitor pulses: otherwise, we would get to the final cleanup in
shutdown() while there are still collections being rescued.
"""
if self.aqm_db is not None:
if collection_name not in self.already_rescued_collections:
self.already_rescued_collections.append(collection_name)
self.rescue_acquisition_monitor_pulses(self.aqm_db[collection_name])
else:
self.log.warning("Duplicated call to rescue/delete collection %s!" % collection_name)
if not self.actually_delete:
return
for db in self.dbs:
if executor is None:
db.drop_collection(collection_name)
else:
executor.submit(db.drop_collection, collection_name)
def pax_to_human_time(num):
"""Converts a pax time to a human-readable representation"""
for x in ['ns', 'us', 'ms', 's', 'ks', 'Ms', 'G', 'T']:
if num < 1000.0:
return "%3.3f %s" % (num, x)
num /= 1000.0
return "%3.1f %s" % (num, 's')
def get_pulses(client_maker_config, input_info, collection_name, query, host, get_area=False):
"""Find pulse times according to query using monary.
Returns four numpy arrays: times, modules, channels, areas.
Areas consists of zeros unless get_area = True, in which we also fetch the 'integral' field.
The monary client is created inside this function, so we could run it with ProcessPoolExecutor.
"""
fields = ['time', 'module', 'channel'] + (['integral'] if get_area else [])
    types = ['int64', 'int32', 'int32'] + (['float64'] if get_area else [])
try:
client_maker = ClientMaker(client_maker_config)
monary_client = client_maker.get_client(database_name=input_info['database'],
uri=input_info['location'],
host=host,
monary=True)
# Somehow monary's block query fails when we have multiple blocks,
# we need to take care of copying out the data ourselves, but even if I use .copy it doesn't seem to work
# Never mind, just make a big block
results = list(monary_client.block_query(input_info['database'], collection_name, query, fields, types,
block_size=int(5e8),
select_fields=True))
monary_client.close()
except monary.monary.MonaryError as e:
if 'Failed to resolve' in str(e):
raise exceptions.DatabaseConnectivityError("Caught error trying to connect to untriggered database. "
"Original exception: %s." % str(e))
raise e
if not len(results) or not len(results[0]):
times = np.zeros(0, dtype=np.int64)
modules = np.zeros(0, dtype=np.int32)
channels = np.zeros(0, dtype=np.int32)
areas = np.zeros(0, dtype=np.float64)
else:
# Concatenate results from multiple blocks, in case multiple blocks were needed
results = [np.concatenate([results[i][j]
for i in range(len(results))])
for j in range(len(results[0]))]
if get_area:
times, modules, channels, areas = results
else:
times, modules, channels = results
areas = np.zeros(len(times), dtype=np.float64)
return times, modules, channels, areas
| bsd-3-clause | -4,955,728,548,880,822,000 | 50.881418 | 120 | 0.567355 | false |
PurpleHominid/The-Warlock-of-Firetop-Mountain | WOFT_01.py | 1 | 1632 | class clsLocation:
#this class provides the support for enumerated locations
ROOM=0
DOOR=1
WALL=2
#some more changes went here before the start of the service
#
#
#this branch is called development 01
#this is another branch here
class clsPlayerState:
#this class provides the functions to support the player state
#define class based variables; common to all instances
__playerCount=0 #create a common variable; use '__' to hide the variable
def __init__(self, startState):
#this function is automatically executed when a new class instance is created
clsPlayerState.__playerCount+=1 #increase the hidden player count
#define instance variables, specific to single instance
        self.location=startState #initialise the starting location
def fnUpdate(self):
#this function updates the players state
if self.location==clsLocation.ROOM: #at the room
self.fnROOM() #create options for room
elif self.location==clsLocation.DOOR: #at the door
self.fnDOOR() #create options for door
elif self.location==clsLocation.WALL: #at the wall
self.fnWALL() #create options for wall
def fnROOM(self):
#describe the location
print("You are at the room")
def fnDOOR(self):
#describe the location
print("You are at the door")
def fnWALL(self):
#describe the location
print("You are at the wall")
#begin the main code
insPlayer=clsPlayerState(clsLocation.ROOM) #initialise the player instance using the class
insPlayer.fnUpdate()
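#illustrative extension (not part of the original file): reuse the same
#instance by assigning a new enumerated location before updating again
insPlayer.location=clsLocation.DOOR #move the player to the door
insPlayer.fnUpdate() #prints "You are at the door"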
| mit | 5,276,123,399,268,443,000 | 27.137931 | 90 | 0.681985 | false |
hootnot/oandapyV20-examples | src/contrib_mo_tp_sl.py | 1 | 1328 | # -*- coding: utf-8 -*-
"""Example demonstrating the contrib.request classes.
Create a MarketOrderRequest to enter 10000 EUR_USD LONG position along with
- a TakeProfitOrder to take profit @1.10
- a StopLossOrder to limit the loss @1.05
These values apply for this moment: EUR_USD 1.0605
So when you run the example you may need to change the values.
"""
import json
from oandapyV20.contrib.requests import (
MarketOrderRequest,
TakeProfitDetails,
StopLossDetails
)
import oandapyV20.endpoints.orders as orders
import oandapyV20
from exampleauth import exampleAuth
accountID, access_token = exampleAuth()
api = oandapyV20.API(access_token=access_token)
# EUR_USD (today 1.0605)
EUR_USD_STOP_LOSS = 1.05
EUR_USD_TAKE_PROFIT = 1.10
# The orderspecs
mktOrder = MarketOrderRequest(
instrument="EUR_USD",
units=10000,
takeProfitOnFill=TakeProfitDetails(price=EUR_USD_TAKE_PROFIT).data,
stopLossOnFill=StopLossDetails(price=EUR_USD_STOP_LOSS).data
)
print("Market Order specs: \n{}".format(json.dumps(mktOrder.data, indent=4)))
# create the OrderCreate request
r = orders.OrderCreate(accountID, data=mktOrder.data)
try:
    # perform the request against the API
rv = api.request(r)
except oandapyV20.exceptions.V20Error as err:
print(r.status_code, err)
else:
print(json.dumps(rv, indent=2))
| mit | -5,632,243,889,746,396,000 | 25.039216 | 77 | 0.750753 | false |
thommiller/ANN---Pymier-League | Main.py | 1 | 1763 | import numpy as np
# sigmoid function
def nonlin(x,deriv=False):
if(deriv==True):
return x*(1-x)
return 1/(1+np.exp(-x))
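# illustrative sketch (not part of the original script): the deriv=True branch
# assumes x is already a sigmoid output y, so the slope is y*(1-y)
def _demo_nonlin():
    y = nonlin(0.0)           # sigmoid(0) = 0.5
    slope = nonlin(y, True)   # 0.5 * (1 - 0.5) = 0.25, the steepest point
    print("sigmoid(0)=%s, slope at that output=%s" % (y, slope))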
def displayPred(num):
if(num> 0.5 and num <0.75):
return " - Draw"
elif(num>0.75):
return " - Win"
else:
return " - Loss"
# for training data we will compare Man-Utd's last 10 games
# input data will be [homeTeam, awayTeam]
# output data will be [0 | loss, 0.5 | draw, 1 | win]
# input dataset - every football match from 2014-2015 (MASSIVE WEB SCRAPING TASK)
#man u = 0, stoke = 1, yeovil town = 2, QPR = 3, cambridge = 4, leicester = 5
teams = ["Man U", "Stoke", "Yeovil Town", "QPR", "Cambridge", "Leicester"]
X = np.array([ [1,0], #stoke vs man u - draw
[0,2], #yeovil town vs man u - won
[3,0],
[4,0],
[0,5]
])
# output dataset
y = np.array([[0.5,1,1,0.5,1]]).T
# seed random numbers to make calculation
# deterministic (just a good practice)
np.random.seed(1)
# initialize weights randomly in [-1, 2)
syn0 = 3*np.random.random((2,1)) - 1
for iter in xrange(10000):
# forward propagation
l0 = X
l1 = nonlin(np.dot(l0,syn0))
# how much did we miss?
l1_error = y - l1
# multiply how much we missed by the
# slope of the sigmoid at the values in l1
l1_delta = l1_error * nonlin(l1,True)
# update weights
syn0 += np.dot(l0.T, l1_delta)
print "Game predictions based on training data:"
print teams[1],"\t\tvs\t",teams[0], displayPred(l1[0])
print teams[0],"\t\tvs\t",teams[2], displayPred(l1[1])
print teams[3],"\t\tvs\t",teams[0], displayPred(l1[2])
print teams[4],"\tvs\t",teams[0], displayPred(l1[3])
print teams[0],"\t\tvs\t",teams[5], displayPred(l1[4])
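# illustrative extension (assumption, not in the original script): score an
# unseen fixture by pushing its [home, away] indices through the trained weights
new_match = np.array([[1, 5]]) # Stoke at home to Leicester
print "\nPrediction for an unseen fixture:"
print teams[1],"\t\tvs\t",teams[5], displayPred(nonlin(np.dot(new_match, syn0))[0][0])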
| mit | 3,757,302,599,893,747,700 | 27.435484 | 81 | 0.600681 | false |
ibrahimkarahan/Flexget | flexget/plugins/output/exec.py | 1 | 7984 | from __future__ import unicode_literals, division, absolute_import
import logging
import subprocess
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.config_schema import one_or_more
from flexget.utils.template import render_from_entry, render_from_task, RenderError
from flexget.utils.tools import io_encoding
log = logging.getLogger('exec')
class EscapingEntry(Entry):
"""Helper class, same as a Entry, but returns all string value with quotes escaped."""
def __init__(self, entry):
super(EscapingEntry, self).__init__(entry)
def __getitem__(self, key):
value = super(EscapingEntry, self).__getitem__(key)
# TODO: May need to be different depending on OS
if isinstance(value, basestring):
value = value.replace('"', '\\"')
return value
class PluginExec(object):
"""
Execute commands
    Simple example, execute a command for entries that reach output::
exec: echo 'found {{title}} at {{url}}' > file
Advanced Example::
exec:
on_start:
phase: echo "Started"
on_input:
for_entries: echo 'got {{title}}'
on_output:
          for_accepted: echo 'accepted {{title}} - {{url}}' > file
You can use all (available) entry fields in the command.
"""
NAME = 'exec'
HANDLED_PHASES = ['start', 'input', 'filter', 'output', 'exit']
schema = {
'oneOf': [
one_or_more({'type': 'string'}),
{
'type': 'object',
'properties': {
'on_start': {'$ref': '#/definitions/phaseSettings'},
'on_input': {'$ref': '#/definitions/phaseSettings'},
'on_filter': {'$ref': '#/definitions/phaseSettings'},
'on_output': {'$ref': '#/definitions/phaseSettings'},
'on_exit': {'$ref': '#/definitions/phaseSettings'},
'fail_entries': {'type': 'boolean'},
'auto_escape': {'type': 'boolean'},
'encoding': {'type': 'string'},
'allow_background': {'type': 'boolean'}
},
'additionalProperties': False
}
],
'definitions': {
'phaseSettings': {
'type': 'object',
'properties': {
'phase': one_or_more({'type': 'string'}),
'for_entries': one_or_more({'type': 'string'}),
'for_accepted': one_or_more({'type': 'string'}),
'for_rejected': one_or_more({'type': 'string'}),
'for_failed': one_or_more({'type': 'string'})
},
'additionalProperties': False
}
}
}
def prepare_config(self, config):
if isinstance(config, basestring):
config = [config]
if isinstance(config, list):
config = {'on_output': {'for_accepted': config}}
if not config.get('encoding'):
config['encoding'] = io_encoding
for phase_name in config:
if phase_name.startswith('on_'):
for items_name in config[phase_name]:
if isinstance(config[phase_name][items_name], basestring):
config[phase_name][items_name] = [config[phase_name][items_name]]
return config
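    # Illustrative sketch (not part of FlexGet): prepare_config normalises the
    # shorthand forms, so a bare string such as 'echo {{title}}' comes out as
    #   {'on_output': {'for_accepted': ['echo {{title}}']}, 'encoding': io_encoding}
    # before the phase handlers ever see it.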
def execute_cmd(self, cmd, allow_background, encoding):
log.verbose('Executing: %s' % cmd)
p = subprocess.Popen(cmd.encode(encoding), shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, close_fds=False)
if not allow_background:
(r, w) = (p.stdout, p.stdin)
response = r.read().decode(encoding, 'replace')
r.close()
w.close()
if response:
log.info('Stdout: %s' % response)
return p.wait()
def execute(self, task, phase_name, config):
config = self.prepare_config(config)
if phase_name not in config:
log.debug('phase %s not configured' % phase_name)
return
name_map = {'for_entries': task.entries, 'for_accepted': task.accepted,
'for_rejected': task.rejected, 'for_failed': task.failed}
allow_background = config.get('allow_background')
for operation, entries in name_map.iteritems():
if operation not in config[phase_name]:
continue
log.debug('running phase_name: %s operation: %s entries: %s' % (phase_name, operation, len(entries)))
for entry in entries:
for cmd in config[phase_name][operation]:
entrydict = EscapingEntry(entry) if config.get('auto_escape') else entry
# Do string replacement from entry, but make sure quotes get escaped
try:
cmd = render_from_entry(cmd, entrydict)
except RenderError as e:
log.error('Could not set exec command for %s: %s' % (entry['title'], e))
# fail the entry if configured to do so
if config.get('fail_entries'):
entry.fail('Entry `%s` does not have required fields for string replacement.' %
entry['title'])
continue
log.debug('phase_name: %s operation: %s cmd: %s' % (phase_name, operation, cmd))
if task.options.test:
log.info('Would execute: %s' % cmd)
else:
# Make sure the command can be encoded into appropriate encoding, don't actually encode yet,
# so logging continues to work.
try:
cmd.encode(config['encoding'])
except UnicodeEncodeError:
log.error('Unable to encode cmd `%s` to %s' % (cmd, config['encoding']))
if config.get('fail_entries'):
entry.fail('cmd `%s` could not be encoded to %s.' % (cmd, config['encoding']))
continue
# Run the command, fail entries with non-zero return code if configured to
if (self.execute_cmd(cmd, allow_background, config['encoding']) != 0 and
config.get('fail_entries')):
entry.fail('exec return code was non-zero')
# phase keyword in this
if 'phase' in config[phase_name]:
for cmd in config[phase_name]['phase']:
try:
cmd = render_from_task(cmd, task)
except RenderError as e:
log.error('Error rendering `%s`: %s' % (cmd, e))
else:
log.debug('phase cmd: %s' % cmd)
if task.options.test:
log.info('Would execute: %s' % cmd)
else:
self.execute_cmd(cmd, allow_background, config['encoding'])
def __getattr__(self, item):
"""Creates methods to handle task phases."""
for phase in self.HANDLED_PHASES:
if item == plugin.phase_methods[phase]:
# A phase method we handle has been requested
break
else:
# We don't handle this phase
raise AttributeError(item)
def phase_handler(task, config):
self.execute(task, 'on_' + phase, config)
# Make sure we run after other plugins so exec can use their output
phase_handler.priority = 100
return phase_handler
@event('plugin.register')
def register_plugin():
plugin.register(PluginExec, 'exec', api_ver=2)
| mit | 1,534,387,490,243,318,500 | 39.527919 | 116 | 0.516032 | false |
Outernet-Project/librarian | librarian/tasks/notifications.py | 1 | 1481 | import datetime
import logging
from greentasks import Task
from ..core.exts import ext_container as exts
from ..core.utils import utcnow
class NotificationCleanupTask(Task):
name = 'notifications'
periodic = True
def get_start_delay(self):
return exts.config['notifications.default_expiry']
def get_delay(self, previous_delay):
return exts.config['notifications.default_expiry']
def run(self):
db = exts.databases.librarian
default_expiry = exts.config['notifications.default_expiry']
logging.debug("Notification cleanup started.")
now = utcnow()
auto_expires_at = now - datetime.timedelta(seconds=default_expiry)
where = '''notifications.dismissable = true AND (
(notifications.expires_at IS NULL AND
notifications.created_at <= %(auto_expires_at)s) OR
notifications.expires_at <= %(now)s)'''
query = db.Delete('notifications', where=where)
target_query = db.Delete('notification_targets USING notifications',
where=where)
target_query.where += ('notification_targets.notification_id = '
'notifications.notification_id')
params = dict(now=now, auto_expires_at=auto_expires_at)
db.execute(target_query, params)
rows = db.execute(query, params)
logging.debug("{} expired notifications deleted.".format(rows))
| gpl-3.0 | -2,417,505,482,011,534,000 | 37.973684 | 76 | 0.633356 | false |
wolf9s/doconce | lib/doconce/mwiki.py | 1 | 17031 | """
MediaWiki translator, aimed at Wikipedia/WikiBooks type of web pages.
Syntax defined by http://en.wikipedia.org/wiki/Help:Wiki_markup
and http://en.wikipedia.org/wiki/Help:Displaying_a_formula.
The prefix m in the name mwiki distinguishes this translator from
gwiki (googlecode wiki).
Not yet implemented:
mwiki_ref_and_label (just using code from gwiki)
Just using plain ASCII solutions for index_bib (requires some work to
port to MediaWiki, but is straightforward - use rst as template) and
exercise (probably ok with the plain solution).
GitHub wiki pages understand MediaWiki, see
https://github.com/github/gollum
The page http://en.wikibooks.org/wiki/Wikibooks:Sandbox is fine for
short-lived experiments.
http://shoutwiki.com can host MediaWiki pages.
http://jumpwiki.com/wiki/Main_Page can also host MediaWiki pages, but
there are troubles with align envirs and math (ugly typesetting and
some strange indents).
Create a user account, choose *Create a Wiki* in the menu on the left,
fill out the form, wait until you get a Main Page, click on edit, make
references to a new page, say [[First demo|demo]], save, click on
demo and fill out that page with the content of a mydoconcefile.wiki.
Sometimes it is necessary to create a new account; just do that and
go back.
"""
import re, os, commands, sys
from common import default_movie, plain_exercise, insert_code_and_tex
from plaintext import plain_quiz
from misc import _abort
def align2equations(math_text):
"""
Transform an align environment to a set of equation environments.
Used to handle multiple equations if align does not work well.
Note: This version is outdated. common.align2equations is the
newest attempt to implement align in terms of single equations.
"""
if not '{align' in math_text:
return
math_text = math_text.replace('&', '')
math_text = math_text.replace('\\\\', r"""
</math>
:<math>""")
pattern = r'\\(begin|end)\{align\*?\}\s*'
math_text = re.sub(pattern, '', math_text)
# :<math> and </math> surroundings appear when !bt and !et are translated
return math_text
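# Illustrative sketch (not part of DocOnce): what align2equations does to a
# small two-equation align block (the :<math>/</math> surroundings are added
# by the !bt/!et translation mentioned above).
def _demo_align2equations():
    math_text = r':<math>\begin{align} a &= b \\ c &= d \end{align}</math>'
    # ampersands disappear, the align delimiters are stripped, and each \\
    # starts a new </math> / :<math> pair
    return align2equations(math_text)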
def equation2nothing(math_text):
pattern = r'\\(begin|end)\{equation\*?\}\s*'
math_text = re.sub(pattern, '', math_text)
math_text = math_text.replace(r'\[', '')
math_text = math_text.replace(r'\]', '')
return math_text
def remove_labels(math_text):
pattern = 'label\{(.+?)\}\s*'
labels = re.findall(pattern, math_text)
if labels:
math_text = re.sub(pattern, '', math_text)
return math_text, labels
def mwiki_code(filestr, code_blocks, code_block_types,
tex_blocks, format):
# http://en.wikipedia.org/wiki/Help:Displaying_a_formula
# MediaWiki math does not support labels in equations.
# The enviros equation and \[ \] must be removed (not supported).
for i in range(len(tex_blocks)):
# Standard align works in Wikipedia and Wikibooks.
# Standard align gives somewhat ugly output on wiiki.com services,
# but a set of separate equations is not much better.
# We therefore stick to align instead.
#tex_blocks[i] = align2equations(tex_blocks[i])
tex_blocks[i] = equation2nothing(tex_blocks[i])
tex_blocks[i], labels = remove_labels(tex_blocks[i])
for label in labels:
if label in filestr:
print '*** warning: reference to label "%s" in an equation does not work in MediaWiki' % label
filestr = insert_code_and_tex(filestr, code_blocks, tex_blocks, format)
# Supported programming languages:
# http://www.mediawiki.org/wiki/Extension:SyntaxHighlight_GeSHi#Supported_languages
envir2lang = dict(cod='python', pycod='python', cycod='python',
fcod='fortran', ccod='c', cppcod='cpp',
mcod='matlab', plcod='perl', shcod='bash',
pro='python', pypro='python', cypro='python',
fpro='fortran', cpro='c', cpppro='cpp',
mpro='matlab', plpro='perl', shpro='bash',
rbpro='ruby', rbcod='ruby',
javacod='java', javapro='java',
htmlcod='html5', xmlcod='xml',
htmlpro='html5', xmlpro='xml',
html='html5', xml='xml',
sys='bash', dat='text', csv='text', txt='text',
pyoptpro='python', pyscpro='python',
ipy='python', pyshell='python',
)
for key in envir2lang:
language = envir2lang[key]
cpattern = re.compile(r'^!bc\s+%s\s*\n' % key, flags=re.MULTILINE)
filestr = cpattern.sub('<syntaxhighlight lang="%s">\n' % \
envir2lang[key], filestr)
c = re.compile(r'^!bc.*$\n', re.MULTILINE)
filestr = c.sub('<syntaxhighlight lang="text">\n', filestr)
filestr = re.sub(r'!ec\n', '</syntaxhighlight>\n', filestr)
c = re.compile(r'^!bt\n', re.MULTILINE)
filestr = c.sub(':<math>\n', filestr)
filestr = re.sub(r'!et\n', '</math>\n', filestr)
# Final fix of MediaWiki file
# __TOC__ syntax is misinterpretated as paragraph heading, so we
# use <<<TOC>>> instead and replace to right syntax here at the end.
filestr = filestr.replace('<<<TOC>>>', '__TOC__')
return filestr
def mwiki_figure(m):
filename = m.group('filename')
link = filename if filename.startswith('http') else None
if not link and not os.path.isfile(filename):
raise IOError('no figure file %s' % filename)
basename = os.path.basename(filename)
stem, ext = os.path.splitext(basename)
root, ext = os.path.splitext(filename)
if link is None:
if not ext in '.png .gif .jpg .jpeg'.split():
# try to convert image file to PNG, using
# convert from ImageMagick:
cmd = 'convert %s png:%s' % (filename, root+'.png')
failure, output = commands.getstatusoutput(cmd)
if failure:
print '\n**** warning: could not run ', cmd
print ' convert %s to PNG format manually' % filename
_abort()
filename = root + '.png'
caption = m.group('caption').strip()
if caption != '':
caption = '|' + caption # add | for non-empty caption
else:
# Avoid filename as caption when caption is empty
# see http://www.mediawiki.org/wiki/Help:Images
caption = '|<span title=""></span>'
# keep label if it's there:
caption = re.sub(r'label\{(.+?)\}', '(\g<1>)', caption)
size = ''
opts = m.group('options').strip()
if opts:
info = dict([s.split('=') for s in opts.split()])
if 'width' in info and 'height' in info:
size = '|%sx%spx' % (info['width'], info['height'])
elif 'width' in info:
size = '|%spx' % info['width']
elif 'height' in info:
size = '|x%spx' % info['height']
if link:
# We link to some image on the web
filename = os.path.basename(filename)
link = os.path.dirname(link)
result = r"""
[[File:%s|frame%s|link=%s|alt=%s%s]]
""" % (filename, size, link, filename, caption)
else:
# We try to link to a file at wikimedia.org.
found_wikimedia = False
orig_filename = filename
# Check if the file exists and find the appropriate wikimedia name.
# http://en.wikipedia.org/w/api.php?action=query&titles=Image:filename&prop=imageinfo&format=xml
# Skip directories - get the basename
filename = os.path.basename(filename)
import urllib
prms = urllib.urlencode({
'action': 'query', 'titles': 'Image:' + filename,
'prop': 'imageinfo', 'format': 'xml'})
url = 'http://en.wikipedia.org/w/api.php?' + prms
try:
print ' ...checking if %s is stored at en.wikipedia.org/w/api.php...' % filename
f = urllib.urlopen(url)
imageinfo = f.read()
f.close()
def get_data(name, text):
pattern = '%s="(.*?)"' % name
m = re.search(pattern, text)
if m:
match = m.group(1)
if 'Image:' in match:
return match.split('Image:')[1]
if 'File:' in match:
return match.split('File:')[1]
else:
return match
else:
return None
data = ['from', 'to', 'title', 'missing', 'imagerepository',
'timestamp', 'user']
orig_filename = filename
filename = get_data('title', imageinfo)
user = get_data('user', imageinfo)
timestamp = get_data('timestamp', imageinfo)
if user:
found_wikimedia = True
print ' ...found %s at wikimedia' % filename
result = r"""
[[File:%s|frame%s|alt=%s%s]] <!-- user: %s, filename: %s, timestamp: %s -->
""" % (filename, size, filename, caption, user, orig_filename, timestamp)
except IOError:
print ' ...no Internet connection...'
if not found_wikimedia:
print ' ...for wikipedia/wikibooks you must upload image file %s to\n common.wikimedia.org' % orig_filename
# see http://commons.wikimedia.org/wiki/Commons:Upload
# and http://commons.wikimedia.org/wiki/Special:UploadWizard
print ' ...for now we use local file %s' % filename
# This is fine if we use github wiki
result = r"""
[[File:%s|frame%s|alt=%s%s]] <!-- not yet uploaded to common.wikimedia.org -->
""" % (filename, size, filename, caption)
return result
from common import table_analysis
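# Typeset the author list for MediaWiki: italicized names with an optional
# "(user at domain)" hint, joined with commas/'and'; institutions are skipped.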
def mwiki_author(authors_and_institutions, auth2index,
inst2index, index2inst, auth2email):
authors = []
for author, i, email in authors_and_institutions:
if email is None:
email_text = ''
else:
name, adr = email.split('@')
email_text = ' (%s at %s)' % (name, adr)
authors.append('_%s_%s' % (author, email_text))
if len(authors) == 1:
authors = authors[0]
elif len(authors) == 2:
authors = authors[0] + ' and ' + authors[1]
elif len(authors) > 2:
authors[-1] = 'and ' + authors[-1]
authors = ', '.join(authors)
else:
# no authors:
return ''
text = '\n\nBy ' + authors + '\n\n'
# we skip institutions in mwiki
return text
from gwiki import wiki_ref_and_label_common
def mwiki_ref_and_label(section_label2title, format, filestr):
return wiki_ref_and_label_common(section_label2title, format, filestr)
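# Admonitions are mapped onto MediaWiki templates: 'quote' becomes a
# {{quote box}}, everything else an {{mbox}} with a bold title and a
# text size derived from the normal/large/small setting.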
def mwiki_admon(block, format, title='Warning', text_size='normal',
admon_type='warning'):
if title.lower().strip() == 'none':
title = ''
# Blocks without explicit title should have empty title
if title == 'Block':
title = ''
if title and title[-1] not in ('.', ':', '!', '?'):
        # Make sure the title ends with punctuation
title += '.'
admon_type2mwiki = dict(notice='notice',
warning='warning', # or critical or important
hint='notice',
quote='quote')
if admon_type in admon_type2mwiki:
admon_type = admon_type2mwiki[admon_type] # use mwiki admon
else:
admon_type = title # Just use the title
text = "'''%s''' " % title + block
if text_size == 'normal':
text_size = '90%'
elif text_size == 'large':
text_size = '130%'
elif text_size == 'small':
text_size = '80%'
if admon_type == 'quote':
s = """
{{quote box
| quote = %s
| textstyle = font-size: %s;
}}
""" % (block, text_size)
# quote has also | source = ... but other formats like
# latex and html have no specific source tag, so it must
# be typeset manually
else:
s = """
{{mbox
| type = %s
| textstyle = font-size: %s;
| text = %s
}}
""" % (admon_type, text_size, text)
return s
# mbox: notice
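# define() plugs the mwiki backend into the shared dispatch tables: filename
# extension, inline tag substitutions, code/table/list handlers, environments,
# cross references, exercises, index/bibliography, TOC and quiz hooks.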
def define(FILENAME_EXTENSION,
BLANKLINE,
INLINE_TAGS_SUBST,
CODE,
LIST,
ARGLIST,
TABLE,
EXERCISE,
FIGURE_EXT,
CROSS_REFS,
INDEX_BIB,
TOC,
ENVIRS,
QUIZ,
INTRO,
OUTRO,
filestr):
# all arguments are dicts and accept in-place modifications (extensions)
FILENAME_EXTENSION['mwiki'] = '.mwiki' # output file extension
BLANKLINE['mwiki'] = '\n'
# replacement patterns for substitutions of inline tags
INLINE_TAGS_SUBST['mwiki'] = {
'math': r'\g<begin><math>\g<subst></math>\g<end>',
'math2': r'\g<begin><math>\g<latexmath></math>\g<end>',
'emphasize': r"\g<begin>''\g<subst>''\g<end>",
'bold': r"\g<begin>'''\g<subst>'''\g<end>",
'verbatim': r'\g<begin><code>\g<subst></code>\g<end>',
#'linkURL': r'\g<begin>[\g<url> \g<link>]\g<end>',
'linkURL2': r'[\g<url> \g<link>]',
'linkURL3': r'[\g<url> \g<link>]',
'linkURL2v': r'[\g<url> <code>\g<link></code>]',
'linkURL3v': r'[\g<url> <code>\g<link></code>]',
'plainURL': r'\g<url>',
'colortext': r'<font color="\g<color>">\g<text></font>',
'chapter': r"""== '''\g<subst>''' ==""",
'section': r'== \g<subst> ==',
'subsection': r'=== \g<subst> ===',
'subsubsection': r'==== \g<subst> ====\n',
'paragraph': r"''\g<subst>''\n",
'title': r'#TITLE (actually governed by the filename): \g<subst>\n',
'date': r'===== \g<subst> =====',
'author': mwiki_author, #r'===== \g<name>, \g<institution> =====',
# 'figure': r'<\g<filename>>',
'figure': mwiki_figure,
'movie': default_movie, # will not work for HTML movie player
'comment': '<!-- %s -->',
'abstract': r'\n*\g<type>.* \g<text>\g<rest>',
'linebreak': r'\g<text><br />',
'non-breaking-space': ' ',
'horizontal-rule': '----',
'ampersand2': r' \g<1>&\g<2>',
}
CODE['mwiki'] = mwiki_code
from html import html_table
TABLE['mwiki'] = html_table
ENVIRS['mwiki'] = {
'warning': lambda block, format, title='Warning', text_size='normal':
mwiki_admon(block, format, title, text_size, 'warning'),
'notice': lambda block, format, title='Notice', text_size='normal':
mwiki_admon(block, format, title, text_size, 'notice'),
'question': lambda block, format, title='Question', text_size='normal':
mwiki_admon(block, format, title, text_size, 'question'),
'hint': lambda block, format, title='Hint', text_size='normal':
mwiki_admon(block, format, title, text_size, 'hint'),
'summary': lambda block, format, title='Summary', text_size='normal':
mwiki_admon(block, format, title, text_size, 'summary'),
'block': lambda block, format, title='Block', text_size='normal':
mwiki_admon(block, format, title, text_size, 'block'),
'box': lambda block, format, title='none', text_size='normal':
mwiki_admon(block, format, title, text_size, 'box'),
'quote': lambda block, format, title='none', text_size='normal':
mwiki_admon(block, format, title, text_size, 'quote'),
}
# native list:
LIST['mwiki'] = {
'itemize': {'begin': '\n', 'item': '*', 'end': '\n\n'},
'enumerate': {'begin': '\n', 'item': '#', 'end': '\n\n'},
'description': {'begin': '\n', 'item': '* %s ', 'end': '\n\n'},
'separator': '\n'}
# Try this:
LIST['mwiki'] = LIST['html']
# how to typeset description lists for function arguments, return
# values, and module/class variables:
ARGLIST['mwiki'] = {
'parameter': '*argument*',
'keyword': '*keyword argument*',
'return': '*return value(s)*',
'instance variable': '*instance variable*',
'class variable': '*class variable*',
'module variable': '*module variable*',
}
FIGURE_EXT['mwiki'] = {
'search': ('.png', '.gif', '.jpg', '.jpeg'),
'convert': ('.png', '.gif', '.jpg')}
CROSS_REFS['mwiki'] = mwiki_ref_and_label
from plaintext import plain_index_bib
EXERCISE['mwiki'] = plain_exercise
INDEX_BIB['mwiki'] = plain_index_bib
TOC['mwiki'] = lambda s: '<<<TOC>>>' # __TOC__ will be wrongly translated to paragraph headline and needs a fix
QUIZ['mwiki'] = plain_quiz
# document start:
INTRO['mwiki'] = ''
| bsd-3-clause | 7,633,075,306,408,647,000 | 37.531674 | 122 | 0.557278 | false |
andyeff/skybot | plugins/giantbomb.py | 1 | 1360 | from urllib2 import HTTPError
from util import hook, http
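# Registered as the .gb command: fetch the Giant Bomb front page, scrape the
# "promo-upcoming" block and return "title - time" pairs joined by '|',
# truncated to roughly 420 characters.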
@hook.command(autohelp=False)
def gb(inp):
'.gb - lists upcoming shows on Giant Bomb'
url = 'http://www.giantbomb.com'
try:
doc = http.get_html(url)
except HTTPError as e:
errors = {400: 'bad request (ratelimited?) 400',
401: 'unauthorized 401 ',
403: 'forbidden 403',
404: 'invalid user/id 404',
500: 'something is broken 500',
502: 'something is down ("getting upgraded?") 502',
503: 'something is overloaded 503',
410: 'something something 410'}
if e.code == 404:
return 'bad url?'
if e.code in errors:
return 'error: ' + errors[e.code]
return 'error: unknown %s' % e.code
if not doc.find_class("promo-upcoming"):
return "no results found!"
upcoming = doc.find_class("promo-upcoming")[0]
uptitles = upcoming.xpath('.//h4[@class="title"]')
uptimes = upcoming.xpath('.//p[@class="time"]')
list_titles = [x.text_content() for x in uptitles]
list_times = [x.text_content() for x in uptimes]
shows = zip(list_titles, list_times)
res = " | ".join(' - '.join(i) for i in shows)
if len(res) > 420:
res = res[0:420] + " ..."
return res
| unlicense | 7,194,837,047,324,547,000 | 29.909091 | 69 | 0.548529 | false |
digwanderlust/pants | tests/python/pants_test/backend/jvm/tasks/test_ivy_utils.py | 1 | 12440 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import xml.etree.ElementTree as ET
from textwrap import dedent
from mock import Mock
from pants.backend.core.register import build_file_aliases as register_core
from pants.backend.jvm.ivy_utils import IvyModuleRef, IvyUtils
from pants.backend.jvm.register import build_file_aliases as register_jvm
from pants.backend.jvm.targets.exclude import Exclude
from pants.ivy.ivy_subsystem import IvySubsystem
from pants.util.contextutil import temporary_dir, temporary_file_path
from pants_test.base_test import BaseTest
class IvyUtilsTestBase(BaseTest):
@property
def alias_groups(self):
return register_core().merge(register_jvm())
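# Exercises IvyUtils end to end: classpath/exclude calculation, classifier
# handling, ivy.xml generation with force/override, dependency-graph
# traversal of parsed reports, and symlinking of the Ivy cache.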
class IvyUtilsGenerateIvyTest(IvyUtilsTestBase):
# TODO(John Sirois): increase coverage.
# Some examples:
# + multiple confs - via with_sources and with_docs for example
# + excludes
# + classifiers
# + with_artifact
def setUp(self):
super(IvyUtilsGenerateIvyTest, self).setUp()
self.add_to_build_file('src/java/targets',
dedent("""
jar_library(
name='a',
jars=[
jar('org1', 'name1', 'rev1'),
jar('org2', 'name2', 'rev2', force=True),
],
)
"""))
self.b_org = 'com.example'
self.b_name = 'b'
self.add_to_build_file('src/java/targets',
dedent("""
java_library(
name='b',
dependencies=[':a'],
provides=artifact('{org}', '{name}', repo=Repository()),
sources=['z.java'],
)
""".format(org=self.b_org, name=self.b_name)))
self.add_to_build_file('3rdparty',
dedent("""
jar_library(
name='example-morx',
jars = [
jar(org='commons-lang', name='commons-lang', rev='2.5', classifier='morx'),
]
)
jar_library(
name='example-fleem',
jars = [
jar(org='commons-lang', name='commons-lang', rev='2.5', classifier='fleem'),
]
)
"""))
self.add_to_build_file('src/java/targets',
dedent("""
java_library(
name='c',
dependencies=[
'3rdparty:example-morx',
'3rdparty:example-fleem',
],
sources=['w.java'],
)
""".format(org=self.b_org, name=self.b_name)))
self.add_to_build_file('src/java/targets',
dedent("""
java_library(
name='e',
dependencies=[
'3rdparty:example-morx',
'3rdparty:example-fleem',
],
excludes=[exclude(org='commons-lang', name='commons-lang')],
sources=['w.java'],
)
""".format(org=self.b_org, name=self.b_name)))
self.a = self.target('src/java/targets:a')
self.b = self.target('src/java/targets:b')
self.c = self.target('src/java/targets:c')
self.e = self.target('src/java/targets:e')
def test_exclude_exported(self):
_, excludes = IvyUtils.calculate_classpath([self.b])
self.assertEqual(excludes, {Exclude(org=self.b_org, name=self.b_name)})
def test_exclude_exported_disabled(self):
_, excludes = IvyUtils.calculate_classpath([self.b], automatic_excludes=False)
self.assertSetEqual(excludes, set())
def test_exclude_exported_disabled_when_no_excludes_gathered(self):
_, excludes = IvyUtils.calculate_classpath([self.b], gather_excludes=False)
self.assertSetEqual(excludes, set())
def test_excludes_generated_when_requested(self):
_, excludes = IvyUtils.calculate_classpath([self.e], gather_excludes=True)
self.assertSetEqual(excludes, {Exclude(org='commons-lang', name='commons-lang')})
def test_excludes_empty_when_not_requested(self):
_, excludes = IvyUtils.calculate_classpath([self.e], gather_excludes=False)
self.assertSetEqual(excludes, set())
def test_classifiers(self):
jars, _ = IvyUtils.calculate_classpath([self.c])
jars.sort(key=lambda jar : jar.classifier)
self.assertEquals(['fleem', 'morx'], [jar.classifier for jar in jars])
def test_force_override(self):
jars = list(self.a.payload.jars)
with temporary_file_path() as ivyxml:
IvyUtils.generate_ivy([self.a], jars=jars, excludes=[], ivyxml=ivyxml, confs=['default'])
doc = ET.parse(ivyxml).getroot()
conf = self.find_single(doc, 'configurations/conf')
self.assert_attributes(conf, name='default')
dependencies = list(doc.findall('dependencies/dependency'))
self.assertEqual(2, len(dependencies))
dep1 = dependencies[0]
self.assert_attributes(dep1, org='org1', name='name1', rev='rev1')
conf = self.find_single(dep1, 'conf')
self.assert_attributes(conf, name='default', mapped='default')
dep2 = dependencies[1]
self.assert_attributes(dep2, org='org2', name='name2', rev='rev2', force='true')
conf = self.find_single(dep1, 'conf')
self.assert_attributes(conf, name='default', mapped='default')
override = self.find_single(doc, 'dependencies/override')
self.assert_attributes(override, org='org2', module='name2', rev='rev2')
  def test_resolve_conflict(self):
v1 = Mock()
v1.force = False
    v1.rev = "1"
v1_force = Mock()
v1_force.force = True
v1_force.rev = "1"
v2 = Mock()
v2.force = False
v2.rev = "2"
# If neither version is forced, use the latest version
self.assertIs(v2, IvyUtils._resolve_conflict(v1, v2))
self.assertIs(v2, IvyUtils._resolve_conflict(v2, v1))
# If an earlier version is forced, use the forced version
self.assertIs(v1_force, IvyUtils._resolve_conflict(v1_force, v2))
self.assertIs(v1_force, IvyUtils._resolve_conflict(v2, v1_force))
# If the same version is forced, use the forced version
self.assertIs(v1_force, IvyUtils._resolve_conflict(v1, v1_force))
self.assertIs(v1_force, IvyUtils._resolve_conflict(v1_force, v1))
def test_does_not_visit_diamond_dep_twice(self):
ivy_info = self.parse_ivy_report('tests/python/pants_test/tasks/ivy_utils_resources/report_with_diamond.xml')
ref = IvyModuleRef("toplevel", "toplevelmodule", "latest")
seen = set()
def collector(r):
self.assertNotIn(r, seen)
seen.add(r)
return set([r])
result = ivy_info.traverse_dependency_graph(ref, collector)
self.assertEqual(
{
IvyModuleRef("toplevel", "toplevelmodule", "latest"),
IvyModuleRef(org='org1', name='name1', rev='0.0.1'),
IvyModuleRef(org='org2', name='name2', rev='0.0.1'),
IvyModuleRef(org='org3', name='name3', rev='0.0.1')
},
result)
def test_does_not_follow_cycle(self):
ivy_info = self.parse_ivy_report('tests/python/pants_test/tasks/ivy_utils_resources/report_with_cycle.xml')
ref = IvyModuleRef("toplevel", "toplevelmodule", "latest")
seen = set()
def collector(r):
self.assertNotIn(r, seen)
seen.add(r)
return set([r])
result = ivy_info.traverse_dependency_graph(ref, collector)
self.assertEqual(
{
IvyModuleRef("toplevel", "toplevelmodule", "latest"),
IvyModuleRef(org='org1', name='name1', rev='0.0.1'),
IvyModuleRef(org='org2', name='name2', rev='0.0.1'),
IvyModuleRef(org='org3', name='name3', rev='0.0.1')
},
result)
def test_memo_reused_across_calls(self):
ivy_info = self.parse_ivy_report('tests/python/pants_test/tasks/ivy_utils_resources/report_with_diamond.xml')
ref = IvyModuleRef(org='org1', name='name1', rev='0.0.1')
def collector(r):
return set([r])
memo = dict()
result1 = ivy_info.traverse_dependency_graph(ref, collector, memo=memo)
result2 = ivy_info.traverse_dependency_graph(ref, collector, memo=memo)
self.assertIs(result1, result2)
self.assertEqual(
{
IvyModuleRef(org='org1', name='name1', rev='0.0.1'),
IvyModuleRef(org='org2', name='name2', rev='0.0.1'),
IvyModuleRef(org='org3', name='name3', rev='0.0.1')
},
result1)
def find_single(self, elem, xpath):
results = list(elem.findall(xpath))
self.assertEqual(1, len(results))
return results[0]
def assert_attributes(self, elem, **kwargs):
self.assertEqual(dict(**kwargs), dict(elem.attrib))
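  # The symlink tests below cover IvyUtils.symlink_cachepath, which mirrors
  # resolved jars from the Ivy cache into a symlink directory and writes the
  # symlinked classpath to an output file, reusing existing symlink mappings.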
def test_find_new_symlinks(self):
map1 = { 'foo' : 'bar'}
map2 = { }
diff_map = IvyUtils._find_new_symlinks(map1, map2)
self.assertEquals({}, diff_map)
diff_map = IvyUtils._find_new_symlinks(map2, map1)
self.assertEquals({'foo' : 'bar'}, diff_map)
def test_symlink_cachepath(self):
self.maxDiff = None
with temporary_dir() as mock_cache_dir:
with temporary_dir() as symlink_dir:
with temporary_dir() as classpath_dir:
input_path = os.path.join(classpath_dir, 'inpath')
output_path = os.path.join(classpath_dir, 'classpath')
existing_symlink_map = {}
foo_path = os.path.join(mock_cache_dir, 'foo.jar')
with open(foo_path, 'w') as foo:
foo.write("test jar contents")
with open(input_path, 'w') as inpath:
inpath.write(foo_path)
result_map = IvyUtils.symlink_cachepath(mock_cache_dir, input_path, symlink_dir,
output_path, existing_symlink_map)
symlink_foo_path = os.path.join(symlink_dir, 'foo.jar')
self.assertEquals(
{
os.path.realpath(foo_path) : symlink_foo_path
},
result_map)
with open(output_path, 'r') as outpath:
self.assertEquals(symlink_foo_path, outpath.readline())
self.assertTrue(os.path.islink(symlink_foo_path))
self.assertTrue(os.path.exists(symlink_foo_path))
# Now add an additional path to the existing map
bar_path = os.path.join(mock_cache_dir, 'bar.jar')
with open(bar_path, 'w') as bar:
bar.write("test jar contents2")
with open(input_path, 'w') as inpath:
inpath.write(os.pathsep.join([foo_path, bar_path]))
existing_symlink_map = result_map
result_map = IvyUtils.symlink_cachepath(mock_cache_dir, input_path, symlink_dir,
output_path, existing_symlink_map)
symlink_bar_path = os.path.join(symlink_dir, 'bar.jar')
self.assertEquals(
{
os.path.realpath(foo_path) : symlink_foo_path,
os.path.realpath(bar_path) : symlink_bar_path,
},
result_map)
with open(output_path, 'r') as outpath:
self.assertEquals(symlink_foo_path + os.pathsep + symlink_bar_path, outpath.readline())
self.assertTrue(os.path.islink(symlink_foo_path))
self.assertTrue(os.path.exists(symlink_foo_path))
self.assertTrue(os.path.islink(symlink_bar_path))
self.assertTrue(os.path.exists(symlink_bar_path))
# Reverse the ordering and make sure order is preserved in the output path
with open(input_path, 'w') as inpath:
inpath.write(os.pathsep.join([bar_path, foo_path]))
IvyUtils.symlink_cachepath(mock_cache_dir, input_path, symlink_dir,
output_path, result_map)
with open(output_path, 'r') as outpath:
self.assertEquals(symlink_bar_path + os.pathsep + symlink_foo_path, outpath.readline())
def test_missing_ivy_report(self):
self.set_options_for_scope(IvySubsystem.options_scope, cache_dir='DOES_NOT_EXIST', use_nailgun=False)
# Hack to initialize Ivy subsystem
self.context()
with self.assertRaises(IvyUtils.IvyResolveReportError):
IvyUtils.parse_xml_report('INVALID_REPORT_UNIQUE_NAME', 'default')
def parse_ivy_report(self, path):
ivy_info = IvyUtils._parse_xml_report(path)
self.assertIsNotNone(ivy_info)
return ivy_info
| apache-2.0 | 5,113,953,510,214,122,000 | 35.696165 | 113 | 0.61037 | false |
cc-archive/commoner | src/commoner/broadcast/context_processors.py | 1 | 1043 | from django.conf import settings
from django.contrib.auth.decorators import login_required
from commoner.broadcast.models import Message, Log
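# Context processor: for authenticated users expose the active broadcast
# messages as 'site_messages', re-showing unacknowledged ack-required
# messages and logging each message the first time it is seen.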
def messages(request):
if request.user.is_authenticated():
messages = Message.active.all()
site_messages = []
for message in messages:
try:
# it exists in the log
log = Log.objects.get(user=request.user, message=message)
# the user hasn't acked
if message.ack_req and not log.acked:
# show the alert
site_messages.append(message)
except:
site_messages.append(message)
Log(
user = request.user,
message = message,
acked = False
).save()
return {'site_messages' : site_messages}
else:
return {} | agpl-3.0 | 3,577,103,782,968,181,000 | 28 | 73 | 0.466922 | false |
telefonicaid/murano-agent | muranoagent/app.py | 1 | 8000 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
import types
import bunch
import semver
from muranoagent.common import config
from muranoagent.common import messaging
from muranoagent import exceptions as exc
from muranoagent import execution_plan_queue
from muranoagent import execution_plan_runner
from muranoagent import execution_result as ex_result
from muranoagent.openstack.common import log as logging
from muranoagent.openstack.common import service
CONF = config.CONF
LOG = logging.getLogger(__name__)
format_version = '2.0.0'
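# The agent service: consumes execution plans from the RabbitMQ input queue,
# validates and queues them, runs them with ExecutionPlanRunner, and publishes
# the results back on the configured result exchange.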
class MuranoAgent(service.Service):
def __init__(self):
self._queue = execution_plan_queue.ExecutionPlanQueue()
super(MuranoAgent, self).__init__()
@staticmethod
def _load_package(name):
try:
LOG.debug('Loading plugin %s', name)
__import__(name)
except Exception:
LOG.warn('Cannot load package %s', name, exc_info=True)
pass
def _load(self):
path = os.path.join(os.path.dirname(__file__), 'executors')
sys.path.insert(1, path)
for entry in os.listdir(path):
package_path = os.path.join(path, entry)
if os.path.isdir(package_path):
MuranoAgent._load_package(entry)
def start(self):
self._load()
msg_iterator = self._wait_plan()
while True:
try:
self._loop_func(msg_iterator)
except Exception as ex:
LOG.exception(ex)
time.sleep(5)
def _loop_func(self, msg_iterator):
result, timestamp = self._queue.get_execution_plan_result()
if result is not None:
if self._send_result(result):
self._queue.remove(timestamp)
return
plan = self._queue.get_execution_plan()
if plan is not None:
LOG.debug("Got an execution plan '{0}':".format(str(plan)))
self._run(plan)
return
msg_iterator.next()
def _run(self, plan):
with execution_plan_runner.ExecutionPlanRunner(plan) as runner:
try:
result = runner.run()
execution_result = ex_result.ExecutionResult.from_result(
result, plan)
self._queue.put_execution_result(execution_result, plan)
except Exception as ex:
LOG.exception('Error running execution plan')
execution_result = ex_result.ExecutionResult.from_error(ex,
plan)
self._queue.put_execution_result(execution_result, plan)
def _send_result(self, result):
with self._create_rmq_client() as mq:
msg = messaging.Message()
msg.body = result
msg.id = result.get('SourceID')
mq.send(message=msg,
key=CONF.rabbitmq.result_routing_key,
exchange=CONF.rabbitmq.result_exchange)
return True
def _create_rmq_client(self):
rabbitmq = CONF.rabbitmq
connection_params = {
'login': rabbitmq.login,
'password': rabbitmq.password,
'host': rabbitmq.host,
'port': rabbitmq.port,
'virtual_host': rabbitmq.virtual_host,
'ssl': rabbitmq.ssl,
'ca_certs': rabbitmq.ca_certs.strip() or None
}
return messaging.MqClient(**connection_params)
def _wait_plan(self):
delay = 5
while True:
try:
with self._create_rmq_client() as mq:
with mq.open(CONF.rabbitmq.input_queue,
prefetch_count=1) as subscription:
while True:
msg = subscription.get_message(timeout=5)
if msg is not None and isinstance(msg.body, dict):
self._handle_message(msg)
if msg is not None:
msg.ack()
yield
delay = 5
except KeyboardInterrupt:
break
except Exception:
LOG.warn('Communication error', exc_info=True)
time.sleep(delay)
delay = min(delay * 1.2, 60)
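    # Copy the AMQP message id into the plan body when missing, validate the
    # plan, and either queue it for execution or send the validation error
    # straight back as an execution result.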
def _handle_message(self, msg):
print(msg.body)
if 'ID' not in msg.body and msg.id:
msg.body['ID'] = msg.id
err = self._verify_plan(msg.body)
if err is None:
self._queue.put_execution_plan(msg.body)
else:
try:
execution_result = ex_result.ExecutionResult.from_error(
err, bunch.Bunch(msg.body))
self._send_result(execution_result)
except ValueError:
LOG.warn('Execution result is not produced')
def _verify_plan(self, plan):
plan_format_version = plan.get('FormatVersion', '1.0.0')
if semver.compare(plan_format_version, '2.0.0') > 0 or \
semver.compare(plan_format_version, format_version) < 0:
            range_str = 'in range 2.0.0-{0}'.format(format_version) \
if format_version != '2.0.0' \
else 'equal to {0}'.format(format_version)
return exc.AgentException(
3,
'Unsupported format version {0} (must be {1})'.format(
plan_format_version, range_str))
for attr in ('Scripts', 'Files', 'Options'):
if attr in plan and not isinstance(
plan[attr], types.DictionaryType):
return exc.AgentException(
2, '{0} is not a dictionary'.format(attr))
for name, script in plan.get('Scripts', {}).items():
for attr in ('Type', 'EntryPoint'):
if attr not in script or not isinstance(
script[attr], types.StringTypes):
return exc.AgentException(
2, 'Incorrect {0} entry in script {1}'.format(
attr, name))
if not isinstance(script.get('Options', {}), types.DictionaryType):
return exc.AgentException(
2, 'Incorrect Options entry in script {0}'.format(name))
if script['EntryPoint'] not in plan.get('Files', {}):
return exc.AgentException(
2, 'Script {0} misses entry point {1}'.format(
name, script['EntryPoint']))
for additional_file in script.get('Files', []):
if additional_file not in plan.get('Files', {}):
return exc.AgentException(
2, 'Script {0} misses file {1}'.format(
name, additional_file))
for key, plan_file in plan.get('Files', {}).items():
for attr in ('BodyType', 'Body', 'Name'):
if attr not in plan_file:
return exc.AgentException(
2, 'Incorrect {0} entry in file {1}'.format(
attr, key))
if plan_file['BodyType'] not in ('Text', 'Base64'):
return exc.AgentException(
                    2, 'Incorrect BodyType in file {0}'.format(key))
return None
| apache-2.0 | -4,816,543,227,355,885,000 | 36.735849 | 79 | 0.541375 | false |