repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---|
arangodb/arangodb | 3rdParty/V8/v7.9.317/test/wasm-spec-tests/testcfg.py | 1 | 1810 | # Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from testrunner.local import testsuite
from testrunner.objects import testcase
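# Maps each wasm spec proposal under test to the V8 flags needed to run it;
# _get_source_flags() below picks the entry whose name appears in the test path.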
proposal_flags = [{
'name': 'reference-types',
'flags': ['--experimental-wasm-anyref',
'--no-experimental-wasm-bulk-memory']
},
{
'name': 'bulk-memory-operations',
'flags': ['--experimental-wasm-bulk-memory']
},
{
'name': 'js-types',
'flags': ['--experimental-wasm-type-reflection',
'--no-experimental-wasm-bulk-memory']
},
{
'name': 'JS-BigInt-integration',
'flags': ['--experimental-wasm-bigint']
},
]
class TestLoader(testsuite.JSTestLoader):
pass
class TestSuite(testsuite.TestSuite):
def __init__(self, *args, **kwargs):
super(TestSuite, self).__init__(*args, **kwargs)
self.test_root = os.path.join(self.root, "tests")
self._test_loader.test_root = self.test_root
def _test_loader_class(self):
return TestLoader
def _test_class(self):
return TestCase
class TestCase(testcase.D8TestCase):
def _get_files_params(self):
return [os.path.join(self.suite.test_root, self.path + self._get_suffix())]
def _get_source_flags(self):
for proposal in proposal_flags:
if os.sep.join(['proposals', proposal['name']]) in self.path:
return proposal['flags']
return []
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)
| apache-2.0 | 3,684,658,374,831,637,500 | 30.754386 | 79 | 0.550276 | false |
pascalmouret/treeio-achievements | achievements/views.py | 1 | 13734 | """
Here are the functions which actually prepare the data and render the pages.
Most of the functions here are very similar since tree.io is, more or less, following
the CRUD (Create, Retrieve, Update, Delete) pattern.
The only special part is the MassForms, which are quite common in tree.io; I only
adapted the existing code to fit my purposes.
Also: The forms.py file is in many ways more important since all forms are defined there.
"""
from django.template import RequestContext
from django.utils.translation import ugettext as _
from django.shortcuts import get_object_or_404
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from treeio.core.models import User
from treeio.core.rendering import render_to_response
from treeio.core.decorators import treeio_login_required, handle_response_format
from achievements.forms import MassActionUserForm, MassActionUserAchievementsForm, MassActionAchievementsForm, \
PrototypeForm, AchievementForm
from achievements.models import Prototype, Achievement
def _get_default_context(request, type):
"""
This function generates a context with a prepared massform.
Arguments:
request -- a Django Request object
type -- the type of MassForm you want
"""
context = {}
massform = type(request.user.get_profile())
context.update({'massform': massform})
return context
def _process_mass_form(f):
"""
This decorator checks whether a mass-form was submitted and, if so, which type it is, then reacts
in a proper fashion (read: saves). Factoring this out keeps the views themselves a bit less
crowded, and it is done the same way in every other module as well.
Arguments:
f -- the function that is decorated
"""
def wrap(request, *args, **kwargs):
"""
Checks first which MassForm we are dealing with, then checks whether the user has the necessary permission.
If that all checks out, execute the save() action.
Arguments:
request -- the Django-request
*args -- catch args to pass them on afterwards
**kwargs -- catch kwargs to pass them on afterwards
"""
user = request.user.get_profile()
# check for massform and check permission
if 'massform' in request.POST and request.user.get_profile().is_admin(module_name='achievements'):
for key in request.POST:
if 'mass-user' in key:
try:
user = User.objects.get(pk=request.POST[key])
form = MassActionUserForm(request.user.get_profile(), request.POST, instance=user)
if form.is_valid():
form.save()
except Exception:
pass
if 'mass-achievement' in key:
try:
prototype = Prototype.objects.get(pk=request.POST[key])
form = MassActionAchievementsForm(request.user.get_profile(), request.POST, instance=prototype)
if form.is_valid():
form.save()
except Exception:
pass
if 'mass-userachievement' in key:
try:
achievement = Achievement.objects.get(pk=request.POST[key])
form = MassActionUserAchievementsForm(request.user.get_profile(),
request.POST, instance=achievement)
if form.is_valid():
form.save()
except Exception:
pass
return f(request, *args, **kwargs)
wrap.__doc__ = f.__doc__
wrap.__name__ = f.__name__
return wrap
@handle_response_format
@treeio_login_required
@_process_mass_form
def index(request, response_format='html'):
"""
This view displays a list of users, with their achievements (icons). Has a MassForm.
Arguments:
request -- a Django Request object
response_format -- defines which format the response should be
"""
users = User.objects.all()
context = _get_default_context(request, MassActionUserForm)
context.update({'users': users})
return render_to_response('achievements/index', context, context_instance=RequestContext(request),
response_format=response_format)
@handle_response_format
@treeio_login_required
@_process_mass_form
def user(request, user_id, response_format='html'):
"""
This just displays one user and his achievements. Has a MassForm.
Arguments:
request -- a Django Request object
user_id -- the id of the requested User object
response_format -- defines which format the response should be
"""
user = User.objects.get(pk=user_id)
achievements = Achievement.objects.filter(user=user)
context = _get_default_context(request, MassActionUserAchievementsForm)
context.update({'u': user, 'achievements': achievements})
return render_to_response('achievements/user', context, context_instance=RequestContext(request),
response_format=response_format)
@handle_response_format
@treeio_login_required
@_process_mass_form
def prototypes(request, response_format='html'):
"""
Gives an overview of all available Achievements, with their descriptions. Has a MassForm.
Arguments:
request -- a Django Request object
response_format -- defines which format the response should be
"""
prototypes = Prototype.objects.filter(trash=False)
context = _get_default_context(request, MassActionAchievementsForm)
context.update({'protos': prototypes})
return render_to_response('achievements/prototypes', context, context_instance=RequestContext(request),
response_format=response_format)
@handle_response_format
@treeio_login_required
def prototype_add(request, response_format='html'):
"""
This delivers a view to create a new Prototype.
Arguments:
request -- a Django Request object
response_format -- defines which format the response should be
"""
if request.POST:
if not 'cancel' in request.POST:
form = PrototypeForm(request.user.get_profile(), request.POST, files=request.FILES)
if form.is_valid():
prototype = form.save() # TODO: saver
return HttpResponseRedirect(reverse('achievements_prototype_detail', args=[prototype.id]))
else:
return HttpResponseRedirect(reverse('achievements_prototypes'))
else:
form = PrototypeForm(request.user)
return render_to_response('achievements/prototype_form', {'form': form},
context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@treeio_login_required
def prototype_edit(request, prototype_id, response_format='html'):
"""
Opens a form to edit a Prototype.
Arguments:
request -- a Django Request object
prototype_id -- the id of the requested Prototype object
response_format -- defines which format the response should be
"""
prototype = get_object_or_404(Prototype, pk=prototype_id)
if not request.user.get_profile().has_permission(prototype, mode='w'):
return HttpResponseRedirect(reverse('achievements_prototype_detail', args=[prototype.id]))
if request.POST:
if not 'cancel' in request.POST:
form = PrototypeForm(request.user.get_profile(), request.POST, files=request.FILES, instance=prototype)
if form.is_valid():
prototype = form.save()
return HttpResponseRedirect(reverse('achievements_prototype_detail', args=[prototype.id]))
else:
return HttpResponseRedirect(reverse('achievements_prototypes'))
else:
form = PrototypeForm(request.user, instance=prototype)
return render_to_response('achievements/prototype_form', {'form': form},
context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@treeio_login_required
def prototype_detail(request, prototype_id, response_format='html'):
"""
Opens a simple overview for one Prototype.
Arguments:
request -- a Django Request object
prototype_id -- the id of the requested Prototype object
response_format -- defines which format the response should be
"""
prototype = get_object_or_404(Prototype, pk=prototype_id)
return render_to_response('achievements/prototype_detail', {'prototype': prototype},
context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@treeio_login_required
def prototype_delete(request, prototype_id, response_format='html'):
"""
Simply deletes a Prototype and redirects to the list. If the permissions are alright, of course.
Arguments:
request -- a Django Request object
prototype_id -- the id of the requested Prototype object
response_format -- defines which format the response should be
"""
prototype = get_object_or_404(Prototype, pk=prototype_id)
if request.user.get_profile().has_permission(Prototype, mode='w'):
prototype.delete()
else:
return HttpResponseRedirect(reverse('achievements_prototype_detail', args=[prototype.id]))
return HttpResponseRedirect(reverse('achievements_prototypes'))
@handle_response_format
@treeio_login_required
def achievement_add(request, response_format='html'):
"""
Opens an empty form for a new Achievement.
Arguments:
request -- a Django Request object
response_format -- defines which format the response should be
"""
if request.POST:
if not 'cancel' in request.POST:
form = AchievementForm(request.user.get_profile(), request.POST, files=request.FILES)
if form.is_valid():
achievement = form.save() # TODO: saver
return HttpResponseRedirect(reverse('achievements_achievement_detail', args=[achievement.id]))
else:
return HttpResponseRedirect(reverse('achievements'))
else:
form = AchievementForm(request.user)
return render_to_response('achievements/achievement_form', {'form': form},
context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@treeio_login_required
def achievement_edit(request, achievement_id, response_format='html'):
"""
Opens a form to edit a specific Achievement.
Arguments:
request -- a Django Request object
achievement_id -- the id of the requested Achievement object
response_format -- defines which format the response should be
"""
achievement = get_object_or_404(Achievement, pk=achievement_id)
if request.POST:
if not 'cancel' in request.POST:
form = AchievementForm(request.user.get_profile(), request.POST, files=request.FILES, instance=achievement)
if form.is_valid():
achievement = form.save() # TODO: saver
return HttpResponseRedirect(reverse('achievements_achievement_detail', args=[achievement.id]))
else:
return HttpResponseRedirect(reverse('achievements'))
else:
form = AchievementForm(request.user, instance=achievement)
return render_to_response('achievements/achievement_form', {'form': form},
context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@treeio_login_required
def achievement_detail(request, achievement_id, response_format='html'):
"""
Opens a simple overview for one Achievement.
Arguments:
request -- a Django Request object
achievement_id -- the id of the requested Achievement object
response_format -- defines which format the response should be
"""
achievement = get_object_or_404(Achievement, pk=achievement_id)
return render_to_response('achievements/achievement_detail', {'achievement': achievement},
context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@treeio_login_required
def achievement_delete(request, achievement_id, response_format='html'):
"""
Simply deletes an Achievement and redirects to the list. If the permissions are alright, of course.
Arguments:
request -- a Django Request object
achievement_id -- the id of the requested Achievement object
response_format -- defines which format the response should be
"""
achievement = get_object_or_404(Achievement, pk=achievement_id)
if request.user.get_profile().has_permission(Prototype, mode='w'):
achievement.delete()
else:
return HttpResponseRedirect(reverse('achievements_achievement_detail', args=[achievement.id]))
return HttpResponseRedirect(reverse('achievements'))
@handle_response_format
@treeio_login_required
def widget_achievement_stream(request, response_format='html'):
"""
Gets the last three Achievements and gives them to the widget template. This will be rendered as the Widget.
Arguments:
request -- a Django Request object
response_format -- defines which format the response should be
"""
achievements = Achievement.objects.all()[:3]
return render_to_response('achievements/widgets/newest', {'achievements': achievements},
context_instance=RequestContext(request), response_format=response_format)
| bsd-2-clause | 7,595,113,110,143,997,000 | 39.040816 | 119 | 0.66754 | false |
gdestuynder/MozDef | alerts/cloudtrail_logging_disabled.py | 1 | 1132 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2017 Mozilla Corporation
from lib.alerttask import AlertTask
from mozdef_util.query_models import SearchQuery, TermMatch
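# Fires when a CloudTrail StopLogging event is seen within the last 30 minutes,
# i.e. someone actually disabled trail logging; AccessDenied attempts are
# excluded because logging was not actually turned off in that case.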
class AlertCloudtrailLoggingDisabled(AlertTask):
def main(self):
search_query = SearchQuery(minutes=30)
search_query.add_must([
TermMatch('source', 'cloudtrail'),
TermMatch('eventname', 'StopLogging')
])
search_query.add_must_not(TermMatch('errorcode', 'AccessDenied'))
self.filtersManual(search_query)
self.searchEventsSimple()
self.walkEvents()
def onEvent(self, event):
category = 'AWSCloudtrail'
tags = ['cloudtrail', 'aws', 'cloudtrailpagerduty']
severity = 'CRITICAL'
summary = 'Cloudtrail Logging Disabled: ' + event['_source']['requestparameters']['name']
return self.createAlertDict(summary, category, tags, [event], severity)
| mpl-2.0 | -3,568,129,083,789,123,600 | 32.294118 | 97 | 0.671378 | false |
Thortoise/Super-Snake | Blender/animation_nodes-master/operators/profiling.py | 1 | 2009 | import bpy
import cProfile
from bpy.props import *
from io import StringIO
from contextlib import redirect_stdout
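# Operator that runs one of the Animation Nodes core routines (execution, tree
# analysis, full update or script generation) under cProfile and writes the
# sorted report either to the system console or to a "Profiling" text block.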
class ProfileAnimationNodes(bpy.types.Operator):
bl_idname = "an.profile"
bl_label = "Profile"
function = StringProperty()
output = StringProperty()
sort = StringProperty()
def execute(self, context):
result = self.getProfilingResult()
if self.output == "CONSOLE":
print(result)
elif self.output == "TEXT_BLOCK":
textBlock = self.getOutputTextBlock()
textBlock.clear()
textBlock.write(result)
return {"FINISHED"}
def getProfilingResult(self):
resultBuffer = StringIO()
with redirect_stdout(resultBuffer):
d = {"function" : self.executeFunction}
cProfile.runctx("function()", d, d, sort = self.sort)
self.executeFunction()
return resultBuffer.getvalue()
def executeFunction(self):
if self.function == "EXECUTION":
execute_TreeExecutiong()
elif self.function == "TREE_ANALYSIS":
execute_TreeAnalysis()
elif self.function == "UPDATE_EVERYTHING":
execute_UpdateEverything()
elif self.function == "SCRIPT_GENERATION":
execute_ScriptGeneration()
def getOutputTextBlock(self):
textBlockName = "Profiling"
if textBlockName in bpy.data.texts:
return bpy.data.texts[textBlockName]
else:
return bpy.data.texts.new(textBlockName)
def execute_TreeExecutiong():
bpy.context.space_data.edit_tree.execute()
def execute_TreeAnalysis():
from .. import tree_info
tree_info.update()
def execute_UpdateEverything():
from .. import update
update.updateEverything()
def execute_ScriptGeneration():
from .. execution import units
from .. utils.nodes import createNodeByIdDict
nodeByID = createNodeByIdDict()
units.createExecutionUnits(nodeByID)
nodeByID.clear()
| gpl-3.0 | -5,066,411,222,461,000,000 | 28.115942 | 65 | 0.645595 | false |
dvl/pyclub | pyclub/content/tests.py | 1 | 1153 | # -*- coding: utf-8 -*-
from itertools import cycle
from django.test import TestCase
from model_mommy.recipe import Recipe
from .models import Post
class TestPostModel(TestCase):
def setUp(self):
self.recipe = Recipe(Post, slug=None)
def test_str(self):
post = self.recipe.make(title='foobar')
self.assertEqual(post.__str__(), 'foobar')
def test_get_absolute_url(self):
post = self.recipe.make(title='foobar')
self.assertEqual(post.get_absolute_url(), '/foobar/')
def test_queryset(self):
# create 2 posts
self.recipe.make(status=cycle([Post.FINISHED, Post.DRAFT]), _quantity=2)
# approve the finished ones
Post.objects.filter(status=Post.FINISHED).update(approved=True)
# create 2 more posts so we have finished posts that are not approved
self.recipe.make(status=cycle([Post.FINISHED, Post.DRAFT]), _quantity=2)
self.assertEqual(Post.objects.finished().count(), 2)
self.assertEqual(Post.objects.draft().count(), 2)
self.assertEqual(Post.objects.approved().count(), 1)
self.assertEqual(Post.objects.live().count(), 1)
| mit | 7,779,069,518,148,872,000 | 27.8 | 80 | 0.655382 | false |
eepgwde/pyeg0 | eg/dispatch0.py | 1 | 1483 | ## @file dispatch0.py
# @author weaves
# @brief Demonstrate the extrinsic visitor pattern.
#
# The visitor pattern (or double dispatch) is well-known. In languages
# that support reflection you can use a single dispatch like this
# method.
#
# You can test this package on the command-line with
# <code>python dispatch0.py</code>
#
# @note
# This implementation uses PEAK, which can be loaded from the
# python-peak.rules package. Guido van Rossum (Python's originator)
# recommends multimethods. In his posting there is a mention of using
# decorators called @when. This version is similar and uses a
# standard package.
#
# @note
# Python doesn't have function overloading. It is interpreted and
# loosely typed, so that concept isn't directly applicable, but you
# can achieve run-time overload resolution based on type (and other
# conditions) using these rules. It is probably a form of late binding
# similar to that of Smalltalk and CLOS.
#
# @see
# www.artima.com/forums/flat.jsp?forum=106&thread=101605
from __future__ import print_function
from peak.rules import abstract, when, around, before, after
@abstract()
def pprint(ob):
"""A pretty-printing generic function"""
@when(pprint, (list,))
def pprint_list(ob):
print("pretty-printing a list")
@when(pprint, "isinstance(ob,list) and len(ob)>=4")
def pprint_long_list(ob):
print("pretty-printing a long list")
if __name__ == '__main__':
pprint(['this', 'that', 'those'])
pprint(['this', 'that', 'those', 'these'])
| gpl-3.0 | -5,117,124,997,730,889,000 | 30.553191 | 70 | 0.727579 | false |
LoIdo/nparty-server | interface/technical/cache.py | 1 | 1049 | import zope.interface
class ICache(zope.interface.Interface):
"""
interface of the cache center,
a wrapper for something like memcache or redis
"""
def set_value(key, value, expire):
"""
set key value pair to cache
"""
def get_value(key):
"""
get value by key in cache
"""
def swp_value(key, value, expire):
"""
swap the value if there is already a key value pair in the cache
"""
def rmv_value(key):
"""
remove key value pair in cache
"""
def rmv_value_match(key, value):
"""
remove key value pair in cache if the input is matched
"""
class ICacheFactory(zope.interface.Interface):
"""
cache center interface factory
"""
def __init__(bundle_factory):
"""
initialize the center factory with a bundle factory
"""
def __call__(bundle):
"""
create an object which provides the cache center interface
""" | gpl-2.0 | -9,053,837,666,354,800,000 | 19.44898 | 63 | 0.521449 | false |
googleapis/googleapis-gen | google/devtools/containeranalysis/v1/devtools-containeranalysis-v1-py/docs/conf.py | 1 | 12687 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# google-cloud-devtools-containeranalysis documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
__version__ = "0.1.0"
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.6.3"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_flags = ["members"]
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# Allow markdown includes (so releases.md can include CHANGELOG.md)
# http://www.sphinx-doc.org/en/master/markdown.html
source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"google-cloud-devtools-containeranalysis"
copyright = u"2020, Google, LLC"
author = u"Google APIs" # TODO: autogenerate this bit
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Devtools Client Libraries for Python",
"github_user": "googleapis",
"github_repo": "google-cloud-python",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-devtools-containeranalysis-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"google-cloud-devtools-containeranalysis.tex",
u"google-cloud-devtools-containeranalysis Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"google-cloud-devtools-containeranalysis",
u"Google Cloud Devtools Containeranalysis Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"google-cloud-devtools-containeranalysis",
u"google-cloud-devtools-containeranalysis Documentation",
author,
"google-cloud-devtools-containeranalysis",
"GAPIC library for Google Cloud Devtools Containeranalysis API",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("http://python.readthedocs.org/en/latest/", None),
"gax": ("https://gax-python.readthedocs.org/en/latest/", None),
"google-auth": ("https://google-auth.readthedocs.io/en/stable", None),
"google-gax": ("https://gax-python.readthedocs.io/en/latest/", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None),
"grpc": ("https://grpc.io/grpc/python/", None),
"requests": ("http://requests.kennethreitz.org/en/stable/", None),
"proto": ("https://proto-plus-python.readthedocs.io/en/stable", None),
"protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
| apache-2.0 | 9,167,672,694,095,156,000 | 32.742021 | 87 | 0.698037 | false |
allenlavoie/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/distribution_test.py | 1 | 11315 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
tfd = distributions
class DistributionTest(test.TestCase):
def testParamShapesAndFromParams(self):
classes = [
tfd.Normal,
tfd.Bernoulli,
tfd.Beta,
tfd.Chi2,
tfd.Exponential,
tfd.Gamma,
tfd.InverseGamma,
tfd.Laplace,
tfd.StudentT,
tfd.Uniform,
]
sample_shapes = [(), (10,), (10, 20, 30)]
with self.test_session():
for cls in classes:
for sample_shape in sample_shapes:
param_shapes = cls.param_shapes(sample_shape)
params = dict([(name, random_ops.random_normal(shape))
for name, shape in param_shapes.items()])
dist = cls(**params)
self.assertAllEqual(sample_shape,
array_ops.shape(dist.sample()).eval())
dist_copy = dist.copy()
self.assertAllEqual(sample_shape,
array_ops.shape(dist_copy.sample()).eval())
self.assertEqual(dist.parameters, dist_copy.parameters)
def testCopyExtraArgs(self):
with self.test_session():
# Note: we cannot easily test all distributions since each requires
# different initialization arguments. We therefore spot test a few.
normal = tfd.Normal(loc=1., scale=2., validate_args=True)
self.assertEqual(normal.parameters, normal.copy().parameters)
wishart = tfd.WishartFull(df=2, scale=[[1., 2], [2, 5]],
validate_args=True)
self.assertEqual(wishart.parameters, wishart.copy().parameters)
def testCopyOverride(self):
with self.test_session():
normal = tfd.Normal(loc=1., scale=2., validate_args=True)
unused_normal_copy = normal.copy(validate_args=False)
base_params = normal.parameters.copy()
copy_params = normal.copy(validate_args=False).parameters.copy()
self.assertNotEqual(
base_params.pop("validate_args"), copy_params.pop("validate_args"))
self.assertEqual(base_params, copy_params)
def testIsScalar(self):
with self.test_session():
mu = 1.
sigma = 2.
normal = tfd.Normal(mu, sigma, validate_args=True)
self.assertTrue(tensor_util.constant_value(normal.is_scalar_event()))
self.assertTrue(tensor_util.constant_value(normal.is_scalar_batch()))
normal = tfd.Normal([mu], [sigma], validate_args=True)
self.assertTrue(tensor_util.constant_value(normal.is_scalar_event()))
self.assertFalse(tensor_util.constant_value(normal.is_scalar_batch()))
mvn = tfd.MultivariateNormalDiag([mu], [sigma], validate_args=True)
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event()))
self.assertTrue(tensor_util.constant_value(mvn.is_scalar_batch()))
mvn = tfd.MultivariateNormalDiag([[mu]], [[sigma]], validate_args=True)
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event()))
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_batch()))
# We now test every codepath within the underlying is_scalar_helper
# function.
# Test case 1, 2.
x = array_ops.placeholder(dtype=dtypes.int32, shape=[])
# None would fire an exception were it actually executed.
self.assertTrue(normal._is_scalar_helper(x.get_shape(), lambda: None))
self.assertTrue(
normal._is_scalar_helper(tensor_shape.TensorShape(None),
lambda: array_ops.shape(x)))
x = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
# None would fire an exception were it actually executed.
self.assertFalse(normal._is_scalar_helper(x.get_shape(), lambda: None))
self.assertFalse(
normal._is_scalar_helper(tensor_shape.TensorShape(None),
lambda: array_ops.shape(x)))
# Test case 3.
x = array_ops.placeholder(dtype=dtypes.int32)
is_scalar = normal._is_scalar_helper(x.get_shape(),
lambda: array_ops.shape(x))
self.assertTrue(is_scalar.eval(feed_dict={x: 1}))
self.assertFalse(is_scalar.eval(feed_dict={x: [1]}))
def _GetFakeDistribution(self):
class FakeDistribution(tfd.Distribution):
"""Fake Distribution for testing _set_sample_static_shape."""
def __init__(self, batch_shape=None, event_shape=None):
self._static_batch_shape = tensor_shape.TensorShape(batch_shape)
self._static_event_shape = tensor_shape.TensorShape(event_shape)
super(FakeDistribution, self).__init__(
dtype=dtypes.float32,
reparameterization_type=distributions.NOT_REPARAMETERIZED,
validate_args=True,
allow_nan_stats=True,
name="DummyDistribution")
def _batch_shape(self):
return self._static_batch_shape
def _event_shape(self):
return self._static_event_shape
return FakeDistribution
def testSampleShapeHints(self):
fake_distribution = self._GetFakeDistribution()
with self.test_session():
# Make a new session since we're playing with static shapes. [And below.]
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=[2, 3], event_shape=[5])
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
# We use as_list since TensorShape comparison does not work correctly for
# unknown values, ie, Dimension(None).
self.assertAllEqual([6, 7, 2, 3, 5], y.get_shape().as_list())
with self.test_session():
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=[None, 3], event_shape=[5])
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertAllEqual([6, 7, None, 3, 5], y.get_shape().as_list())
with self.test_session():
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=[None, 3], event_shape=[None])
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertAllEqual([6, 7, None, 3, None], y.get_shape().as_list())
with self.test_session():
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=None, event_shape=None)
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertTrue(y.get_shape().ndims is None)
with self.test_session():
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=[None, 3], event_shape=None)
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertTrue(y.get_shape().ndims is None)
def testStrWorksCorrectlyScalar(self):
normal = tfd.Normal(loc=np.float16(0), scale=np.float16(1))
self.assertEqual(
("tf.distributions.Normal("
"\"Normal\", "
"batch_shape=(), "
"event_shape=(), "
"dtype=float16)"), # Got the dtype right.
str(normal))
chi2 = tfd.Chi2(df=np.float32([1., 2.]), name="silly")
self.assertEqual(
("tf.distributions.Chi2("
"\"silly\", " # What a silly name that is!
"batch_shape=(2,), "
"event_shape=(), "
"dtype=float32)"),
str(chi2))
exp = tfd.Exponential(rate=array_ops.placeholder(dtype=dtypes.float32))
self.assertEqual(
("tf.distributions.Exponential(\"Exponential\", "
# No batch shape.
"event_shape=(), "
"dtype=float32)"),
str(exp))
def testStrWorksCorrectlyMultivariate(self):
mvn_static = tfd.MultivariateNormalDiag(
loc=np.zeros([2, 2]), name="MVN")
self.assertEqual(
("tf.distributions.MultivariateNormalDiag("
"\"MVN\", "
"batch_shape=(2,), "
"event_shape=(2,), "
"dtype=float64)"),
str(mvn_static))
mvn_dynamic = tfd.MultivariateNormalDiag(
loc=array_ops.placeholder(shape=[None, 3], dtype=dtypes.float32),
name="MVN2")
self.assertEqual(
("tf.distributions.MultivariateNormalDiag("
"\"MVN2\", "
"batch_shape=(?,), " # Partially known.
"event_shape=(3,), "
"dtype=float32)"),
str(mvn_dynamic))
def testReprWorksCorrectlyScalar(self):
normal = tfd.Normal(loc=np.float16(0), scale=np.float16(1))
self.assertEqual(
("<tf.distributions.Normal"
" 'Normal'"
" batch_shape=()"
" event_shape=()"
" dtype=float16>"), # Got the dtype right.
repr(normal))
chi2 = tfd.Chi2(df=np.float32([1., 2.]), name="silly")
self.assertEqual(
("<tf.distributions.Chi2"
" 'silly'" # What a silly name that is!
" batch_shape=(2,)"
" event_shape=()"
" dtype=float32>"),
repr(chi2))
exp = tfd.Exponential(rate=array_ops.placeholder(dtype=dtypes.float32))
self.assertEqual(
("<tf.distributions.Exponential"
" 'Exponential'"
" batch_shape=<unknown>"
" event_shape=()"
" dtype=float32>"),
repr(exp))
def testReprWorksCorrectlyMultivariate(self):
mvn_static = tfd.MultivariateNormalDiag(
loc=np.zeros([2, 2]), name="MVN")
self.assertEqual(
("<tf.distributions.MultivariateNormalDiag"
" 'MVN'"
" batch_shape=(2,)"
" event_shape=(2,)"
" dtype=float64>"),
repr(mvn_static))
mvn_dynamic = tfd.MultivariateNormalDiag(
loc=array_ops.placeholder(shape=[None, 3], dtype=dtypes.float32),
name="MVN2")
self.assertEqual(
("<tf.distributions.MultivariateNormalDiag"
" 'MVN2'"
" batch_shape=(?,)" # Partially known.
" event_shape=(3,)"
" dtype=float32>"),
repr(mvn_dynamic))
if __name__ == "__main__":
test.main()
| apache-2.0 | -136,941,286,766,181,460 | 37.486395 | 80 | 0.62563 | false |
aamirmajeedkhan/P4-conference-central | main.py | 1 | 2470 | #!/usr/bin/env python
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import mail, memcache
from conference import ConferenceApi, MEMCACHE_FEATURED_SPEAKER_KEY
from google.appengine.ext import ndb
from models import Session, Speaker
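# The handlers below are wired to App Engine cron/task queue URLs (see the
# route table at the bottom of this file): a cron job refreshes the
# announcement in memcache, while task workers send confirmation e-mails and
# update the featured speaker.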
class SetAnnouncementHandler(webapp2.RequestHandler):
def get(self):
"""Set Announcement in Memcache."""
ConferenceApi._cacheAnnouncement()
class SendConfirmationEmailHandler(webapp2.RequestHandler):
def post(self):
"""Send email confirming Conference creation."""
mail.send_mail(
'noreply@%s.appspotmail.com' % (
app_identity.get_application_id()), # from
self.request.get('email'), # to
'You created a new Conference!', # subj
'Hi, you have created the following ' # body
'conference:\r\n\r\n%s' % self.request.get(
'conferenceInfo')
)
class SetFeaturedSpeaker(webapp2.RequestHandler):
def post(self):
"""Set featured speaker in Memcache.
Note:
The featured speaker is updated if there is more than
one session by the given speaker in the provided conference (websafeConferenceKey)
Params:
- websafeConferenceKey
The conference to check for the given speaker
- speaker
The possibly new featured speaker name
"""
# get conference key
key = ndb.Key(urlsafe=self.request.get('websafeConferenceKey'))
# get speaker
speaker = Speaker(name=self.request.get('speaker'))
# get all sessions in the given conference filtered by speaker
featured_sessions = Session.query(ancestor=key).filter(Session.speaker == speaker).fetch()
# If speaker is registered to more than one session, update featured speaker
if len(featured_sessions) >= 2:
session_names = [session.name for session in featured_sessions]
message = speaker.name + ': ' + ', '.join(session_names)
memcache.set(MEMCACHE_FEATURED_SPEAKER_KEY, message)
self.response.set_status(204)
app = webapp2.WSGIApplication([
('/crons/set_announcement', SetAnnouncementHandler),
('/tasks/send_confirmation_email', SendConfirmationEmailHandler),
('/tasks/set_featured_speaker', SetFeaturedSpeaker)
], debug=True) | apache-2.0 | -3,581,929,336,231,128,000 | 40.881356 | 98 | 0.639271 | false |
RasaHQ/rasa_core | tests/core/test_channels.py | 1 | 29687 | import json
import logging
import mock
import pytest
import sanic
from aioresponses import aioresponses
from httpretty import httpretty
from sanic import Sanic
from rasa.core import utils
from rasa.core.agent import Agent
from rasa.core.interpreter import RegexInterpreter
from rasa.core.utils import EndpointConfig
from tests.core import utilities
from tests.core.conftest import MOODBOT_MODEL_PATH
# this is needed so that the tests included as code examples look better
from tests.core.utilities import json_of_latest_request, latest_request
MODEL_PATH = MOODBOT_MODEL_PATH
logger = logging.getLogger(__name__)
def fake_sanic_run(*args, **kwargs):
"""Used to replace `run` method of a Sanic server to avoid hanging."""
logger.info("Rabatnic: Take this and find Sanic! "
"I want him here by supper time.")
async def test_console_input():
from rasa.core.channels import console
# Overwrites the input() function and when someone else tries to read
# something from the command line this function gets called.
with utilities.mocked_cmd_input(console,
text="Test Input"):
with aioresponses() as mocked:
mocked.post('https://example.com/webhooks/rest/webhook?stream=true',
repeat=True,
payload={})
await console.record_messages(
server_url="https://example.com",
max_message_limit=3)
r = latest_request(
mocked, 'POST',
"https://example.com/webhooks/rest/webhook?stream=true")
assert r
b = json_of_latest_request(r)
assert b == {"message": "Test Input", "sender": "default"}
# USED FOR DOCS - don't rename without changing in the docs
def test_facebook_channel():
with mock.patch.object(sanic.Sanic, 'run', fake_sanic_run):
# START DOC INCLUDE
from rasa.core.channels.facebook import FacebookInput
from rasa.core.agent import Agent
from rasa.core.interpreter import RegexInterpreter
# load your trained agent
agent = Agent.load(MODEL_PATH, interpreter=RegexInterpreter())
input_channel = FacebookInput(
fb_verify="YOUR_FB_VERIFY",
# you need tell facebook this token, to confirm your URL
fb_secret="YOUR_FB_SECRET", # your app secret
fb_access_token="YOUR_FB_PAGE_ACCESS_TOKEN"
# token for the page you subscribed to
)
s = agent.handle_channels([input_channel], 5004)
# END DOC INCLUDE
# the above marker marks the end of the code snipped included
# in the docs
routes_list = utils.list_routes(s)
print(routes_list)
assert routes_list.get("fb_webhook.health").startswith(
"/webhooks/facebook")
assert routes_list.get("fb_webhook.webhook").startswith(
"/webhooks/facebook/webhook")
# USED FOR DOCS - don't rename without changing in the docs
def test_webexteams_channel():
with mock.patch.object(sanic.Sanic, 'run', fake_sanic_run):
# START DOC INCLUDE
from rasa.core.channels.webexteams import WebexTeamsInput
from rasa.core.agent import Agent
from rasa.core.interpreter import RegexInterpreter
# load your trained agent
agent = Agent.load(MODEL_PATH, interpreter=RegexInterpreter())
input_channel = WebexTeamsInput(
access_token="YOUR_ACCESS_TOKEN",
# this is the `bot access token`
room="YOUR_WEBEX_ROOM"
# the name of your channel to which the bot posts (optional)
)
s = agent.handle_channels([input_channel], 5004)
# END DOC INCLUDE
# the above marker marks the end of the code snipped included
# in the docs
routes_list = utils.list_routes(s)
assert routes_list.get("webexteams_webhook.health").startswith(
"/webhooks/webexteams")
assert routes_list.get("webexteams_webhook.webhook").startswith(
"/webhooks/webexteams/webhook")
# USED FOR DOCS - don't rename without changing in the docs
def test_slack_channel():
with mock.patch.object(sanic.Sanic, 'run', fake_sanic_run):
# START DOC INCLUDE
from rasa.core.channels.slack import SlackInput
from rasa.core.agent import Agent
from rasa.core.interpreter import RegexInterpreter
# load your trained agent
agent = Agent.load(MODEL_PATH, interpreter=RegexInterpreter())
input_channel = SlackInput(
slack_token="YOUR_SLACK_TOKEN",
# this is the `bot_user_o_auth_access_token`
slack_channel="YOUR_SLACK_CHANNEL"
# the name of your channel to which the bot posts (optional)
)
s = agent.handle_channels([input_channel], 5004)
# END DOC INCLUDE
# the above marker marks the end of the code snipped included
# in the docs
routes_list = utils.list_routes(s)
assert routes_list.get("slack_webhook.health").startswith(
"/webhooks/slack")
assert routes_list.get("slack_webhook.webhook").startswith(
"/webhooks/slack/webhook")
# USED FOR DOCS - don't rename without changing in the docs
def test_mattermost_channel():
with mock.patch.object(sanic.Sanic, 'run', fake_sanic_run):
# START DOC INCLUDE
from rasa.core.channels.mattermost import MattermostInput
from rasa.core.agent import Agent
from rasa.core.interpreter import RegexInterpreter
# load your trained agent
agent = Agent.load(MODEL_PATH, interpreter=RegexInterpreter())
input_channel = MattermostInput(
# this is the url of the api for your mattermost instance
url="http://chat.example.com/api/v4",
# the name of your team for mattermost
team="community",
# the username of your bot user that will post
user="[email protected]",
# messages
pw="password"
# the password of your bot user that will post messages
)
s = agent.handle_channels([input_channel], 5004)
# END DOC INCLUDE
# the above marker marks the end of the code snipped included
# in the docs
routes_list = utils.list_routes(s)
assert routes_list.get("mattermost_webhook.health").startswith(
"/webhooks/mattermost")
assert routes_list.get("mattermost_webhook.webhook").startswith(
"/webhooks/mattermost/webhook")
# USED FOR DOCS - don't rename without changing in the docs
def test_botframework_channel():
with mock.patch.object(sanic.Sanic, 'run', fake_sanic_run):
# START DOC INCLUDE
from rasa.core.channels.botframework import BotFrameworkInput
from rasa.core.agent import Agent
from rasa.core.interpreter import RegexInterpreter
# load your trained agent
agent = Agent.load(MODEL_PATH, interpreter=RegexInterpreter())
input_channel = BotFrameworkInput(
# you get this from your Bot Framework account
app_id="MICROSOFT_APP_ID",
# also from your Bot Framework account
app_password="MICROSOFT_APP_PASSWORD"
)
s = agent.handle_channels([input_channel], 5004)
# END DOC INCLUDE
# the above marker marks the end of the code snipped included
# in the docs
routes_list = utils.list_routes(s)
assert routes_list.get("botframework_webhook.health").startswith(
"/webhooks/botframework")
assert routes_list.get("botframework_webhook.webhook").startswith(
"/webhooks/botframework/webhook")
# USED FOR DOCS - don't rename without changing in the docs
def test_rocketchat_channel():
with mock.patch.object(sanic.Sanic, 'run', fake_sanic_run):
# START DOC INCLUDE
from rasa.core.channels.rocketchat import RocketChatInput
from rasa.core.agent import Agent
from rasa.core.interpreter import RegexInterpreter
# load your trained agent
agent = Agent.load(MODEL_PATH, interpreter=RegexInterpreter())
input_channel = RocketChatInput(
# your bots rocket chat user name
user="yourbotname",
# the password for your rocket chat bots account
password="YOUR_PASSWORD",
# url where your rocket chat instance is running
server_url="https://demo.rocket.chat"
)
s = agent.handle_channels([input_channel], 5004)
# END DOC INCLUDE
# the above marker marks the end of the code snipped included
# in the docs
routes_list = utils.list_routes(s)
assert routes_list.get("rocketchat_webhook.health").startswith(
"/webhooks/rocketchat")
assert routes_list.get("rocketchat_webhook.webhook").startswith(
"/webhooks/rocketchat/webhook")
# USED FOR DOCS - don't rename without changing in the docs
@pytest.mark.filterwarnings("ignore:"
"unclosed file.*:"
"ResourceWarning")
def test_telegram_channel():
# telegram channel will try to set a webhook, so we need to mock the api
with mock.patch.object(sanic.Sanic, 'run', fake_sanic_run):
httpretty.register_uri(
httpretty.POST,
'https://api.telegram.org/bot123:YOUR_ACCESS_TOKEN/setWebhook',
body='{"ok": true, "result": {}}')
httpretty.enable()
# START DOC INCLUDE
from rasa.core.channels.telegram import TelegramInput
from rasa.core.agent import Agent
from rasa.core.interpreter import RegexInterpreter
# load your trained agent
agent = Agent.load(MODEL_PATH, interpreter=RegexInterpreter())
input_channel = TelegramInput(
# you get this when setting up a bot
access_token="123:YOUR_ACCESS_TOKEN",
# this is your bots username
verify="YOUR_TELEGRAM_BOT",
# the url your bot should listen for messages
webhook_url="YOUR_WEBHOOK_URL"
)
s = agent.handle_channels([input_channel], 5004)
# END DOC INCLUDE
# the above marker marks the end of the code snipped included
# in the docs
routes_list = utils.list_routes(s)
assert routes_list.get("telegram_webhook.health").startswith(
"/webhooks/telegram")
assert routes_list.get("telegram_webhook.message").startswith(
"/webhooks/telegram/webhook")
httpretty.disable()
@pytest.mark.filterwarnings("ignore:"
"unclosed.*:"
"ResourceWarning")
def test_handling_of_telegram_user_id():
# telegram channel will try to set a webhook, so we need to mock the api
httpretty.register_uri(
httpretty.POST,
'https://api.telegram.org/bot123:YOUR_ACCESS_TOKEN/setWebhook',
body='{"ok": true, "result": {}}')
# telegram will try to verify the user, so we need to mock the api
httpretty.register_uri(
httpretty.GET,
'https://api.telegram.org/bot123:YOUR_ACCESS_TOKEN/getMe',
body='{"result": {"id": 0, "first_name": "Test", "is_bot": true, '
'"username": "YOUR_TELEGRAM_BOT"}}')
# The channel will try to send a message back to telegram, so mock it.
httpretty.register_uri(
httpretty.POST,
'https://api.telegram.org/bot123:YOUR_ACCESS_TOKEN/sendMessage',
body='{"ok": true, "result": {}}')
httpretty.enable()
from rasa.core.channels.telegram import TelegramInput
from rasa.core.agent import Agent
from rasa.core.interpreter import RegexInterpreter
# load your trained agent
agent = Agent.load(MODEL_PATH, interpreter=RegexInterpreter())
input_channel = TelegramInput(
# you get this when setting up a bot
access_token="123:YOUR_ACCESS_TOKEN",
# this is your bots username
verify="YOUR_TELEGRAM_BOT",
# the url your bot should listen for messages
webhook_url="YOUR_WEBHOOK_URL"
)
import rasa.core
app = Sanic(__name__)
app.agent = agent
rasa.core.channels.channel.register([input_channel],
app,
route="/webhooks/")
data = {"message": {"chat": {"id": 1234, "type": "private"},
"text": "Hello", "message_id": 0, "date": 0},
"update_id": 0}
test_client = app.test_client
test_client.post("/webhooks/telegram/webhook",
data=json.dumps(data),
headers={"Content-Type": 'application/json'})
assert agent.tracker_store.retrieve("1234") is not None
httpretty.disable()
# USED FOR DOCS - don't rename without changing in the docs
def test_twilio_channel():
with mock.patch.object(sanic.Sanic, 'run', fake_sanic_run):
# START DOC INCLUDE
from rasa.core.channels.twilio import TwilioInput
from rasa.core.agent import Agent
from rasa.core.interpreter import RegexInterpreter
# load your trained agent
agent = Agent.load(MODEL_PATH, interpreter=RegexInterpreter())
input_channel = TwilioInput(
# you get this from your twilio account
account_sid="YOUR_ACCOUNT_SID",
# also from your twilio account
auth_token="YOUR_AUTH_TOKEN",
# a number associated with your twilio account
twilio_number="YOUR_TWILIO_NUMBER"
)
s = agent.handle_channels([input_channel], 5004)
# END DOC INCLUDE
# the above marker marks the end of the code snipped included
# in the docs
routes_list = utils.list_routes(s)
assert routes_list.get("twilio_webhook.health").startswith(
"/webhooks/twilio")
assert routes_list.get("twilio_webhook.message").startswith(
"/webhooks/twilio/webhook")
# USED FOR DOCS - don't rename without changing in the docs
def test_callback_channel():
with mock.patch.object(sanic.Sanic, 'run', fake_sanic_run):
# START DOC INCLUDE
from rasa.core.channels.callback import CallbackInput
from rasa.core.agent import Agent
from rasa.core.interpreter import RegexInterpreter
# load your trained agent
agent = Agent.load(MODEL_PATH, interpreter=RegexInterpreter())
input_channel = CallbackInput(
# URL Core will call to send the bot responses
endpoint=EndpointConfig("http://localhost:5004")
)
s = agent.handle_channels([input_channel], 5004)
# END DOC INCLUDE
        # the above marker marks the end of the code snippet included
# in the docs
routes_list = utils.list_routes(s)
assert routes_list.get("callback_webhook.health").startswith(
"/webhooks/callback")
assert routes_list.get("callback_webhook.webhook").startswith(
"/webhooks/callback/webhook")
# USED FOR DOCS - don't rename without changing in the docs
def test_socketio_channel():
with mock.patch.object(sanic.Sanic, 'run', fake_sanic_run):
# START DOC INCLUDE
from rasa.core.channels.socketio import SocketIOInput
from rasa.core.agent import Agent
from rasa.core.interpreter import RegexInterpreter
# load your trained agent
agent = Agent.load(MODEL_PATH, interpreter=RegexInterpreter())
input_channel = SocketIOInput(
# event name for messages sent from the user
user_message_evt="user_uttered",
# event name for messages sent from the bot
bot_message_evt="bot_uttered",
# socket.io namespace to use for the messages
namespace=None
)
s = agent.handle_channels([input_channel], 5004)
# END DOC INCLUDE
        # the above marker marks the end of the code snippet included
# in the docs
routes_list = utils.list_routes(s)
assert routes_list.get("socketio_webhook.health").startswith(
"/webhooks/socketio")
assert routes_list.get("handle_request").startswith(
"/socket.io")
async def test_callback_calls_endpoint():
from rasa.core.channels.callback import CallbackOutput
with aioresponses() as mocked:
mocked.post("https://example.com/callback",
repeat=True,
headers={"Content-Type": "application/json"})
output = CallbackOutput(EndpointConfig("https://example.com/callback"))
await output.send_response("test-id", {
"text": "Hi there!",
"image": "https://example.com/image.jpg"})
r = latest_request(
mocked, "post", "https://example.com/callback")
assert r
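        # send_response is expected to post the text message before the image,
        # so the last recorded request carries the image payload and the one
        # before it carries the text (matching the asserts below)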
image = r[-1].kwargs["json"]
text = r[-2].kwargs["json"]
assert image['recipient_id'] == "test-id"
assert image['image'] == "https://example.com/image.jpg"
assert text['recipient_id'] == "test-id"
assert text['text'] == "Hi there!"
def test_slack_message_sanitization():
from rasa.core.channels.slack import SlackInput
test_uid = 17213535
target_message_1 = 'You can sit here if you want'
target_message_2 = 'Hey, you can sit here if you want !'
target_message_3 = 'Hey, you can sit here if you want!'
uid_token = '<@{}>'.format(test_uid)
raw_messages = [test.format(uid=uid_token) for test
in ['You can sit here {uid} if you want{uid}',
'{uid} You can sit here if you want{uid} ',
'{uid}You can sit here if you want {uid}',
# those last cases may be disputable
# as we're virtually altering the entered text,
                         # but this seems to be the correct course of action
# (to be decided)
'You can sit here{uid}if you want',
'Hey {uid}, you can sit here if you want{uid}!',
'Hey{uid} , you can sit here if you want {uid}!']]
target_messages = [target_message_1,
target_message_1,
target_message_1,
target_message_1,
target_message_2,
target_message_3]
sanitized_messages = [SlackInput._sanitize_user_message(message,
[test_uid])
for message in raw_messages]
# no message that is wrongly sanitized please
assert len([sanitized
for sanitized, target
in zip(sanitized_messages, target_messages)
if sanitized != target]) == 0
def test_slack_init_one_parameter():
from rasa.core.channels.slack import SlackInput
ch = SlackInput("xoxb-test")
assert ch.slack_token == "xoxb-test"
assert ch.slack_channel is None
def test_slack_init_two_parameters():
from rasa.core.channels.slack import SlackInput
ch = SlackInput("xoxb-test", "test")
assert ch.slack_token == "xoxb-test"
assert ch.slack_channel == "test"
def test_is_slack_message_none():
from rasa.core.channels.slack import SlackInput
payload = {}
slack_message = json.loads(json.dumps(payload))
assert SlackInput._is_user_message(slack_message) is None
def test_is_slack_message_true():
from rasa.core.channels.slack import SlackInput
event = {'type': 'message',
'channel': 'C2147483705',
'user': 'U2147483697',
'text': 'Hello world',
'ts': '1355517523'}
payload = json.dumps({'event': event})
slack_message = json.loads(payload)
assert SlackInput._is_user_message(slack_message) is True
def test_is_slack_message_false():
from rasa.core.channels.slack import SlackInput
event = {'type': 'message',
'channel': 'C2147483705',
'user': 'U2147483697',
'text': 'Hello world',
'ts': '1355517523',
'bot_id': '1355517523'}
payload = json.dumps({'event': event})
slack_message = json.loads(payload)
assert SlackInput._is_user_message(slack_message) is False
def test_slackbot_init_one_parameter():
from rasa.core.channels.slack import SlackBot
ch = SlackBot("DummyToken")
assert ch.token == "DummyToken"
assert ch.slack_channel is None
def test_slackbot_init_two_parameter():
from rasa.core.channels.slack import SlackBot
bot = SlackBot("DummyToken", "General")
assert bot.token == "DummyToken"
assert bot.slack_channel == "General"
# Mock the Slack chat.postMessage API when sending attachments, images and plain text.
@pytest.mark.filterwarnings("ignore:"
"unclosed.*:"
"ResourceWarning")
async def test_slackbot_send_attachment_only():
from rasa.core.channels.slack import SlackBot
httpretty.register_uri(httpretty.POST,
'https://slack.com/api/chat.postMessage',
body='{"ok":true,"purpose":"Testing bots"}')
httpretty.enable()
bot = SlackBot("DummyToken", "General")
attachment = json.dumps([{"fallback": "Financial Advisor Summary",
"color": "#36a64f", "author_name": "ABE",
"title": "Financial Advisor Summary",
"title_link": "http://tenfactorialrocks.com",
"image_url": "https://r.com/cancel/r12",
"thumb_url": "https://r.com/cancel/r12",
"actions": [{"type": "button",
"text": "\ud83d\udcc8 Dashboard",
"url": "https://r.com/cancel/r12",
"style": "primary"},
{"type": "button",
"text": "\ud83d\udccb Download XL",
"url": "https://r.com/cancel/r12",
"style": "danger"},
{"type": "button",
"text": "\ud83d\udce7 E-Mail",
"url": "https://r.com/cancel/r12",
"style": "danger"}],
"footer": "Powered by 1010rocks",
"ts": 1531889719}])
await bot.send_attachment("ID", attachment)
httpretty.disable()
r = httpretty.latest_requests[-1]
assert r.parsed_body == {'channel': ['General'],
'as_user': ['True'],
'attachments': [attachment]}
@pytest.mark.filterwarnings("ignore:"
"unclosed.*:"
"ResourceWarning")
async def test_slackbot_send_attachment_withtext():
from rasa.core.channels.slack import SlackBot
httpretty.register_uri(httpretty.POST,
'https://slack.com/api/chat.postMessage',
body='{"ok":true,"purpose":"Testing bots"}')
httpretty.enable()
bot = SlackBot("DummyToken", "General")
text = "Sample text"
attachment = json.dumps([{"fallback": "Financial Advisor Summary",
"color": "#36a64f", "author_name": "ABE",
"title": "Financial Advisor Summary",
"title_link": "http://tenfactorialrocks.com",
"image_url": "https://r.com/cancel/r12",
"thumb_url": "https://r.com/cancel/r12",
"actions": [{"type": "button",
"text": "\ud83d\udcc8 Dashboard",
"url": "https://r.com/cancel/r12",
"style": "primary"},
{"type": "button",
"text": "\ud83d\udccb XL",
"url": "https://r.com/cancel/r12",
"style": "danger"},
{"type": "button",
"text": "\ud83d\udce7 E-Mail",
"url": "https://r.com/cancel/r123",
"style": "danger"}],
"footer": "Powered by 1010rocks",
"ts": 1531889719}])
await bot.send_attachment("ID", attachment, text)
httpretty.disable()
r = httpretty.latest_requests[-1]
assert r.parsed_body == {'channel': ['General'],
'as_user': ['True'],
'text': ['Sample text'],
'attachments': [attachment]}
@pytest.mark.filterwarnings("ignore:"
"unclosed.*:"
"ResourceWarning")
async def test_slackbot_send_image_url():
from rasa.core.channels.slack import SlackBot
httpretty.register_uri(httpretty.POST,
'https://slack.com/api/chat.postMessage',
body='{"ok":true,"purpose":"Testing bots"}')
httpretty.enable()
bot = SlackBot("DummyToken", "General")
url = json.dumps([{"URL": "http://www.rasa.net"}])
await bot.send_image_url("ID", url)
httpretty.disable()
r = httpretty.latest_requests[-1]
assert r.parsed_body['as_user'] == ['True']
assert r.parsed_body['channel'] == ['General']
assert len(r.parsed_body['attachments']) == 1
assert '"text": ""' in r.parsed_body['attachments'][0]
assert '"image_url": "[{\\"URL\\": \\"http://www.rasa.net\\"}]"' \
in r.parsed_body['attachments'][0]
@pytest.mark.filterwarnings("ignore:"
"unclosed.*:"
"ResourceWarning")
async def test_slackbot_send_text():
from rasa.core.channels.slack import SlackBot
httpretty.register_uri(httpretty.POST,
'https://slack.com/api/chat.postMessage',
body='{"ok":true,"purpose":"Testing bots"}')
httpretty.enable()
bot = SlackBot("DummyToken", "General")
await bot.send_text_message("ID", "my message")
httpretty.disable()
r = httpretty.latest_requests[-1]
assert r.parsed_body == {'as_user': ['True'],
'channel': ['General'],
'text': ['my message']}
@pytest.mark.filterwarnings("ignore:"
"unclosed.*:"
"ResourceWarning")
def test_channel_inheritance():
with mock.patch.object(sanic.Sanic, 'run', fake_sanic_run):
from rasa.core.channels import RestInput
from rasa.core.channels import RasaChatInput
from rasa.core.agent import Agent
from rasa.core.interpreter import RegexInterpreter
# load your trained agent
agent = Agent.load(MODEL_PATH, interpreter=RegexInterpreter())
rasa_input = RasaChatInput("https://example.com")
s = agent.handle_channels([RestInput(), rasa_input], 5004)
routes_list = utils.list_routes(s)
assert routes_list.get(
"custom_webhook_RasaChatInput.health").startswith(
"/webhooks/rasa")
assert routes_list.get(
"custom_webhook_RasaChatInput.receive").startswith(
"/webhooks/rasa/webhook")
def test_int_sender_id_in_user_message():
from rasa.core.channels import UserMessage
# noinspection PyTypeChecker
message = UserMessage("A text", sender_id=1234567890)
assert message.sender_id == "1234567890"
def test_int_message_id_in_user_message():
from rasa.core.channels import UserMessage
# noinspection PyTypeChecker
message = UserMessage("B text", message_id=987654321)
assert message.message_id == "987654321"
async def test_send_custom_messages_without_buttons():
from rasa.core.channels.channel import OutputChannel
async def test_message(sender, message):
assert sender == 'user'
assert message == 'a : b'
channel = OutputChannel()
channel.send_text_message = test_message
await channel.send_custom_message("user",
[{'title': 'a', 'subtitle': 'b'}])
def test_newsline_strip():
from rasa.core.channels import UserMessage
message = UserMessage("\n/restart\n")
assert message.text == "/restart"
def test_register_channel_without_route():
"""Check we properly connect the input channel blueprint if route is None"""
from rasa.core.channels import RestInput
import rasa.core
# load your trained agent
agent = Agent.load(MODEL_PATH, interpreter=RegexInterpreter())
input_channel = RestInput()
app = Sanic(__name__)
rasa.core.channels.channel.register([input_channel],
app,
route=None)
routes_list = utils.list_routes(app)
assert routes_list.get("custom_webhook_RestInput.receive").startswith(
"/webhook")
| apache-2.0 | 1,847,347,241,781,421,800 | 36.626109 | 80 | 0.581568 | false |
yuwen41200/biodiversity-analysis | utils/roadkill-packer/htmlGenerator.py | 1 | 4419 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import csv
html_beginning = '''<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/bootstrap-table/1.11.0/bootstrap-table.min.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/css/bootstrap.min.css">
</head>
<body>
<table data-toggle="table" data-pagination="true">
<thead>
<tr>
<th data-sortable="true">id</th>
<th data-sortable="true">family</th>
<th data-sortable="true">taxonRemarks</th>
<th data-sortable="true">scientificName</th>
<th data-sortable="true">vernacularName</th>
<th data-sortable="true">previousIdentifications</th>
<th data-sortable="true">individualCount</th>
<th data-sortable="true">occurrenceRemarks</th>
<th data-sortable="true">modified</th>
<th data-sortable="true">eventRemarks</th>
<th data-sortable="true">institutionCode</th>
<th data-sortable="true">eventDate</th>
<th data-sortable="true">recordedBy</th>
<th data-sortable="true">rightsHolder</th>
<th data-sortable="true">municipality</th>
<th data-sortable="true">rights</th>
<th data-sortable="true">decimalLongitude</th>
<th data-sortable="true">decimalLatitude</th>
<th data-sortable="true">fieldNotes</th>
<th data-sortable="true">identificationVerificationStatus</th>
<th data-sortable="true">recordNumber</th>
<th data-sortable="true">materialSampleID</th>
<th data-sortable="true">locationRemarks</th>
<th data-sortable="true">associatedReferences</th>
<th data-sortable="true">associatedMedia</th>
<th data-sortable="true">basisOfRecord</th>
<th data-sortable="true">language</th>
<th data-sortable="true">continent</th>
<th data-sortable="true">country</th>
<th data-sortable="true">countryCode</th>
</tr>
</thead>
<tbody>
'''
html_ending = ''' </tbody>
</table>
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.1.0/jquery.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/js/bootstrap.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/bootstrap-table/1.11.0/bootstrap-table.min.js"></script>
</body>
</html>
'''
def generate(input_path, output_path):
"""
Generate an HTML web page from a CSV file.
:param input_path: Path of input (CSV) file.
:param output_path: Path of output (HTML) file.
:return: None.
"""
with open(input_path, newline='') as input_file:
rows = csv.reader(input_file)
next(rows, None)
with open(output_path, 'w') as output_file:
output_file.write(html_beginning)
for row in rows:
output_file.write(' <tr>\n')
for no, column in enumerate(row):
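                    # The first 23 columns are plain data cells; the remaining
                    # columns hold URLs, which are normalised (protocol-relative
                    # links get a scheme, Facebook links are forced to HTTPS)
                    # before being rendered as clickable anchors.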
if no < 23:
output_file.write(' <td>' + column + '</td>\n')
else:
link = 'http:' + column if column.startswith('//') else column
link = link.replace('http://', 'https://', 1) if 'facebook' in column else link
output_file.write(' <td><a href="' + link + '" target="_blank">' + column + '</a></td>\n')
output_file.write(' <td>HumanObservation</td>\n')
output_file.write(' <td>zh-Hant-TW</td>\n')
output_file.write(' <td>Asia</td>\n')
output_file.write(' <td>Taiwan</td>\n')
output_file.write(' <td>TW</td>\n')
output_file.write(' </tr>\n')
output_file.write(html_ending)
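# Example usage (file names are placeholders for illustration only):
#     generate('roadkill_records.csv', 'roadkill_records.html')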
| gpl-3.0 | -7,277,954,826,530,736,000 | 48.1 | 133 | 0.505318 | false |
grupodyd/python-filapp | setup.py | 1 | 1785 | #!/usr/bin/env python
#
# Copyright 2015 DyD Dinámica y Desarrollo SAS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''The setup and build script for the python-filapp library.'''
import os
from setuptools import setup, find_packages
def read(*paths):
"""Build a file path from *paths* and return the contents."""
with open(os.path.join(*paths), 'r') as f:
return f.read()
setup(
name='python-filapp',
version='0.1',
author='DyD Dinamica y Desarrollo SAS',
author_email='[email protected]',
license='Apache License 2.0',
url='https://github.com/grupodyd/python-filapp',
keywords='filapp api',
description='A Python wrapper around the Filapp API',
long_description=(read('CHANGES')),
packages=find_packages(exclude=['tests*']),
install_requires=['requests'],
classifiers=[
'Development Status :: 1 - Planning',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Internet',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
)
| apache-2.0 | 4,565,196,317,821,026,300 | 32.660377 | 74 | 0.67657 | false |
wtsi-hgi/CoGS-Webapp | cogs/db/interface.py | 1 | 14479 | """
Copyright (c) 2017, 2018 Genome Research Ltd.
Authors:
* Christopher Harrison <[email protected]>
* Simon Beal <[email protected]>
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import atexit
from datetime import datetime
from typing import Dict, List, Optional, overload
from sqlalchemy import create_engine, desc
from sqlalchemy.engine import Engine
from sqlalchemy.orm import Session, sessionmaker
from sqlalchemy.exc import ProgrammingError
from cogs.common import logging
from cogs.common.constants import PERMISSIONS, ROTATION_TEMPLATE_IDS
from .models import Base, EmailTemplate, Project, ProjectGroup, User
import cogs.mail.postman
class Database(logging.LogWriter):
""" Database interface """
_engine:Engine
_session:Session
def __init__(self, config:Dict) -> None:
"""
Constructor: Connect to and initialise the database session
:param config:
:return:
"""
# Connect to database and instantiate models
self.log(logging.DEBUG, "Connecting to PostgreSQL database \"{name}\" at {host}:{port}".format(**config))
self._engine = create_engine("postgresql://{user}:{passwd}@{host}:{port}/{name}".format(**config))
Base.metadata.create_all(self._engine)
# Start session (and register close on exit)
Session = sessionmaker(bind=self._engine)
self._session = Session()
atexit.register(self._session.close)
self._create_minimal()
def _create_minimal(self) -> None:
"""
Create minimal data in the database for a working system
"""
# Set up the e-mail template placeholders for rotation
# invitations, if they don't already exist
all_db_templates = [template.name for template in self.get_all_templates()]
for template in ROTATION_TEMPLATE_IDS:
if template not in all_db_templates:
self._session.add(EmailTemplate(name=template,
subject=f"Placeholder subject for {template}",
content=f"Placeholder content for {template}"))
for name, subject, content in cogs.mail.postman.get_filesystem_templates(exclude=all_db_templates):
self._session.add(EmailTemplate(name=name,
subject=subject,
content=content))
# TODO Tidy the below up / set the defaults more appropriately
if not self.get_all_users():
self.log(logging.INFO, "No users found. Adding admins.")
_admin_args = {"user_type": "grad_office", "priority": 0, "email_personal": None}
self._session.add(User(name="Simon Beal", email="[email protected]", **_admin_args))
self._session.add(User(name="Carl Anderson", email="[email protected]", **_admin_args))
self._session.add(User(name="Christopher Harrison", email="[email protected]", **_admin_args))
if not self._session.query(ProjectGroup).all():
self.log(logging.INFO, "No groups found. Adding rotation 1 2017.")
self._session.add(ProjectGroup(series=2017,
part=1,
supervisor_submit=datetime.strptime("18/07/2017", "%d/%m/%Y"),
student_invite=datetime.strptime("08/08/2017", "%d/%m/%Y"),
student_choice=datetime.strptime("30/08/2017", "%d/%m/%Y"),
student_complete=datetime.strptime("20/12/2017", "%d/%m/%Y"),
marking_complete=datetime.strptime("15/01/2018", "%d/%m/%Y"),
student_viewable=True,
student_choosable=True,
student_uploadable=False,
can_finalise=True,
read_only=False))
self._session.commit()
def reset_all(self) -> None:
"""
Reset everything in the database
For debugging use only!
"""
for table in Base.metadata.tables.values():
try:
self.engine.execute(f"DROP TABLE {table} CASCADE;")
except ProgrammingError:
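                # fall back to a quoted table name if the unquoted DROP fails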
try:
self.engine.execute(f'DROP TABLE "{table}" CASCADE;')
except ProgrammingError:
pass
Base.metadata.create_all(self._engine)
self._create_minimal()
## Convenience methods and properties ##############################
@property
def engine(self) -> Engine:
return self._engine
@property
def session(self) -> Session:
return self._session
def add(self, model:Base) -> None:
self._session.add(model)
def commit(self) -> None:
self._session.commit()
## E-Mail Template Methods #########################################
def get_template_by_name(self, name:str) -> Optional[EmailTemplate]:
"""
Get an e-mail template by its name
:param name:
:return:
"""
q = self._session.query(EmailTemplate)
return q.filter(EmailTemplate.name == name) \
.first()
def get_all_templates(self) -> List[EmailTemplate]:
"""
Get all e-mail templates in the system
:return:
"""
return self._session.query(EmailTemplate) \
.order_by(EmailTemplate.name) \
.all()
## Project Methods #################################################
def get_project_by_id(self, project_id:int) -> Optional[Project]:
"""
Get a project by its ID
:param project_id:
:return:
"""
q = self._session.query(Project)
return q.filter(Project.id == project_id) \
.first()
def get_project_by_name(self, project_name:str) -> Optional[Project]:
"""
Get the newest project by its name
TODO Do we need this? Fetching something by an arbitrary string
(i.e., non-key) seems like a bit of an antipattern...
:param project_name:
:return:
"""
q = self._session.query(Project)
return q.filter(Project.title == project_name) \
.order_by(desc(Project.id)) \
.first()
@overload
def get_projects_by_student(self, student:User, group:None = None) -> List[Project]:
...
@overload
def get_projects_by_student(self, student:User, group:ProjectGroup) -> Optional[Project]:
...
def get_projects_by_student(self, student, group = None):
"""
Get the list of projects for the specified student or, if a
project group is specified, that student's project in that group
:param student:
:param group:
:return:
"""
q = self._session.query(Project)
attr = "all"
clause = (Project.student == student)
if group:
clause &= (Project.group == group)
attr = "first"
return getattr(q.filter(clause) \
.order_by(Project.group_id), attr)()
def get_projects_by_supervisor(self, supervisor:User, group:Optional[ProjectGroup] = None) -> List[Project]:
"""
Get the list of projects set by the specified supervisor,
optionally restricted to a given project group
:param supervisor:
:param group:
:return:
"""
q = self._session.query(Project)
clause = (Project.supervisor == supervisor)
if group:
clause &= (Project.group == group)
return q.filter(clause) \
.order_by(Project.id) \
.all()
def get_projects_by_cogs_marker(self, cogs_marker:User, group:Optional[ProjectGroup] = None) -> List[Project]:
"""
Get the list of projects set by the specified CoGS marker,
optionally restricted to a given project group
:param cogs_marker:
:param group:
:return:
"""
q = self._session.query(Project)
clause = (Project.cogs_marker == cogs_marker)
if group:
clause &= (Project.group == group)
return q.filter(clause) \
.order_by(Project.id) \
.all()
## Project Group Methods ###########################################
def get_project_group(self, series:int, part:int) -> Optional[ProjectGroup]:
"""
Get the project group for the specified series and part
:param series:
:param part:
:return:
"""
q = self._session.query(ProjectGroup)
return q.filter(
(ProjectGroup.series == series) & (ProjectGroup.part == part)
).first()
def get_project_groups_by_series(self, series:int) -> List[ProjectGroup]:
"""
Get all project groups for the specified series
:param series:
:return:
"""
q = self._session.query(ProjectGroup)
return q.filter(ProjectGroup.series == series) \
.order_by(ProjectGroup.part) \
.all()
def get_most_recent_group(self) -> Optional[ProjectGroup]:
"""
Get the most recently created project group
:return ProjectGroup:
"""
q = self._session.query(ProjectGroup)
return q.order_by(desc(ProjectGroup.id)) \
.first()
## Series Methods ##################################################
# FIXME "Series" broadly represents academic years (i.e., a set of
# rotations/project groups). Currently these don't exist as a
# database entity; they just implicitly exist by virtue of their ID
# corresponding to the calendar year at the start of the series.
# This comes with a lot of assumptions, that could be done away with
# by explicitly defining series. This would have the additional
# benefit of defining a proper object hierarchy, which is where most
# of these methods belong (rather than in this database God-object).
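    # A minimal sketch (an assumption, not part of the current schema) of what
    # an explicit Series model could look like if it were promoted to its own
    # entity; the field names here are illustrative only:
    #
    #     class Series(Base):
    #         __tablename__ = "series"
    #         id = Column(Integer, primary_key=True)    # calendar year at series start
    #         rotations = relationship("ProjectGroup")  # the rotations in that year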
def get_students_in_series(self, series:int) -> List[User]:
"""
Get the list of all students who are enrolled on projects in the
given series
:param series:
:return:
"""
# TODO This would be better implemented as a join in the
# database, rather than rolling our own.
return list({
project.student
for rotation in self.get_project_groups_by_series(series)
for project in rotation.projects
if project.student is not None})
def get_all_years(self) -> List[int]:
"""
Get the complete, sorted list of years
:return:
"""
q = self._session.query(ProjectGroup)
return [
group.series
for group in q.distinct(ProjectGroup.series) \
.order_by(desc(ProjectGroup.series)) \
.all()]
def get_all_series(self) -> List[ProjectGroup]:
"""
Get every series
:return ProjectGroup:
"""
q = self._session.query(ProjectGroup)
return q.order_by(desc(ProjectGroup.id)) \
.all()
## User Methods ####################################################
def get_user_by_id(self, uid:int) -> Optional[User]:
"""
Get a user by their ID
:param uid:
:return:
"""
q = self._session.query(User)
return q.filter(User.id == uid) \
.first()
def get_user_by_email(self, email:str) -> Optional[User]:
"""
Get a user by their e-mail address
:param email:
:return:
"""
q = self._session.query(User)
return q.filter((User.email == email) | (User.email_personal == email)) \
.first()
def get_users_by_permission(self, *permissions:str) -> List[User]:
"""
Return the users who have any of the specified permissions
:param permissions:
:return:
"""
# We must have at least one permission and our given permissions
# must be a subset of the valid permissions
assert permissions
assert set(permissions) <= set(PERMISSIONS)
return [
user
for user in self.get_all_users()
if any(getattr(user.role, p) for p in permissions)]
def get_all_users(self) -> List[User]:
"""
Get all users in the system
:return:
"""
return self._session.query(User).all()
def can_student_choose_project(self, user:User, project:Project) -> bool:
"""
Can the given user (student) choose the specified project? Only
if their role allows and, for their final project, they've done
at least one computational and wetlab project
:param user:
:param project:
:return:
"""
if user.role.join_projects:
if project.group.part != 3:
# If it's not the final rotation,
# then the student can pick any project
return True
all_projects = [project] + [
p for p in self.get_projects_by_student(user)
if p.group.series == project.group.series]
done_computational = any(p.is_computational for p in all_projects)
done_wetlab = any(p.is_wetlab for p in all_projects)
return done_computational and done_wetlab
return False
| agpl-3.0 | -7,453,707,756,658,393,000 | 33.889157 | 114 | 0.558602 | false |
sloria/osf.io | api/nodes/permissions.py | 1 | 9092 | # -*- coding: utf-8 -*-
from rest_framework import permissions
from rest_framework import exceptions
from addons.base.models import BaseAddonSettings
from osf.models import (
AbstractNode,
Contributor,
DraftRegistration,
Institution,
Node,
NodeRelation,
OSFUser,
PreprintService,
PrivateLink,
)
from osf.utils import permissions as osf_permissions
from website.project.metadata.utils import is_prereg_admin
from api.base.utils import get_user_auth, is_deprecated
class ContributorOrPublic(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
from api.nodes.views import NodeProvider
if isinstance(obj, BaseAddonSettings):
obj = obj.owner
if isinstance(obj, (NodeProvider, PreprintService)):
obj = obj.node
assert isinstance(obj, (AbstractNode, NodeRelation)), 'obj must be an Node, NodeProvider, NodeRelation, PreprintService, or AddonSettings; got {}'.format(obj)
auth = get_user_auth(request)
if request.method in permissions.SAFE_METHODS:
return obj.is_public or obj.can_view(auth)
else:
return obj.can_edit(auth)
class IsPublic(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
assert isinstance(obj, AbstractNode), 'obj must be an Node got {}'.format(obj)
auth = get_user_auth(request)
return obj.is_public or obj.can_view(auth)
class IsAdmin(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
assert isinstance(obj, AbstractNode), 'obj must be an Node, got {}'.format(obj)
auth = get_user_auth(request)
return obj.has_permission(auth.user, osf_permissions.ADMIN)
class IsContributor(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
assert isinstance(obj, AbstractNode), 'obj must be an Node, got {}'.format(obj)
auth = get_user_auth(request)
if request.method in permissions.SAFE_METHODS:
return obj.is_contributor(auth.user)
else:
return obj.has_permission(auth.user, 'write')
class IsAdminOrReviewer(permissions.BasePermission):
"""
Prereg admins can update draft registrations.
"""
def has_object_permission(self, request, view, obj):
assert isinstance(obj, (AbstractNode, DraftRegistration, PrivateLink)), 'obj must be an Node, Draft Registration, or PrivateLink, got {}'.format(obj)
auth = get_user_auth(request)
if request.method != 'DELETE' and is_prereg_admin(auth.user):
return True
return obj.has_permission(auth.user, osf_permissions.ADMIN)
class AdminOrPublic(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
assert isinstance(obj, (AbstractNode, OSFUser, Institution, BaseAddonSettings, DraftRegistration, PrivateLink)), 'obj must be an Node, User, Institution, Draft Registration, PrivateLink, or AddonSettings; got {}'.format(obj)
auth = get_user_auth(request)
if request.method in permissions.SAFE_METHODS:
return obj.is_public or obj.can_view(auth)
else:
return obj.has_permission(auth.user, osf_permissions.ADMIN)
class ExcludeWithdrawals(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
if isinstance(obj, Node):
node = obj
else:
context = request.parser_context['kwargs']
node = AbstractNode.load(context[view.node_lookup_url_kwarg])
if node.is_retracted:
return False
return True
class ContributorDetailPermissions(permissions.BasePermission):
"""Permissions for contributor detail page."""
def has_object_permission(self, request, view, obj):
assert isinstance(obj, (AbstractNode, OSFUser, Contributor)), 'obj must be User, Contributor, or Node, got {}'.format(obj)
auth = get_user_auth(request)
context = request.parser_context['kwargs']
node = AbstractNode.load(context[view.node_lookup_url_kwarg])
user = OSFUser.load(context['user_id'])
if request.method in permissions.SAFE_METHODS:
return node.is_public or node.can_view(auth)
elif request.method == 'DELETE':
return node.has_permission(auth.user, osf_permissions.ADMIN) or auth.user == user
else:
return node.has_permission(auth.user, osf_permissions.ADMIN)
class ContributorOrPublicForPointers(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
assert isinstance(obj, (AbstractNode, NodeRelation)), 'obj must be an Node or NodeRelation, got {}'.format(obj)
auth = get_user_auth(request)
parent_node = AbstractNode.load(request.parser_context['kwargs']['node_id'])
pointer_node = NodeRelation.load(request.parser_context['kwargs']['node_link_id']).child
if request.method in permissions.SAFE_METHODS:
has_parent_auth = parent_node.can_view(auth)
has_pointer_auth = pointer_node.can_view(auth)
public = pointer_node.is_public
has_auth = public or (has_parent_auth and has_pointer_auth)
return has_auth
else:
has_auth = parent_node.can_edit(auth)
return has_auth
class ContributorOrPublicForRelationshipPointers(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
assert isinstance(obj, dict)
auth = get_user_auth(request)
parent_node = obj['self']
if request.method in permissions.SAFE_METHODS:
return parent_node.can_view(auth)
elif request.method == 'DELETE':
return parent_node.can_edit(auth)
else:
has_parent_auth = parent_node.can_edit(auth)
if not has_parent_auth:
return False
pointer_nodes = []
for pointer in request.data.get('data', []):
node = AbstractNode.load(pointer['id'])
if not node or node.is_collection:
raise exceptions.NotFound(detail='Node with id "{}" was not found'.format(pointer['id']))
pointer_nodes.append(node)
has_pointer_auth = True
for pointer in pointer_nodes:
if not pointer.can_view(auth):
has_pointer_auth = False
break
return has_pointer_auth
class RegistrationAndPermissionCheckForPointers(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
node_link = NodeRelation.load(request.parser_context['kwargs']['node_link_id'])
node = AbstractNode.load(request.parser_context['kwargs'][view.node_lookup_url_kwarg])
auth = get_user_auth(request)
        if request.method == 'DELETE' and node.is_registration:
raise exceptions.MethodNotAllowed(method=request.method)
if node.is_collection or node.is_registration:
raise exceptions.NotFound
if node != node_link.parent:
raise exceptions.NotFound
if request.method == 'DELETE' and not node.can_edit(auth):
return False
return True
class WriteOrPublicForRelationshipInstitutions(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
assert isinstance(obj, dict)
auth = get_user_auth(request)
node = obj['self']
if request.method in permissions.SAFE_METHODS:
return node.is_public or node.can_view(auth)
else:
return node.has_permission(auth.user, osf_permissions.WRITE)
class ReadOnlyIfRegistration(permissions.BasePermission):
"""Makes PUT and POST forbidden for registrations."""
def has_object_permission(self, request, view, obj):
if not isinstance(obj, AbstractNode):
obj = AbstractNode.load(request.parser_context['kwargs'][view.node_lookup_url_kwarg])
assert isinstance(obj, AbstractNode), 'obj must be an Node'
if obj.is_registration:
return request.method in permissions.SAFE_METHODS
return True
class ShowIfVersion(permissions.BasePermission):
def __init__(self, min_version, max_version, deprecated_message):
super(ShowIfVersion, self).__init__()
self.min_version = min_version
self.max_version = max_version
self.deprecated_message = deprecated_message
def has_object_permission(self, request, view, obj):
if is_deprecated(request.version, self.min_version, self.max_version):
raise exceptions.NotFound(detail=self.deprecated_message)
return True
class NodeLinksShowIfVersion(ShowIfVersion):
def __init__(self):
min_version = '2.0'
max_version = '2.0'
deprecated_message = 'This feature is deprecated as of version 2.1'
super(NodeLinksShowIfVersion, self).__init__(min_version, max_version, deprecated_message)
| apache-2.0 | -5,648,631,933,092,123,000 | 39.408889 | 232 | 0.66366 | false |
rbn920/feebb | feebb/test.py | 1 | 1640 | from feebb import *
import matplotlib.pyplot as plt
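# Smoke test for the feebb pipeline: load each example JSON beam definition,
# assemble the beam, then print the extrema of and plot the interpolated
# moment and shear diagrams.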
pre = Preprocessor()
pre.load_json('ex_json/test2.json')
elems = [Element(elem) for elem in pre.elements]
print(pre.supports)
beam = Beam(elems, pre.supports)
post = Postprocessor(beam, 10)
print(max(post.interp('moment')))
print(min(post.interp('moment')))
plt.plot(post.interp('moment'))
plt.show()
print(max(post.interp('shear')))
print(min(post.interp('shear')))
plt.plot(post.interp('shear'))
plt.show()
pre = Preprocessor()
pre.load_json('ex_json/test2m.json')
elems = [Element(elem) for elem in pre.elements]
beam = Beam(elems, pre.supports)
post = Postprocessor(beam, 10)
print(max(post.interp('moment')))
print(min(post.interp('moment')))
plt.plot(post.interp('moment'))
plt.show()
print(max(post.interp('shear')))
print(min(post.interp('shear')))
plt.plot(post.interp('shear'))
plt.show()
pre = Preprocessor()
pre.load_json('ex_json/test2mm.json')
elems = [Element(elem) for elem in pre.elements]
beam = Beam(elems, pre.supports)
post = Postprocessor(beam, 10)
print(max(post.interp('moment')))
print(min(post.interp('moment')))
plt.plot(post.interp('moment'))
plt.show()
print(max(post.interp('shear')))
print(min(post.interp('shear')))
plt.plot(post.interp('shear'))
plt.show()
pre = Preprocessor()
pre.load_json('ex_json/test2mmm.json')
elems = [Element(elem) for elem in pre.elements]
beam = Beam(elems, pre.supports)
post = Postprocessor(beam, 10)
print(max(post.interp('moment')))
print(min(post.interp('moment')))
plt.plot(post.interp('moment'))
plt.show()
print(max(post.interp('shear')))
print(min(post.interp('shear')))
plt.plot(post.interp('shear'))
plt.show()
| mit | -3,904,780,058,405,213,000 | 25.885246 | 48 | 0.714634 | false |
Cerfoglg/cattle | tests/integration/cattletest/core/test_healthcheck.py | 1 | 58694 | from common_fixtures import * # NOQA
from cattle import ApiError
import yaml
def _get_agent_for_container(context, container):
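    # Find an agent container running on the same host as the given container;
    # if none exists yet, create a global service labelled to spawn an agent so
    # that one gets scheduled onto the host, then look again.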
agent = None
for map in container.hosts()[0].instanceHostMaps():
try:
c = map.instance()
except Exception:
continue
if c.agentId is not None:
agent = c.agent()
break
if agent is None:
client = context.client
env = client.create_stack(name='env-' + random_str())
svc = client.create_service(name='agentglobal' + random_str(),
launchConfig={
'imageUuid': context.image_uuid,
'healthCheck': {
'port': 80,
}, "labels": {
'io.rancher.scheduler.global': 'true',
'io.rancher.container.create_agent': 'true'
}
}, stackId=env.id)
svc = wait_state(client, svc, 'inactive')
client.wait_success(svc.activate())
for map in container.hosts()[0].instanceHostMaps():
try:
c = map.instance()
except Exception:
continue
if c.agentId is not None:
agent = c.agent()
break
assert agent is not None
return agent
def _get_agent_client(agent):
creds = agent.account().credentials()
api_key = [x for x in creds if x.kind == 'agentApiKey'][0]
assert len(api_key)
return api_client(api_key.publicValue, api_key.secretValue)
def test_upgrade_with_health(client, context, super_client):
env = client.create_stack(name='env-' + random_str())
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
svc = client.create_service(name=random_str(),
stackId=env.id,
launchConfig=launch_config,
scale=1)
svc = client.wait_success(svc)
assert svc.state == "inactive"
env.activateservices()
svc = client.wait_success(svc, 120)
assert svc.state == "active"
    # upgrade the service
new_launch_config = {"imageUuid": image_uuid,
'healthCheck': {
'port': 80,
}}
strategy = {"launchConfig": new_launch_config,
"intervalMillis": 100}
svc.upgrade_action(inServiceStrategy=strategy)
def wait_for_map_count(service):
m = super_client. \
list_serviceExposeMap(serviceId=service.id, state='active')
return len(m) == 2
def wait_for_managed_map_count(service):
m = super_client. \
list_serviceExposeMap(serviceId=service.id, state='active',
managed=True)
return len(m) == 1
wait_for_condition(client, svc, wait_for_map_count)
wait_for_condition(client, svc, wait_for_managed_map_count)
m = super_client. \
list_serviceExposeMap(serviceId=svc.id, state='active', managed=True)
c = super_client.reload(m[0].instance())
wait_for(lambda: super_client.reload(c).state == 'running')
c = super_client.reload(m[0].instance())
hci = find_one(c.healthcheckInstances)
hcihm = find_one(hci.healthcheckInstanceHostMaps)
agent = _get_agent_for_container(context, c)
assert hcihm.healthState == 'initializing'
assert c.healthState == 'initializing'
# shouldn't become upgraded at this point
try:
wait_for(lambda: super_client.reload(svc).state == 'upgraded',
timeout=30)
except Exception:
pass
_update_healthy(agent, hcihm, c, super_client)
wait_for(lambda: super_client.reload(svc).healthState == 'healthy')
wait_for(lambda: super_client.reload(svc).state == 'upgraded')
def test_rollback_with_health(client, context, super_client):
env = client.create_stack(name='env-' + random_str())
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid,
'healthCheck': {
'port': 80,
}}
svc = client.create_service(name=random_str(),
stackId=env.id,
launchConfig=launch_config,
scale=1)
svc = client.wait_success(svc)
assert svc.state == "inactive"
env.activateservices()
svc = client.wait_success(svc, 120)
assert svc.state == "active"
# issue the upgrade for the service
# it should get stuck in upgrading state
# as the health_state is init
strategy = {"launchConfig": launch_config,
"intervalMillis": 100}
svc.upgrade_action(inServiceStrategy=strategy)
wait_for(lambda: super_client.reload(svc).state == 'upgrading',
timeout=30)
svc = super_client.reload(svc)
# rollback the service
svc = wait_state(client, svc.cancelupgrade(), 'canceled-upgrade')
client.wait_success(svc.rollback())
def test_upgrade_start_first_with_health(client, context, super_client):
env = client.create_stack(name='env-' + random_str())
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
svc = client.create_service(name=random_str(),
stackId=env.id,
launchConfig=launch_config,
scale=1)
svc = client.wait_success(svc)
assert svc.state == "inactive"
env.activateservices()
svc = client.wait_success(svc, 120)
assert svc.state == "active"
    # upgrade the service
new_launch_config = {"imageUuid": image_uuid,
'healthCheck': {
'port': 80,
}}
strategy = {"launchConfig": new_launch_config,
"intervalMillis": 100,
"startFirst": True}
svc.upgrade_action(inServiceStrategy=strategy)
def wait_for_map_count(service):
m = super_client. \
list_serviceExposeMap(serviceId=service.id, state='active')
return len(m) == 2
def wait_for_managed_map_count(service):
m = super_client. \
list_serviceExposeMap(serviceId=service.id, state='active',
managed=True)
return len(m) == 1
wait_for_condition(client, svc, wait_for_map_count)
wait_for_condition(client, svc, wait_for_managed_map_count)
m = super_client. \
list_serviceExposeMap(serviceId=svc.id, state='active', managed=True)
c = super_client.reload(m[0].instance())
wait_for(lambda: super_client.reload(c).state == 'running')
c = super_client.reload(m[0].instance())
hci = find_one(c.healthcheckInstances)
hcihm = find_one(hci.healthcheckInstanceHostMaps)
agent = _get_agent_for_container(context, c)
assert hcihm.healthState == 'initializing'
assert c.healthState == 'initializing'
_update_healthy(agent, hcihm, c, super_client)
wait_for(lambda: super_client.reload(svc).healthState == 'healthy')
wait_for(lambda: super_client.reload(svc).state == 'upgraded')
def test_health_check_create_instance(super_client, context):
c = context.create_container(healthCheck={
'port': 80,
})
assert c.healthCheck.port == 80
c = super_client.reload(c)
hci = find_one(c.healthcheckInstances)
hcihm = find_one(hci.healthcheckInstanceHostMaps)
agent = _get_agent_for_container(context, c)
assert hcihm.healthState == 'initializing'
ts = int(time.time())
client = _get_agent_client(agent)
se = client.create_service_event(externalTimestamp=ts,
reportedHealth='UP',
healthcheckUuid=hcihm.uuid)
se = super_client.wait_success(se)
assert se.state == 'created'
assert se.accountId == c.accountId
assert se.instanceId == c.id
assert se.healthcheckInstanceId == hci.id
hcihm = super_client.wait_success(super_client.reload(hcihm))
assert hcihm.healthState == 'healthy'
assert hcihm.externalTimestamp == ts
wait_for(lambda: super_client.reload(c).healthState == 'healthy')
def _create_svc_w_healthcheck(client, context):
env = client.create_stack(name='env-' + random_str())
service = client.create_service(name='test', launchConfig={
'imageUuid': context.image_uuid,
'healthCheck': {
'port': 80,
}
}, stackId=env.id)
service = client.wait_success(client.wait_success(service).activate())
assert service.state == 'active'
return service
def test_health_check_create_service(super_client, context, client):
service = _create_svc_w_healthcheck(client, context)
maps = _wait_until_active_map_count(service, 1, client)
expose_map = maps[0]
c = super_client.reload(expose_map.instance())
hci = find_one(c.healthcheckInstances)
hcihm = find_one(hci.healthcheckInstanceHostMaps)
agent = _get_agent_for_container(context, c)
assert hcihm.healthState == 'initializing'
assert c.healthState == 'initializing'
ts = int(time.time())
client = _get_agent_client(agent)
se = client.create_service_event(externalTimestamp=ts,
reportedHealth='Something Bad',
healthcheckUuid=hcihm.uuid)
super_client.wait_success(se)
hcihm = super_client.wait_success(super_client.reload(hcihm))
assert hcihm.healthState == 'initializing'
assert c.healthState == 'initializing'
ts = int(time.time())
client = _get_agent_client(agent)
se = client.create_service_event(externalTimestamp=ts,
reportedHealth='UP',
healthcheckUuid=hcihm.uuid)
super_client.wait_success(se)
hcihm = super_client.wait_success(super_client.reload(hcihm))
assert hcihm.healthState == 'healthy'
wait_for(lambda: super_client.reload(c).healthState == 'healthy')
ts = int(time.time())
client = _get_agent_client(agent)
se = client.create_service_event(externalTimestamp=ts,
reportedHealth='UP',
healthcheckUuid=hcihm.uuid)
super_client.wait_success(se)
hcihm = super_client.wait_success(super_client.reload(hcihm))
assert hcihm.healthState == 'healthy'
wait_for(lambda: super_client.reload(c).healthState == 'healthy')
ts = int(time.time())
client = _get_agent_client(agent)
se = client.create_service_event(externalTimestamp=ts,
reportedHealth='INIT',
healthcheckUuid=hcihm.uuid)
super_client.wait_success(se)
hcihm = super_client.wait_success(super_client.reload(hcihm))
assert hcihm.healthState == 'healthy'
wait_for(lambda: super_client.reload(c).healthState == 'healthy')
# restart the instance
c = super_client.wait_success(c.stop())
wait_for(lambda: super_client.reload(c).state == 'running')
wait_for(lambda: super_client.reload(c).healthState == 'reinitializing')
ts = int(time.time())
client = _get_agent_client(agent)
se = client.create_service_event(externalTimestamp=ts,
reportedHealth='Something bad',
healthcheckUuid=hcihm.uuid)
super_client.wait_success(se)
hcihm = super_client.wait_success(super_client.reload(hcihm))
assert hcihm.healthState == 'healthy'
wait_for(lambda: super_client.reload(c).healthState == 'reinitializing')
ts = int(time.time())
client = _get_agent_client(agent)
se = client.create_service_event(externalTimestamp=ts,
reportedHealth='UP',
healthcheckUuid=hcihm.uuid)
super_client.wait_success(se)
hcihm = super_client.wait_success(super_client.reload(hcihm))
assert hcihm.healthState == 'healthy'
wait_for(lambda: super_client.reload(c).healthState == 'healthy')
ts = int(time.time())
client = _get_agent_client(agent)
se = client.create_service_event(externalTimestamp=ts,
reportedHealth='INIT',
healthcheckUuid=hcihm.uuid)
super_client.wait_success(se)
hcihm = super_client.wait_success(super_client.reload(hcihm))
assert hcihm.healthState == 'healthy'
wait_for(lambda: super_client.reload(c).healthState == 'healthy')
ts = int(time.time())
client = _get_agent_client(agent)
se = client.create_service_event(externalTimestamp=ts,
reportedHealth='UP',
healthcheckUuid=hcihm.uuid)
super_client.wait_success(se)
hcihm = super_client.wait_success(super_client.reload(hcihm))
assert hcihm.healthState == 'healthy'
wait_for(lambda: super_client.reload(c).healthState == 'healthy')
ts = int(time.time())
client = _get_agent_client(agent)
se = client.create_service_event(externalTimestamp=ts,
reportedHealth='Something Bad',
healthcheckUuid=hcihm.uuid)
se = super_client.wait_success(se)
assert se.state == 'created'
assert se.accountId == c.accountId
assert se.instanceId == c.id
assert se.healthcheckInstanceId == hci.id
hcihm = super_client.wait_success(super_client.reload(hcihm))
assert hcihm.healthState == 'unhealthy'
assert hcihm.externalTimestamp == ts
wait_for(lambda: super_client.reload(c).healthState == 'unhealthy')
wait_for(lambda: len(service.serviceExposeMaps()) == 1)
remove_service(service)
def test_health_check_ip_retain(super_client, context, client):
env = client.create_stack(name='env-' + random_str())
service = client.create_service(name='test', launchConfig={
'imageUuid': context.image_uuid,
'healthCheck': {
'port': 80,
}
}, stackId=env.id, scale=1, retainIp=True)
service = client.wait_success(client.wait_success(service).activate())
assert service.state == 'active'
maps = _wait_until_active_map_count(service, 1, client)
expose_map = maps[0]
c1 = super_client.reload(expose_map.instance())
ip1 = c1.primaryIpAddress
hci = find_one(c1.healthcheckInstances)
hcihm = find_one(hci.healthcheckInstanceHostMaps)
agent = _get_agent_for_container(context, c1)
assert hcihm.healthState == 'initializing'
assert c1.healthState == 'initializing'
ts = int(time.time())
client = _get_agent_client(agent)
se = client.create_service_event(externalTimestamp=ts,
reportedHealth='UP',
healthcheckUuid=hcihm.uuid)
super_client.wait_success(se)
hcihm = super_client.wait_success(super_client.reload(hcihm))
assert hcihm.healthState == 'healthy'
wait_for(lambda: super_client.reload(c1).healthState == 'healthy')
ts = int(time.time())
client = _get_agent_client(agent)
se = client.create_service_event(externalTimestamp=ts,
reportedHealth='Something Bad',
healthcheckUuid=hcihm.uuid)
se = super_client.wait_success(se)
assert se.state == 'created'
assert se.accountId == c1.accountId
assert se.instanceId == c1.id
assert se.healthcheckInstanceId == hci.id
hcihm = super_client.wait_success(super_client.reload(hcihm))
assert hcihm.healthState == 'unhealthy'
assert hcihm.externalTimestamp == ts
wait_for(lambda: super_client.reload(c1).healthState == 'unhealthy')
wait_for(lambda: len(service.serviceExposeMaps()) == 1)
super_client.wait_success(c1)
for e_map in service.serviceExposeMaps():
if e_map.instance().id == c1.id:
continue
c2 = super_client.wait_success(e_map.instance())
assert c2.name == c1.name
assert c2.primaryIpAddress == ip1
break
remove_service(service)
def test_health_state_stack(super_client, context, client):
c = client
env = client.create_stack(name='env-' + random_str())
service = client.create_service(name='test', launchConfig={
'imageUuid': context.image_uuid,
'healthCheck': {
'port': 80,
}
}, stackId=env.id, scale=1)
service = client.wait_success(client.wait_success(service).activate())
assert service.state == 'active'
i = 'initializing'
wait_for(lambda: super_client.reload(service).healthState == i)
wait_for(lambda: client.reload(env).healthState == i)
maps = _wait_until_active_map_count(service, 1, client)
expose_map = maps[0]
c1 = super_client.reload(expose_map.instance())
hci = find_one(c1.healthcheckInstances)
hcihm = find_one(hci.healthcheckInstanceHostMaps)
agent = _get_agent_for_container(context, c1)
assert hcihm.healthState == 'initializing'
assert c1.healthState == 'initializing'
ts = int(time.time())
client = _get_agent_client(agent)
se = client.create_service_event(externalTimestamp=ts,
reportedHealth='UP',
healthcheckUuid=hcihm.uuid)
super_client.wait_success(se)
hcihm = super_client.wait_success(super_client.reload(hcihm))
assert hcihm.healthState == 'healthy'
wait_for(lambda: super_client.reload(c1).healthState == 'healthy')
wait_for(lambda: super_client.reload(service).healthState == 'healthy')
wait_for(lambda: c.reload(env).healthState == 'healthy')
ts = int(time.time())
client = _get_agent_client(agent)
se = client.create_service_event(externalTimestamp=ts,
reportedHealth='Something Bad',
healthcheckUuid=hcihm.uuid)
se = super_client.wait_success(se)
assert se.state == 'created'
assert se.accountId == c1.accountId
assert se.instanceId == c1.id
assert se.healthcheckInstanceId == hci.id
hcihm = super_client.wait_success(super_client.reload(hcihm))
assert hcihm.healthState == 'unhealthy'
assert hcihm.externalTimestamp == ts
wait_for(lambda: super_client.reload(c1).healthState == 'unhealthy')
wait_for(lambda: super_client.reload(service).healthState == 'unhealthy')
wait_for(lambda: c.reload(env).healthState == 'unhealthy')
remove_service(service)
def test_health_state_start_once(super_client, context, client):
c = client
env = client.create_stack(name='env-' + random_str())
labels = {"io.rancher.container.start_once": "true"}
svc = client.create_service(name='test', launchConfig={
'imageUuid': context.image_uuid,
'healthCheck': {
'port': 80,
},
'labels': labels
}, stackId=env.id, scale=1)
svc = client.wait_success(client.wait_success(svc).activate())
assert svc.state == 'active'
wait_for(lambda: super_client.reload(svc).healthState == 'initializing')
wait_for(lambda: c.reload(env).healthState == 'initializing')
maps = _wait_until_active_map_count(svc, 1, client)
expose_map = maps[0]
c1 = super_client.reload(expose_map.instance())
hci = find_one(c1.healthcheckInstances)
hcihm = find_one(hci.healthcheckInstanceHostMaps)
agent = _get_agent_for_container(context, c1)
assert hcihm.healthState == 'initializing'
assert c1.healthState == 'initializing'
ts = int(time.time())
client = _get_agent_client(agent)
se = client.create_service_event(externalTimestamp=ts,
reportedHealth='UP',
healthcheckUuid=hcihm.uuid)
super_client.wait_success(se)
hcihm = super_client.wait_success(super_client.reload(hcihm))
assert hcihm.healthState == 'healthy'
wait_for(lambda: super_client.reload(c1).healthState == 'healthy')
wait_for(lambda: super_client.reload(svc).healthState == 'healthy')
wait_for(lambda: c.reload(env).healthState == 'healthy')
super_client.wait_success(c1.stop())
wait_for(lambda: super_client.reload(svc).healthState == 'started-once')
wait_for(lambda: c.reload(env).healthState == 'started-once')
remove_service(svc)
def test_health_state_sidekick_start_once(super_client, context, client):
env = client.create_stack(name='env-' + random_str())
labels = {"io.rancher.container.start_once": "true"}
slc = {'imageUuid': context.image_uuid, 'name': "test1"}
svc = client.create_service(name='test', launchConfig={
'imageUuid': context.image_uuid,
'labels': labels
}, stackId=env.id, scale=1, secondaryLaunchConfigs=[slc])
svc = client.wait_success(client.wait_success(svc).activate())
assert svc.state == 'active'
wait_for(lambda: super_client.reload(svc).healthState == 'healthy')
wait_for(lambda: client.reload(env).healthState == 'healthy')
def test_health_state_selectors(context, client):
env = client.create_stack(name='env-' + random_str())
labels = {'foo': "bar"}
container1 = client.create_container(imageUuid=context.image_uuid,
startOnCreate=True,
labels=labels)
container1 = client.wait_success(container1)
assert container1.state == "running"
launch_config = {"imageUuid": "rancher/none"}
service = client.create_service(name=random_str(),
stackId=env.id,
launchConfig=launch_config,
selectorContainer="foo=bar")
service = client.wait_success(service)
assert service.selectorContainer == "foo=bar"
service = client.wait_success(service.activate())
wait_for(lambda: client.reload(service).healthState == 'healthy')
wait_for(lambda: client.reload(env).healthState == 'healthy')
remove_service(service)
def test_svc_health_state(context, client):
env = client.create_stack(name='env-' + random_str())
launch_config = {"imageUuid": context.image_uuid}
service = client.create_service(name=random_str(),
stackId=env.id,
launchConfig=launch_config,
scale=1)
service = client.wait_success(service)
service = client.wait_success(service.activate())
wait_for(lambda: client.reload(service).healthState == 'healthy')
wait_for(lambda: client.reload(env).healthState == 'healthy')
remove_service(service)
def test_health_check_init_timeout(super_client, context, client):
env = client.create_stack(name='env-' + random_str())
service = client.create_service(name='test', launchConfig={
'imageUuid': context.image_uuid,
'healthCheck': {
'port': 80,
'initializingTimeout': 2000,
}
}, stackId=env.id)
service = client.wait_success(client.wait_success(service).activate())
assert service.state == 'active'
h_c = service.launchConfig.healthCheck
assert h_c.initializingTimeout == 2000
maps = _wait_until_active_map_count(service, 1, client)
expose_map = maps[0]
c = super_client.reload(expose_map.instance())
hci = find_one(c.healthcheckInstances)
hcihm = find_one(hci.healthcheckInstanceHostMaps)
assert hcihm.healthState == 'initializing'
assert c.healthState == 'initializing'
# wait for the instance to be removed
wait_for_condition(client, c, lambda x: x.removed is None)
remove_service(service)
def test_health_check_reinit_timeout(super_client, context, client):
env = client.create_stack(name='env-' + random_str())
service = client.create_service(name='test', launchConfig={
'imageUuid': context.image_uuid,
'healthCheck': {
'port': 80,
'reinitializingTimeout': 1,
}
}, stackId=env.id)
service = client.wait_success(client.wait_success(service).activate())
assert service.state == 'active'
h_c = service.launchConfig.healthCheck
assert h_c.reinitializingTimeout == 1
maps = _wait_until_active_map_count(service, 1, client)
expose_map = maps[0]
c = super_client.reload(expose_map.instance())
hci = find_one(c.healthcheckInstances)
hcihm = find_one(hci.healthcheckInstanceHostMaps)
agent = _get_agent_for_container(context, c)
assert hcihm.healthState == 'initializing'
assert c.healthState == 'initializing'
ts = int(time.time())
client = _get_agent_client(agent)
se = client.create_service_event(externalTimestamp=ts,
reportedHealth='UP',
healthcheckUuid=hcihm.uuid)
super_client.wait_success(se)
hcihm = super_client.wait_success(super_client.reload(hcihm))
assert hcihm.healthState == 'healthy'
wait_for(lambda: super_client.reload(c).healthState == 'healthy')
# restart the instance
c = super_client.wait_success(c.stop())
wait_for(lambda: super_client.reload(c).state == 'running')
wait_for(lambda: super_client.reload(c).healthState == 'reinitializing')
# wait for the instance to be removed
wait_for_condition(super_client, c,
lambda x: x.removed is not None)
remove_service(service)
def test_health_check_bad_external_timestamp(super_client, context, client):
env = client.create_stack(name='env-' + random_str())
service = client.create_service(name='test', launchConfig={
'imageUuid': context.image_uuid,
'healthCheck': {
'port': 80,
}
}, stackId=env.id)
service = client.wait_success(client.wait_success(service).activate())
assert service.state == 'active'
maps = _wait_until_active_map_count(service, 1, client)
expose_map = maps[0]
container = super_client.reload(expose_map.instance())
hci = find_one(container.healthcheckInstances)
hcihm = find_one(hci.healthcheckInstanceHostMaps)
agent = _get_agent_for_container(context, container)
agent_client = _get_agent_client(agent)
assert hcihm.healthState == 'initializing'
with pytest.raises(ApiError) as e:
agent_client.create_service_event(reportedHealth='Something Bad',
healthcheckUuid=hcihm.uuid)
assert e.value.error.code == 'MissingRequired'
assert e.value.error.fieldName == 'externalTimestamp'
remove_service(service)
def test_health_check_noop(super_client, context, client):
env = client.create_stack(name='env-' + random_str())
svc = client.create_service(name='test', launchConfig={
'imageUuid': context.image_uuid,
'healthCheck': {
'port': 80,
'strategy': 'none'
}
}, stackId=env.id)
svc = client.wait_success(client.wait_success(svc).activate())
assert svc.state == 'active'
assert svc.launchConfig.healthCheck.strategy == 'none'
maps = _wait_until_active_map_count(svc, 1, client)
expose_map = maps[0]
c = super_client.reload(expose_map.instance())
hci = find_one(c.healthcheckInstances)
hcihm = find_one(hci.healthcheckInstanceHostMaps)
agent = _get_agent_for_container(context, c)
assert hcihm.healthState == 'initializing'
assert c.healthState == 'initializing'
hcihm = _update_healthy(agent, hcihm, c, super_client)
_update_unhealthy(agent, hcihm, c, super_client)
svc = super_client.wait_success(svc)
assert svc.state == "active"
assert len(svc.serviceExposeMaps()) == 1
c = super_client.wait_success(c)
assert c.state == 'running'
remove_service(svc)
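# Helper: remove a service, retrying once after a short pause and ignoring
# transient API errors.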
def remove_service(svc):
for i in range(1, 3):
try:
svc.remove()
return
except Exception:
pass
time.sleep(1)
def test_health_check_quorum(super_client, context, client):
env = client.create_stack(name='env-' + random_str())
svc = client.create_service(name='test', launchConfig={
'imageUuid': context.image_uuid,
'healthCheck': {
'port': 80,
'recreateOnQuorumStrategyConfig': {"quorum": 2},
'strategy': "recreateOnQuorum"
}
}, stackId=env.id, scale=2)
svc = client.wait_success(client.wait_success(svc).activate())
assert svc.state == 'active'
action = svc.launchConfig.healthCheck.strategy
config = svc.launchConfig.healthCheck.recreateOnQuorumStrategyConfig
assert action == 'recreateOnQuorum'
assert config.quorum == 2
expose_maps = svc.serviceExposeMaps()
c1 = super_client.reload(expose_maps[0].instance())
hci1 = find_one(c1.healthcheckInstances)
hcihm1 = find_one(hci1.healthcheckInstanceHostMaps)
agent1 = _get_agent_for_container(context, c1)
assert hcihm1.healthState == 'initializing'
assert c1.healthState == 'initializing'
hcihm1 = _update_healthy(agent1, hcihm1, c1, super_client)
c2 = super_client.reload(expose_maps[1].instance())
hci2 = find_one(c2.healthcheckInstances)
hcihm2 = find_one(hci2.healthcheckInstanceHostMaps)
agent2 = _get_agent_for_container(context, c2)
assert hcihm2.healthState == 'initializing'
assert c2.healthState == 'initializing'
_update_healthy(agent2, hcihm2, c2, super_client)
    # update unhealthy, check container is not removed
# as quorum is not reached yet
_update_unhealthy(agent1, hcihm1, c1, super_client)
svc = super_client.wait_success(svc)
assert svc.state == "active"
assert len(svc.serviceExposeMaps()) == 2
c1 = super_client.wait_success(c1)
assert c1.state == 'running'
wait_for_condition(client, svc,
lambda x: x.healthState == 'degraded')
# update healthy and increase the scale
hcihm1 = _update_healthy(agent1, hcihm1, c1, super_client)
wait_state(client, svc, 'active')
wait_for_condition(client, svc,
lambda x: x.healthState == 'healthy')
svc = super_client.reload(svc)
svc = super_client.update(svc, scale=3)
svc = super_client.wait_success(svc)
assert svc.scale == 3
assert svc.state == 'active'
assert len(svc.serviceExposeMaps()) == 3
expose_maps = svc.serviceExposeMaps()
for m in expose_maps:
if m.instance().id == c1.id or m.instance().id == c2.id:
continue
c3 = m.instance()
c3 = super_client.reload(c3)
hci3 = find_one(c3.healthcheckInstances)
hcihm3 = find_one(hci3.healthcheckInstanceHostMaps)
agent3 = _get_agent_for_container(context, c3)
assert hcihm3.healthState == 'initializing'
assert c3.healthState == 'initializing'
_update_healthy(agent3, hcihm3, c3, super_client)
# update unhealthy, check container removed
# as quorum is reached
_update_unhealthy(agent1, hcihm1, c1, super_client)
svc = super_client.wait_success(svc)
assert svc.state == "active"
assert len(svc.serviceExposeMaps()) >= 3
wait_for_condition(client, c1,
lambda x: x.removed is not None)
remove_service(svc)
def test_health_check_chk_name_quorum(super_client, context, client):
env = client.create_stack(name='env-' + random_str())
svc = client.create_service(name='test', launchConfig={
'imageUuid': context.image_uuid,
'healthCheck': {
'port': 80,
'recreateOnQuorumStrategyConfig': {"quorum": 2},
'strategy': "recreateOnQuorum"
}
}, stackId=env.id, scale=1)
svc = client.wait_success(client.wait_success(svc).activate())
assert svc.state == 'active'
action = svc.launchConfig.healthCheck.strategy
config = svc.launchConfig.healthCheck.recreateOnQuorumStrategyConfig
assert action == 'recreateOnQuorum'
assert config.quorum == 2
expose_maps = svc.serviceExposeMaps()
c1 = super_client.reload(expose_maps[0].instance())
hci1 = find_one(c1.healthcheckInstances)
hcihm1 = find_one(hci1.healthcheckInstanceHostMaps)
agent1 = _get_agent_for_container(context, c1)
assert hcihm1.healthState == 'initializing'
assert c1.healthState == 'initializing'
hcihm1 = _update_healthy(agent1, hcihm1, c1, super_client)
    # update unhealthy, check container is not removed
_update_unhealthy(agent1, hcihm1, c1, super_client)
svc = super_client.wait_success(svc)
assert svc.state == "active"
assert len(svc.serviceExposeMaps()) == 1
c1 = super_client.wait_success(c1)
assert c1.state == 'running'
# leave the container as unhealthy and scale up
# check that the new container is created
# with new service index
svc = client.update(svc, scale=2, name=svc.name)
svc = client.wait_success(svc)
expose_maps = svc.serviceExposeMaps()
assert len(expose_maps) == 2
c1 = super_client.reload(expose_maps[0].instance())
c2 = super_client.reload(expose_maps[1].instance())
assert c1.state == 'running'
assert c2.state == 'running'
assert c1.serviceIndex != c2.serviceIndex
def test_health_check_default(super_client, context, client):
env = client.create_stack(name='env-' + random_str())
svc = client.create_service(name='test', launchConfig={
'imageUuid': context.image_uuid,
'healthCheck': {
'port': 80
}
}, stackId=env.id)
svc = client.wait_success(client.wait_success(svc).activate())
assert svc.state == 'active'
expose_maps = svc.serviceExposeMaps()
c1 = super_client.reload(expose_maps[0].instance())
hci = find_one(c1.healthcheckInstances)
hcihm = find_one(hci.healthcheckInstanceHostMaps)
agent = _get_agent_for_container(context, c1)
assert hcihm.healthState == 'initializing'
assert c1.healthState == 'initializing'
hcihm = _update_healthy(agent, hcihm, c1, super_client)
    # update unhealthy, the container should be removed
_update_unhealthy(agent, hcihm, c1, super_client)
svc = super_client.wait_success(svc)
assert svc.state == "active"
assert len(svc.serviceExposeMaps()) >= 1
c1 = super_client.wait_success(c1)
wait_for_condition(client, c1,
lambda x: x.removed is not None)
remove_service(svc)
def test_health_check_bad_agent(super_client, context, client):
# Create another host to get the agent from that host
host2 = super_client.reload(register_simulated_host(context))
# register one more host to ensure
# there is at least one more host
    # to schedule healthchecks on
register_simulated_host(context)
env = client.create_stack(name='env-' + random_str())
service = client.create_service(name='test', launchConfig={
'imageUuid': context.image_uuid,
'healthCheck': {
'port': 80,
},
"labels": {
'io.rancher.scheduler.global': 'true'
}
}, stackId=env.id)
service = client.wait_success(client.wait_success(service).activate())
assert service.state == 'active'
maps = _wait_until_active_map_count(service, 3, client)
expose_map = maps[0]
container = super_client.reload(expose_map.instance())
hci = find_one(container.healthcheckInstances)
hcihm = None
wait_for(lambda: len(hci.healthcheckInstanceHostMaps()) > 1)
for h in hci.healthcheckInstanceHostMaps():
if h.hostId != host2.id:
hcihm = h
break
assert hcihm is not None
agent_client = _get_agent_client(host2.agent())
assert hcihm.healthState == 'initializing'
ts = int(time.time())
with pytest.raises(ApiError) as e:
agent_client.create_service_event(externalTimestamp=ts,
reportedHealth='Something Bad',
healthcheckUuid=hcihm.uuid)
assert e.value.error.code == 'CantVerifyHealthcheck'
remove_service(service)
def test_health_check_reconcile(super_client, new_context):
super_client.reload(register_simulated_host(new_context))
super_client.reload(register_simulated_host(new_context))
client = new_context.client
env = client.create_stack(name='env-' + random_str())
service = client.create_service(name='test', launchConfig={
'imageUuid': new_context.image_uuid,
'healthCheck': {
'port': 80,
}
}, stackId=env.id)
service = client.wait_success(client.wait_success(service).activate())
assert service.state == 'active'
maps = _wait_until_active_map_count(service, 1, client)
expose_map = maps[0]
c = super_client.reload(expose_map.instance())
initial_len = len(c.healthcheckInstanceHostMaps())
assert initial_len == 2
for h in c.healthcheckInstanceHostMaps():
assert h.healthState == c.healthState
hcihm1 = c.healthcheckInstanceHostMaps()[0]
hosts = super_client.list_host(uuid=hcihm1.host().uuid)
assert len(hosts) == 1
hcihm2 = c.healthcheckInstanceHostMaps()[1]
hosts = super_client.list_host(uuid=hcihm2.host().uuid)
assert len(hosts) == 1
host2 = hosts[0]
agent = _get_agent_for_container(new_context, c)
assert hcihm1.healthState == 'initializing'
assert hcihm2.healthState == 'initializing'
assert c.healthState == 'initializing'
ts = int(time.time())
client = _get_agent_client(agent)
se = client.create_service_event(externalTimestamp=ts,
reportedHealth='UP',
healthcheckUuid=hcihm1.uuid)
super_client.wait_success(se)
hcihm1 = super_client.wait_success(super_client.reload(hcihm1))
se = client.create_service_event(externalTimestamp=ts,
reportedHealth='UP',
healthcheckUuid=hcihm2.uuid)
super_client.wait_success(se)
super_client.wait_success(super_client.reload(hcihm2))
wait_for(lambda: super_client.reload(c).healthState == 'healthy')
ts = int(time.time())
client = _get_agent_client(agent)
se = client.create_service_event(externalTimestamp=ts,
reportedHealth='Something Bad',
healthcheckUuid=hcihm1.uuid)
super_client.wait_success(se)
super_client.wait_success(super_client.reload(hcihm1))
    # still healthy, as the other host still reports healthy
wait_for(lambda: super_client.reload(c).healthState == 'healthy')
    # remove the host whose report is still healthy
host2 = super_client.wait_success(host2.deactivate())
host2 = super_client.wait_success(super_client.delete(host2))
assert host2.state == 'removed'
# should be unhealthy as the only health state reported is unhealthy
wait_for(lambda: super_client.reload(c).healthState == 'unhealthy')
remove_service(service)
def test_health_check_all_hosts_removed_reconcile(super_client, new_context):
super_client.reload(register_simulated_host(new_context))
client = new_context.client
env = client.create_stack(name='env-' + random_str())
service = client.create_service(name='test', launchConfig={
'imageUuid': new_context.image_uuid,
'healthCheck': {
'port': 80,
}
}, stackId=env.id)
service = client.wait_success(client.wait_success(service).activate())
assert service.state == 'active'
maps = _wait_until_active_map_count(service, 1, client)
expose_map = maps[0]
c = super_client.reload(expose_map.instance())
initial_len = len(c.healthcheckInstanceHostMaps())
assert initial_len == 1
for h in c.healthcheckInstanceHostMaps():
assert h.healthState == c.healthState
hcihm1 = c.healthcheckInstanceHostMaps()[0]
hosts = super_client.list_host(uuid=hcihm1.host().uuid)
assert len(hosts) == 1
host1 = hosts[0]
agent = _get_agent_for_container(new_context, c)
assert hcihm1.healthState == 'initializing'
assert c.healthState == 'initializing'
ts = int(time.time())
client = _get_agent_client(agent)
se = client.create_service_event(externalTimestamp=ts,
reportedHealth='UP',
healthcheckUuid=hcihm1.uuid)
super_client.wait_success(se)
super_client.wait_success(super_client.reload(hcihm1))
wait_for(lambda: super_client.reload(c).healthState == 'healthy')
# remove health checker host
host1 = super_client.wait_success(host1.deactivate())
host1 = super_client.wait_success(super_client.delete(host1))
assert host1.state == 'removed'
    # the instance should stay healthy
try:
wait_for(lambda: super_client.reload(c).healthState == 'unhealthy',
timeout=5)
except Exception:
pass
wait_for(lambda: super_client.reload(c).healthState == 'healthy')
remove_service(service)
def test_hosts_removed_reconcile_when_init(super_client, new_context):
super_client.reload(register_simulated_host(new_context))
client = new_context.client
env = client.create_stack(name='env-' + random_str())
service = client.create_service(name='test', launchConfig={
'imageUuid': new_context.image_uuid,
'healthCheck': {
'port': 80,
}
}, stackId=env.id)
service = client.wait_success(client.wait_success(service).activate())
assert service.state == 'active'
maps = _wait_until_active_map_count(service, 1, client)
expose_map = maps[0]
c = super_client.reload(expose_map.instance())
initial_len = len(c.healthcheckInstanceHostMaps())
assert initial_len == 1
for h in c.healthcheckInstanceHostMaps():
assert h.healthState == c.healthState
hcihm1 = c.healthcheckInstanceHostMaps()[0]
hosts = super_client.list_host(uuid=hcihm1.host().uuid)
assert len(hosts) == 1
host1 = hosts[0]
assert hcihm1.healthState == 'initializing'
assert c.healthState == 'initializing'
# remove the healthchecking host
host1 = super_client.wait_success(host1.deactivate())
host1 = super_client.wait_success(super_client.delete(host1))
assert host1.state == 'removed'
    # the instance should remain healthy, as there are no reporters at this point
try:
wait_for(lambda: super_client.reload(c).healthState == 'unhealthy',
timeout=5)
except Exception:
pass
wait_for(lambda: super_client.reload(c).healthState == 'initializing')
remove_service(service)
def test_health_check_host_remove(super_client, new_context):
client = new_context.client
    # create 4 hosts for healthchecks, as one of them will be removed later
super_client.reload(register_simulated_host(new_context))
super_client.reload(register_simulated_host(new_context))
super_client.reload(register_simulated_host(new_context))
super_client.reload(register_simulated_host(new_context))
env = client.create_stack(name='env-' + random_str())
service = client.create_service(name='test', launchConfig={
'imageUuid': new_context.image_uuid,
'healthCheck': {
'port': 80,
}, "labels": {
'io.rancher.scheduler.global': 'true'
}
}, stackId=env.id)
service = client.wait_success(client.wait_success(service).activate())
assert service.state == 'active'
maps = _wait_until_active_map_count(service, 5, client)
expose_map = maps[0]
c = super_client.reload(expose_map.instance())
initial_len = len(c.healthcheckInstanceHostMaps())
assert initial_len == 3
for h in c.healthcheckInstanceHostMaps():
assert h.healthState == c.healthState
hcihm = c.healthcheckInstanceHostMaps()[0]
hosts = super_client.list_host(uuid=hcihm.host().uuid)
assert len(hosts) == 1
host = hosts[0]
# remove the host
host = super_client.wait_success(host.deactivate())
host = super_client.wait_success(super_client.delete(host))
assert host.state == 'removed'
    # verify that a new hostmap was created for the instance
wait_for(lambda: len(c.healthcheckInstanceHostMaps()) == initial_len)
    hcim = None
    for h in c.healthcheckInstanceHostMaps():
        if h.hostId == host.id:
            if h.state == 'active':
                hcim = h
                break
    assert hcim is None
remove_service(service)
def test_healtcheck(new_context, super_client):
client = new_context.client
image_uuid = new_context.image_uuid
stack = client.create_stack(name='env-' + random_str())
host = register_simulated_host(new_context)
client.wait_success(host)
# create dummy service to span network agents
multiport = client.create_service(name='foo', launchConfig={
'imageUuid': new_context.image_uuid,
'ports': "54557"
}, stackId=stack.id, scale=2)
multiport = client.wait_success(client.wait_success(multiport).activate())
assert multiport.state == 'active'
multiport.remove()
    # define the healthcheck configuration for the service
health_check = {"name": "check1", "responseTimeout": 3,
"interval": 4, "healthyThreshold": 5,
"unhealthyThreshold": 6, "requestLine": "index.html",
"port": 200}
launch_config = {"imageUuid": image_uuid, "healthCheck": health_check}
service = client.create_service(name=random_str(),
stackId=stack.id,
launchConfig=launch_config)
service = client.wait_success(service)
service = client.wait_success(service.activate(), 120)
maps = _wait_until_active_map_count(service, 1, client)
expose_map = maps[0]
c = super_client.reload(expose_map.instance())
c_host_id = super_client.reload(c).instanceHostMaps()[0].hostId
health_c = super_client. \
list_healthcheckInstance(accountId=service.accountId, instanceId=c.id)
assert len(health_c) > 0
health_id = health_c[0].id
def validate_container_host(host_maps):
for host_map in host_maps:
assert host_map.hostId != c_host_id
host_maps = _wait_health_host_count(super_client, health_id, 3)
validate_container_host(host_maps)
# reactivate the service and
    # verify that it still has no more than 3 healthchecks
service = client.wait_success(service.deactivate(), 120)
# wait for the service state
wait_state(client, service.activate(), "active")
service = client.reload(service)
host_maps = _wait_health_host_count(super_client, health_id, 3)
validate_container_host(host_maps)
# reactivate the service, add 3 more hosts and verify
    # that the number of healthcheckers is filled up to 3, excluding
    # the container's host
service = client.wait_success(service.deactivate(), 120)
for i in range(0, 3):
host = register_simulated_host(new_context)
client.wait_success(host)
multiport = client.create_service(name='bar', launchConfig={
'imageUuid': new_context.image_uuid,
'ports': "54558"
}, stackId=stack.id, scale=5)
multiport = client.wait_success(client.wait_success(multiport).activate())
assert multiport.state == 'active'
multiport.remove()
client.wait_success(service.activate(), 120)
host_maps = _wait_health_host_count(super_client, health_id, 3)
validate_container_host(host_maps)
remove_service(service)
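# Helper: wait until the healthcheck instance has some, but no more than
# `count`, active healthcheckInstanceHostMaps and return them.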
def _wait_health_host_count(super_client, health_id, count):
def active_len():
match = super_client. \
list_healthcheckInstanceHostMap(healthcheckInstanceId=health_id,
state='active')
if len(match) <= count:
return match
return wait_for(active_len)
def test_external_svc_healthcheck(client, context):
env = client.create_stack(name='env-' + random_str())
    # test that the external service is created with the healthcheck
health_check = {"name": "check1", "responseTimeout": 3,
"interval": 4, "healthyThreshold": 5,
"unhealthyThreshold": 6, "requestLine": "index.html",
"port": 200}
ips = ["72.22.16.5", '192.168.0.10']
service = client.create_externalService(name=random_str(),
stackId=env.id,
externalIpAddresses=ips,
healthCheck=health_check)
service = client.wait_success(service)
assert service.healthCheck.name == "check1"
assert service.healthCheck.responseTimeout == 3
assert service.healthCheck.interval == 4
assert service.healthCheck.healthyThreshold == 5
assert service.healthCheck.unhealthyThreshold == 6
assert service.healthCheck.requestLine == "index.html"
assert service.healthCheck.port == 200
# test rancher-compose export
compose_config = env.exportconfig()
assert compose_config is not None
document = yaml.load(compose_config.rancherComposeConfig)
assert document['services'][service.name]['health_check'] is not None
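# Helper: report 'UP' for the given healthcheck host map through the host's
# agent account and wait for both the map and the container to become healthy.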
def _update_healthy(agent, hcihm, c, super_client):
ts = int(time.time())
client = _get_agent_client(agent)
se = client.create_service_event(externalTimestamp=ts,
reportedHealth='UP',
healthcheckUuid=hcihm.uuid)
super_client.wait_success(se)
hcihm = super_client.wait_success(super_client.reload(hcihm))
assert hcihm.healthState == 'healthy'
wait_for(lambda: super_client.reload(c).healthState == 'healthy')
return hcihm
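# Helper: report a failing health state through the host's agent account and
# wait for both the map and the container to become unhealthy.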
def _update_unhealthy(agent, hcihm, c, super_client):
ts = int(time.time())
client = _get_agent_client(agent)
se = client.create_service_event(externalTimestamp=ts,
reportedHealth='Something bad',
healthcheckUuid=hcihm.uuid)
super_client.wait_success(se)
hcihm = super_client.wait_success(super_client.reload(hcihm))
assert hcihm.healthState == 'unhealthy'
wait_for(lambda: super_client.reload(c).healthState == 'unhealthy')
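# Helper: block until the service has exactly `count` active
# serviceExposeMaps, then return them.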
def _wait_until_active_map_count(service, count, client):
def wait_for_map_count(service):
m = client. \
list_serviceExposeMap(serviceId=service.id, state='active')
return len(m) == count
wait_for_condition(client, service, wait_for_map_count)
return client. \
list_serviceExposeMap(serviceId=service.id, state='active')
def test_global_service_health(new_context):
client = new_context.client
host1 = new_context.host
host2 = register_simulated_host(new_context)
client.wait_success(host1)
client.wait_success(host2)
# create stack and services
env = client.create_stack(name='env-' + random_str())
image_uuid = new_context.image_uuid
labels = {"io.rancher.container.start_once": "true"}
secondary_lc = {"imageUuid": image_uuid, "name": "secondary",
"labels": labels}
launch_config = {
"imageUuid": image_uuid,
"labels": {
'io.rancher.scheduler.global': 'true'
}
}
svc = client.create_service(name=random_str(),
stackId=env.id,
launchConfig=launch_config,
secondaryLaunchConfigs=[secondary_lc])
svc = client.wait_success(svc)
assert svc.state == "inactive"
svc = client.wait_success(svc.activate(), 120)
wait_for(lambda: client.reload(svc).healthState == 'healthy')
# stop instances
c1 = _validate_compose_instance_start(client, svc, env, "1",
"secondary")
c2 = _validate_compose_instance_start(client, svc, env, "2",
"secondary")
client.wait_success(c1.stop())
client.wait_success(c2.stop())
wait_for(lambda: client.reload(svc).healthState == 'healthy')
wait_for(lambda: client.reload(env).healthState == 'healthy')
wait_for(lambda: client.reload(new_context.
project).healthState == 'healthy')
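# Helper: build the compose-style container name
# (<stack>-<service>-[<launch config>-]<number>), wait until exactly one
# running container with that name exists and return it.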
def _validate_compose_instance_start(client, service, env,
number, launch_config_name=None):
cn = launch_config_name + "-" if \
launch_config_name is not None else ""
name = env.name + "-" + service.name + "-" + cn + number
def wait_for_map_count(service):
instances = client. \
list_container(name=name,
state="running")
return len(instances) == 1
wait_for(lambda: wait_for_condition(client, service,
wait_for_map_count), timeout=5)
instances = client. \
list_container(name=name,
state="running")
return instances[0]
def test_stack_health_state(super_client, context, client):
c = client
env = client.create_stack(name='env-' + random_str())
svc = client.create_service(name='test', launchConfig={
'imageUuid': context.image_uuid
}, stackId=env.id)
svc = client.wait_success(client.wait_success(svc).activate())
assert svc.state == 'active'
wait_for(lambda: super_client.reload(svc).healthState == 'healthy')
wait_for(lambda: c.reload(env).healthState == 'healthy')
remove_service(svc)
wait_for(lambda: super_client.reload(svc).state == 'removed')
time.sleep(3)
wait_for(lambda: c.reload(env).healthState == 'healthy')
# Skipped
def _test_du_removal(super_client, new_context):
host = super_client.reload(register_simulated_host(new_context))
super_client.wait_success(host)
client = new_context.client
env = client.create_stack(name='env-' + random_str())
svc = client.create_service(name='test', launchConfig={
'imageUuid': new_context.image_uuid,
'healthCheck': {
'port': 80,
}
}, stackId=env.id, scale=1)
svc = client.wait_success(client.wait_success(svc).activate())
assert svc.state == 'active'
wait_for(lambda: super_client.reload(svc).healthState == 'initializing')
wait_for(lambda: client.reload(env).healthState == 'initializing')
maps = _wait_until_active_map_count(svc, 1, client)
expose_map = maps[0]
c1 = super_client.reload(expose_map.instance())
hci = find_one(c1.healthcheckInstances)
hcihm = find_one(hci.healthcheckInstanceHostMaps)
agent = _get_agent_for_container(new_context, c1)
assert hcihm.healthState == 'initializing'
assert c1.healthState == 'initializing'
assert c1.healthcheckStates is not None
assert len(c1.healthcheckStates) == 1
assert c1.healthcheckStates[0].healthState == 'initializing'
assert c1.healthcheckStates[0].hostId == hcihm.hostId
wait_for(lambda: super_client.reload(svc).healthState == 'initializing')
wait_for(lambda: super_client.reload(env).healthState == 'initializing')
wait_for(lambda: client.reload(new_context.project).
healthState == 'initializing')
ts = int(time.time())
client = _get_agent_client(agent)
se = client.create_service_event(externalTimestamp=ts,
reportedHealth='UP',
healthcheckUuid=hcihm.uuid)
super_client.wait_success(se)
hcihm = super_client.wait_success(super_client.reload(hcihm))
assert hcihm.healthState == 'healthy'
wait_for(lambda: super_client.reload(c1).healthState == 'healthy')
wait_for(lambda: super_client.reload(svc).healthState == 'healthy')
wait_for(lambda: super_client.reload(env).healthState == 'healthy')
se = client.create_service_event(externalTimestamp=ts,
reportedHealth='DOWN',
healthcheckUuid=hcihm.uuid)
super_client.wait_success(se)
wait_for(lambda: super_client.reload(c1).state == 'removed')
maps = _wait_until_active_map_count(svc, 1, super_client)
expose_map = maps[0]
c2 = super_client.reload(expose_map.instance())
assert c1.deploymentUnitUuid != c2.deploymentUnitUuid
def test_balancer_svc_upgrade(client, context, super_client):
env = client.create_stack(name='env-' + random_str())
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
lb_svc = client. \
create_loadBalancerService(name=random_str(),
stackId=env.id,
launchConfig=launch_config,
lbConfig={})
lb_svc = client.wait_success(lb_svc)
lb_svc = client.wait_success(lb_svc.activate())
assert len(lb_svc.launchConfig.labels) == 2
assert lb_svc.launchConfig.healthCheck is not None
launch_config = {"imageUuid": image_uuid,
'labels': {'foo': 'bar'}}
strategy = {"launchConfig": launch_config,
"intervalMillis": 100}
lb_svc.upgrade_action(inServiceStrategy=strategy)
m = super_client. \
list_serviceExposeMap(serviceId=lb_svc.id,
state='active', managed=True)
def wait_for_map_count(service):
m = super_client. \
list_serviceExposeMap(serviceId=service.id, state='active')
return len(m) == 2
def wait_for_managed_map_count(service):
m = super_client. \
list_serviceExposeMap(serviceId=service.id, state='active',
managed=True)
return len(m) == 1
wait_for_condition(client, lb_svc, wait_for_map_count)
wait_for_condition(client, lb_svc, wait_for_managed_map_count)
m = super_client. \
list_serviceExposeMap(serviceId=lb_svc.id,
state='active', managed=True)
c = super_client.reload(m[0].instance())
wait_for(lambda: super_client.reload(c).state == 'running')
c = super_client.reload(m[0].instance())
assert c.healthState == 'initializing'
hci = find_one(c.healthcheckInstances)
for hcihm in hci.healthcheckInstanceHostMaps():
agent = _get_agent_for_container(context, c)
assert hcihm.healthState == 'initializing'
_update_healthy(agent, hcihm, c, super_client)
lb_svc = client.wait_success(lb_svc)
assert lb_svc.launchConfig.healthCheck is not None
assert len(lb_svc.launchConfig.labels) == 3
| apache-2.0 | -4,785,318,302,728,767,000 | 36.600256 | 79 | 0.629996 | false |
jrg365/gpytorch | test/variational/test_independent_multitask_variational_strategy.py | 1 | 3533 | #!/usr/bin/env python3
import unittest
import torch
import gpytorch
from gpytorch.test.variational_test_case import VariationalTestCase
def likelihood_cls():
return gpytorch.likelihoods.MultitaskGaussianLikelihood(num_tasks=2)
def strategy_cls(model, inducing_points, variational_distribution, learn_inducing_locations):
return gpytorch.variational.IndependentMultitaskVariationalStrategy(
gpytorch.variational.VariationalStrategy(
model, inducing_points, variational_distribution, learn_inducing_locations
),
num_tasks=2,
)
class TestMultitaskVariationalGP(VariationalTestCase, unittest.TestCase):
@property
def batch_shape(self):
return torch.Size([2])
@property
def event_shape(self):
return torch.Size([32, 2])
@property
def distribution_cls(self):
return gpytorch.variational.CholeskyVariationalDistribution
@property
def likelihood_cls(self):
return likelihood_cls
@property
def mll_cls(self):
return gpytorch.mlls.VariationalELBO
@property
def strategy_cls(self):
return strategy_cls
def test_training_iteration(self, *args, expected_batch_shape=None, **kwargs):
expected_batch_shape = expected_batch_shape or self.batch_shape
expected_batch_shape = expected_batch_shape[:-1]
cg_mock, cholesky_mock = super().test_training_iteration(
*args, expected_batch_shape=expected_batch_shape, **kwargs
)
self.assertFalse(cg_mock.called)
self.assertEqual(cholesky_mock.call_count, 2) # One for each forward pass
def test_eval_iteration(self, *args, expected_batch_shape=None, **kwargs):
expected_batch_shape = expected_batch_shape or self.batch_shape
expected_batch_shape = expected_batch_shape[:-1]
cg_mock, cholesky_mock = super().test_eval_iteration(*args, expected_batch_shape=expected_batch_shape, **kwargs)
self.assertFalse(cg_mock.called)
self.assertEqual(cholesky_mock.call_count, 1) # One to compute cache, that's it!
class TestMultitaskPredictiveGP(TestMultitaskVariationalGP):
@property
def mll_cls(self):
return gpytorch.mlls.PredictiveLogLikelihood
class TestMultitaskRobustVGP(TestMultitaskVariationalGP):
@property
def mll_cls(self):
return gpytorch.mlls.GammaRobustVariationalELBO
class TestMeanFieldMultitaskVariationalGP(TestMultitaskVariationalGP):
@property
def distribution_cls(self):
return gpytorch.variational.MeanFieldVariationalDistribution
class TestMeanFieldMultitaskPredictiveGP(TestMultitaskPredictiveGP):
@property
def distribution_cls(self):
return gpytorch.variational.MeanFieldVariationalDistribution
class TestMeanFieldMultitaskRobustVGP(TestMultitaskRobustVGP):
@property
def distribution_cls(self):
return gpytorch.variational.MeanFieldVariationalDistribution
class TestDeltaMultitaskVariationalGP(TestMultitaskVariationalGP):
@property
def distribution_cls(self):
return gpytorch.variational.DeltaVariationalDistribution
class TestDeltaMultitaskPredictiveGP(TestMultitaskPredictiveGP):
@property
def distribution_cls(self):
return gpytorch.variational.DeltaVariationalDistribution
class TestDeltaMultitaskRobustVGP(TestMultitaskRobustVGP):
@property
def distribution_cls(self):
return gpytorch.variational.DeltaVariationalDistribution
if __name__ == "__main__":
unittest.main()
| mit | -2,250,295,315,782,813,200 | 29.721739 | 120 | 0.735918 | false |
sradanov/flyingpigeon | setup.py | 1 | 1385 | import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
CHANGES = open(os.path.join(here, 'CHANGES.rst')).read()
requires = [
'cdo',
'bokeh',
'ocgis',
'pandas',
'nose',
]
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Atmospheric Science',
]
setup(name='flyingpigeon',
version='0.2.0',
      description='Processes for climate data, indices and extreme events',
long_description=README + '\n\n' + CHANGES,
classifiers=classifiers,
author='Nils Hempelmann',
author_email='[email protected]',
url='http://www.lsce.ipsl.fr/',
license = "http://www.apache.org/licenses/LICENSE-2.0",
keywords='wps flyingpigeon pywps malleefowl ipsl birdhouse conda anaconda',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite='nose.collector',
install_requires=requires,
entry_points = {
'console_scripts': [
]}
,
)
| apache-2.0 | 871,063,087,530,311,400 | 29.108696 | 81 | 0.607942 | false |
apurtell/phoenix | bin/psql.py | 1 | 2718 | #!/usr/bin/env python
############################################################################
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
############################################################################
import os
import subprocess
import sys
import phoenix_utils
phoenix_utils.setPath()
args = phoenix_utils.shell_quote(sys.argv[1:])
# HBase configuration folder path (where hbase-site.xml resides) for
# HBase/Phoenix client side property override
hbase_config_path = os.getenv('HBASE_CONF_DIR', phoenix_utils.current_dir)
java_home = os.getenv('JAVA_HOME')
# load hbase-env.??? to extract JAVA_HOME, HBASE_PID_DIR, HBASE_LOG_DIR
hbase_env_path = None
hbase_env_cmd = None
if os.name == 'posix':
hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.sh')
hbase_env_cmd = ['bash', '-c', 'source %s && env' % hbase_env_path]
elif os.name == 'nt':
hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.cmd')
hbase_env_cmd = ['cmd.exe', '/c', 'call %s & set' % hbase_env_path]
if not hbase_env_path or not hbase_env_cmd:
print >> sys.stderr, "hbase-env file unknown on platform %s" % os.name
sys.exit(-1)
hbase_env = {}
if os.path.isfile(hbase_env_path):
p = subprocess.Popen(hbase_env_cmd, stdout = subprocess.PIPE)
for x in p.stdout:
(k, _, v) = x.partition('=')
hbase_env[k.strip()] = v.strip()
if hbase_env.has_key('JAVA_HOME'):
java_home = hbase_env['JAVA_HOME']
if java_home:
java = os.path.join(java_home, 'bin', 'java')
else:
java = 'java'
java_cmd = java + ' $PHOENIX_OPTS ' + \
' -cp "' + phoenix_utils.hbase_conf_dir + os.pathsep + phoenix_utils.phoenix_client_jar + \
os.pathsep + phoenix_utils.hadoop_conf + os.pathsep + phoenix_utils.hadoop_classpath + '" -Dlog4j.configuration=file:' + \
os.path.join(phoenix_utils.current_dir, "log4j.properties") + \
" org.apache.phoenix.util.PhoenixRuntime " + args
os.execl("/bin/sh", "/bin/sh", "-c", java_cmd)
| apache-2.0 | -4,959,389,883,573,509,000 | 37.28169 | 126 | 0.654157 | false |
huiyiqun/check_mk | tests/unit/checks/test_ra32e_sensors_check.py | 1 | 4376 | # -*- coding: utf-8 -*-
import pytest
from checktestlib import BasicCheckResult
pytestmark = pytest.mark.checks
@pytest.mark.parametrize('info,discoveries_expected,checks_expected', [
( # internal temperature
[[[u'2070', u'', u'']], []],
[
('ra32e_sensors', [('Internal', {})]),
('ra32e_sensors.humidity', []),
],
[
('ra32e_sensors', "Internal", {}, BasicCheckResult(0, u'20.7 °C', [('temp', 20.70)])),
('ra32e_sensors', "Heat Index", {}, BasicCheckResult(3, 'no data for sensor')),
('ra32e_sensors.humidity', "Internal", {}, BasicCheckResult(3, 'no data for sensor')),
]
),
( # internal humidity and heat index
[[[u'', u'6000', u'2070']], []],
[
('ra32e_sensors', [('Heat Index', {})]),
('ra32e_sensors.humidity', [('Internal', {})]),
],
[
('ra32e_sensors', "Internal", {},
BasicCheckResult(3, 'no data for sensor')),
('ra32e_sensors', "Heat Index", {},
BasicCheckResult(0, u'20.7 °C', [('temp', 20.70)])),
('ra32e_sensors.humidity', "Internal", {},
BasicCheckResult(0, '60.0%', [('humidity', 60.0, 101, 101, 0, 100)])),
]
),
( # temp sensor (ignores fahrenheit value)
[[[u'', u'', u'']], [[u'2.0', u'2580', u'9999', '', '', '']]],
[
('ra32e_sensors', [('Sensor 2', {})]),
],
[
('ra32e_sensors', "Sensor 2", {},
BasicCheckResult(0, u'25.8 °C', [('temp', 25.8)])),
]
),
( # temp/active sensor
[[[u'', u'', u'']], [[u'5.0', u'3100', '9999', '0', '', '']]],
[
('ra32e_sensors', [('Sensor 5', {})]),
('ra32e_sensors.power', [('Sensor 5', {})]),
],
[
('ra32e_sensors', "Sensor 5", {'levels': (30.0, 35.0)},
BasicCheckResult(1, u'31.0 °C (warn/crit at 30.0/35.0 °C)', [('temp', 31.0, 30.0, 35.0)])),
('ra32e_sensors.power', "Sensor 5", {},
BasicCheckResult(2, 'Device status: no power detected(2)')),
('ra32e_sensors.power', "Sensor 5", {'map_device_states': [('no power detected', 1)]},
BasicCheckResult(1, 'Device status: no power detected(2)')),
]
),
( # temp/analog and humidity sensor
[[[u'', u'', u'']], [[u'1.0', u'2790', '9999', '7500', '9999', '2800'],
[u'8.0', u'2580', '9999', '200', '9999', '']]],
[
('ra32e_sensors', [('Heat Index 1', {}), ('Sensor 1', {}), ('Sensor 8', {})]),
('ra32e_sensors.voltage', [('Sensor 8', {})]),
('ra32e_sensors.humidity', [('Sensor 1', {})]),
],
[
('ra32e_sensors', "Sensor 8", {},
BasicCheckResult(0, u'25.8 °C', [('temp', 25.8)])),
('ra32e_sensors', "Heat Index 1", {'levels': (27.0, 28.0)},
BasicCheckResult(2, u'28.0 °C (warn/crit at 27.0/28.0 °C)', [('temp', 28.0, 27.0, 28.0)])),
('ra32e_sensors.voltage', "Sensor 8", {'voltage': (210, 180)},
BasicCheckResult(1, 'Voltage: 200 V (warn/crit below 210/180 V)', [('voltage', 200)])),
('ra32e_sensors', "Sensor 1", {'levels_lower': (30.0, 25.0)},
BasicCheckResult(1, u'27.9 °C (warn/crit below 30.0/25.0 °C)', [('temp', 27.9)])),
('ra32e_sensors.humidity', "Sensor 1", {'levels_lower': (85.0, 75.0)},
BasicCheckResult(1, '75.0% (warn/crit below 85.0%/75.0%)', [('humidity', 75.0, None, None, 0, 100)])),
]),
])
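# For each parametrized SNMP payload: parse it once, then compare the
# discovery results and the check results against the expected values above.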
def test_ra32e_sensors_inputs(check_manager, info, discoveries_expected, checks_expected):
ra32e_sensors_checks = [
'ra32e_sensors',
'ra32e_sensors.humidity',
'ra32e_sensors.voltage',
'ra32e_sensors.power'
]
checks = {name: check_manager.get_check(name) for name in ra32e_sensors_checks}
parsed = checks['ra32e_sensors'].run_parse(info)
for check, expected in discoveries_expected:
result = checks[check].run_discovery(parsed)
assert sorted(result) == expected
for check, item, params, expected in checks_expected:
output = checks[check].run_check(item, params, parsed)
result = BasicCheckResult(*output)
assert result == expected
| gpl-2.0 | 4,729,765,366,589,821,000 | 42.227723 | 115 | 0.491296 | false |
BenWiederhake/House-Of-Tweets | backend/vomit.py | 1 | 1212 | #!/usr/bin/env python3
import json
import mq
from sys import argv
from time import sleep
def pull(filename):
with open(filename, 'r') as fp:
return json.load(fp)
def check(tweets):
assert len(tweets) > 0
first_batch = tweets[0]
assert len(first_batch) > 0
first_tweet = first_batch[0]
EXPECT_KEYS = {'content', 'hashtags', 'id', 'image', 'name',
'partycolor', 'retweet', 'sound', 'time', 'twitterName'}
# Implicit assertion: first_tweet is a dict
assert EXPECT_KEYS.issubset(first_tweet.keys()), first_tweet.keys()
# Waiting period, in milliseconds, between each sent batch
PERIOD_MS = 3000
def vomit(tweets):
print('Now vomiting {} tweet-batches all over the place.'.format(len(tweets)))
q = mq.RealQueue('tweets')
for batch in tweets:
q.post(batch)
sleep(PERIOD_MS / 1000.0)
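# Load the tweets JSON file, sanity-check its structure and replay the
# batches to the queue.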
def transfer_file(filename):
tweets = pull(filename)
check(tweets)
vomit(tweets)
if __name__ == '__main__':
if len(argv) != 1 + 1:
print('{}: need precisely one argument: the name of the tweets JSON file.'.format(argv[0]))
exit(1)
else:
transfer_file(argv[1]) # argv[0] is the program name
| gpl-3.0 | 2,856,715,746,268,283,000 | 24.25 | 99 | 0.626238 | false |
zzqcn/wireshark | tools/make-plugin-reg.py | 1 | 5547 | #!/usr/bin/env python3
#
# Looks for registration routines in the plugins
# and assembles C code to call all the routines.
#
import os
import sys
import re
#
# The first argument is the directory in which the source files live.
#
srcdir = sys.argv[1]
#
# The second argument is either "plugin", "plugin_wtap" or "plugin_codec".
#
registertype = sys.argv[2]
#
# All subsequent arguments are the files to scan.
#
files = sys.argv[3:]
final_filename = "plugin.c"
preamble = """\
/*
* Do not modify this file. Changes will be overwritten.
*
* Generated automatically from %s.
*/
""" % (sys.argv[0])
# Create the proper list of filenames
filenames = []
for file in files:
if os.path.isfile(file):
filenames.append(file)
else:
filenames.append(os.path.join(srcdir, file))
if len(filenames) < 1:
print("No files found")
sys.exit(1)
# Look through all files, applying the regex to each line.
# If the pattern matches, save the "symbol" section to the
# appropriate set.
regs = {
'proto_reg': set(),
'handoff_reg': set(),
'wtap_register': set(),
'codec_register': set(),
}
# For those that don't know Python, r"" indicates a raw string,
# devoid of Python escapes.
proto_regex = r"\bproto_register_(?P<symbol>[_A-Za-z0-9]+)\s*\(\s*void\s*\)[^;]*$"
handoff_regex = r"\bproto_reg_handoff_(?P<symbol>[_A-Za-z0-9]+)\s*\(\s*void\s*\)[^;]*$"
wtap_reg_regex = r"\bwtap_register_(?P<symbol>[_A-Za-z0-9]+)\s*\([^;]+$"
codec_reg_regex = r"\bcodec_register_(?P<symbol>[_A-Za-z0-9]+)\s*\([^;]+$"
# This table drives the pattern-matching and symbol-harvesting
patterns = [
( 'proto_reg', re.compile(proto_regex, re.MULTILINE) ),
( 'handoff_reg', re.compile(handoff_regex, re.MULTILINE) ),
( 'wtap_register', re.compile(wtap_reg_regex, re.MULTILINE) ),
( 'codec_register', re.compile(codec_reg_regex, re.MULTILINE) ),
]
# Grep
for filename in filenames:
file = open(filename)
# Read the whole file into memory
contents = file.read()
for action in patterns:
regex = action[1]
for match in regex.finditer(contents):
symbol = match.group("symbol")
sym_type = action[0]
regs[sym_type].add(symbol)
# We're done with the file contents
del contents
file.close()
# Make sure we actually processed something
if (len(regs['proto_reg']) < 1 and len(regs['wtap_register']) < 1 and len(regs['codec_register']) < 1):
print("No plugin registrations found")
sys.exit(1)
# Convert the sets into sorted lists to make the output pretty
regs['proto_reg'] = sorted(regs['proto_reg'])
regs['handoff_reg'] = sorted(regs['handoff_reg'])
regs['wtap_register'] = sorted(regs['wtap_register'])
regs['codec_register'] = sorted(regs['codec_register'])
reg_code = ""
reg_code += preamble
reg_code += """
#include "config.h"
#include <gmodule.h>
/* plugins are DLLs */
#define WS_BUILD_DLL
#include "ws_symbol_export.h"
"""
if registertype == "plugin":
reg_code += "#include \"epan/proto.h\"\n\n"
if registertype == "plugin_wtap":
reg_code += "#include \"wiretap/wtap.h\"\n\n"
if registertype == "plugin_codec":
reg_code += "#include \"wsutil/codecs.h\"\n\n"
for symbol in regs['proto_reg']:
reg_code += "void proto_register_%s(void);\n" % (symbol)
for symbol in regs['handoff_reg']:
reg_code += "void proto_reg_handoff_%s(void);\n" % (symbol)
for symbol in regs['wtap_register']:
reg_code += "void wtap_register_%s(void);\n" % (symbol)
for symbol in regs['codec_register']:
reg_code += "void codec_register_%s(void);\n" % (symbol)
reg_code += """
WS_DLL_PUBLIC_DEF const gchar plugin_version[] = PLUGIN_VERSION;
WS_DLL_PUBLIC_DEF const int plugin_want_major = VERSION_MAJOR;
WS_DLL_PUBLIC_DEF const int plugin_want_minor = VERSION_MINOR;
WS_DLL_PUBLIC void plugin_register(void);
void plugin_register(void)
{
"""
if registertype == "plugin":
for symbol in regs['proto_reg']:
reg_code +=" static proto_plugin plug_%s;\n\n" % (symbol)
reg_code +=" plug_%s.register_protoinfo = proto_register_%s;\n" % (symbol, symbol)
if symbol in regs['handoff_reg']:
reg_code +=" plug_%s.register_handoff = proto_reg_handoff_%s;\n" % (symbol, symbol)
else:
reg_code +=" plug_%s.register_handoff = NULL;\n" % (symbol)
reg_code += " proto_register_plugin(&plug_%s);\n" % (symbol)
if registertype == "plugin_wtap":
for symbol in regs['wtap_register']:
reg_code += " static wtap_plugin plug_%s;\n\n" % (symbol)
reg_code += " plug_%s.register_wtap_module = wtap_register_%s;\n" % (symbol, symbol)
reg_code += " wtap_register_plugin(&plug_%s);\n" % (symbol)
if registertype == "plugin_codec":
for symbol in regs['codec_register']:
reg_code += " static codecs_plugin plug_%s;\n\n" % (symbol)
reg_code += " plug_%s.register_codec_module = codec_register_%s;\n" % (symbol, symbol)
reg_code += " codecs_register_plugin(&plug_%s);\n" % (symbol)
reg_code += "}\n"
try:
fh = open(final_filename, 'w')
fh.write(reg_code)
fh.close()
print('Generated {} for {}.'.format(final_filename, os.path.basename(srcdir)))
except OSError:
sys.exit('Unable to write ' + final_filename + '.\n')
#
# Editor modelines - https://www.wireshark.org/tools/modelines.html
#
# Local variables:
# c-basic-offset: 4
# indent-tabs-mode: nil
# End:
#
# vi: set shiftwidth=4 expandtab:
# :indentSize=4:noTabs=true:
#
| gpl-2.0 | 3,218,243,372,919,853,000 | 29.478022 | 103 | 0.628989 | false |
alphagov/notifications-api | app/dao/api_key_dao.py | 1 | 1564 | import uuid
from datetime import datetime, timedelta
from sqlalchemy import func, or_
from app import db
from app.dao.dao_utils import autocommit, version_class
from app.models import ApiKey
@autocommit
@version_class(ApiKey)
def save_model_api_key(api_key):
if not api_key.id:
api_key.id = uuid.uuid4() # must be set now so version history model can use same id
api_key.secret = uuid.uuid4()
db.session.add(api_key)
@autocommit
@version_class(ApiKey)
def expire_api_key(service_id, api_key_id):
api_key = ApiKey.query.filter_by(id=api_key_id, service_id=service_id).one()
api_key.expiry_date = datetime.utcnow()
db.session.add(api_key)
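# Return the single unexpired key when an id is given; otherwise return all
# keys that are unexpired or expired less than seven days ago.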
def get_model_api_keys(service_id, id=None):
if id:
return ApiKey.query.filter_by(id=id, service_id=service_id, expiry_date=None).one()
seven_days_ago = datetime.utcnow() - timedelta(days=7)
return ApiKey.query.filter(
or_(ApiKey.expiry_date == None, func.date(ApiKey.expiry_date) > seven_days_ago), # noqa
ApiKey.service_id == service_id
).all()
def get_unsigned_secrets(service_id):
"""
    This method should only be used by the authentication of API calls.
"""
api_keys = ApiKey.query.filter_by(service_id=service_id, expiry_date=None).all()
keys = [x.secret for x in api_keys]
return keys
def get_unsigned_secret(key_id):
"""
    This method should only be used by the authentication of API calls.
"""
api_key = ApiKey.query.filter_by(id=key_id, expiry_date=None).one()
return api_key.secret
| mit | 3,697,148,109,029,428,000 | 29.076923 | 96 | 0.686061 | false |
moreati/pydgin | scripts/build.py | 1 | 4989 | #!/usr/bin/env python
#=========================================================================
# build.py
#=========================================================================
# Builds pydgin.
import multiprocessing
import os
import shutil
import sys
import subprocess
import distutils.spawn
usage = """Usage:
./build.py [flags] [targets]
Flags: -h,--help this help message
-jN parallelize for N cores (omit N for # of processors)
"""
all_targets = [ "pydgin-parc-jit", "pydgin-parc-nojit-debug",
"pydgin-arm-jit", "pydgin-arm-nojit-debug" ]
def build_target( name, pypy_dir, build_dir ):
# use the name to determine the arch, jit and debug
arch = None
if "parc" in name:
arch = "parc"
if "arm" in name:
assert arch is None, "conflicting arch definitions {} and {}" \
.format( arch, "arm" )
arch = "arm"
assert arch is not None, "could not determine arch from name"
if "jit" in name and "nojit" not in name:
jit = True
elif "nojit" in name:
jit = False
else:
# default behavior if neither jit or nojit in name
jit = True
if "debug" in name and "nodebug" not in name:
debug = True
elif "nodebug" in name:
debug = False
else:
# default behavior if neither debug or nodebug in name
debug = False
print "Building {}\n arch: {}\n jit: {}\n debug: {}\n" \
.format( name, arch, jit, debug )
# check for the pypy executable, if it doesn't exist warn
python_bin = distutils.spawn.find_executable('pypy')
if not python_bin:
print ('WARNING: Cannot find a pypy executable!\n'
' Proceeding to translate with CPython.\n'
' Note that this will be *much* slower than using pypy.\n'
' Please install pypy for faster translation times!\n')
python_bin = 'python'
# create the translation command and execute it
os.chdir('../{}'.format( arch ) )
cmd = ( '{4} {1}/rpython/bin/rpython {2} {0}-sim.py {3}'
.format( arch, pypy_dir,
"--opt=jit" if jit else "",
"--debug" if debug else "",
python_bin )
)
print cmd
ret = subprocess.call( cmd, shell=True )
# check for success and cleanup
if ret != 0:
print "{} failed building, aborting!".format( name )
sys.exit( ret )
shutil.copy( name, '../scripts/{}'.format( build_dir ) )
symlink_name = '../scripts/builds/{}'.format( name )
if os.path.lexists( symlink_name ):
os.remove( symlink_name )
os.symlink( '../{}/{}'.format( build_dir, name ), symlink_name )
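# Parse command-line flags and targets, locate the PyPy sources via
# PYDGIN_PYPY_SRC_DIR, create the versioned build directory and return
# (targets, pypy_dir, build_dir, num_processes).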
def setup_environment():
# assume if arg starts with a dash, it's a flag
args = sys.argv[1:]
flags = filter( lambda x: x.startswith('-'), args )
targets = filter( lambda x: not x.startswith('-'), args )
# don't parallelize by default
num_processes = 1
for flag in flags:
if flag == '-h' or flag == '--help':
print usage
sys.exit( 1 )
elif flag.startswith( '-j' ):
if flag == '-j':
# get the cpu count
num_processes = multiprocessing.cpu_count()
else:
num_processes = int( flag[2:] )
else:
print "Unknown flag:", flag
print usage
sys.exit( 1 )
# ensure we know where the pypy source code is
try:
pypy_dir = os.environ['PYDGIN_PYPY_SRC_DIR']
except KeyError as e:
raise ImportError( 'Please define the PYDGIN_PYPY_SRC_DIR '
'environment variable!')
# all includes all_targets
if "all" in targets:
targets += all_targets
targets.remove( "all" )
# unique-ify
targets = list( set( targets ) )
# if there are no targets, we add all
if len( targets ) == 0:
targets = all_targets
# get the version number
pydgin_ver = subprocess.check_output(
"./vcs-version.sh", shell=True ).rstrip()
print "Building Pydgin..."
print "Version: {}".format( pydgin_ver )
print "PyPy source: {}".format( pypy_dir )
print "Targets: {}".format( targets )
print "Number of processes: {}".format( num_processes )
# create build dir
build_dir = "builds/pydgin-{}/bin".format( pydgin_ver )
subprocess.call( "mkdir -p {}".format( build_dir ), shell=True )
return targets, pypy_dir, build_dir, num_processes
def main():
# get targets and environment
targets, pypy_dir, build_dir, num_processes = setup_environment()
# don't parallelize for 1 process
if num_processes <= 1:
for target in targets:
build_target( target, pypy_dir, build_dir )
else:
# build targets in parallel
pool = multiprocessing.Pool( processes=num_processes )
try:
for target in targets:
pool.apply_async( build_target, [target, pypy_dir, build_dir])
pool.close()
pool.join()
except KeyboardInterrupt:
print "Terminating workers!"
pool.terminate()
pool.join()
print 'Parallel builds complete.'
if __name__ == "__main__":
main()
| bsd-3-clause | -7,807,380,443,629,281,000 | 26.871508 | 74 | 0.588495 | false |
oasis-open/cti-pattern-validator | setup.py | 1 | 1795 | #!/usr/bin/env python
from setuptools import find_packages, setup
with open('README.rst') as f:
readme = f.read()
doc_requires = [
'sphinx',
'sphinx-prompt',
]
test_requires = [
'coverage',
'pytest',
'pytest-cov',
]
dev_requires = doc_requires + test_requires + [
'bumpversion',
'check-manifest',
'pre-commit',
# test_requires are installed into every tox environment, so we don't
# want to include tox there.
'tox',
]
setup(
name='stix2-patterns',
version='1.3.2',
description='Validate STIX 2 Patterns.',
long_description=readme,
long_description_content_type='text/x-rst',
url="https://github.com/oasis-open/cti-pattern-validator",
author='OASIS Cyber Threat Intelligence Technical Committee',
author_email='[email protected]',
maintainer='Chris Lenk',
maintainer_email='[email protected]',
python_requires=">=3.6",
packages=find_packages(),
install_requires=[
'antlr4-python3-runtime~=4.9.0',
'six',
],
package_data={
'stix2patterns.test.v20': ['spec_examples.txt'],
'stix2patterns.test.v21': ['spec_examples.txt'],
},
entry_points={
'console_scripts': [
'validate-patterns = stix2patterns.validator:main',
],
},
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
extras_require={
'dev': dev_requires,
'docs': doc_requires,
'test': test_requires,
},
)
| bsd-3-clause | 2,211,151,469,891,606,000 | 25.397059 | 73 | 0.596657 | false |
alpodrezov/ordering_lunch | xavchik/settings.py | 1 | 2241 | """
Django settings for xavchik project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '(#@6cjvgq1^pp0*o*^8hs20ozo!27do1&-^nqc92ol%4d8)(5l'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'xavchik',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'xavchik.urls'
WSGI_APPLICATION = 'xavchik.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
('static', '/Volumes/Macintosh HD 2 2/work_projects/python/ordering_lunch/xavchik/static'),
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
) | unlicense | -7,114,326,989,939,682,000 | 23.637363 | 95 | 0.720214 | false |
splotz90/urh | src/urh/signalprocessing/SimulatorItem.py | 1 | 2278 | class SimulatorItem(object):
protocol_manager = None
expression_parser = None
def __init__(self):
self.__parentItem = None
self.__childItems = []
self.logging_active = True
self.is_valid = True
def check(self):
return True
def get_pos(self):
if self.parent() is not None:
return self.parent().children.index(self)
return 0
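    # Hierarchical 1-based position of this item as a dotted string,
    # e.g. "2.1" for the first child of the second top-level item.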
def index(self):
if self.parent() is None:
return ""
item = self
result = str(item.get_pos() + 1)
while item.parent().parent() is not None:
item = item.parent()
result = str(item.get_pos() + 1) + "." + result
return result
def insert_child(self, pos, child):
child.set_parent(self)
self.children.insert(pos, child)
def delete(self):
for child in self.children[:]:
child.set_parent(None)
self.set_parent(None)
def parent(self):
return self.__parentItem
def set_parent(self, value):
if self.parent() is not None:
self.parent().children.remove(self)
self.__parentItem = value
@property
def children(self):
return self.__childItems
def child_count(self) -> int:
return len(self.children)
def next_sibling(self):
result = None
index = self.get_pos()
if self.parent() and index < self.parent().child_count() - 1:
result = self.parent().children[index + 1]
return result
def prev_sibling(self):
result = None
index = self.get_pos()
if self.parent() and index > 0:
result = self.parent().children[index - 1]
return result
def next(self):
if self.child_count():
return self.children[0]
curr = self
while curr is not None:
if curr.next_sibling() is not None:
return curr.next_sibling()
curr = curr.parent()
return None
def prev(self):
if self.prev_sibling() is not None:
curr = self.prev_sibling()
else:
return self.parent()
while curr.child_count():
curr = curr.children[-1]
return curr | gpl-3.0 | 3,334,993,551,861,423,000 | 21.79 | 69 | 0.53863 | false |
zalf-lsa/monica | installer/Hohenfinow2/python/run-producer.py | 1 | 2337 | #!/usr/bin/python
# -*- coding: UTF-8
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/. */
# Authors:
# Michael Berg-Mohnicke <[email protected]>
#
# Maintainers:
# Currently maintained by the authors.
#
# This file has been created at the Institute of
# Landscape Systems Analysis at the ZALF.
# Copyright (C) Leibniz Centre for Agricultural Landscape Research (ZALF)
import json
import sys
import zmq
import os
import monica_io
#print sys.path
#print "pyzmq version: ", zmq.pyzmq_version(), " zmq version: ", zmq.zmq_version()
def run_producer(server = {"server": None, "port": None}, shared_id = None):
context = zmq.Context()
socket = context.socket(zmq.PUSH)
config = {
"port": server["port"] if server["port"] else "6666",
"server": server["server"] if server["server"] else "localhost",
"sim.json": os.path.join(os.path.dirname(__file__), '../sim-min.json'),
"crop.json": os.path.join(os.path.dirname(__file__), '../crop-min.json'),
"site.json": os.path.join(os.path.dirname(__file__), '../site-min.json'),
"climate.csv": os.path.join(os.path.dirname(__file__), '../climate-min.csv'),
"shared_id": shared_id
}
# read commandline args only if script is invoked directly from commandline
if len(sys.argv) > 1 and __name__ == "__main__":
for arg in sys.argv[1:]:
k, v = arg.split("=")
if k in config:
config[k] = v
print "config:", config
socket.connect("tcp://" + config["server"] + ":" + config["port"])
with open(config["sim.json"]) as _:
sim_json = json.load(_)
with open(config["site.json"]) as _:
site_json = json.load(_)
with open(config["crop.json"]) as _:
crop_json = json.load(_)
with open(config["climate.csv"]) as _:
climate_csv = _.read()
env = monica_io.create_env_json_from_json_config({
"crop": crop_json,
"site": site_json,
"sim": sim_json,
"climate": climate_csv
})
#print env
# add shared ID if env to be sent to routable monicas
if config["shared_id"]:
env["sharedId"] = config["shared_id"]
socket.send_json(env)
print "done"
if __name__ == "__main__":
run_producer() | mpl-2.0 | -8,362,076,501,140,330,000 | 26.833333 | 83 | 0.620454 | false |
JacobFischer/Joueur.py | games/anarchy/building.py | 1 | 3963 | # Building: A basic building. It does nothing besides burn down. Other Buildings inherit from this class.
# DO NOT MODIFY THIS FILE
# Never try to directly create an instance of this class, or modify its member variables.
# Instead, you should only be reading its variables and calling its functions.
from games.anarchy.game_object import GameObject
# <<-- Creer-Merge: imports -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# you can add additional import(s) here
# <<-- /Creer-Merge: imports -->>
class Building(GameObject):
"""The class representing the Building in the Anarchy game.
A basic building. It does nothing besides burn down. Other Buildings inherit from this class.
"""
def __init__(self):
"""Initializes a Building with basic logic as provided by the Creer code generator."""
GameObject.__init__(self)
# private attributes to hold the properties so they appear read only
self._bribed = False
self._building_east = None
self._building_north = None
self._building_south = None
self._building_west = None
self._fire = 0
self._health = 0
self._is_headquarters = False
self._owner = None
self._x = 0
self._y = 0
@property
def bribed(self):
"""When True this building has already been bribed this turn and cannot be bribed again this turn.
:rtype: bool
"""
return self._bribed
@property
def building_east(self):
"""The Building directly to the east of this building, or None if not present.
:rtype: games.anarchy.building.Building
"""
return self._building_east
@property
def building_north(self):
"""The Building directly to the north of this building, or None if not present.
:rtype: games.anarchy.building.Building
"""
return self._building_north
@property
def building_south(self):
"""The Building directly to the south of this building, or None if not present.
:rtype: games.anarchy.building.Building
"""
return self._building_south
@property
def building_west(self):
"""The Building directly to the west of this building, or None if not present.
:rtype: games.anarchy.building.Building
"""
return self._building_west
@property
def fire(self):
"""How much fire is currently burning the building, and thus how much damage it will take at the end of its owner's turn. 0 means no fire.
:rtype: int
"""
return self._fire
@property
def health(self):
"""How much health this building currently has. When this reaches 0 the Building has been burned down.
:rtype: int
"""
return self._health
@property
def is_headquarters(self):
"""True if this is the Headquarters of the owning player, False otherwise. Burning this down wins the game for the other Player.
:rtype: bool
"""
return self._is_headquarters
@property
def owner(self):
"""The player that owns this building. If it burns down (health reaches 0) that player gets an additional bribe(s).
:rtype: games.anarchy.player.Player
"""
return self._owner
@property
def x(self):
"""The location of the Building along the x-axis.
:rtype: int
"""
return self._x
@property
def y(self):
"""The location of the Building along the y-axis.
:rtype: int
"""
return self._y
# <<-- Creer-Merge: functions -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# if you want to add any client side logic (such as state checking functions) this is where you can add them
# <<-- /Creer-Merge: functions -->>
| mit | -4,873,034,364,806,907,000 | 30.452381 | 146 | 0.633611 | false |
gento/dionaea | modules/python/scripts/pptp/include/packets.py | 1 | 4630 | #********************************************************************************
#* Dionaea
#* - catches bugs -
#*
#*
#*
#* Copyright (C) 2015 Tan Kean Siong
#*
#* This program is free software; you can redistribute it and/or
#* modify it under the terms of the GNU General Public License
#* as published by the Free Software Foundation; either version 2
#* of the License, or (at your option) any later version.
#*
#* This program is distributed in the hope that it will be useful,
#* but WITHOUT ANY WARRANTY; without even the implied warranty of
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#* GNU General Public License for more details.
#*
#* You should have received a copy of the GNU General Public License
#* along with this program; if not, write to the Free Software
#* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#*
#*
#* contact [email protected]
#*
#*******************************************************************************/
from dionaea.smb.include.packet import *
from dionaea.smb.include.fieldtypes import *
#PPTP Control Message Types
PPTP_CTRMSG_TYPE_STARTCTRCON_REQUEST = 0x01
PPTP_CTRMSG_TYPE_STARTCTRCON_REPLY = 0x02
PPTP_CTRMSG_TYPE_OUTGOINGCALL_REQUEST = 0x07
PPTP_CTRMSG_TYPE_OUTGOINGCALL_REPLY = 0x08
#PPP Link Control Protocol Types
PPP_LCP_Configuration_Request = 0x01
# https://www.ietf.org/rfc/rfc2637.txt
class PPTP_StartControlConnection_Request(Packet):
name="PPTP Start-Control-Connection-Request"
controlmessage_type = PPTP_CTRMSG_TYPE_STARTCTRCON_REQUEST
fields_desc =[
XShortField("Length",0),
XShortField("MessageType",0),
XIntField("MagicCookie",0),
XShortField("ControlMessageType",0),
XShortField("Reserved",0),
XShortField("ProtocolVersion",0),
XShortField("Reserved",0),
XIntField("FramingCapabilites",0),
XIntField("BearerCapabilites",0),
XShortField("MaxChannels",0),
XShortField("FirmwareRevision",0),
StrFixedLenField("HostName", "", 64),
StrFixedLenField("VendorName", "", 64),
]
class PPTP_StartControlConnection_Reply(Packet):
name="PPTP Start-Control-Connection-Reply"
controlmessage_type = PPTP_CTRMSG_TYPE_STARTCTRCON_REPLY
fields_desc =[
XShortField("Length",0x9c),
XShortField("MessageType",0x01),
XIntField("MagicCookie",0x1a2b3c4d),
XShortField("ControlMessageType",0x02),
XShortField("Reserved",0),
LEShortField("ProtocolVersion",0x01),
ByteField("ResultCode",0x01),
ByteField("ErrorCode",0x00),
LEIntField("FramingCapabilites",0),
LEIntField("BearerCapabilites",0),
XShortField("MaxChannels",1),
XShortField("FirmwareRevision",1),
StrFixedLenField("HostName", "", 64),
StrFixedLenField("VendorName", "", 64),
]
class PPTP_OutgoingCall_Request(Packet):
name="PPTP Outgoing-Call-Request"
controlmessage_type = PPTP_CTRMSG_TYPE_OUTGOINGCALL_REQUEST
fields_desc =[
XShortField("Length",0),
XShortField("MessageType",0),
XIntField("MagicCookie",0),
XShortField("ControlMessageType",0),
XShortField("Reserved",0),
XShortField("CallID",0),
XShortField("CallSerialNumber",0),
XIntField("MinBPS",0),
XIntField("MaxBPS",0),
XIntField("BearerType",0),
XIntField("FramingType",0),
XShortField("PacketWindowSize",0),
XShortField("PacketProcessingDelay",0),
XShortField("PacketNumberLength",0),
XShortField("Reserved",0),
StrFixedLenField("PhoneNumber", "", 64),
StrFixedLenField("Subaddress", "", 64),
]
class PPTP_OutgoingCall_Reply(Packet):
name="PPTP Outgoing-Call-Reply"
controlmessage_type = PPTP_CTRMSG_TYPE_OUTGOINGCALL_REPLY
fields_desc =[
XShortField("Length",0x20),
XShortField("MessageType",0x01),
XIntField("MagicCookie",0x1a2b3c4d),
XShortField("ControlMessageType",0x08),
XShortField("Reserved",0),
XShortField("CallID",0x480),
XShortField("PeerCallID",0),
ByteField("ResultCode",0x01),
ByteField("ErrorCode",0x00),
XShortField("CauseCode",0),
XIntField("ConnectSpeed",0x05F5E100),
XShortField("PacketWindowSize",0x2000),
XShortField("PacketProcessingDelay",0),
XShortField("PacketNumberLength",0),
XShortField("PhysicalChannelID",0),
]
class PPTP(Packet):
name="PPTP"
fields_desc =[
ByteField("Address",0),
ByteField("Control",0),
XShortField("Protocol",0),
]
class PPP_LCP_Configuration_Request(Packet):
name="PPP LCP_Configuration_Request"
controlmessage_type = PPP_LCP_Configuration_Request
fields_desc =[
ByteField("Code",0),
ByteField("Identifier",0),
XShortField("Length",0),
StrFixedLenField("Options", b"", length_from=lambda pkt: pkt.Length-4),
]
| gpl-2.0 | -748,663,431,541,799,300 | 31.605634 | 82 | 0.700432 | false |
dmilith/SublimeText3-dmilith | Packages/pyte/all/pyte/graphics.py | 1 | 3441 | # -*- coding: utf-8 -*-
"""
pyte.graphics
~~~~~~~~~~~~~
This module defines graphic-related constants, mostly taken from
:manpage:`console_codes(4)` and
http://pueblo.sourceforge.net/doc/manual/ansi_color_codes.html.
:copyright: (c) 2011-2012 by Selectel.
:copyright: (c) 2012-2017 by pyte authors and contributors,
see AUTHORS for details.
:license: LGPL, see LICENSE for more details.
"""
from __future__ import unicode_literals
#: A mapping of ANSI text style codes to style names, "+" means the:
#: attribute is set, "-" -- reset; example:
#:
#: >>> text[1]
#: '+bold'
#: >>> text[9]
#: '+strikethrough'
TEXT = {
1: "+bold",
3: "+italics",
4: "+underscore",
7: "+reverse",
9: "+strikethrough",
22: "-bold",
23: "-italics",
24: "-underscore",
27: "-reverse",
29: "-strikethrough",
}
#: A mapping of ANSI foreground color codes to color names.
#:
#: >>> FG_ANSI[30]
#: 'black'
#: >>> FG_ANSI[38]
#: 'default'
FG_ANSI = {
30: "black",
31: "red",
32: "green",
33: "brown",
34: "blue",
35: "magenta",
36: "cyan",
37: "white",
39: "default" # white.
}
#: An alias to :data:`~pyte.graphics.FG_ANSI` for compatibility.
FG = FG_ANSI
#: A mapping of non-standard ``aixterm`` foreground color codes to
#: color names. These are high intensity colors and thus should be
#: complemented by ``+bold``.
FG_AIXTERM = {
90: "black",
91: "red",
92: "green",
93: "brown",
94: "blue",
95: "magenta",
96: "cyan",
97: "white"
}
#: A mapping of ANSI background color codes to color names.
#:
#: >>> BG_ANSI[40]
#: 'black'
#: >>> BG_ANSI[48]
#: 'default'
BG_ANSI = {
40: "black",
41: "red",
42: "green",
43: "brown",
44: "blue",
45: "magenta",
46: "cyan",
47: "white",
49: "default" # black.
}
#: An alias to :data:`~pyte.graphics.BG_ANSI` for compatibility.
BG = BG_ANSI
#: A mapping of non-standard ``aixterm`` background color codes to
#: color names. These are high intensity colors and thus should be
#: complemented by ``+bold``.
BG_AIXTERM = {
100: "black",
101: "red",
102: "green",
103: "brown",
104: "blue",
105: "magenta",
106: "cyan",
107: "white"
}
#: SGR code for foreground in 256 or True color mode.
FG_256 = 38
#: SGR code for background in 256 or True color mode.
BG_256 = 48
#: A table of 256 foreground or background colors.
# The following code is part of the Pygments project (BSD licensed).
FG_BG_256 = [
(0x00, 0x00, 0x00), # 0
(0xcd, 0x00, 0x00), # 1
(0x00, 0xcd, 0x00), # 2
(0xcd, 0xcd, 0x00), # 3
(0x00, 0x00, 0xee), # 4
(0xcd, 0x00, 0xcd), # 5
(0x00, 0xcd, 0xcd), # 6
(0xe5, 0xe5, 0xe5), # 7
(0x7f, 0x7f, 0x7f), # 8
(0xff, 0x00, 0x00), # 9
(0x00, 0xff, 0x00), # 10
(0xff, 0xff, 0x00), # 11
(0x5c, 0x5c, 0xff), # 12
(0xff, 0x00, 0xff), # 13
(0x00, 0xff, 0xff), # 14
(0xff, 0xff, 0xff), # 15
]
# colors 16..231: the 6x6x6 color cube
valuerange = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff)
for i in range(216):
r = valuerange[(i // 36) % 6]
g = valuerange[(i // 6) % 6]
b = valuerange[i % 6]
FG_BG_256.append((r, g, b))
# colors 232..255: grayscale
for i in range(24):
v = 8 + i * 10
FG_BG_256.append((v, v, v))
FG_BG_256 = ["{0:02x}{1:02x}{2:02x}".format(r, g, b) for r, g, b in FG_BG_256]
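# Spot checks derived from the construction above: FG_BG_256[16] == '000000',
# FG_BG_256[196] == 'ff0000' (pure red) and FG_BG_256[231] == 'ffffff'; the
# final 24 entries (232..255) form the grayscale ramp from 0x08 to 0xee.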
| mit | 1,003,358,980,977,163,300 | 22.09396 | 78 | 0.561465 | false |
BrainTech/openbci | obci/logic/logic_speller_peer.py | 1 | 1099 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author:
# Mateusz Kruszyński <[email protected]>
#
import time
from obci.utils import tags_helper
from multiplexer.multiplexer_constants import peers, types
from obci.logic import logic_helper
from obci.logic.logic_decision_peer import LogicDecision
from obci.logic.engines.speller_engine import SpellerEngine
from obci.utils import context as ctx
from obci.configs import settings, variables_pb2
from obci.utils.openbci_logging import log_crash
class LogicSpeller(LogicDecision, SpellerEngine):
"""A class for creating a manifest file with metadata."""
@log_crash
def __init__(self, addresses):
LogicDecision.__init__(self, addresses=addresses)
context = ctx.get_new_context()
context['logger'] = self.logger
SpellerEngine.__init__(self, self.config.param_values(), context)
self.ready()
self._update_letters()
def _run_post_actions(self, p_decision):
self._update_letters()
if __name__ == "__main__":
LogicSpeller(settings.MULTIPLEXER_ADDRESSES).loop()
| gpl-3.0 | -3,186,616,234,418,498,600 | 33.3125 | 73 | 0.711293 | false |
ethanbao/artman | artman/pipelines/core_generation.py | 1 | 2951 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pipelines that run protoc core codegen. The generated core library for each
language contains the well known types, defined by protobuf, for that language.
"""
from artman.pipelines import code_generation as code_gen
from artman.tasks import protoc_tasks
from artman.tasks import package_metadata_tasks
from artman.utils import task_utils
class CoreProtoPipeline(code_gen.CodeGenerationPipelineBase):
def __init__(self, **kwargs):
super(CoreProtoPipeline, self).__init__(
get_core_task_factory(kwargs['language']), **kwargs)
class CoreTaskFactoryBase(code_gen.TaskFactoryBase):
def get_tasks(self, **kwargs):
return task_utils.instantiate_tasks(
self._get_core_codegen_tasks(**kwargs), kwargs)
def _get_core_codegen_tasks(self, **kwargs):
raise NotImplementedError('Subclass must implement abstract method')
def get_validate_kwargs(self):
return code_gen.COMMON_REQUIRED
def get_invalid_kwargs(self):
return []
class _GoCoreTaskFactory(CoreTaskFactoryBase):
"""Responsible for the protobuf flow for Go language."""
def _get_core_codegen_tasks(self, **kwargs):
return [
protoc_tasks.ProtoCodeGenTask,
protoc_tasks.GoCopyTask,
]
def get_validate_kwargs(self):
return ['gapic_api_yaml', 'gapic_code_dir'] + code_gen.COMMON_REQUIRED
class _CSharpCoreTaskFactory(CoreTaskFactoryBase):
def _get_core_codegen_tasks(self, **kwargs):
return [protoc_tasks.ProtoCodeGenTask]
class _JavaCoreTaskFactory(CoreTaskFactoryBase):
"""Responsible for the core protobuf flow for Java language."""
def _get_core_codegen_tasks(self, **kwargs):
return [protoc_tasks.ProtoDescGenTask,
protoc_tasks.ProtoCodeGenTask,
package_metadata_tasks.PackageMetadataConfigGenTask,
package_metadata_tasks.ProtoPackageMetadataGenTask,
protoc_tasks.JavaProtoCopyTask]
_CORE_TASK_FACTORY_DICT = {
'go': _GoCoreTaskFactory,
'csharp': _CSharpCoreTaskFactory,
'java': _JavaCoreTaskFactory,
}
def get_core_task_factory(language):
cls = _CORE_TASK_FACTORY_DICT.get(language)
if cls:
return cls()
else:
raise ValueError('No core task factory found for language: '
+ language)
| apache-2.0 | 2,257,717,541,070,692,400 | 31.428571 | 79 | 0.698746 | false |
kevinsung/OpenFermion | src/openfermion/utils/_testing_utils.py | 1 | 13228 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions useful for tests."""
import collections
import itertools
import numpy
from scipy.linalg import qr
from openfermion.ops import (DiagonalCoulombHamiltonian,
InteractionOperator,
QuadraticHamiltonian,
QubitOperator)
def random_qubit_operator(n_qubits=16,
max_num_terms=16,
max_many_body_order=16,
seed=None):
prng = numpy.random.RandomState(seed)
op = QubitOperator()
num_terms = prng.randint(1, max_num_terms+1)
for _ in range(num_terms):
many_body_order = prng.randint(max_many_body_order+1)
term = []
for _ in range(many_body_order):
index = prng.randint(n_qubits)
action = prng.choice(('X', 'Y', 'Z'))
term.append((index, action))
coefficient = prng.randn()
op += QubitOperator(term, coefficient)
return op
def haar_random_vector(n, seed=None):
"""Generate an n dimensional Haar randomd vector."""
if seed is not None:
numpy.random.seed(seed)
vector = numpy.random.randn(n).astype(complex)
vector += 1.j * numpy.random.randn(n).astype(complex)
normalization = numpy.sqrt(vector.dot(numpy.conjugate(vector)))
return vector / normalization
def random_antisymmetric_matrix(n, real=False, seed=None):
"""Generate a random n x n antisymmetric matrix."""
if seed is not None:
numpy.random.seed(seed)
if real:
rand_mat = numpy.random.randn(n, n)
else:
rand_mat = numpy.random.randn(n, n) + 1.j * numpy.random.randn(n, n)
antisymmetric_mat = rand_mat - rand_mat.T
return antisymmetric_mat
def random_diagonal_coulomb_hamiltonian(n_qubits, real=False, seed=None):
"""Generate a random instance of DiagonalCoulombHamiltonian.
Args:
n_qubits: The number of qubits
real: Whether to use only real numbers in the one-body term
"""
if seed is not None:
numpy.random.seed(seed)
one_body = random_hermitian_matrix(n_qubits, real=real)
two_body = random_hermitian_matrix(n_qubits, real=True)
constant = numpy.random.randn()
return DiagonalCoulombHamiltonian(one_body, two_body, constant)
def random_hermitian_matrix(n, real=False, seed=None):
"""Generate a random n x n Hermitian matrix."""
if seed is not None:
numpy.random.seed(seed)
if real:
rand_mat = numpy.random.randn(n, n)
else:
rand_mat = numpy.random.randn(n, n) + 1.j * numpy.random.randn(n, n)
hermitian_mat = rand_mat + rand_mat.T.conj()
return hermitian_mat
def random_interaction_operator(
n_orbitals, expand_spin=False, real=True, seed=None):
"""Generate a random instance of InteractionOperator.
Args:
n_orbitals: The number of orbitals.
expand_spin: Whether to expand each orbital symmetrically into two
spin orbitals. Note that if this option is set to True, then
the total number of orbitals will be doubled.
real: Whether to use only real numbers.
seed: A random number generator seed.
"""
if seed is not None:
numpy.random.seed(seed)
if real:
dtype = float
else:
dtype = complex
# The constant has to be real.
constant = numpy.random.randn()
# The one-body tensor is a random Hermitian matrix.
one_body_coefficients = random_hermitian_matrix(n_orbitals, real)
# Generate random two-body coefficients.
two_body_coefficients = numpy.zeros((n_orbitals, n_orbitals,
n_orbitals, n_orbitals), dtype)
for p, q, r, s in itertools.product(range(n_orbitals), repeat=4):
coeff = numpy.random.randn()
if not real and len(set([p,q,r,s])) >= 3:
coeff += 1.j * numpy.random.randn()
# Four point symmetry.
two_body_coefficients[p, q, r, s] = coeff
two_body_coefficients[q, p, s, r] = coeff
two_body_coefficients[s, r, q, p] = coeff.conjugate()
two_body_coefficients[r, s, p, q] = coeff.conjugate()
# Eight point symmetry.
if real:
two_body_coefficients[r, q, p, s] = coeff
two_body_coefficients[p, s, r, q] = coeff
two_body_coefficients[s, p, q, r] = coeff
two_body_coefficients[q, r, s, p] = coeff
# If requested, expand to spin orbitals.
if expand_spin:
n_spin_orbitals = 2 * n_orbitals
# Expand one-body tensor.
one_body_coefficients = numpy.kron(one_body_coefficients, numpy.eye(2))
# Expand two-body tensor.
new_two_body_coefficients = numpy.zeros((
n_spin_orbitals, n_spin_orbitals,
n_spin_orbitals, n_spin_orbitals), dtype=complex)
for p, q, r, s in itertools.product(range(n_orbitals), repeat=4):
coefficient = two_body_coefficients[p, q, r, s]
# Mixed spin.
new_two_body_coefficients[2 * p, 2 * q + 1, 2 * r + 1, 2 * s] = (
coefficient)
new_two_body_coefficients[2 * p + 1, 2 * q, 2 * r, 2 * s + 1] = (
coefficient)
# Same spin.
new_two_body_coefficients[2 * p, 2 * q, 2 * r, 2 * s] = coefficient
new_two_body_coefficients[2 * p + 1, 2 * q + 1,
2 * r + 1, 2 * s + 1] = coefficient
two_body_coefficients = new_two_body_coefficients
# Create the InteractionOperator.
interaction_operator = InteractionOperator(
constant, one_body_coefficients, two_body_coefficients)
return interaction_operator
def random_quadratic_hamiltonian(n_orbitals,
conserves_particle_number=False,
real=False,
expand_spin=False,
seed=None):
"""Generate a random instance of QuadraticHamiltonian.
Args:
n_orbitals(int): the number of orbitals
conserves_particle_number(bool): whether the returned Hamiltonian
should conserve particle number
real(bool): whether to use only real numbers
expand_spin: Whether to expand each orbital symmetrically into two
spin orbitals. Note that if this option is set to True, then
the total number of orbitals will be doubled.
Returns:
QuadraticHamiltonian
"""
if seed is not None:
numpy.random.seed(seed)
constant = numpy.random.randn()
chemical_potential = numpy.random.randn()
hermitian_mat = random_hermitian_matrix(n_orbitals, real)
if conserves_particle_number:
antisymmetric_mat = None
else:
antisymmetric_mat = random_antisymmetric_matrix(n_orbitals, real)
if expand_spin:
hermitian_mat = numpy.kron(hermitian_mat, numpy.eye(2))
if antisymmetric_mat is not None:
antisymmetric_mat = numpy.kron(antisymmetric_mat, numpy.eye(2))
return QuadraticHamiltonian(hermitian_mat, antisymmetric_mat,
constant, chemical_potential)
def random_unitary_matrix(n, real=False, seed=None):
"""Obtain a random n x n unitary matrix."""
if seed is not None:
numpy.random.seed(seed)
if real:
rand_mat = numpy.random.randn(n, n)
else:
rand_mat = numpy.random.randn(n, n) + 1.j * numpy.random.randn(n, n)
Q, _ = qr(rand_mat)
return Q
class EqualsTester(object):
"""Tests equality against user-provided disjoint equivalence groups."""
def __init__(self, test_case):
self.groups = [(_ClassUnknownToSubjects(),)]
self.test_case = test_case
def add_equality_group(self, *group_items):
"""Tries to add a disjoint equivalence group to the equality tester.
        This method asserts that items within the group must all be equal to
each other, but not equal to any items in other groups that have been
or will be added.
Args:
*group_items: The items making up the equivalence group.
Raises:
            AssertionError: Items within the group are not equal to each other, or
items in another group are equal to items within the new group,
or the items violate the equals-implies-same-hash rule.
"""
self.test_case.assertIsNotNone(group_items)
# Check that group items are equivalent to each other.
for v1, v2 in itertools.product(group_items, repeat=2):
# Binary operators should always work.
self.test_case.assertTrue(v1 == v2)
self.test_case.assertTrue(not v1 != v2)
# __eq__ and __ne__ should both be correct or not implemented.
self.test_case.assertTrue(
hasattr(v1, '__eq__') == hasattr(v1, '__ne__'))
# Careful: python2 int doesn't have __eq__ or __ne__.
if hasattr(v1, '__eq__'):
eq = v1.__eq__(v2)
ne = v1.__ne__(v2)
self.test_case.assertIn(
(eq, ne),
[(True, False),
(NotImplemented, False),
(NotImplemented, NotImplemented)])
# Check that this group's items don't overlap with other groups.
for other_group in self.groups:
for v1, v2 in itertools.product(group_items, other_group):
# Binary operators should always work.
self.test_case.assertTrue(not v1 == v2)
self.test_case.assertTrue(v1 != v2)
# __eq__ and __ne__ should both be correct or not implemented.
self.test_case.assertTrue(
hasattr(v1, '__eq__') == hasattr(v1, '__ne__'))
# Careful: python2 int doesn't have __eq__ or __ne__.
if hasattr(v1, '__eq__'):
eq = v1.__eq__(v2)
ne = v1.__ne__(v2)
self.test_case.assertIn(
(eq, ne),
[(False, True),
(NotImplemented, True),
(NotImplemented, NotImplemented)])
# Check that group items hash to the same thing, or are all unhashable.
hashes = [hash(v) if isinstance(v, collections.Hashable) else None
for v in group_items]
if len(set(hashes)) > 1:
examples = ((v1, h1, v2, h2)
for v1, h1 in zip(group_items, hashes)
for v2, h2 in zip(group_items, hashes)
if h1 != h2)
example = next(examples)
raise AssertionError(
'Items in the same group produced different hashes. '
'Example: hash({}) is {} but hash({}) is {}.'.format(*example))
# Remember this group, to enable disjoint checks vs later groups.
self.groups.append(group_items)
def make_equality_pair(self, factory):
"""Tries to add a disjoint (item, item) group to the equality tester.
Uses the factory method to produce two different objects containing
        equal items. Asserts that the two objects are equal, but not equal to
any items in other groups that have been or will be added. Adds the
pair as a group.
Args:
factory (Callable[[], Any]): A method for producing independent
copies of an item.
Raises:
            AssertionError: The factory produces items not equal to each other, or
items in another group are equal to items from the factory, or
the items violate the equal-implies-same-hash rule.
"""
self.add_equality_group(factory(), factory())
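# Illustrative usage of EqualsTester (a sketch, assuming `self` is a
# unittest.TestCase; the grouped values are arbitrary examples):
#
#   eq = EqualsTester(self)
#   eq.add_equality_group(1, 1)           # items equal to each other
#   eq.add_equality_group(2)              # must differ from the group above
#   eq.make_equality_pair(lambda: "abc")  # factory producing equal copies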
class _ClassUnknownToSubjects(object):
"""Equality methods should be able to deal with the unexpected."""
def __eq__(self, other):
return isinstance(other, _ClassUnknownToSubjects)
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(_ClassUnknownToSubjects)
def module_importable(module):
"""Without importing it, returns whether python module is importable.
Args:
module (string): Name of module.
Returns:
bool
"""
import sys
if sys.version_info >= (3, 4):
from importlib import util
plug_spec = util.find_spec(module)
else:
import pkgutil
plug_spec = pkgutil.find_loader(module)
if plug_spec is None:
return False
else:
return True
| apache-2.0 | 2,406,659,572,818,670,000 | 35.541436 | 79 | 0.590036 | false |
kdart/pycopia | core/pycopia/stringmatch.py | 1 | 4548 | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Match plain strings like they were re module objects.
The StringExpression object implements a subset of re compile expressions.
This allows for a more consistent interface for the match types. Since
string.find is about 10 times faster than an RE search with a plain string,
this should speed up matches in that case by about that much, while
keeping a consistent interface.
"""
from __future__ import print_function
from __future__ import division
class StringMatchObject(object):
def __init__(self, start, end, string, pos, endpos, re):
self._start = start
self._end = end
self.string = string
self.pos = pos
self.endpos = endpos
self.lastgroup = None
self.lastindex = None
self.re = re # not really an RE.
def __repr__(self):
return "{0}(start={1!r}, end={2!r}, string={3!r}, pos={4!r}, endpos={5!r}, re={6!r})".format(self.__class__.__name__,
self._start, self._end, self.string, self.pos, self.endpos, self.re)
def expand(self, template):
raise NotImplementedError
def group(self, *args):
if args and args[0] == 0:
return self.string[self._start:self._end]
else:
raise IndexError("no such group")
def groups(self, default=None):
return ()
def groupdict(self, default=None):
return {}
def start(self, group=0):
if group == 0:
return self._start
else:
raise IndexError("no such group")
def end(self, group=0):
if group == 0:
return self._end
else:
raise IndexError("no such group")
def span(self, group=0):
if group == 0:
return self._start, self._end
else:
return -1, -1
def __nonzero__(self):
return 1
# an object that looks like a compiled regular expression, but does exact
# string matching. should be much faster in that case.
class StringExpression(object):
def __init__(self, patt, flags=0):
self.pattern = patt
# bogus attributes to simulate compiled REs from re module.
self.flags = flags
self.groupindex = {}
def __repr__(self):
return "{0}(patt={1!r}, flags={2!r})".format(self.__class__.__name__,
self.pattern, self.flags)
def search(self, text, pos=0, endpos=2147483647):
n = text.find(self.pattern, pos, endpos)
if n >= 0:
return StringMatchObject(n, n+len(self.pattern), text, pos, endpos, self)
else:
return None
match = search # match is same as search for strings
def split(self, text, maxsplit=0):
return text.split(self.pattern, maxsplit)
    def findall(self, string, pos=0, endpos=2147483647):
        rv = []
        i = pos
        while i >= 0:
            i = string.find(self.pattern, i, endpos)
            if i >= 0:
                rv.append(self.pattern)
                # advance past this match so the loop terminates
                i += len(self.pattern) or 1
        return rv
def finditer(self, string, pos=0, endpos=2147483647):
while 1:
mo = self.search(string, pos, endpos)
if mo:
yield mo
else:
return
def sub(self, repl, string, count=2147483647):
return string.replace(self.pattern, repl, count)
    def subn(self, repl, string, count=2147483647):
        i = 0
        N = 0
        while i >= 0:
            i = string.find(self.pattern, i)
            if i >= 0:
                N += 1
                # advance past this match so the loop terminates
                i += len(self.pattern) or 1
        return string.replace(self.pattern, repl, count), N
# factory function to "compile" EXACT patterns (which are strings)
def compile_exact(string, flags=0):
return StringExpression(string, flags)
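# Illustrative usage (a sketch; mirrors the _test() helper below):
#
#   cs = compile_exact("me")
#   mo = cs.search("matchme")     # StringMatchObject, mo.span() == (5, 7)
#   cs.sub("you", "matchme")      # -> 'matchyou'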
def _test(argv):
cs = compile_exact("me")
mo = cs.search("matchme")
assert mo is not None
print(mo.span())
assert mo.span() == (5,7)
if __name__ == "__main__":
import sys
_test(sys.argv)
| apache-2.0 | -7,621,170,586,221,871,000 | 28.921053 | 125 | 0.599824 | false |
germanovm/vdsm | vdsm/v2v.py | 1 | 26018 | # Copyright 2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
"""
When importing a VM, a thread starts a new virt-v2v process.
Progress and status of the process (i.e. the job) are reported back
via getVdsStats() in the fields progress and status.
progress is a number representing the percentage of a single disk copy,
and status reports the state of the job (init, error etc.)
"""
from collections import namedtuple
from contextlib import closing, contextmanager
import errno
import logging
import os
import re
import signal
import threading
import xml.etree.ElementTree as ET
import libvirt
from vdsm.constants import P_VDSM_RUN
from vdsm.define import errCode, doneCode
from vdsm import libvirtconnection, response
from vdsm.infra import zombiereaper
from vdsm.utils import traceback, CommandPath, execCmd, NICENESS, IOCLASS
import caps
_lock = threading.Lock()
_jobs = {}
_V2V_DIR = os.path.join(P_VDSM_RUN, 'v2v')
_VIRT_V2V = CommandPath('virt-v2v', '/usr/bin/virt-v2v')
_OVF_RESOURCE_CPU = 3
_OVF_RESOURCE_MEMORY = 4
_OVF_RESOURCE_NETWORK = 10
# OVF Specification:
# https://www.iso.org/obp/ui/#iso:std:iso-iec:17203:ed-1:v1:en
_OVF_NS = 'http://schemas.dmtf.org/ovf/envelope/1'
_RASD_NS = 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/' \
'CIM_ResourceAllocationSettingData'
ImportProgress = namedtuple('ImportProgress',
['current_disk', 'disk_count', 'description'])
DiskProgress = namedtuple('DiskProgress', ['progress'])
class STATUS:
'''
STARTING: request granted and starting the import process
COPYING_DISK: copying disk in progress
    ABORTED: user initiated abort
    FAILED: error during import process
    DONE: conversion process successfully finished
'''
STARTING = 'starting'
COPYING_DISK = 'copying_disk'
ABORTED = 'aborted'
FAILED = 'error'
DONE = 'done'
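# Typical life cycle, as implemented by ImportVm below: STARTING ->
# COPYING_DISK (once per disk) -> DONE on success, FAILED on error, or
# ABORTED when the user aborts the job.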
class V2VError(Exception):
''' Base class for v2v errors '''
class ClientError(Exception):
''' Base class for client error '''
class InvalidVMConfiguration(ValueError):
''' Unexpected error while parsing libvirt domain xml '''
class OutputParserError(V2VError):
''' Error while parsing virt-v2v output '''
class JobExistsError(ClientError):
''' Job already exists in _jobs collection '''
err_name = 'JobExistsError'
class VolumeError(ClientError):
''' Error preparing volume '''
class NoSuchJob(ClientError):
    ''' Job does not exist in the _jobs collection '''
err_name = 'NoSuchJob'
class JobNotDone(ClientError):
''' Import process still in progress '''
err_name = 'JobNotDone'
class NoSuchOvf(V2VError):
    ''' Ovf file does not exist in /var/run/vdsm/v2v/ '''
err_name = 'V2VNoSuchOvf'
class V2VProcessError(V2VError):
''' virt-v2v process had error in execution '''
class InvalidInputError(ClientError):
''' Invalid input received '''
def supported():
return not (caps.getos() in (caps.OSName.RHEVH, caps.OSName.RHEL)
and caps.osversion()['version'].startswith('6'))
def get_external_vms(uri, username, password):
if not supported():
return errCode["noimpl"]
try:
conn = libvirtconnection.open_connection(uri=uri,
username=username,
passwd=password)
except libvirt.libvirtError as e:
        logging.error('error connecting to hypervisor: %r', e.message)
return {'status': {'code': errCode['V2VConnection']['status']['code'],
'message': e.message}}
with closing(conn):
vms = []
for vm in conn.listAllDomains():
root = ET.fromstring(vm.XMLDesc(0))
params = {}
_add_vm_info(vm, params)
try:
_add_general_info(root, params)
except InvalidVMConfiguration as e:
logging.error('error parsing domain xml, msg: %s xml: %s',
e.message, vm.XMLDesc(0))
continue
_add_networks(root, params)
_add_disks(root, params)
for disk in params['disks']:
_add_disk_info(conn, disk)
vms.append(params)
return {'status': doneCode, 'vmList': vms}
def convert_external_vm(uri, username, password, vminfo, job_id, irs):
job = ImportVm.from_libvirt(uri, username, password, vminfo, job_id, irs)
job.start()
_add_job(job_id, job)
return {'status': doneCode}
def convert_ova(ova_path, vminfo, job_id, irs):
job = ImportVm.from_ova(ova_path, vminfo, job_id, irs)
job.start()
_add_job(job_id, job)
return response.success()
def get_ova_info(ova_path):
ns = {'ovf': _OVF_NS, 'rasd': _RASD_NS}
try:
root = ET.fromstring(_read_ovf_from_ova(ova_path))
except ET.ParseError as e:
raise V2VError('Error reading ovf from ova, position: %r' % e.position)
vm = {}
_add_general_ovf_info(vm, root, ns)
_add_disks_ovf_info(vm, root, ns)
_add_networks_ovf_info(vm, root, ns)
return response.success(vmList=vm)
def get_converted_vm(job_id):
try:
job = _get_job(job_id)
_validate_job_done(job)
ovf = _read_ovf(job_id)
except ClientError as e:
logging.info('Converted VM error %s', e)
return errCode[e.err_name]
except V2VError as e:
logging.error('Converted VM error %s', e)
return errCode[e.err_name]
return {'status': doneCode, 'ovf': ovf}
def delete_job(job_id):
try:
job = _get_job(job_id)
_validate_job_finished(job)
_remove_job(job_id)
except ClientError as e:
logging.info('Cannot delete job, error: %s', e)
return errCode[e.err_name]
return {'status': doneCode}
def abort_job(job_id):
try:
job = _get_job(job_id)
job.abort()
except ClientError as e:
logging.info('Cannot abort job, error: %s', e)
return errCode[e.err_name]
return {'status': doneCode}
def get_jobs_status():
ret = {}
with _lock:
items = tuple(_jobs.items())
for job_id, job in items:
ret[job_id] = {
'status': job.status,
'description': job.description,
'progress': job.progress
}
return ret
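# Shape of the returned mapping (keys come from the loop above; the job id and
# values are illustrative only):
#
#   {'<job uuid>': {'status': 'copying_disk',
#                   'description': 'Copying disk 1/2',
#                   'progress': 25}}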
def _add_job(job_id, job):
with _lock:
if job_id in _jobs:
raise JobExistsError("Job %r exists" % job_id)
_jobs[job_id] = job
def _get_job(job_id):
with _lock:
if job_id not in _jobs:
raise NoSuchJob("No such job %r" % job_id)
return _jobs[job_id]
def _remove_job(job_id):
with _lock:
if job_id not in _jobs:
raise NoSuchJob("No such job %r" % job_id)
del _jobs[job_id]
def _validate_job_done(job):
if job.status != STATUS.DONE:
raise JobNotDone("Job %r is %s" % (job.id, job.status))
def _validate_job_finished(job):
if job.status not in (STATUS.DONE, STATUS.FAILED, STATUS.ABORTED):
raise JobNotDone("Job %r is %s" % (job.id, job.status))
def _read_ovf(job_id):
file_name = os.path.join(_V2V_DIR, "%s.ovf" % job_id)
try:
with open(file_name, 'r') as f:
return f.read()
except IOError as e:
if e.errno != errno.ENOENT:
raise
raise NoSuchOvf("No such ovf %r" % file_name)
def get_storage_domain_path(path):
'''
prepareImage returns /prefix/sdUUID/images/imgUUID/volUUID
    we need the storage domain's absolute path, so we go up 3 levels
'''
return path.rsplit(os.sep, 3)[0]
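# Example of the path handling described in the docstring:
#   get_storage_domain_path('/prefix/sdUUID/images/imgUUID/volUUID')
#   returns '/prefix/sdUUID'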
@contextmanager
def password_file(job_id, file_name, password):
fd = os.open(file_name, os.O_WRONLY | os.O_CREAT, 0o600)
try:
os.write(fd, password.value)
finally:
os.close(fd)
try:
yield
finally:
try:
os.remove(file_name)
except Exception:
logging.exception("Job %r error removing passwd file: %s",
job_id, file_name)
class ImportVm(object):
TERM_DELAY = 30
PROC_WAIT_TIMEOUT = 30
def __init__(self, vminfo, job_id, irs):
'''
do not use directly, use a factory method instead!
'''
self._vminfo = vminfo
self._id = job_id
self._irs = irs
self._status = STATUS.STARTING
self._description = ''
self._disk_progress = 0
self._disk_count = 1
self._current_disk = 1
self._aborted = False
self._prepared_volumes = []
self._uri = None
self._username = None
self._password = None
self._passwd_file = None
self._create_command = None
self._run_command = None
self._ova_path = None
@classmethod
def from_libvirt(cls, uri, username, password, vminfo, job_id, irs):
obj = cls(vminfo, job_id, irs)
obj._uri = uri
obj._username = username
obj._password = password
obj._passwd_file = os.path.join(_V2V_DIR, "%s.tmp" % job_id)
obj._create_command = obj._from_libvirt_command
obj._run_command = obj._run_with_password
return obj
@classmethod
def from_ova(cls, ova_path, vminfo, job_id, irs):
obj = cls(vminfo, job_id, irs)
obj._ova_path = ova_path
obj._create_command = obj._from_ova_command
obj._run_command = obj._run
return obj
def start(self):
t = threading.Thread(target=self._run_command)
t.daemon = True
t.start()
@property
def id(self):
return self._id
@property
def status(self):
return self._status
@property
def description(self):
return self._description
@property
def progress(self):
'''
        progress combines the per-disk progress values into one flat
        number, so it is not 100% accurate - each disk takes an equal
        portion, i.e. with 2 disks the first covers 0-50 and the
        second 50-100
'''
completed = (self._disk_count - 1) * 100
return (completed + self._disk_progress) / self._disk_count
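        # For example (per the formula above): with disk 2 of 2 at 40%,
        # completed = 100 and progress = (100 + 40) / 2 = 70.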
def _run_with_password(self):
with password_file(self._id, self._passwd_file, self._password):
self._run()
@traceback(msg="Error importing vm")
def _run(self):
try:
self._import()
except Exception as ex:
if self._aborted:
logging.debug("Job %r was aborted", self._id)
else:
logging.exception("Job %r failed", self._id)
self._status = STATUS.FAILED
self._description = ex.message
try:
self._abort()
except Exception as e:
logging.exception('Job %r, error trying to abort: %r',
self._id, e)
finally:
self._teardown_volumes()
def _import(self):
# TODO: use the process handling http://gerrit.ovirt.org/#/c/33909/
self._prepare_volumes()
cmd = self._create_command()
logging.info('Job %r starting import', self._id)
# This is the way we run qemu-img convert jobs. virt-v2v is invoking
# qemu-img convert to perform the migration.
self._proc = execCmd(cmd, sync=False, deathSignal=signal.SIGTERM,
nice=NICENESS.HIGH, ioclass=IOCLASS.IDLE,
env=self._execution_environments())
self._proc.blocking = True
self._watch_process_output()
self._wait_for_process()
if self._proc.returncode != 0:
raise V2VProcessError('Job %r process failed exit-code: %r'
', stderr: %s' %
(self._id, self._proc.returncode,
self._proc.stderr.read(1024)))
if self._status != STATUS.ABORTED:
self._status = STATUS.DONE
logging.info('Job %r finished import successfully', self._id)
def _execution_environments(self):
env = {'LIBGUESTFS_BACKEND': 'direct'}
if 'virtio_iso_path' in self._vminfo:
env['VIRTIO_WIN'] = self._vminfo['virtio_iso_path']
return env
def _wait_for_process(self):
if self._proc.returncode is not None:
return
logging.debug("Job %r waiting for virt-v2v process", self._id)
if not self._proc.wait(timeout=self.PROC_WAIT_TIMEOUT):
raise V2VProcessError("Job %r timeout waiting for process pid=%s",
self._id, self._proc.pid)
def _watch_process_output(self):
parser = OutputParser()
for event in parser.parse(self._proc.stdout):
if isinstance(event, ImportProgress):
self._status = STATUS.COPYING_DISK
logging.info("Job %r copying disk %d/%d",
self._id, event.current_disk, event.disk_count)
self._disk_progress = 0
self._current_disk = event.current_disk
self._disk_count = event.disk_count
self._description = event.description
elif isinstance(event, DiskProgress):
self._disk_progress = event.progress
if event.progress % 10 == 0:
logging.info("Job %r copy disk %d progress %d/100",
self._id, self._current_disk, event.progress)
else:
raise RuntimeError("Job %r got unexpected parser event: %s" %
(self._id, event))
def _from_libvirt_command(self):
cmd = [_VIRT_V2V.cmd,
'-ic', self._uri,
'-o', 'vdsm',
'-of', self._get_disk_format(),
'-oa', self._vminfo.get('allocation', 'sparse').lower()]
cmd.extend(self._generate_disk_parameters())
cmd.extend(['--password-file',
self._passwd_file,
'--vdsm-vm-uuid',
self._id,
'--vdsm-ovf-output',
_V2V_DIR,
'--machine-readable',
'-os',
get_storage_domain_path(self._prepared_volumes[0]['path']),
self._vminfo['vmName']])
return cmd
def _from_ova_command(self):
cmd = [_VIRT_V2V.cmd,
'-i', 'ova', self._ova_path,
'-o', 'vdsm',
'-of', self._get_disk_format(),
'-oa', self._vminfo.get('allocation', 'sparse').lower(),
'--vdsm-vm-uuid',
self._id,
'--vdsm-ovf-output',
_V2V_DIR,
'--machine-readable',
'-os',
get_storage_domain_path(self._prepared_volumes[0]['path'])]
cmd.extend(self._generate_disk_parameters())
return cmd
def abort(self):
self._status = STATUS.ABORTED
logging.info('Job %r aborting...', self._id)
self._abort()
def _abort(self):
self._aborted = True
if self._proc.returncode is None:
logging.debug('Job %r killing virt-v2v process', self._id)
try:
self._proc.kill()
except OSError as e:
if e.errno != errno.ESRCH:
raise
logging.debug('Job %r virt-v2v process not running',
self._id)
else:
logging.debug('Job %r virt-v2v process was killed',
self._id)
finally:
zombiereaper.autoReapPID(self._proc.pid)
def _get_disk_format(self):
fmt = self._vminfo.get('format', 'raw').lower()
if fmt == 'cow':
return 'qcow2'
return fmt
def _generate_disk_parameters(self):
parameters = []
for disk in self._vminfo['disks']:
try:
parameters.append('--vdsm-image-uuid')
parameters.append(disk['imageID'])
parameters.append('--vdsm-vol-uuid')
parameters.append(disk['volumeID'])
except KeyError as e:
raise InvalidInputError('Job %r missing required property: %s'
% (self._id, e))
return parameters
def _prepare_volumes(self):
if len(self._vminfo['disks']) < 1:
            raise InvalidInputError('Job %r cannot import vm with no disk' %
                                    self._id)
for disk in self._vminfo['disks']:
drive = {'poolID': self._vminfo['poolID'],
'domainID': self._vminfo['domainID'],
'volumeID': disk['volumeID'],
'imageID': disk['imageID']}
res = self._irs.prepareImage(drive['domainID'],
drive['poolID'],
drive['imageID'],
drive['volumeID'])
if res['status']['code']:
raise VolumeError('Job %r bad volume specification: %s' %
(self._id, drive))
drive['path'] = res['path']
self._prepared_volumes.append(drive)
def _teardown_volumes(self):
for drive in self._prepared_volumes:
try:
self._irs.teardownImage(drive['domainID'],
drive['poolID'],
drive['imageID'])
except Exception as e:
logging.error('Job %r error tearing down drive: %s',
self._id, e)
class OutputParser(object):
COPY_DISK_RE = re.compile(r'.*(Copying disk (\d+)/(\d+)).*')
DISK_PROGRESS_RE = re.compile(r'\s+\((\d+).*')
def parse(self, stream):
for line in stream:
if 'Copying disk' in line:
description, current_disk, disk_count = self._parse_line(line)
yield ImportProgress(int(current_disk), int(disk_count),
description)
for chunk in self._iter_progress(stream):
progress = self._parse_progress(chunk)
yield DiskProgress(progress)
if progress == 100:
break
def _parse_line(self, line):
m = self.COPY_DISK_RE.match(line)
if m is None:
raise OutputParserError('unexpected format in "Copying disk"'
', line: %r' % line)
return m.group(1), m.group(2), m.group(3)
def _iter_progress(self, stream):
chunk = ''
while True:
c = stream.read(1)
chunk += c
if c == '\r':
yield chunk
chunk = ''
def _parse_progress(self, chunk):
m = self.DISK_PROGRESS_RE.match(chunk)
if m is None:
raise OutputParserError('error parsing progress, chunk: %r'
% chunk)
try:
return int(m.group(1))
except ValueError:
raise OutputParserError('error parsing progress regex: %r'
% m.groups)
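# Illustrative wiring (a sketch; the sample output line is hypothetical but
# chosen to match COPY_DISK_RE / DISK_PROGRESS_RE above):
#
#   parser = OutputParser()
#   for event in parser.parse(proc.stdout):
#       if isinstance(event, ImportProgress):   # e.g. "Copying disk 1/2"
#           ...
#       elif isinstance(event, DiskProgress):   # e.g. "   (42" style chunks
#           ...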
def _mem_to_mib(size, unit):
lunit = unit.lower()
if lunit in ('bytes', 'b'):
return size / 1024 / 1024
elif lunit in ('kib', 'k'):
return size / 1024
elif lunit in ('mib', 'm'):
return size
elif lunit in ('gib', 'g'):
return size * 1024
elif lunit in ('tib', 't'):
return size * 1024 * 1024
else:
raise InvalidVMConfiguration("Invalid currentMemory unit attribute:"
" %r" % unit)
def _add_vm_info(vm, params):
params['vmName'] = vm.name()
if vm.state()[0] == libvirt.VIR_DOMAIN_SHUTOFF:
params['status'] = "Down"
else:
params['status'] = "Up"
def _add_general_info(root, params):
e = root.find('./uuid')
if e is not None:
params['vmId'] = e.text
e = root.find('./currentMemory')
if e is not None:
try:
size = int(e.text)
except ValueError:
raise InvalidVMConfiguration("Invalid 'currentMemory' value: %r"
% e.text)
unit = e.get('unit', 'KiB')
params['memSize'] = _mem_to_mib(size, unit)
e = root.find('./vcpu')
if e is not None:
try:
params['smp'] = int(e.text)
except ValueError:
raise InvalidVMConfiguration("Invalid 'vcpu' value: %r" % e.text)
e = root.find('./os/type/[@arch]')
if e is not None:
params['arch'] = e.get('arch')
def _add_disk_info(conn, disk):
if 'alias' in disk.keys():
try:
vol = conn.storageVolLookupByPath(disk['alias'])
_, capacity, alloc = vol.info()
        except libvirt.libvirtError:
            logging.exception("Error getting disk size")
        else:
            disk['capacity'] = str(capacity)
            disk['allocation'] = str(alloc)
def _add_disks(root, params):
params['disks'] = []
disks = root.findall('.//disk[@type="file"]')
for disk in disks:
d = {}
device = disk.get('device')
if device is not None:
d['type'] = device
target = disk.find('./target/[@dev]')
if target is not None:
d['dev'] = target.get('dev')
source = disk.find('./source/[@file]')
if source is not None:
d['alias'] = source.get('file')
params['disks'].append(d)
def _add_networks(root, params):
params['networks'] = []
interfaces = root.findall('.//interface')
for iface in interfaces:
i = {}
if 'type' in iface.attrib:
i['type'] = iface.attrib['type']
mac = iface.find('./mac/[@address]')
if mac is not None:
i['macAddr'] = mac.get('address')
source = iface.find('./source/[@bridge]')
if source is not None:
i['bridge'] = source.get('bridge')
target = iface.find('./target/[@dev]')
if target is not None:
i['dev'] = target.get('dev')
model = iface.find('./model/[@type]')
if model is not None:
i['model'] = model.get('type')
params['networks'].append(i)
def _read_ovf_from_ova(ova_path):
# FIXME: change to tarfile package when support --to-stdout
cmd = ['/usr/bin/tar', 'xf', ova_path, '*.ovf', '--to-stdout']
rc, output, error = execCmd(cmd)
if rc:
raise V2VError(error)
return ''.join(output)
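# A possible tarfile-based replacement for the FIXME above (a sketch, not wired
# in; it assumes the ova is a plain tar archive and keeps the same error type):
#
#   import tarfile
#
#   def _read_ovf_from_ova_tarfile(ova_path):
#       with closing(tarfile.open(ova_path)) as tar:
#           for member in tar.getmembers():
#               if member.name.endswith('.ovf'):
#                   return tar.extractfile(member).read()
#       raise V2VError('no ovf file found in %s' % ova_path)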
def _add_general_ovf_info(vm, node, ns):
vm['status'] = 'Down'
vmName = node.find('./ovf:VirtualSystem/ovf:Name', ns)
if vmName is not None:
vm['vmName'] = vmName.text
else:
raise V2VError('Error parsing ovf information: no ovf:Name')
memSize = node.find('.//ovf:Item[rasd:ResourceType="%d"]/'
'rasd:VirtualQuantity' % _OVF_RESOURCE_MEMORY, ns)
if memSize is not None:
vm['memSize'] = int(memSize.text)
else:
raise V2VError('Error parsing ovf information: no memory size')
smp = node.find('.//ovf:Item[rasd:ResourceType="%d"]/'
'rasd:VirtualQuantity' % _OVF_RESOURCE_CPU, ns)
if smp is not None:
vm['smp'] = int(smp.text)
else:
raise V2VError('Error parsing ovf information: no cpu info')
def _add_disks_ovf_info(vm, node, ns):
vm['disks'] = []
for d in node.findall(".//ovf:DiskSection/ovf:Disk", ns):
disk = {'type': 'disk'}
capacity = d.attrib.get('{%s}capacity' % _OVF_NS)
disk['capacity'] = str(int(capacity) * 1024 * 1024 * 1024)
fileref = d.attrib.get('{%s}fileRef' % _OVF_NS)
alias = node.find('.//ovf:References/ovf:File[@ovf:id="%s"]' %
fileref, ns)
if alias is not None:
disk['alias'] = alias.attrib.get('{%s}href' % _OVF_NS)
disk['allocation'] = str(alias.attrib.get('{%s}size' % _OVF_NS))
else:
raise V2VError('Error parsing ovf information: disk href info')
vm['disks'].append(disk)
def _add_networks_ovf_info(vm, node, ns):
vm['networks'] = []
for n in node.findall('.//ovf:Item[rasd:ResourceType="%d"]'
% _OVF_RESOURCE_NETWORK, ns):
net = {}
dev = n.find('./rasd:ElementName', ns)
if dev is not None:
net['dev'] = dev.text
else:
raise V2VError('Error parsing ovf information: '
'network element name')
model = n.find('./rasd:ResourceSubType', ns)
if model is not None:
net['model'] = model.text
else:
raise V2VError('Error parsing ovf information: network model')
bridge = n.find('./rasd:Connection', ns)
if bridge is not None:
net['bridge'] = bridge.text
net['type'] = 'bridge'
else:
net['type'] = 'interface'
vm['networks'].append(net)
| gpl-2.0 | 1,767,331,554,670,716,700 | 31.360697 | 79 | 0.547352 | false |
kwilliams-mo/iris | lib/iris/tests/test_util.py | 1 | 13481 | # (C) British Crown Copyright 2010 - 2013, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test iris.util
"""
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
import inspect
import os
import StringIO
import unittest
import numpy as np
import iris.analysis
import iris.coords
import iris.tests.stock as stock
import iris.util
class TestMonotonic(unittest.TestCase):
def assertMonotonic(self, array, direction=None, **kwargs):
if direction is not None:
mono, dir = iris.util.monotonic(array, return_direction=True, **kwargs)
if not mono:
                self.fail('Array was not monotonic:\n %r' % array)
if dir != np.sign(direction):
self.fail('Array was monotonic but not in the direction expected:'
                          '\n + requested direction: %s'
                          '\n + resultant direction: %s' % (direction, dir))
else:
mono = iris.util.monotonic(array, **kwargs)
if not mono:
                self.fail('Array was not monotonic:\n %r' % array)
def assertNotMonotonic(self, array, **kwargs):
mono = iris.util.monotonic(array, **kwargs)
if mono:
self.fail("Array was monotonic when it shouldn't be:/n %r" % array)
def test_monotonic_pve(self):
a = np.array([3, 4, 5.3])
self.assertMonotonic(a)
self.assertMonotonic(a, direction=1)
# test the reverse for negative monotonic.
a = a[::-1]
self.assertMonotonic(a)
self.assertMonotonic(a, direction=-1)
def test_not_monotonic(self):
b = np.array([3, 5.3, 4])
self.assertNotMonotonic(b)
def test_monotonic_strict(self):
b = np.array([3, 5.3, 4])
self.assertNotMonotonic(b, strict=True)
b = np.array([3, 5.3, 5.3])
self.assertNotMonotonic(b, strict=True)
self.assertMonotonic(b)
b = np.array([0.0])
self.assertRaises(ValueError, iris.util.monotonic, b)
self.assertRaises(ValueError, iris.util.monotonic, b, strict=True)
b = np.array([0.0, 0.0])
self.assertNotMonotonic(b, strict=True)
self.assertMonotonic(b)
class TestReverse(unittest.TestCase):
def test_simple(self):
a = np.arange(12).reshape(3, 4)
np.testing.assert_array_equal(a[::-1], iris.util.reverse(a, 0))
np.testing.assert_array_equal(a[::-1, ::-1], iris.util.reverse(a, [0, 1]))
np.testing.assert_array_equal(a[:, ::-1], iris.util.reverse(a, 1))
np.testing.assert_array_equal(a[:, ::-1], iris.util.reverse(a, [1]))
self.assertRaises(ValueError, iris.util.reverse, a, [])
self.assertRaises(ValueError, iris.util.reverse, a, -1)
self.assertRaises(ValueError, iris.util.reverse, a, 10)
self.assertRaises(ValueError, iris.util.reverse, a, [-1])
self.assertRaises(ValueError, iris.util.reverse, a, [0, -1])
def test_single(self):
a = np.arange(36).reshape(3, 4, 3)
np.testing.assert_array_equal(a[::-1], iris.util.reverse(a, 0))
np.testing.assert_array_equal(a[::-1, ::-1], iris.util.reverse(a, [0, 1]))
np.testing.assert_array_equal(a[:, ::-1, ::-1], iris.util.reverse(a, [1, 2]))
np.testing.assert_array_equal(a[..., ::-1], iris.util.reverse(a, 2))
self.assertRaises(ValueError, iris.util.reverse, a, -1)
self.assertRaises(ValueError, iris.util.reverse, a, 10)
self.assertRaises(ValueError, iris.util.reverse, a, [-1])
self.assertRaises(ValueError, iris.util.reverse, a, [0, -1])
class TestClipString(unittest.TestCase):
def setUp(self):
self.test_string = "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."
self.rider = "**^^**$$..--__" # A good chance at being unique and not in the string to be tested!
def test_oversize_string(self):
# Test with a clip length that means the string will be clipped
clip_length = 109
result = iris.util.clip_string(self.test_string, clip_length, self.rider)
# Check the length is between what we requested ( + rider length) and the length of the original string
self.assertTrue(clip_length + len(self.rider) <= len(result) < len(self.test_string), "String was not clipped.")
# Also test the rider was added
self.assertTrue(self.rider in result, "Rider was not added to the string when it should have been.")
def test_undersize_string(self):
# Test with a clip length that is longer than the string
clip_length = 10999
result = iris.util.clip_string(self.test_string, clip_length, self.rider)
self.assertEqual(len(result), len(self.test_string), "String was clipped when it should not have been.")
# Also test that no rider was added on the end if the string was not clipped
        self.assertFalse(self.rider in result, "Rider was added to the string when it should not have been.")
def test_invalid_clip_lengths(self):
# Clip values less than or equal to zero are not valid
for clip_length in [0, -100]:
result = iris.util.clip_string(self.test_string, clip_length, self.rider)
self.assertEqual(len(result), len(self.test_string), "String was clipped when it should not have been.")
def test_default_values(self):
# Get the default values specified in the function
argspec = inspect.getargspec(iris.util.clip_string)
arg_dict = dict(zip(argspec.args[-2:], argspec.defaults))
result = iris.util.clip_string(self.test_string, arg_dict["clip_length"], arg_dict["rider"])
self.assertLess(len(result), len(self.test_string), "String was not clipped.")
rider_returned = result[-len(arg_dict["rider"]):]
self.assertEquals(rider_returned, arg_dict["rider"], "Default rider was not applied.")
def test_trim_string_with_no_spaces(self):
clip_length = 200
no_space_string = "a" * 500
# Since this string has no spaces, clip_string will not be able to gracefully clip it
# but will instead clip it exactly where the user specified
result = iris.util.clip_string(no_space_string, clip_length, self.rider)
expected_length = clip_length + len(self.rider)
# Check the length of the returned string is equal to clip length + length of rider
self.assertEquals(len(result), expected_length, "Mismatch in expected length of clipped string. Length was %s, expected value is %s" % (len(result), expected_length))
class TestDescribeDiff(iris.tests.IrisTest):
def test_identical(self):
test_cube_a = stock.realistic_4d()
test_cube_b = stock.realistic_4d()
return_str_IO = StringIO.StringIO()
iris.util.describe_diff(test_cube_a, test_cube_b, output_file=return_str_IO)
return_str = return_str_IO.getvalue()
self.assertString(return_str, 'compatible_cubes.str.txt')
def test_different(self):
return_str_IO = StringIO.StringIO()
# test incompatible attributes
test_cube_a = stock.realistic_4d()
test_cube_b = stock.realistic_4d()
test_cube_a.attributes['Conventions'] = 'CF-1.5'
test_cube_b.attributes['Conventions'] = 'CF-1.6'
iris.util.describe_diff(test_cube_a, test_cube_b, output_file=return_str_IO)
return_str = return_str_IO.getvalue()
self.assertString(return_str, 'incompatible_attr.str.txt')
# test incompatible names
test_cube_a = stock.realistic_4d()
test_cube_b = stock.realistic_4d()
test_cube_a.standard_name = "relative_humidity"
return_str_IO.truncate(0)
iris.util.describe_diff(test_cube_a, test_cube_b, output_file=return_str_IO)
return_str = return_str_IO.getvalue()
self.assertString(return_str, 'incompatible_name.str.txt')
# test incompatible unit
test_cube_a = stock.realistic_4d()
test_cube_b = stock.realistic_4d()
test_cube_a.units = iris.unit.Unit('m')
return_str_IO.truncate(0)
iris.util.describe_diff(test_cube_a, test_cube_b, output_file=return_str_IO)
return_str = return_str_IO.getvalue()
self.assertString(return_str, 'incompatible_unit.str.txt')
# test incompatible methods
test_cube_a = stock.realistic_4d()
test_cube_b = stock.realistic_4d().collapsed('model_level_number', iris.analysis.MEAN)
return_str_IO.truncate(0)
iris.util.describe_diff(test_cube_a, test_cube_b, output_file=return_str_IO)
return_str = return_str_IO.getvalue()
self.assertString(return_str, 'incompatible_meth.str.txt')
def test_output_file(self):
# test incompatible attributes
test_cube_a = stock.realistic_4d()
test_cube_b = stock.realistic_4d().collapsed('model_level_number', iris.analysis.MEAN)
test_cube_a.attributes['Conventions'] = 'CF-1.5'
test_cube_b.attributes['Conventions'] = 'CF-1.6'
test_cube_a.standard_name = "relative_humidity"
test_cube_a.units = iris.unit.Unit('m')
with self.temp_filename() as filename:
with open(filename, 'w') as f:
iris.util.describe_diff(test_cube_a, test_cube_b, output_file=f)
f.close()
self.assertFilesEqual(filename,
'incompatible_cubes.str.txt')
class TestAsCompatibleShape(tests.IrisTest):
def test_slice(self):
cube = tests.stock.realistic_4d()
sliced = cube[1, :, 2, :-2]
expected = cube[1:2, :, 2:3, :-2]
res = iris.util.as_compatible_shape(sliced, cube)
self.assertEqual(res, expected)
def test_transpose(self):
cube = tests.stock.realistic_4d()
transposed = cube.copy()
transposed.transpose()
expected = cube
res = iris.util.as_compatible_shape(transposed, cube)
self.assertEqual(res, expected)
def test_slice_and_transpose(self):
cube = tests.stock.realistic_4d()
sliced_and_transposed = cube[1, :, 2, :-2]
sliced_and_transposed.transpose()
expected = cube[1:2, :, 2:3, :-2]
res = iris.util.as_compatible_shape(sliced_and_transposed, cube)
self.assertEqual(res, expected)
def test_collapsed(self):
cube = tests.stock.realistic_4d()
collapsed = cube.collapsed('model_level_number', iris.analysis.MEAN)
expected_shape = list(cube.shape)
expected_shape[1] = 1
expected_data = collapsed.data.reshape(expected_shape)
res = iris.util.as_compatible_shape(collapsed, cube)
self.assertCML(res, ('util', 'as_compatible_shape_collapsed.cml'),
checksum=False)
self.assertArrayEqual(expected_data, res.data)
self.assertArrayEqual(expected_data.mask, res.data.mask)
def test_reduce_dimensionality(self):
# Test that as_compatible_shape() can demote
# length one dimensions to scalars.
cube = tests.stock.realistic_4d()
src = cube[:, 2:3]
expected = reduced = cube[:, 2]
res = iris.util.as_compatible_shape(src, reduced)
self.assertEqual(res, expected)
def test_anonymous_dims(self):
cube = tests.stock.realistic_4d()
# Move all coords from dim_coords to aux_coords.
for coord in cube.dim_coords:
dim = cube.coord_dims(coord)
cube.remove_coord(coord)
cube.add_aux_coord(coord, dim)
sliced = cube[1, :, 2, :-2]
expected = cube[1:2, :, 2:3, :-2]
res = iris.util.as_compatible_shape(sliced, cube)
self.assertEqual(res, expected)
def test_scalar_auxcoord(self):
def dim_to_aux(cube, coord_name):
"""Convert coordinate on cube from DimCoord to AuxCoord."""
coord = cube.coord(coord_name)
coord = iris.coords.AuxCoord.from_coord(coord)
cube.replace_coord(coord)
cube = tests.stock.realistic_4d()
src = cube[:, :, 3]
dim_to_aux(src, 'grid_latitude')
expected = cube[:, :, 3:4]
dim_to_aux(expected, 'grid_latitude')
res = iris.util.as_compatible_shape(src, cube)
self.assertEqual(res, expected)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 4,546,824,974,458,440,700 | 40.2263 | 475 | 0.6366 | false |
omriabnd/UCCA-App | Server/uccaApp/models/Users.py | 1 | 1828 | from datetime import datetime
from rest_framework.exceptions import ValidationError
from uccaApp.models import Tabs, Constants, Roles
from django.db import models
from django.contrib.auth.models import User, Group
class Users(models.Model):
id = models.AutoField(primary_key=True)
user_auth = models.OneToOneField(User,null=False, related_name="base_user", default=1, on_delete=models.CASCADE,unique=True)
# user_group = models.OneToOneField(Group,null=False, related_name="base_user", default=1, on_delete=models.CASCADE,unique=True)
first_name = models.CharField(max_length=100, default='')
last_name = models.CharField(max_length=100, default='')
email = models.EmailField(max_length=100,unique=True)
organization = models.CharField(max_length=Constants.ORGANIZATION_MAX_LENGTH)
affiliation = models.CharField(max_length=Constants.ORGANIZATION_MAX_LENGTH)
role = models.ForeignKey(Roles,max_length=256,db_column="role")
created_by = models.ForeignKey(User,null=True,blank=True, related_name="created_by_user",db_column="created_by")
is_active = models.BooleanField(default=True)
created_at = models.DateTimeField(default=datetime.now, blank=True)
updated_at = models.DateTimeField(auto_now=True, blank=True)
def __unicode__(self):
return self.first_name
class Meta:
db_table="users"
def set_group(self,user_id,new_role_name):
        # remove the user's existing permissions
User.objects.get(pk=user_id).groups.clear()
# grant new group to user
Group.objects.get(name=new_role_name).user_set.add(User.objects.get(pk=user_id))
def validate_email_unique(email):
exists = User.objects.filter(email=email)
if exists:
            raise ValidationError("Email address %s already exists, must be unique" % email) | gpl-3.0 | -3,633,067,282,102,347,000 | 41.534884 | 132 | 0.721007 | false |
skosukhin/spack | lib/spack/spack/cmd/compiler.py | 1 | 7883 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from __future__ import print_function
import argparse
import sys
from six import iteritems
import llnl.util.tty as tty
import spack.compilers
import spack.config
import spack.spec
from llnl.util.lang import index_by
from llnl.util.tty.colify import colify
from llnl.util.tty.color import colorize
from spack.spec import CompilerSpec, ArchSpec
description = "manage compilers"
section = "system"
level = "long"
def setup_parser(subparser):
sp = subparser.add_subparsers(
metavar='SUBCOMMAND', dest='compiler_command')
scopes = spack.config.config_scopes
# Find
find_parser = sp.add_parser(
'find', aliases=['add'],
help='search the system for compilers to add to Spack configuration')
find_parser.add_argument('add_paths', nargs=argparse.REMAINDER)
find_parser.add_argument(
'--scope', choices=scopes, default=spack.cmd.default_modify_scope,
help="configuration scope to modify")
# Remove
remove_parser = sp.add_parser(
'remove', aliases=['rm'], help='remove compiler by spec')
remove_parser.add_argument(
'-a', '--all', action='store_true',
help='remove ALL compilers that match spec')
remove_parser.add_argument('compiler_spec')
remove_parser.add_argument(
'--scope', choices=scopes, default=spack.cmd.default_modify_scope,
help="configuration scope to modify")
# List
list_parser = sp.add_parser('list', help='list available compilers')
list_parser.add_argument(
'--scope', choices=scopes, default=spack.cmd.default_list_scope,
help="configuration scope to read from")
# Info
info_parser = sp.add_parser('info', help='show compiler paths')
info_parser.add_argument('compiler_spec')
info_parser.add_argument(
'--scope', choices=scopes, default=spack.cmd.default_list_scope,
help="configuration scope to read from")
def compiler_find(args):
"""Search either $PATH or a list of paths OR MODULES for compilers and
add them to Spack's configuration.
"""
paths = args.add_paths
# Don't initialize compilers config via compilers.get_compiler_config.
# Just let compiler_find do the
# entire process and return an empty config from all_compilers
# Default for any other process is init_config=True
compilers = [c for c in spack.compilers.find_compilers(*paths)]
new_compilers = []
for c in compilers:
arch_spec = ArchSpec(None, c.operating_system, c.target)
same_specs = spack.compilers.compilers_for_spec(
c.spec, arch_spec, init_config=False)
if not same_specs:
new_compilers.append(c)
if new_compilers:
spack.compilers.add_compilers_to_config(new_compilers,
scope=args.scope,
init_config=False)
n = len(new_compilers)
s = 's' if n > 1 else ''
filename = spack.config.get_config_filename(args.scope, 'compilers')
tty.msg("Added %d new compiler%s to %s" % (n, s, filename))
colify(reversed(sorted(c.spec for c in new_compilers)), indent=4)
else:
tty.msg("Found no new compilers")
tty.msg("Compilers are defined in the following files:")
colify(spack.compilers.compiler_config_files(), indent=4)
def compiler_remove(args):
cspec = CompilerSpec(args.compiler_spec)
compilers = spack.compilers.compilers_for_spec(cspec, scope=args.scope)
if not compilers:
tty.die("No compilers match spec %s" % cspec)
elif not args.all and len(compilers) > 1:
tty.error("Multiple compilers match spec %s. Choose one:" % cspec)
colify(reversed(sorted([c.spec for c in compilers])), indent=4)
tty.msg("Or, use `spack compiler remove -a` to remove all of them.")
sys.exit(1)
for compiler in compilers:
spack.compilers.remove_compiler_from_config(
compiler.spec, scope=args.scope)
tty.msg("Removed compiler %s" % compiler.spec)
def compiler_info(args):
"""Print info about all compilers matching a spec."""
cspec = CompilerSpec(args.compiler_spec)
compilers = spack.compilers.compilers_for_spec(cspec, scope=args.scope)
if not compilers:
tty.error("No compilers match spec %s" % cspec)
else:
for c in compilers:
print(str(c.spec) + ":")
print("\ttarget: " + c.target)
print("\toperating_system: " + c.operating_system)
print("\tpaths:")
for cpath in ['cc', 'cxx', 'f77', 'fc']:
print("\t\t%s: %s" % (cpath, getattr(c, cpath, None)))
if any(c.flags):
print("\tflags:")
for flag, flag_value in iteritems(c.flags):
print("\t\t%s: %s" % (flag, flag_value))
else:
print("\tflags: " + str(type(c.flags)()))
if any(c.environment):
print("\tenvironment:")
for command in c.environment:
print("\t\t%s" % command)
else:
print("\tenvironment: " + str(type(c.environment)()))
if any(c.extra_rpaths):
print("\tExtra RPATHs:")
for extra_rpath in c.extra_rpaths:
print("\t\t" + extra_rpath)
else:
print("\tExtra RPATHs: " + str(type(c.extra_rpaths)()))
if any(c.modules):
print("\tmodules:")
for module in c.modules:
print("\t\t" + module)
else:
print("\tmodules: " + str(type(c.modules)()))
def compiler_list(args):
tty.msg("Available compilers")
index = index_by(spack.compilers.all_compilers(scope=args.scope),
lambda c: (c.spec.name, c.operating_system, c.target))
ordered_sections = sorted(index.items(), key=lambda item: item[0])
for i, (key, compilers) in enumerate(ordered_sections):
if i >= 1:
print()
name, os, target = key
os_str = os
if target:
os_str += "-%s" % target
cname = "%s{%s} %s" % (spack.spec.compiler_color, name, os_str)
tty.hline(colorize(cname), char='-')
colify(reversed(sorted(c.spec for c in compilers)))
def compiler(parser, args):
action = {'add': compiler_find,
'find': compiler_find,
'remove': compiler_remove,
'rm': compiler_remove,
'info': compiler_info,
'list': compiler_list}
action[args.compiler_command](args)
| lgpl-2.1 | 1,675,584,116,106,376,700 | 36.899038 | 78 | 0.608905 | false |
380wmda999/sphinx2.2.11-string-4G | api/sphinxapi.py | 1 | 35093 | #
# $Id$
#
# Python version of Sphinx searchd client (Python API)
#
# Copyright (c) 2006, Mike Osadnik
# Copyright (c) 2006-2016, Andrew Aksyonoff
# Copyright (c) 2008-2016, Sphinx Technologies Inc
# All rights reserved
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library General Public License. You should
# have received a copy of the LGPL license along with this program; if you
# did not, you can find it at http://www.gnu.org/
#
# WARNING!!!
#
# As of 2015, we strongly recommend to use either SphinxQL or REST APIs
# rather than the native SphinxAPI.
#
# While both the native SphinxAPI protocol and the existing APIs will
# continue to exist, and perhaps should not even break (too much), exposing
# all the new features via multiple different native API implementations
# is too much of a support complication for us.
#
# That said, you're welcome to overtake the maintenance of any given
# official API, and remove this warning ;)
#
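#
# A minimal usage sketch (added for illustration only; the host, port, index
# name and query below are hypothetical and assume a running searchd):
#
#   cl = SphinxClient()
#   cl.SetServer ( 'localhost', 9312 )
#   cl.SetLimits ( 0, 20 )
#   res = cl.Query ( 'hello world', 'myindex' )
#   if not res:
#       print cl.GetLastError()
#   else:
#       print res['total_found'], 'matches found'
#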
import sys
import select
import socket
import re
from struct import *
# known searchd commands
SEARCHD_COMMAND_SEARCH = 0
SEARCHD_COMMAND_EXCERPT = 1
SEARCHD_COMMAND_UPDATE = 2
SEARCHD_COMMAND_KEYWORDS = 3
SEARCHD_COMMAND_PERSIST = 4
SEARCHD_COMMAND_STATUS = 5
SEARCHD_COMMAND_FLUSHATTRS = 7
# current client-side command implementation versions
VER_COMMAND_SEARCH = 0x11E
VER_COMMAND_EXCERPT = 0x104
VER_COMMAND_UPDATE = 0x103
VER_COMMAND_KEYWORDS = 0x100
VER_COMMAND_STATUS = 0x101
VER_COMMAND_FLUSHATTRS = 0x100
# known searchd status codes
SEARCHD_OK = 0
SEARCHD_ERROR = 1
SEARCHD_RETRY = 2
SEARCHD_WARNING = 3
# known match modes
SPH_MATCH_ALL = 0
SPH_MATCH_ANY = 1
SPH_MATCH_PHRASE = 2
SPH_MATCH_BOOLEAN = 3
SPH_MATCH_EXTENDED = 4
SPH_MATCH_FULLSCAN = 5
SPH_MATCH_EXTENDED2 = 6
# known ranking modes (extended2 mode only)
SPH_RANK_PROXIMITY_BM25 = 0 # default mode, phrase proximity major factor and BM25 minor one
SPH_RANK_BM25 = 1 # statistical mode, BM25 ranking only (faster but worse quality)
SPH_RANK_NONE = 2 # no ranking, all matches get a weight of 1
SPH_RANK_WORDCOUNT = 3 # simple word-count weighting, rank is a weighted sum of per-field keyword occurrence counts
SPH_RANK_PROXIMITY = 4
SPH_RANK_MATCHANY = 5
SPH_RANK_FIELDMASK = 6
SPH_RANK_SPH04 = 7
SPH_RANK_EXPR = 8
SPH_RANK_TOTAL = 9
# known sort modes
SPH_SORT_RELEVANCE = 0
SPH_SORT_ATTR_DESC = 1
SPH_SORT_ATTR_ASC = 2
SPH_SORT_TIME_SEGMENTS = 3
SPH_SORT_EXTENDED = 4
SPH_SORT_EXPR = 5
# known filter types
SPH_FILTER_VALUES = 0
SPH_FILTER_RANGE = 1
SPH_FILTER_FLOATRANGE = 2
SPH_FILTER_STRING = 3
# known attribute types
SPH_ATTR_NONE = 0
SPH_ATTR_INTEGER = 1
SPH_ATTR_TIMESTAMP = 2
SPH_ATTR_ORDINAL = 3
SPH_ATTR_BOOL = 4
SPH_ATTR_FLOAT = 5
SPH_ATTR_BIGINT = 6
SPH_ATTR_STRING = 7
SPH_ATTR_FACTORS = 1001
SPH_ATTR_MULTI = 0X40000001L
SPH_ATTR_MULTI64 = 0X40000002L
SPH_ATTR_TYPES = (SPH_ATTR_NONE,
SPH_ATTR_INTEGER,
SPH_ATTR_TIMESTAMP,
SPH_ATTR_ORDINAL,
SPH_ATTR_BOOL,
SPH_ATTR_FLOAT,
SPH_ATTR_BIGINT,
SPH_ATTR_STRING,
SPH_ATTR_MULTI,
SPH_ATTR_MULTI64)
# known grouping functions
SPH_GROUPBY_DAY = 0
SPH_GROUPBY_WEEK = 1
SPH_GROUPBY_MONTH = 2
SPH_GROUPBY_YEAR = 3
SPH_GROUPBY_ATTR = 4
SPH_GROUPBY_ATTRPAIR = 5
class SphinxClient:
def __init__ (self):
"""
Create a new client object, and fill defaults.
"""
self._host = 'localhost' # searchd host (default is "localhost")
self._port = 9312 # searchd port (default is 9312)
self._path = None # searchd unix-domain socket path
self._socket = None
        self._offset = 0 # how many records to seek from result-set start (default is 0)
        self._limit = 20 # how many records to return from result-set starting at offset (default is 20)
self._mode = SPH_MATCH_EXTENDED2 # query matching mode (default is SPH_MATCH_EXTENDED2)
self._weights = [] # per-field weights (default is 1 for all fields)
self._sort = SPH_SORT_RELEVANCE # match sorting mode (default is SPH_SORT_RELEVANCE)
        self._sortby = '' # attribute to sort by (default is "")
self._min_id = 0 # min ID to match (default is 0)
self._max_id = 0 # max ID to match (default is UINT_MAX)
self._filters = [] # search filters
self._groupby = '' # group-by attribute name
self._groupfunc = SPH_GROUPBY_DAY # group-by function (to pre-process group-by attribute value with)
self._groupsort = '@group desc' # group-by sorting clause (to sort groups in result set with)
self._groupdistinct = '' # group-by count-distinct attribute
self._maxmatches = 1000 # max matches to retrieve
self._cutoff = 0 # cutoff to stop searching at
self._retrycount = 0 # distributed retry count
self._retrydelay = 0 # distributed retry delay
self._anchor = {} # geographical anchor point
self._indexweights = {} # per-index weights
self._ranker = SPH_RANK_PROXIMITY_BM25 # ranking mode
self._rankexpr = '' # ranking expression for SPH_RANK_EXPR
self._maxquerytime = 0 # max query time, milliseconds (default is 0, do not limit)
self._timeout = 1.0 # connection timeout
self._fieldweights = {} # per-field-name weights
self._overrides = {} # per-query attribute values overrides
self._select = '*' # select-list (attributes or expressions, with optional aliases)
self._query_flags = SetBit ( 0, 6, True ) # default idf=tfidf_normalized
self._predictedtime = 0 # per-query max_predicted_time
self._outerorderby = '' # outer match sort by
self._outeroffset = 0 # outer offset
self._outerlimit = 0 # outer limit
self._hasouter = False # sub-select enabled
self._error = '' # last error message
self._warning = '' # last warning message
self._reqs = [] # requests array for multi-query
def __del__ (self):
if self._socket:
self._socket.close()
def GetLastError (self):
"""
Get last error message (string).
"""
return self._error
def GetLastWarning (self):
"""
Get last warning message (string).
"""
return self._warning
def SetServer (self, host, port = None):
"""
Set searchd server host and port.
"""
assert(isinstance(host, str))
if host.startswith('/'):
self._path = host
return
elif host.startswith('unix://'):
self._path = host[7:]
return
self._host = host
if isinstance(port, int):
assert(port>0 and port<65536)
self._port = port
self._path = None
def SetConnectTimeout ( self, timeout ):
"""
        Set connection timeout, in seconds (float).
"""
assert (isinstance(timeout, float))
        # a timeout of 0 would make the connection non-blocking, which is wrong, so the timeout is clipped to a reasonable minimum
self._timeout = max ( 0.001, timeout )
def _Connect (self):
"""
INTERNAL METHOD, DO NOT CALL. Connects to searchd server.
"""
if self._socket:
# we have a socket, but is it still alive?
sr, sw, _ = select.select ( [self._socket], [self._socket], [], 0 )
# this is how alive socket should look
if len(sr)==0 and len(sw)==1:
return self._socket
# oops, looks like it was closed, lets reopen
self._socket.close()
self._socket = None
        sock = None
        try:
if self._path:
af = socket.AF_UNIX
addr = self._path
desc = self._path
else:
af = socket.AF_INET
addr = ( self._host, self._port )
desc = '%s;%s' % addr
sock = socket.socket ( af, socket.SOCK_STREAM )
sock.settimeout ( self._timeout )
sock.connect ( addr )
except socket.error, msg:
if sock:
sock.close()
self._error = 'connection to %s failed (%s)' % ( desc, msg )
return
        v = unpack('>L', sock.recv(4))[0]
if v<1:
sock.close()
self._error = 'expected searchd protocol version, got %s' % v
return
# all ok, send my version
sock.send(pack('>L', 1))
return sock
def _GetResponse (self, sock, client_ver):
"""
INTERNAL METHOD, DO NOT CALL. Gets and checks response packet from searchd server.
"""
(status, ver, length) = unpack('>2HL', sock.recv(8))
response = ''
left = length
while left>0:
chunk = sock.recv(left)
if chunk:
response += chunk
left -= len(chunk)
else:
break
if not self._socket:
sock.close()
# check response
read = len(response)
if not response or read!=length:
if length:
self._error = 'failed to read searchd response (status=%s, ver=%s, len=%s, read=%s)' \
% (status, ver, length, read)
else:
self._error = 'received zero-sized searchd response'
return None
# check status
if status==SEARCHD_WARNING:
wend = 4 + unpack ( '>L', response[0:4] )[0]
self._warning = response[4:wend]
return response[wend:]
if status==SEARCHD_ERROR:
self._error = 'searchd error: '+response[4:]
return None
if status==SEARCHD_RETRY:
self._error = 'temporary searchd error: '+response[4:]
return None
if status!=SEARCHD_OK:
self._error = 'unknown status code %d' % status
return None
# check version
if ver<client_ver:
self._warning = 'searchd command v.%d.%d older than client\'s v.%d.%d, some options might not work' \
% (ver>>8, ver&0xff, client_ver>>8, client_ver&0xff)
return response
def _Send ( self, sock, req ):
"""
INTERNAL METHOD, DO NOT CALL. send request to searchd server.
"""
total = 0
while True:
sent = sock.send ( req[total:] )
if sent<=0:
break
total = total + sent
return total
def SetLimits (self, offset, limit, maxmatches=0, cutoff=0):
"""
Set offset and count into result set, and optionally set max-matches and cutoff limits.
"""
assert ( type(offset) in [int,long] and 0<=offset<16777216 )
assert ( type(limit) in [int,long] and 0<limit<16777216 )
assert(maxmatches>=0)
self._offset = offset
self._limit = limit
if maxmatches>0:
self._maxmatches = maxmatches
if cutoff>=0:
self._cutoff = cutoff
def SetMaxQueryTime (self, maxquerytime):
"""
Set maximum query time, in milliseconds, per-index. 0 means 'do not limit'.
"""
assert(isinstance(maxquerytime,int) and maxquerytime>0)
self._maxquerytime = maxquerytime
def SetMatchMode (self, mode):
"""
Set matching mode.
"""
print >> sys.stderr, 'DEPRECATED: Do not call this method or, even better, use SphinxQL instead of an API'
assert(mode in [SPH_MATCH_ALL, SPH_MATCH_ANY, SPH_MATCH_PHRASE, SPH_MATCH_BOOLEAN, SPH_MATCH_EXTENDED, SPH_MATCH_FULLSCAN, SPH_MATCH_EXTENDED2])
self._mode = mode
def SetRankingMode ( self, ranker, rankexpr='' ):
"""
Set ranking mode.
"""
assert(ranker>=0 and ranker<SPH_RANK_TOTAL)
self._ranker = ranker
self._rankexpr = rankexpr
def SetSortMode ( self, mode, clause='' ):
"""
Set sorting mode.
"""
assert ( mode in [SPH_SORT_RELEVANCE, SPH_SORT_ATTR_DESC, SPH_SORT_ATTR_ASC, SPH_SORT_TIME_SEGMENTS, SPH_SORT_EXTENDED, SPH_SORT_EXPR] )
assert ( isinstance ( clause, str ) )
self._sort = mode
self._sortby = clause
def SetFieldWeights (self, weights):
"""
Bind per-field weights by name; expects (name,field_weight) dictionary as argument.
"""
assert(isinstance(weights,dict))
for key,val in weights.items():
assert(isinstance(key,str))
AssertUInt32 ( val )
self._fieldweights = weights
def SetIndexWeights (self, weights):
"""
Bind per-index weights by name; expects (name,index_weight) dictionary as argument.
"""
assert(isinstance(weights,dict))
for key,val in weights.items():
assert(isinstance(key,str))
AssertUInt32(val)
self._indexweights = weights
def SetIDRange (self, minid, maxid):
"""
Set IDs range to match.
        Only match records if document ID is between minid and maxid (inclusive).
"""
assert(isinstance(minid, (int, long)))
assert(isinstance(maxid, (int, long)))
assert(minid<=maxid)
self._min_id = minid
self._max_id = maxid
def SetFilter ( self, attribute, values, exclude=0 ):
"""
Set values set filter.
Only match records where 'attribute' value is in given 'values' set.
"""
assert(isinstance(attribute, str))
assert iter(values)
for value in values:
AssertInt32 ( value )
self._filters.append ( { 'type':SPH_FILTER_VALUES, 'attr':attribute, 'exclude':exclude, 'values':values } )
def SetFilterString ( self, attribute, value, exclude=0 ):
"""
Set string filter.
        Only match records where 'attribute' value is equal to the given 'value'.
"""
assert(isinstance(attribute, str))
assert(isinstance(value, str))
print ( "attr='%s' val='%s' " % ( attribute, value ) )
self._filters.append ( { 'type':SPH_FILTER_STRING, 'attr':attribute, 'exclude':exclude, 'value':value } )
def SetFilterRange (self, attribute, min_, max_, exclude=0 ):
"""
Set range filter.
        Only match records if 'attribute' value is between 'min_' and 'max_' (inclusive).
"""
assert(isinstance(attribute, str))
AssertInt32(min_)
AssertInt32(max_)
assert(min_<=max_)
self._filters.append ( { 'type':SPH_FILTER_RANGE, 'attr':attribute, 'exclude':exclude, 'min':min_, 'max':max_ } )
def SetFilterFloatRange (self, attribute, min_, max_, exclude=0 ):
assert(isinstance(attribute,str))
assert(isinstance(min_,float))
assert(isinstance(max_,float))
assert(min_ <= max_)
self._filters.append ( {'type':SPH_FILTER_FLOATRANGE, 'attr':attribute, 'exclude':exclude, 'min':min_, 'max':max_} )
def SetGeoAnchor (self, attrlat, attrlong, latitude, longitude):
assert(isinstance(attrlat,str))
assert(isinstance(attrlong,str))
assert(isinstance(latitude,float))
assert(isinstance(longitude,float))
self._anchor['attrlat'] = attrlat
self._anchor['attrlong'] = attrlong
self._anchor['lat'] = latitude
self._anchor['long'] = longitude
def SetGroupBy ( self, attribute, func, groupsort='@group desc' ):
"""
Set grouping attribute and function.
"""
assert(isinstance(attribute, str))
assert(func in [SPH_GROUPBY_DAY, SPH_GROUPBY_WEEK, SPH_GROUPBY_MONTH, SPH_GROUPBY_YEAR, SPH_GROUPBY_ATTR, SPH_GROUPBY_ATTRPAIR] )
assert(isinstance(groupsort, str))
self._groupby = attribute
self._groupfunc = func
self._groupsort = groupsort
def SetGroupDistinct (self, attribute):
assert(isinstance(attribute,str))
self._groupdistinct = attribute
def SetRetries (self, count, delay=0):
assert(isinstance(count,int) and count>=0)
assert(isinstance(delay,int) and delay>=0)
self._retrycount = count
self._retrydelay = delay
def SetOverride (self, name, type, values):
print >> sys.stderr, 'DEPRECATED: Do not call this method. Use SphinxQL REMAP() function instead.'
assert(isinstance(name, str))
assert(type in SPH_ATTR_TYPES)
assert(isinstance(values, dict))
self._overrides[name] = {'name': name, 'type': type, 'values': values}
def SetSelect (self, select):
assert(isinstance(select, str))
self._select = select
def SetQueryFlag ( self, name, value ):
known_names = [ "reverse_scan", "sort_method", "max_predicted_time", "boolean_simplify", "idf", "global_idf" ]
flags = { "reverse_scan":[0, 1], "sort_method":["pq", "kbuffer"],"max_predicted_time":[0], "boolean_simplify":[True, False], "idf":["normalized", "plain", "tfidf_normalized", "tfidf_unnormalized"], "global_idf":[True, False] }
assert ( name in known_names )
assert ( value in flags[name] or ( name=="max_predicted_time" and isinstance(value, (int, long)) and value>=0))
if name=="reverse_scan":
self._query_flags = SetBit ( self._query_flags, 0, value==1 )
if name=="sort_method":
self._query_flags = SetBit ( self._query_flags, 1, value=="kbuffer" )
if name=="max_predicted_time":
self._query_flags = SetBit ( self._query_flags, 2, value>0 )
self._predictedtime = int(value)
if name=="boolean_simplify":
self._query_flags= SetBit ( self._query_flags, 3, value )
if name=="idf" and ( value=="plain" or value=="normalized" ) :
self._query_flags = SetBit ( self._query_flags, 4, value=="plain" )
if name=="global_idf":
self._query_flags= SetBit ( self._query_flags, 5, value )
if name=="idf" and ( value=="tfidf_normalized" or value=="tfidf_unnormalized" ) :
self._query_flags = SetBit ( self._query_flags, 6, value=="tfidf_normalized" )
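    # Usage sketch (hypothetical values): cl.SetQueryFlag ( 'reverse_scan', 1 ) or
    # cl.SetQueryFlag ( 'max_predicted_time', 250 ). Each call sets the matching
    # bit in self._query_flags via SetBit(); max_predicted_time also stores its
    # value in self._predictedtime.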
def SetOuterSelect ( self, orderby, offset, limit ):
assert(isinstance(orderby, str))
assert(isinstance(offset, (int, long)))
assert(isinstance(limit, (int, long)))
assert ( offset>=0 )
assert ( limit>0 )
self._outerorderby = orderby
self._outeroffset = offset
self._outerlimit = limit
self._hasouter = True
def ResetOverrides (self):
self._overrides = {}
def ResetFilters (self):
"""
Clear all filters (for multi-queries).
"""
self._filters = []
self._anchor = {}
def ResetGroupBy (self):
"""
Clear groupby settings (for multi-queries).
"""
self._groupby = ''
self._groupfunc = SPH_GROUPBY_DAY
self._groupsort = '@group desc'
self._groupdistinct = ''
def ResetQueryFlag (self):
self._query_flags = SetBit ( 0, 6, True ) # default idf=tfidf_normalized
self._predictedtime = 0
def ResetOuterSelect (self):
self._outerorderby = ''
self._outeroffset = 0
self._outerlimit = 0
self._hasouter = False
def Query (self, query, index='*', comment=''):
"""
Connect to searchd server and run given search query.
Returns None on failure; result set hash on success (see documentation for details).
"""
assert(len(self._reqs)==0)
self.AddQuery(query,index,comment)
results = self.RunQueries()
self._reqs = [] # we won't re-run erroneous batch
if not results or len(results)==0:
return None
self._error = results[0]['error']
self._warning = results[0]['warning']
if results[0]['status'] == SEARCHD_ERROR:
return None
return results[0]
def AddQuery (self, query, index='*', comment=''):
"""
Add query to batch.
"""
# build request
req = []
req.append(pack('>5L', self._query_flags, self._offset, self._limit, self._mode, self._ranker))
if self._ranker==SPH_RANK_EXPR:
req.append(pack('>L', len(self._rankexpr)))
req.append(self._rankexpr)
req.append(pack('>L', self._sort))
req.append(pack('>L', len(self._sortby)))
req.append(self._sortby)
if isinstance(query,unicode):
query = query.encode('utf-8')
assert(isinstance(query,str))
req.append(pack('>L', len(query)))
req.append(query)
req.append(pack('>L', len(self._weights)))
for w in self._weights:
req.append(pack('>L', w))
assert(isinstance(index,str))
req.append(pack('>L', len(index)))
req.append(index)
req.append(pack('>L',1)) # id64 range marker
req.append(pack('>Q', self._min_id))
req.append(pack('>Q', self._max_id))
# filters
req.append ( pack ( '>L', len(self._filters) ) )
for f in self._filters:
req.append ( pack ( '>L', len(f['attr'])) + f['attr'])
filtertype = f['type']
req.append ( pack ( '>L', filtertype))
if filtertype == SPH_FILTER_VALUES:
req.append ( pack ('>L', len(f['values'])))
for val in f['values']:
req.append ( pack ('>q', val))
elif filtertype == SPH_FILTER_RANGE:
req.append ( pack ('>2q', f['min'], f['max']))
elif filtertype == SPH_FILTER_FLOATRANGE:
req.append ( pack ('>2f', f['min'], f['max']))
elif filtertype == SPH_FILTER_STRING:
req.append ( pack ( '>L', len(f['value']) ) )
req.append ( f['value'] )
req.append ( pack ( '>L', f['exclude'] ) )
# group-by, max-matches, group-sort
req.append ( pack ( '>2L', self._groupfunc, len(self._groupby) ) )
req.append ( self._groupby )
req.append ( pack ( '>2L', self._maxmatches, len(self._groupsort) ) )
req.append ( self._groupsort )
req.append ( pack ( '>LLL', self._cutoff, self._retrycount, self._retrydelay))
req.append ( pack ( '>L', len(self._groupdistinct)))
req.append ( self._groupdistinct)
# anchor point
if len(self._anchor) == 0:
req.append ( pack ('>L', 0))
else:
attrlat, attrlong = self._anchor['attrlat'], self._anchor['attrlong']
latitude, longitude = self._anchor['lat'], self._anchor['long']
req.append ( pack ('>L', 1))
req.append ( pack ('>L', len(attrlat)) + attrlat)
req.append ( pack ('>L', len(attrlong)) + attrlong)
req.append ( pack ('>f', latitude) + pack ('>f', longitude))
# per-index weights
req.append ( pack ('>L',len(self._indexweights)))
for indx,weight in self._indexweights.items():
req.append ( pack ('>L',len(indx)) + indx + pack ('>L',weight))
# max query time
req.append ( pack ('>L', self._maxquerytime) )
# per-field weights
req.append ( pack ('>L',len(self._fieldweights) ) )
for field,weight in self._fieldweights.items():
req.append ( pack ('>L',len(field)) + field + pack ('>L',weight) )
# comment
comment = str(comment)
req.append ( pack('>L',len(comment)) + comment )
# attribute overrides
req.append ( pack('>L', len(self._overrides)) )
for v in self._overrides.values():
req.extend ( ( pack('>L', len(v['name'])), v['name'] ) )
req.append ( pack('>LL', v['type'], len(v['values'])) )
for id, value in v['values'].iteritems():
req.append ( pack('>Q', id) )
if v['type'] == SPH_ATTR_FLOAT:
req.append ( pack('>f', value) )
elif v['type'] == SPH_ATTR_BIGINT:
req.append ( pack('>q', value) )
else:
req.append ( pack('>l', value) )
# select-list
req.append ( pack('>L', len(self._select)) )
req.append ( self._select )
if self._predictedtime>0:
req.append ( pack('>L', self._predictedtime ) )
# outer
req.append ( pack('>L',len(self._outerorderby)) + self._outerorderby )
req.append ( pack ( '>2L', self._outeroffset, self._outerlimit ) )
if self._hasouter:
req.append ( pack('>L', 1) )
else:
req.append ( pack('>L', 0) )
# send query, get response
req = ''.join(req)
self._reqs.append(req)
return
def RunQueries (self):
"""
Run queries batch.
Returns None on network IO failure; or an array of result set hashes on success.
"""
if len(self._reqs)==0:
self._error = 'no queries defined, issue AddQuery() first'
return None
sock = self._Connect()
if not sock:
return None
req = ''.join(self._reqs)
length = len(req)+8
req = pack('>HHLLL', SEARCHD_COMMAND_SEARCH, VER_COMMAND_SEARCH, length, 0, len(self._reqs))+req
self._Send ( sock, req )
response = self._GetResponse(sock, VER_COMMAND_SEARCH)
if not response:
return None
nreqs = len(self._reqs)
# parse response
max_ = len(response)
p = 0
results = []
for i in range(0,nreqs,1):
result = {}
results.append(result)
result['error'] = ''
result['warning'] = ''
status = unpack('>L', response[p:p+4])[0]
p += 4
result['status'] = status
if status != SEARCHD_OK:
length = unpack('>L', response[p:p+4])[0]
p += 4
message = response[p:p+length]
p += length
if status == SEARCHD_WARNING:
result['warning'] = message
else:
result['error'] = message
continue
# read schema
fields = []
attrs = []
nfields = unpack('>L', response[p:p+4])[0]
p += 4
while nfields>0 and p<max_:
nfields -= 1
length = unpack('>L', response[p:p+4])[0]
p += 4
fields.append(response[p:p+length])
p += length
result['fields'] = fields
nattrs = unpack('>L', response[p:p+4])[0]
p += 4
while nattrs>0 and p<max_:
nattrs -= 1
length = unpack('>L', response[p:p+4])[0]
p += 4
attr = response[p:p+length]
p += length
type_ = unpack('>L', response[p:p+4])[0]
p += 4
attrs.append([attr,type_])
result['attrs'] = attrs
# read match count
count = unpack('>L', response[p:p+4])[0]
p += 4
id64 = unpack('>L', response[p:p+4])[0]
p += 4
# read matches
result['matches'] = []
while count>0 and p<max_:
count -= 1
if id64:
doc, weight = unpack('>QL', response[p:p+12])
p += 12
else:
doc, weight = unpack('>2L', response[p:p+8])
p += 8
match = { 'id':doc, 'weight':weight, 'attrs':{} }
for i in range(len(attrs)):
if attrs[i][1] == SPH_ATTR_FLOAT:
match['attrs'][attrs[i][0]] = unpack('>f', response[p:p+4])[0]
elif attrs[i][1] == SPH_ATTR_BIGINT:
match['attrs'][attrs[i][0]] = unpack('>q', response[p:p+8])[0]
p += 4
elif attrs[i][1] == SPH_ATTR_STRING:
slen = unpack('>L', response[p:p+4])[0]
p += 4
match['attrs'][attrs[i][0]] = ''
if slen>0:
match['attrs'][attrs[i][0]] = response[p:p+slen]
p += slen-4
elif attrs[i][1] == SPH_ATTR_FACTORS:
slen = unpack('>L', response[p:p+4])[0]
p += 4
match['attrs'][attrs[i][0]] = ''
if slen>0:
match['attrs'][attrs[i][0]] = response[p:p+slen-4]
p += slen-4
p -= 4
elif attrs[i][1] == SPH_ATTR_MULTI:
match['attrs'][attrs[i][0]] = []
nvals = unpack('>L', response[p:p+4])[0]
p += 4
for n in range(0,nvals,1):
match['attrs'][attrs[i][0]].append(unpack('>L', response[p:p+4])[0])
p += 4
p -= 4
elif attrs[i][1] == SPH_ATTR_MULTI64:
match['attrs'][attrs[i][0]] = []
nvals = unpack('>L', response[p:p+4])[0]
nvals = nvals/2
p += 4
for n in range(0,nvals,1):
match['attrs'][attrs[i][0]].append(unpack('>q', response[p:p+8])[0])
p += 8
p -= 4
else:
match['attrs'][attrs[i][0]] = unpack('>L', response[p:p+4])[0]
p += 4
result['matches'].append ( match )
result['total'], result['total_found'], result['time'], words = unpack('>4L', response[p:p+16])
result['time'] = '%.3f' % (result['time']/1000.0)
p += 16
result['words'] = []
while words>0:
words -= 1
length = unpack('>L', response[p:p+4])[0]
p += 4
word = response[p:p+length]
p += length
docs, hits = unpack('>2L', response[p:p+8])
p += 8
result['words'].append({'word':word, 'docs':docs, 'hits':hits})
self._reqs = []
return results
def BuildExcerpts (self, docs, index, words, opts=None):
"""
        Connect to searchd server and generate excerpts from given documents.
"""
if not opts:
opts = {}
if isinstance(words,unicode):
words = words.encode('utf-8')
assert(isinstance(docs, list))
assert(isinstance(index, str))
assert(isinstance(words, str))
assert(isinstance(opts, dict))
sock = self._Connect()
if not sock:
return None
# fixup options
opts.setdefault('before_match', '<b>')
opts.setdefault('after_match', '</b>')
opts.setdefault('chunk_separator', ' ... ')
opts.setdefault('html_strip_mode', 'index')
opts.setdefault('limit', 256)
opts.setdefault('limit_passages', 0)
opts.setdefault('limit_words', 0)
opts.setdefault('around', 5)
opts.setdefault('start_passage_id', 1)
opts.setdefault('passage_boundary', 'none')
# build request
# v.1.0 req
flags = 1 # (remove spaces)
if opts.get('exact_phrase'): flags |= 2
if opts.get('single_passage'): flags |= 4
if opts.get('use_boundaries'): flags |= 8
if opts.get('weight_order'): flags |= 16
if opts.get('query_mode'): flags |= 32
if opts.get('force_all_words'): flags |= 64
if opts.get('load_files'): flags |= 128
if opts.get('allow_empty'): flags |= 256
if opts.get('emit_zones'): flags |= 512
if opts.get('load_files_scattered'): flags |= 1024
# mode=0, flags
req = [pack('>2L', 0, flags)]
# req index
req.append(pack('>L', len(index)))
req.append(index)
# req words
req.append(pack('>L', len(words)))
req.append(words)
# options
req.append(pack('>L', len(opts['before_match'])))
req.append(opts['before_match'])
req.append(pack('>L', len(opts['after_match'])))
req.append(opts['after_match'])
req.append(pack('>L', len(opts['chunk_separator'])))
req.append(opts['chunk_separator'])
req.append(pack('>L', int(opts['limit'])))
req.append(pack('>L', int(opts['around'])))
req.append(pack('>L', int(opts['limit_passages'])))
req.append(pack('>L', int(opts['limit_words'])))
req.append(pack('>L', int(opts['start_passage_id'])))
req.append(pack('>L', len(opts['html_strip_mode'])))
req.append((opts['html_strip_mode']))
req.append(pack('>L', len(opts['passage_boundary'])))
req.append((opts['passage_boundary']))
# documents
req.append(pack('>L', len(docs)))
for doc in docs:
if isinstance(doc,unicode):
doc = doc.encode('utf-8')
assert(isinstance(doc, str))
req.append(pack('>L', len(doc)))
req.append(doc)
req = ''.join(req)
# send query, get response
length = len(req)
# add header
req = pack('>2HL', SEARCHD_COMMAND_EXCERPT, VER_COMMAND_EXCERPT, length)+req
self._Send ( sock, req )
response = self._GetResponse(sock, VER_COMMAND_EXCERPT )
if not response:
return []
# parse response
pos = 0
res = []
rlen = len(response)
for i in range(len(docs)):
length = unpack('>L', response[pos:pos+4])[0]
pos += 4
if pos+length > rlen:
self._error = 'incomplete reply'
return []
res.append(response[pos:pos+length])
pos += length
return res
def UpdateAttributes ( self, index, attrs, values, mva=False, ignorenonexistent=False ):
"""
Update given attribute values on given documents in given indexes.
Returns amount of updated documents (0 or more) on success, or -1 on failure.
'attrs' must be a list of strings.
'values' must be a dict with int key (document ID) and list of int values (new attribute values).
        The optional boolean parameter 'mva' indicates that MVA attributes are being updated.
In this case the 'values' must be a dict with int key (document ID) and list of lists of int values
(new MVA attribute values).
        The optional boolean parameter 'ignorenonexistent' makes the update silently ignore any warnings about
        trying to update a column which does not exist in the current index schema.
Example:
res = cl.UpdateAttributes ( 'test1', [ 'group_id', 'date_added' ], { 2:[123,1000000000], 4:[456,1234567890] } )
"""
assert ( isinstance ( index, str ) )
assert ( isinstance ( attrs, list ) )
assert ( isinstance ( values, dict ) )
for attr in attrs:
assert ( isinstance ( attr, str ) )
for docid, entry in values.items():
AssertUInt32(docid)
assert ( isinstance ( entry, list ) )
assert ( len(attrs)==len(entry) )
for val in entry:
if mva:
assert ( isinstance ( val, list ) )
for vals in val:
AssertInt32(vals)
else:
AssertInt32(val)
# build request
req = [ pack('>L',len(index)), index ]
req.append ( pack('>L',len(attrs)) )
ignore_absent = 0
if ignorenonexistent: ignore_absent = 1
req.append ( pack('>L', ignore_absent ) )
mva_attr = 0
if mva: mva_attr = 1
for attr in attrs:
req.append ( pack('>L',len(attr)) + attr )
req.append ( pack('>L', mva_attr ) )
req.append ( pack('>L',len(values)) )
for docid, entry in values.items():
req.append ( pack('>Q',docid) )
for val in entry:
val_len = val
if mva: val_len = len ( val )
req.append ( pack('>L',val_len ) )
if mva:
for vals in val:
req.append ( pack ('>L',vals) )
# connect, send query, get response
sock = self._Connect()
if not sock:
return None
req = ''.join(req)
length = len(req)
req = pack ( '>2HL', SEARCHD_COMMAND_UPDATE, VER_COMMAND_UPDATE, length ) + req
self._Send ( sock, req )
response = self._GetResponse ( sock, VER_COMMAND_UPDATE )
if not response:
return -1
# parse response
updated = unpack ( '>L', response[0:4] )[0]
return updated
def BuildKeywords ( self, query, index, hits ):
"""
Connect to searchd server, and generate keywords list for a given query.
Returns None on failure, or a list of keywords on success.
"""
assert ( isinstance ( query, str ) )
assert ( isinstance ( index, str ) )
assert ( isinstance ( hits, int ) )
# build request
req = [ pack ( '>L', len(query) ) + query ]
req.append ( pack ( '>L', len(index) ) + index )
req.append ( pack ( '>L', hits ) )
# connect, send query, get response
sock = self._Connect()
if not sock:
return None
req = ''.join(req)
length = len(req)
req = pack ( '>2HL', SEARCHD_COMMAND_KEYWORDS, VER_COMMAND_KEYWORDS, length ) + req
self._Send ( sock, req )
response = self._GetResponse ( sock, VER_COMMAND_KEYWORDS )
if not response:
return None
# parse response
res = []
nwords = unpack ( '>L', response[0:4] )[0]
p = 4
max_ = len(response)
while nwords>0 and p<max_:
nwords -= 1
length = unpack ( '>L', response[p:p+4] )[0]
p += 4
tokenized = response[p:p+length]
p += length
length = unpack ( '>L', response[p:p+4] )[0]
p += 4
normalized = response[p:p+length]
p += length
entry = { 'tokenized':tokenized, 'normalized':normalized }
if hits:
entry['docs'], entry['hits'] = unpack ( '>2L', response[p:p+8] )
p += 8
res.append ( entry )
if nwords>0 or p>max_:
self._error = 'incomplete reply'
return None
return res
def Status ( self, session=False ):
"""
Get the status
"""
# connect, send query, get response
sock = self._Connect()
if not sock:
return None
sess = 1
if session:
sess = 0
req = pack ( '>2HLL', SEARCHD_COMMAND_STATUS, VER_COMMAND_STATUS, 4, sess )
self._Send ( sock, req )
response = self._GetResponse ( sock, VER_COMMAND_STATUS )
if not response:
return None
# parse response
res = []
p = 8
max_ = len(response)
while p<max_:
length = unpack ( '>L', response[p:p+4] )[0]
k = response[p+4:p+length+4]
p += 4+length
length = unpack ( '>L', response[p:p+4] )[0]
v = response[p+4:p+length+4]
p += 4+length
res += [[k, v]]
return res
### persistent connections
def Open(self):
if self._socket:
self._error = 'already connected'
return None
server = self._Connect()
if not server:
return None
# command, command version = 0, body length = 4, body = 1
request = pack ( '>hhII', SEARCHD_COMMAND_PERSIST, 0, 4, 1 )
self._Send ( server, request )
self._socket = server
return True
def Close(self):
if not self._socket:
self._error = 'not connected'
return
self._socket.close()
self._socket = None
def EscapeString(self, string):
return re.sub(r"([=\(\)|\-!@~\"&/\\\^\$\=\<])", r"\\\1", string)
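    # Example: EscapeString ( 'hello (world)!' ) returns 'hello \(world\)\!',
    # escaping the characters that are special in the extended query syntax.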
def FlushAttributes(self):
sock = self._Connect()
if not sock:
return -1
request = pack ( '>hhI', SEARCHD_COMMAND_FLUSHATTRS, VER_COMMAND_FLUSHATTRS, 0 ) # cmd, ver, bodylen
self._Send ( sock, request )
response = self._GetResponse ( sock, VER_COMMAND_FLUSHATTRS )
if not response or len(response)!=4:
self._error = 'unexpected response length'
return -1
tag = unpack ( '>L', response[0:4] )[0]
return tag
def AssertInt32 ( value ):
assert(isinstance(value, (int, long)))
assert(value>=-2**32-1 and value<=2**32-1)
def AssertUInt32 ( value ):
assert(isinstance(value, (int, long)))
assert(value>=0 and value<=2**32-1)
def SetBit ( flag, bit, on ):
if on:
flag += ( 1<<bit )
else:
reset = 255 ^ ( 1<<bit )
flag = flag & reset
return flag
#
# $Id$
#
| gpl-2.0 | -197,369,445,877,807,000 | 27.323648 | 228 | 0.632576 | false |
nurnbeck/project-2-CMPUT-291 | ret_DATA.py | 1 | 1814 | import os
import time
import bsddb3 as bsddb
'''
Retrieve records with a given data value
- Assume that database is closed before calling ret_DATA();
- Writes (append) the result to the file 'answers'.
For now I assume that indexfile = btree, further tests are necessary.
Tested under DB_SIZE = 10
'''
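# Example run (illustrative; assumes the Berkeley DB files referenced below exist):
#   ret_DATA('btree')   # prompts for a data value, prints matching keys,
#                       # and appends them to the 'answers' file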
DB_FILE = "/tmp/yishuo_db/sample_db"
SDB_FILE = "/tmp/yishuo_db/IndexFile"
def ret_DATA(filetype):
if filetype == 'btree':
db = bsddb.btopen(DB_FILE, 'r')
elif filetype == 'hash':
db = bsddb.hashopen(DB_FILE, 'r')
elif filetype == 'indexfile':
db = bsddb.btopen(DB_FILE, 'r')
indexfile = bsddb.hashopen(SDB_FILE, 'r')
else:
print("Unknown type, function terminated\n")
return
# open answers for writing, appending to the end of the file
answers = open('answers', 'a')
result_lst = []
data = input("Enter the data you want to search > ")
data = data.encode(encoding = 'UTF-8')
start_time = time.time()
for key in db.keys():
if db[key] == data:
result_lst.append(key.decode(encoding = 'UTF-8'))
end_time = time.time()
elapse_time = (end_time - start_time) * 1000000
print("Result:")
data = data.decode(encoding = 'UTF-8')
if result_lst:
for key in result_lst:
print('Key:', key)
answers.write(key)
answers.write('\n')
print('Data:', data)
answers.write(data)
answers.write('\n')
answers.write('\n')
else:
print("Data not found")
print()
print(len(result_lst), "record(s) received")
print("Used", elapse_time, "micro seconds")
print()
answers.close()
db.close()
if filetype == 'indexfile':
indexfile.close()
return
| mit | 2,081,410,483,051,653,400 | 24.914286 | 69 | 0.582139 | false |
benjamindeleener/odoo | addons/l10n_ca/__openerp__.py | 1 | 1831 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Canada - Accounting',
'version': '1.0',
'author': 'Savoir-faire Linux',
'website': 'https://www.savoirfairelinux.com',
'category': 'Localization/Account Charts',
'description': """
This is the module to manage the Canadian accounting chart in Odoo.
===========================================================================================
Canadian accounting charts and localizations.
Fiscal positions
----------------
When considering taxes to be applied, it is the province where the delivery occurs that matters.
Therefore we decided to implement the most common case in the fiscal positions: delivery is the
responsibility of the vendor and done at the customer location.
Some examples:
1) You have a customer from another province and you deliver to his location.
On the customer, set the fiscal position to his province.
2) You have a customer from another province. However, this customer comes to your location
with their truck to pick up products. On the customer, do not set any fiscal position.
3) An international vendor doesn't charge you any tax. Taxes are charged at customs
by the customs broker. On the vendor, set the fiscal position to International.
4) An international vendor charges you your provincial tax. They are registered with your
position.
""",
'depends': [
'account',
'base_iban',
'base_vat',
'l10n_multilang',
],
'data': [
'account_chart_template.xml',
'account_chart.xml',
'account_chart_template_after.xml',
'account_tax.xml',
'fiscal_templates.xml',
'account_chart_template.yml',
],
'installable': True,
'post_init_hook': 'load_translations',
}
| gpl-3.0 | 8,879,892,428,055,025,000 | 33.54717 | 96 | 0.656472 | false |
AnotherBobSmith/CLUZ | cluz_dialog3.py | 1 | 15904 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
A QGIS plugin
CLUZ for QGIS
-------------------
begin : 2016-23-02
copyright : (C) 2016 by Bob Smith, DICE
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
import qgis
import os
import csv
import cluz_setup
import cluz_functions1
import cluz_functions3
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/forms")
from cluz_form_target import Ui_targetDialog
from cluz_form_abund_select import Ui_abundSelectDialog
from cluz_form_abund import Ui_abundDialog
from cluz_form_change import Ui_ChangeStatusDialog
from cluz_form_identify import Ui_identifyDialog
from cluz_form_met import Ui_metDialog
class targetDialog(QDialog, Ui_targetDialog):
def __init__(self, iface, setupObject):
QDialog.__init__(self)
self.iface = iface
self.setupUi(self)
self.clip = QApplication.clipboard()
targetDict = cluz_setup.makeTargetDict(setupObject)
if targetDict != "blank":
setupObject.targetDict = targetDict
self.loadTargetDictData(setupObject)
def loadTargetDictData(self, setupObject):
decPrec = setupObject.decimalPlaces
targetCSVFilePath = setupObject.targetPath
decPrecHeaderNameList = ["target", "spf", "conserved", "total"] # List of columns that will be changed to decimal precision
pcValueUpdate = False
with open(targetCSVFilePath, 'rb') as f:
targetReader = csv.reader(f)
targetHeaderList = targetReader.next()
lowerHeaderList = []
for aHeader in targetHeaderList:
lowerHeaderList.append(aHeader.lower())
self.targetTableWidget.clear()
self.targetTableWidget.setColumnCount(len(targetHeaderList))
insertRowNumber = 0
for aRow in targetReader:
pcValue = aRow[lowerHeaderList.index("pc_target")]
targetValue = float(aRow[lowerHeaderList.index("target")])
consValue = float(aRow[lowerHeaderList.index("conserved")])
if targetValue <= 0:
limboPCValue = "-1"
else:
limboPCValue = consValue / targetValue
limboPCValue *= 100
limboPCValue = cluz_setup.returnRoundedValue(setupObject, limboPCValue)
if float(limboPCValue) != float(pcValue):
pcValueUpdate = True
aRow[lowerHeaderList.index("pc_target")] = limboPCValue
addTargetTableRow(self, aRow, targetHeaderList, decPrecHeaderNameList, insertRowNumber, decPrec)
insertRowNumber += 1
self.targetTableWidget.setHorizontalHeaderLabels(targetHeaderList)
for aColValue in range(len(targetHeaderList)):
self.targetTableWidget.resizeColumnToContents(aColValue)
if pcValueUpdate == True:
cluz_setup.updateTargetCSVFromTargetDict(setupObject, setupObject.targetDict)
# http://stackoverflow.com/questions/24971305/copy-pyqt-table-selection-including-column-and-row-headers
def keyPressEvent(self, e):
if (e.modifiers() & Qt.ControlModifier):
selected = self.targetTableWidget.selectedRanges()
if e.key() == Qt.Key_C: #copy
s = ""
for r in xrange(selected[0].topRow(), selected[0].bottomRow() + 1):
for c in xrange(selected[0].leftColumn(), selected[0].rightColumn()+1):
try:
s += str(self.targetTableWidget.item(r, c).text()) + "\t"
except AttributeError:
s += "\t"
s = s[:-1] + "\n" #eliminate last '\t'
self.clip.setText(s)
def addTargetTableRow(self, aRow, targetHeaderList, decPrecHeaderNameList, insertRowNumber, decPrec):
self.targetTableWidget.insertRow(insertRowNumber)
for aColValue in range(len(targetHeaderList)):
headerName = targetHeaderList[aColValue].lower()
tableValue = aRow[aColValue]
if headerName in decPrecHeaderNameList:
tableValue = round(float(tableValue), decPrec)
tableValue = format(tableValue, "." + str(decPrec) + "f")
targTableItem = QTableWidgetItem(str(tableValue))
if headerName == "target":
targetValue = tableValue
elif headerName == "conserved":
conservedValue = tableValue
if headerName == "pc_target" and str(tableValue) == "-1":
targTableItem.setTextColor(QColor.fromRgb(128, 128, 128))
elif headerName == "pc_target" and float(tableValue) >= 0:
if float(conservedValue) < float(targetValue):
targTableItem.setTextColor(QColor.fromRgb(255, 0, 0))
else:
targTableItem.setTextColor(QColor.fromRgb(0, 102, 51))
self.targetTableWidget.setItem(insertRowNumber, aColValue, targTableItem)
class abundSelectDialog(QDialog, Ui_abundSelectDialog):
def __init__(self, iface, setupObject):
QDialog.__init__(self)
self.iface = iface
self.setupUi(self)
featStringDict = self.loadAbundSelectFeatureList(setupObject)
QObject.connect(self.okButton, SIGNAL("clicked()"), lambda: self.displayAbundValues(setupObject, featStringDict))
def loadAbundSelectFeatureList(self, setupObject):
featIDList = setupObject.targetDict.keys()
featIDList.sort()
featStringList = []
featStringDict = {}
for aFeat in featIDList:
aString = str(aFeat) + " - " + setupObject.targetDict[aFeat][0]
featStringList.append(aString)
featStringDict[aString] = aFeat
self.featListWidget.addItems(featStringList)
return featStringDict
def displayAbundValues(self, setupObject, featStringDict):
selectedFeatIDList = [featStringDict[item.text()] for item in self.featListWidget.selectedItems()]
if len(selectedFeatIDList) == 0:
selectedFeatIDList = setupObject.targetDict.keys()
self.close()
self.abundDialog = abundDialog(self, setupObject, selectedFeatIDList)
# show the dialog
self.abundDialog.show()
# Run the dialog event loop
result = self.abundDialog.exec_()
class abundDialog(QDialog, Ui_abundDialog):
def __init__(self, iface, setupObject, selectedFeatIDList):
QDialog.__init__(self)
self.iface = iface
self.setupUi(self)
self.clip = QApplication.clipboard()
self.loadAbundDictData(setupObject, selectedFeatIDList)
def loadAbundDictData(self, setupObject, selectedFeatIDList):
decPrec = setupObject.decimalPlaces
abundPUKeyDict = setupObject.abundPUKeyDict
featSet = set(selectedFeatIDList)
abundHeaderList = ["PU_ID"]
for aFeatID in featSet:
abundHeaderList.append("F_" + str(aFeatID))
self.abundTableWidget.clear()
self.abundTableWidget.setColumnCount(len(abundHeaderList))
insertRowNumber = 0
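        # Build one table row per planning unit: start with zero strings for every selected
        # feature, then overwrite the cells where the unit actually holds an amount.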
for puID in abundPUKeyDict:
self.abundTableWidget.insertRow(insertRowNumber)
zeroValue = round(0.0, decPrec)
zeroValue = format(zeroValue, "." + str(decPrec) + "f")
blankString = str(zeroValue)
puStringList = [blankString] * len(featSet)
puAbundDict = abundPUKeyDict[puID]
for featID in puAbundDict:
if featID in featSet:
featAmount = puAbundDict[featID]
featAmount = round(float(featAmount), decPrec)
featAmount = format(featAmount, "." + str(decPrec) + "f")
featIndex = list(featSet).index(featID)
puStringList[featIndex] = str(featAmount)
puStringList.insert(0, str(puID))
for aColValue in range(len(puStringList)):
featValue = puStringList[aColValue]
abundTableItem = QTableWidgetItem(str(featValue))
self.abundTableWidget.setItem(insertRowNumber, aColValue, abundTableItem)
insertRowNumber += 1
self.abundTableWidget.setHorizontalHeaderLabels(abundHeaderList)
for aColValue in range(len(abundHeaderList)):
self.abundTableWidget.resizeColumnToContents(aColValue)
# http://stackoverflow.com/questions/24971305/copy-pyqt-table-selection-including-column-and-row-headers
def keyPressEvent(self, e):
if (e.modifiers() & Qt.ControlModifier):
selected = self.abundTableWidget.selectedRanges()
if e.key() == Qt.Key_C: #copy
s = ""
for r in xrange(selected[0].topRow(), selected[0].bottomRow() + 1):
for c in xrange(selected[0].leftColumn(), selected[0].rightColumn()+1):
try:
s += str(self.abundTableWidget.item(r, c).text()) + "\t"
except AttributeError:
s += "\t"
s = s[:-1] + "\n" #eliminate last '\t'
self.clip.setText(s)
class changeStatusDialog(QDialog, Ui_ChangeStatusDialog):
def __init__(self, iface, setupObject):
QDialog.__init__(self, None, Qt.WindowStaysOnTopHint)
self.iface = iface
self.setupUi(self)
(targetsMetCount, targetCount) = cluz_functions3.returnTargetsMetTuple(setupObject)
self.targetsMetLabel.setText("Targets met: " + str(targetsMetCount) + " of " + str(targetCount))
self.undoButton.setEnabled(False)
QObject.connect(self.changeButton, SIGNAL("clicked()"), lambda: self.changeStatus(setupObject))
QObject.connect(self.undoButton, SIGNAL("clicked()"), lambda: self.undoStatusChange(setupObject))
QObject.connect(self.closeButton, SIGNAL("clicked()"), lambda: self.closeStatusDialog(setupObject))
def changeStatus(self, setupObject):
if self.availableButton.isChecked():
statusType = "Available"
elif self.earmarkedButton.isChecked():
statusType = "Earmarked"
elif self.conservedButton.isChecked():
statusType = "Conserved"
elif self.excludedButton.isChecked():
statusType = "Excluded"
changeLockedPUsBool = self.changeCheckBox.isChecked()
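        # Apply the chosen status to the selected planning units, work out how the conserved
        # amounts change, refresh the stored targets and update the targets-met counter.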
selectedPUIDStatusDict = cluz_functions3.changeStatusPuLayer(setupObject, statusType, changeLockedPUsBool)
changeAbundDict = cluz_functions3.calcChangeAbundDict(setupObject, selectedPUIDStatusDict, statusType)
targetDict = cluz_functions3.updateTargetDictWithChanges(setupObject, changeAbundDict)
setupObject.targetDict = targetDict
cluz_setup.updateTargetCSVFromTargetDict(setupObject, targetDict)
(targetsMetCount, targetCount) = cluz_functions3.returnTargetsMetTuple(setupObject)
self.targetsMetLabel.setText("Targets met: " + str(targetsMetCount) + " of " + str(targetCount))
setupObject.selectedPUIDStatusDict = selectedPUIDStatusDict
self.undoButton.setEnabled(True)
def undoStatusChange(self, setupObject):
canvas = qgis.utils.iface.mapCanvas()
cluz_functions3.undoStatusChangeInPuLayer(setupObject)
newConTotDict = cluz_functions1.returnConTotDict(setupObject)
targetDict = cluz_functions1.updateConTotFieldsTargetDict(setupObject, newConTotDict)
cluz_setup.updateTargetCSVFromTargetDict(setupObject, targetDict)
setupObject.targetDict = targetDict
(targetsMetCount, targetCount) = cluz_functions3.returnTargetsMetTuple(setupObject)
self.targetsMetLabel.setText("Targets met: " + str(targetsMetCount) + " of " + str(targetCount))
setupObject.selectedPUIDStatusDict = "blank"
self.undoButton.setEnabled(False)
canvas.refresh()
def closeStatusDialog(self, setupObject):
self.close()
class identifyDialog(QDialog, Ui_identifyDialog):
def __init__(self, iface, setupObject, point):
QDialog.__init__(self)
self.iface = iface
self.setupUi(self)
selectedPUIDList = cluz_functions3.returnPointPUIDList(setupObject, point)
identDict, targetMetDict = cluz_functions3.makeIdentifyData(setupObject, selectedPUIDList)
titleString = cluz_functions3.setIdentifyDialogWindowTitle(selectedPUIDList, identDict)
if len(identDict.keys()) > 0:
self.identDict = identDict
self.targetMetDict = targetMetDict
self.showIdentifyData()
self.setWindowTitle(titleString)
self.setWindowTitle(titleString)
def showIdentifyData(self):
self.identifyTableWidget.clear()
self.identifyTableWidget.setColumnCount(7)
cluz_functions3.addIdenitfyDataToTableWidget(self.identifyTableWidget, self.targetMetDict, self.identDict)
headerList = ["ID ", "Name ", "Amount ", "As % of total ", "Target ", "As % of target ", "% of target currently met "]
self.identifyTableWidget.setHorizontalHeaderLabels(headerList)
for aColValue in range(len(headerList)):
self.identifyTableWidget.resizeColumnToContents(aColValue)
class metDialog(QDialog, Ui_metDialog):
def __init__(self, iface, setupObject):
QDialog.__init__(self)
outputPath = setupObject.outputPath
outputName = setupObject.outputName + "_mvbest.txt"
self.metTargetFile = outputPath + os.sep + outputName
self.iface = iface
self.setupUi(self)
self.metLoadTargetDictData()
self.setWindowTitle("Marxan Targets Met table for analysis " + setupObject.outputName)
def metLoadTargetDictData(self):
targetMetDict = {}
with open(self.metTargetFile, 'rb') as f:
targetMetReader = csv.reader(f)
targetMetHeaderList = next(targetMetReader, None)
for row in targetMetReader:
puID = int(row.pop(0))
targetMetDict[puID] = row
targetIDList = targetMetDict.keys()
targetIDList.sort()
self.metTableWidget.clear()
self.metTableWidget.setColumnCount(len(targetMetHeaderList))
insertRowNumber = 0
for aFeat in targetIDList:
self.metTableWidget.insertRow(insertRowNumber)
aRowList = targetMetDict[aFeat]
aRowList.insert(0, aFeat)
for aColValue in range(len(targetMetHeaderList)):
featValue = aRowList[aColValue]
metTableItem = QTableWidgetItem(str(featValue))
self.metTableWidget.setItem(insertRowNumber,aColValue,metTableItem)
insertRowNumber += 1
self.metTableWidget.setHorizontalHeaderLabels(targetMetHeaderList)
for aColValue in range(len(targetMetHeaderList)):
self.metTableWidget.resizeColumnToContents(aColValue)
| gpl-2.0 | -8,681,222,330,805,281,000 | 42.453552 | 131 | 0.624245 | false |
yakky/django-cms | cms/models/static_placeholder.py | 1 | 3681 | import uuid
from django.contrib.auth import get_permission_codename
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.translation import ugettext_lazy as _
from six import text_type, python_2_unicode_compatible
from cms.models.fields import PlaceholderField
from cms.utils.copy_plugins import copy_plugins_to
def static_slotname(instance):
"""
Returns a string to be used as the slot
for the static placeholder field.
"""
return instance.code
@python_2_unicode_compatible
class StaticPlaceholder(models.Model):
CREATION_BY_TEMPLATE = 'template'
CREATION_BY_CODE = 'code'
CREATION_METHODS = (
(CREATION_BY_TEMPLATE, _('by template')),
(CREATION_BY_CODE, _('by code')),
)
name = models.CharField(
verbose_name=_(u'static placeholder name'), max_length=255, blank=True, default='',
help_text=_(u'Descriptive name to identify this static placeholder. Not displayed to users.'))
code = models.CharField(
verbose_name=_(u'placeholder code'), max_length=255, blank=True,
help_text=_(u'To render the static placeholder in templates.'))
draft = PlaceholderField(static_slotname, verbose_name=_(u'placeholder content'), related_name='static_draft')
public = PlaceholderField(static_slotname, editable=False, related_name='static_public')
dirty = models.BooleanField(default=False, editable=False)
creation_method = models.CharField(
verbose_name=_('creation_method'), choices=CREATION_METHODS,
default=CREATION_BY_CODE, max_length=20, blank=True,
)
site = models.ForeignKey(Site, on_delete=models.CASCADE, null=True, blank=True)
class Meta:
verbose_name = _(u'static placeholder')
verbose_name_plural = _(u'static placeholders')
app_label = 'cms'
unique_together = (('code', 'site'),)
def __str__(self):
return self.get_name()
def get_name(self):
return self.name or self.code or text_type(self.pk)
get_name.short_description = _(u'static placeholder name')
def clean(self):
# TODO: check for clashes if the random code is already taken
if not self.code:
self.code = u'static-%s' % uuid.uuid4()
if not self.site:
placeholders = StaticPlaceholder.objects.filter(code=self.code, site__isnull=True)
if self.pk:
placeholders = placeholders.exclude(pk=self.pk)
if placeholders.exists():
raise ValidationError(_("A static placeholder with the same site and code already exists"))
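    # publish() copies the draft placeholder's plugins into the public placeholder for one
    # language, clears the cached public content and resets the dirty flag.
    # Typical call (a sketch, not taken from this file): static_placeholder.publish(request, 'en', force=True)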
def publish(self, request, language, force=False):
if force or self.has_publish_permission(request):
self.public.clear(language=language)
self.public.clear_cache(language=language)
plugins = self.draft.get_plugins_list(language=language)
copy_plugins_to(plugins, self.public, no_signals=True)
self.dirty = False
self.save()
return True
return False
def has_change_permission(self, request):
if request.user.is_superuser:
return True
opts = self._meta
return request.user.has_perm(opts.app_label + '.' + get_permission_codename('change', opts))
def has_publish_permission(self, request):
if request.user.is_superuser:
return True
opts = self._meta
return request.user.has_perm(opts.app_label + '.' + get_permission_codename('change', opts)) and \
request.user.has_perm(opts.app_label + '.' + 'publish_page')
| bsd-3-clause | -8,891,000,634,964,670,000 | 39.01087 | 114 | 0.659332 | false |
nickgentoo/LSTM-timepredictionPMdata | code/nick_evaluate_suffix_and_remaining_time_only_time_OHenc.py | 1 | 15048 | '''
this script takes as input the LSTM or RNN weights found by train.py;
change the path in line 178 of this script to point to the h5 file
with the LSTM or RNN weights generated by train.py.
Author: Niek Tax
'''
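# Example invocation (a sketch inferred from the sys.argv usage and file paths below, not documented here):
#   python nick_evaluate_suffix_and_remaining_time_only_time_OHenc.py <fileprefix> <eventlog.csv>
# <fileprefix> selects output_files/models/<fileprefix>_model.json and its best weights file,
# <eventlog.csv> is a CSV under ../data/ with columns: case id, activity id, timestamp, extra attributes.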
from __future__ import division
from keras.models import load_model
import csv
import copy
import numpy as np
import distance
from itertools import izip
from jellyfish._jellyfish import damerau_levenshtein_distance
import unicodecsv
from sklearn import metrics
from math import sqrt
import time
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
from collections import Counter
from keras.models import model_from_json
import sys
fileprefix=sys.argv[1]
eventlog = sys.argv[2]
csvfile = open('../data/%s' % eventlog, 'r')
spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
next(spamreader, None) # skip the headers
ascii_offset = 161
lastcase = ''
line = ''
firstLine = True
lines = []
timeseqs = []
timeseqs2 = []
timeseqs3 = []
timeseqs4 = []
y_times = []
times = []
times2 = []
times3 = []
times4 = []
# nick
attributes = []
attributes_dict = []
attributes_sizes = []
numlines = 0
casestarttime = None
lasteventtime = None
csvfile = open('../data/%s' % eventlog, 'r')
spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
next(spamreader, None) # skip the headers
ascii_offset = 161
y = []
for row in spamreader:
#print(row)
t = time.strptime(row[2], "%Y-%m-%d %H:%M:%S")
#test different format
#t = 0#time.strptime(row[2], "%Y/%m/%d %H:%M:%S")
if row[0]!=lastcase:
casestarttime = t
lasteventtime = t
lastcase = row[0]
if not firstLine:
#print (line)
lines.append(line)
timeseqs.append(times)
timeseqs2.append(times2)
#target
y_times.extend([times2[-1]-k for k in times2])
timeseqs3.append(times3)
timeseqs4.append(times4)
for i in xrange(len(attributes)):
#print(attributesvalues[i])
attributes[i].append(attributesvalues[i])
else:
#if firstline. I have to add te elements to attributes
for a in row[3:]:
attributes.append([])
attributes_dict.append({})
attributes_sizes.append(0)
#print(attributes)
n_events_in_trace=0
line = ''
times = []
times2 = []
times3 = []
times4 = []
attributesvalues = [ ]
numlines+=1
n_events_in_trace+=1
line+=unichr(int(row[1])+ascii_offset)
timesincelastevent = datetime.fromtimestamp(time.mktime(t))-datetime.fromtimestamp(time.mktime(lasteventtime))
timesincecasestart = datetime.fromtimestamp(time.mktime(t))-datetime.fromtimestamp(time.mktime(casestarttime))
midnight = datetime.fromtimestamp(time.mktime(t)).replace(hour=0, minute=0, second=0, microsecond=0)
timesincemidnight = datetime.fromtimestamp(time.mktime(t))-midnight
timediff = 86400 * timesincelastevent.days + timesincelastevent.seconds
timediff2 = 86400 * timesincecasestart.days + timesincecasestart.seconds
timediff3 = timesincemidnight.seconds
timediff4 = datetime.fromtimestamp(time.mktime(t)).weekday()
times.append(timediff)
times2.append(timediff2)
times3.append(timediff3)
times4.append(timediff4)
lasteventtime = t
firstLine = False
indexnick=0
for a in row[3:]:
if len(attributesvalues)<=indexnick:
attributesvalues.append([])
a=a.strip('"')
            # todo: cast to an integer if it is an integer
if a!="":
try:
attr=float(a)
attributesvalues[indexnick].append(attr)
#print("float attr")
#print(a)
except:
if a not in attributes_dict[indexnick]:
attributes_dict[indexnick][a]=attributes_sizes[indexnick]+1
attributes_sizes[indexnick]=attributes_sizes[indexnick]+1
attributesvalues[indexnick].append(attributes_dict[indexnick][a])
else:
attributesvalues[indexnick].append(-1)
# if a in attributes_dict[indexnick]:
# attributesvalues.append(attributes_dict[indexnick][a])
# else:
# attributes_dict[indexnick][a]=attributes_sizes[indexnick]
# attributes_sizes[indexnick]+=1
# attributesvalues.append(attributes_dict[indexnick][a])
indexnick+=1
# add last case
lines.append(line)
timeseqs.append(times)
timeseqs2.append(times2)
timeseqs3.append(times3)
timeseqs4.append(times4)
y_times.extend([times2[-1] - k for k in times2])
for i in xrange(len(attributes)):
attributes[i].append(attributesvalues[i])
numlines+=1
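# Normalisation constants, inferred from how encode() uses them further down:
# divisor = mean time between consecutive events, divisor2 = mean time since case start.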
divisor = np.mean([item for sublist in timeseqs for item in sublist])
print('divisor: {}'.format(divisor))
divisor2 = np.mean([item for sublist in timeseqs2 for item in sublist])
print('divisor2: {}'.format(divisor2))
step = 1
sentences = []
softness = 0
next_chars = []
lines = map(lambda x: x + '!', lines)
maxlen = max(map(lambda x: len(x), lines))
chars = map(lambda x: set(x), lines)
chars = list(set().union(*chars))
chars.sort()
target_chars = copy.copy(chars)
chars.remove('!')
lines = map(lambda x: x[:-2], lines)
print('total chars: {}, target chars: {}'.format(len(chars), len(target_chars)))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
target_char_indices = dict((c, i) for i, c in enumerate(target_chars))
target_indices_char = dict((i, c) for i, c in enumerate(target_chars))
#print(indices_char)
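# Split the traces into three folds; fold 3 is the one held out and evaluated below
# (folds 1 and 2 are assumed to be the ones train.py used for fitting the model).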
elems_per_fold = int(round(numlines / 3))
fold1 = lines[:elems_per_fold]
fold1_t = timeseqs[:elems_per_fold]
fold1_t2 = timeseqs2[:elems_per_fold]
fold1_t3 = timeseqs3[:elems_per_fold]
fold1_t4 = timeseqs4[:elems_per_fold]
with open('output_files/folds/' + eventlog + 'fold1.csv', 'wb') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row, timeseq in izip(fold1, fold1_t):
spamwriter.writerow([unicode(s).encode("utf-8") + '#{}'.format(t) for s, t in izip(row, timeseq)])
fold2 = lines[elems_per_fold:2 * elems_per_fold]
fold2_t = timeseqs[elems_per_fold:2 * elems_per_fold]
fold2_t2 = timeseqs2[elems_per_fold:2 * elems_per_fold]
fold2_t3 = timeseqs3[elems_per_fold:2 * elems_per_fold]
fold2_t4 = timeseqs4[elems_per_fold:2 * elems_per_fold]
with open('output_files/folds/' + eventlog + 'fold2.csv', 'wb') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row, timeseq in izip(fold2, fold2_t):
spamwriter.writerow([unicode(s).encode("utf-8") + '#{}'.format(t) for s, t in izip(row, timeseq)])
fold3 = lines[2 * elems_per_fold:]
fold3_t = timeseqs[2 * elems_per_fold:]
fold3_t2 = timeseqs2[2 * elems_per_fold:]
fold3_t3 = timeseqs3[2 * elems_per_fold:]
fold3_t4 = timeseqs4[2 * elems_per_fold:]
fold3_a=[a[2*elems_per_fold:] for a in attributes]
with open('output_files/folds/' + eventlog + 'fold3.csv', 'wb') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row, timeseq in izip(fold3, fold3_t):
spamwriter.writerow([unicode(s).encode("utf-8") + '#{}'.format(t) for s, t in izip(row, timeseq)])
y_t_seq=[]
for line in fold1+fold2:
for i in range(0, len(line), 1):
if i == 0:
continue
y_t_seq.append(y_times[0:i])
divisory = np.mean([item for sublist in y_t_seq for item in sublist])
print('divisory: {}'.format(divisory))
lines = fold3
lines_t = fold3_t
lines_t2 = fold3_t2
lines_t3 = fold3_t3
lines_t4 = fold3_t4
attributes=fold3_a
# set parameters
predict_size = maxlen
# load json and create model
json_file = open('output_files/models/'+fileprefix+'_model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
# load weights into new model
model.load_weights("output_files/models/"+fileprefix+"_weights_best.h5")
print("Loaded model from disk")
y_t_seq=[]
# load model, set this to the model generated by train.py
#model = load_model('output_files/models/200_model_59-1.50.h5')
# define helper functions
def encode(ex, sentence, times,times2, times3,times4, sentences_attributes,maxlen=maxlen):
#num_features = len(chars)+5+len(sentences_attributes)
num_features = len(chars) + 5
for idx in xrange(len(attributes)):
num_features += attributes_sizes[idx] + 1
#print(num_features)
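    # Feature vector layout per timestep, inferred from the assignments below:
    #   [0, len(chars))   one-hot encoding of the activity symbol
    #   next 5 slots      position in trace, time since last event / divisor, time since case start / divisor2,
    #                     time since midnight / 86400, weekday / 7
    #   remaining slots   one-hot (categorical) or raw numeric blocks for the extra case attributes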
X = np.zeros((1, maxlen, num_features), dtype=np.float32)
leftpad = maxlen-len(sentence)
times2 = np.cumsum(times)
#print "sentence",len(sentence)
for t, char in enumerate(sentence):
#print(t)
#midnight = times3[t].replace(hour=0, minute=0, second=0, microsecond=0)
#timesincemidnight = times3[t]-midnight
multiset_abstraction = Counter(sentence[:t+1])
for c in chars:
if c==char:
X[0, t+leftpad, char_indices[c]] = 1
X[0, t+leftpad, len(chars)] = t+1
X[0, t+leftpad, len(chars)+1] = times[t]/divisor
X[0, t+leftpad, len(chars)+2] = times2[t]/divisor2
X[0, t+leftpad, len(chars)+3] = times3[t]/86400
X[0, t+leftpad, len(chars)+4] = times4[t]/7
# for i in xrange(len(sentences_attributes)):
# #print(str(i)+" "+str(t))
# #print(sentences_attributes[i][t])
# #nick check the zero, it is there because it was a list
# X[0, t + leftpad, len(chars) + 5 + i] = sentences_attributes[i][t]
startoh = 0
for j in xrange(len(attributes)):
# X[i, t + leftpad, len(chars) + 5+j]=sentences_attributes[j][i][t]
if attributes_sizes[j] > 0:
X[0, t + leftpad, len(chars) + 5 + startoh + sentences_attributes[j][t]] = 1
else:
X[0, t + leftpad, len(chars) + 5 + startoh] = sentences_attributes[j][t]
startoh += (attributes_sizes[j] + 1)
return X
# # define helper functions
# def encode(sentence, times, times3, sentences_attributes,maxlen=maxlen):
# num_features = len(chars)+5+len(sentences_attributes)
# X = np.zeros((1, maxlen, num_features), dtype=np.float32)
# leftpad = maxlen-len(sentence)
# times2 = np.cumsum(times)
# print "sentence",len(sentence)
# for t, char in enumerate(sentence):
# midnight = times3[t].replace(hour=0, minute=0, second=0, microsecond=0)
# timesincemidnight = times3[t]-midnight
# multiset_abstraction = Counter(sentence[:t+1])
# for c in chars:
# if c==char:
# X[0, t+leftpad, char_indices[c]] = 1
# X[0, t+leftpad, len(chars)] = t+1
# X[0, t+leftpad, len(chars)+1] = times[t]/divisor
# X[0, t+leftpad, len(chars)+2] = times2[t]/divisor2
# X[0, t+leftpad, len(chars)+3] = timesincemidnight.seconds/86400
# X[0, t+leftpad, len(chars)+4] = times3[t].weekday()/7
# for i in xrange(len(sentences_attributes)):
# print(str(i)+" "+str(t))
# print(sentences_attributes[i][t])
# #nick check the zero, it is there because it was a list
# X[0, t + leftpad, len(chars) + 5+i]=sentences_attributes[i][t]
# return X,y
def getSymbol(predictions):
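    # return the activity symbol with the highest predicted probability (simple argmax)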
maxPrediction = 0
symbol = ''
i = 0;
for prediction in predictions:
if(prediction>=maxPrediction):
maxPrediction = prediction
symbol = target_indices_char[i]
i += 1
return symbol
one_ahead_gt = []
one_ahead_pred = []
two_ahead_gt = []
two_ahead_pred = []
three_ahead_gt = []
three_ahead_pred = []
y_t_seq=[]
# make predictions
with open('output_files/results/'+fileprefix+'_suffix_and_remaining_time_%s' % eventlog, 'wb') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow(["Prefix length", "Groud truth", "Ground truth times", "Predicted times", "RMSE", "MAE", "Median AE"])
#considering also size 1 prefixes
#for prefix_size in range(1,maxlen):
#print(prefix_size)
#print(len(lines),len(attributes[0]))
for ex, (line, times, times2, times3, times4) in enumerate(izip(lines, lines_t, lines_t2, lines_t3, lines_t3)):
        for prefix_size in range(1, len(line)):  # -1 added because I don't want to have 0 in the ground truth
#print(line,ex,len(line), len(attributes[0][ex]))
times.append(0)
cropped_line = ''.join(line[:prefix_size])
cropped_times = times[:prefix_size]
#print "times_len",len(cropped_times)
cropped_times2 = times2[:prefix_size]
cropped_times4 = times4[:prefix_size]
cropped_times3 = times3[:prefix_size]
cropped_attributes = [[] for i in xrange(len(attributes))]
for j in xrange(len(attributes)):
#print(attributes[j][ex])
cropped_attributes[j].extend(attributes[j][ex][0:prefix_size])
#print cropped_attributes
#y_t_seq.append(y_times[0:prefix_size])
#cropped_attributes= [a[:prefix_size] for a in attributes]
#print cropped_attribute
ground_truth = ''.join(line[prefix_size:prefix_size+predict_size])
            ground_truth_t = times2[prefix_size-1]  # was -1 before
#print(prefix_size,len(times2)-1)
case_end_time = times2[len(times2)-1]
ground_truth_t = case_end_time-ground_truth_t
predicted = ''
total_predicted_time = 0
#perform single prediction
enc = encode(ex,cropped_line, cropped_times,cropped_times2, cropped_times3,cropped_times4, cropped_attributes)
y = model.predict(enc, verbose=0) # make predictions
            # split predictions into separate activity and time predictions
#print y
y_t = y[0][0]
#prediction = getSymbol(y_char) # undo one-hot encoding
#cropped_line += prediction
if y_t<0:
y_t=0
cropped_times.append(y_t)
y_t = y_t * divisor
#cropped_times3.append(cropped_times3[-1] + timedelta(seconds=y_t))
total_predicted_time = total_predicted_time + y_t
output = []
if len(ground_truth)>0:
output.append(prefix_size)
output.append(unicode(ground_truth).encode("utf-8"))
output.append(ground_truth_t)
output.append(total_predicted_time)
output.append(metrics.mean_squared_error([ground_truth_t], [total_predicted_time]))
output.append(metrics.mean_absolute_error([ground_truth_t], [total_predicted_time]))
output.append(metrics.median_absolute_error([ground_truth_t], [total_predicted_time]))
spamwriter.writerow(output)
| gpl-3.0 | -5,484,811,627,186,544,000 | 37.192893 | 126 | 0.624003 | false |
shoyer/xarray | xarray/tests/test_variable.py | 1 | 87655 | import warnings
from copy import copy, deepcopy
from datetime import datetime, timedelta
from textwrap import dedent
import numpy as np
import pandas as pd
import pytest
import pytz
from xarray import Coordinate, Dataset, IndexVariable, Variable, set_options
from xarray.core import dtypes, duck_array_ops, indexing
from xarray.core.common import full_like, ones_like, zeros_like
from xarray.core.indexing import (
BasicIndexer,
CopyOnWriteArray,
DaskIndexingAdapter,
LazilyOuterIndexedArray,
MemoryCachedArray,
NumpyIndexingAdapter,
OuterIndexer,
PandasIndexAdapter,
VectorizedIndexer,
)
from xarray.core.pycompat import dask_array_type
from xarray.core.utils import NDArrayMixin
from xarray.core.variable import as_compatible_data, as_variable
from xarray.tests import requires_bottleneck
from . import (
assert_allclose,
assert_array_equal,
assert_equal,
assert_identical,
raises_regex,
requires_dask,
requires_sparse,
source_ndarray,
)
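# Pairs of (xarray-style pad arguments, equivalent np.pad pad_width tuple) used by the pad tests below.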
_PAD_XR_NP_ARGS = [
[{"x": (2, 1)}, ((2, 1), (0, 0), (0, 0))],
[{"x": 1}, ((1, 1), (0, 0), (0, 0))],
[{"y": (0, 3)}, ((0, 0), (0, 3), (0, 0))],
[{"x": (3, 1), "z": (2, 0)}, ((3, 1), (0, 0), (2, 0))],
[{"x": (3, 1), "z": 2}, ((3, 1), (0, 0), (2, 2))],
]
class VariableSubclassobjects:
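    # Shared test mixin: concrete test classes bind `cls` to the Variable flavour under test
    # (e.g. TestVariable below uses the plain Variable class).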
def test_properties(self):
data = 0.5 * np.arange(10)
v = self.cls(["time"], data, {"foo": "bar"})
assert v.dims == ("time",)
assert_array_equal(v.values, data)
assert v.dtype == float
assert v.shape == (10,)
assert v.size == 10
assert v.sizes == {"time": 10}
assert v.nbytes == 80
assert v.ndim == 1
assert len(v) == 10
assert v.attrs == {"foo": "bar"}
def test_attrs(self):
v = self.cls(["time"], 0.5 * np.arange(10))
assert v.attrs == {}
attrs = {"foo": "bar"}
v.attrs = attrs
assert v.attrs == attrs
assert isinstance(v.attrs, dict)
v.attrs["foo"] = "baz"
assert v.attrs["foo"] == "baz"
def test_getitem_dict(self):
v = self.cls(["x"], np.random.randn(5))
actual = v[{"x": 0}]
expected = v[0]
assert_identical(expected, actual)
def test_getitem_1d(self):
data = np.array([0, 1, 2])
v = self.cls(["x"], data)
v_new = v[dict(x=[0, 1])]
assert v_new.dims == ("x",)
assert_array_equal(v_new, data[[0, 1]])
v_new = v[dict(x=slice(None))]
assert v_new.dims == ("x",)
assert_array_equal(v_new, data)
v_new = v[dict(x=Variable("a", [0, 1]))]
assert v_new.dims == ("a",)
assert_array_equal(v_new, data[[0, 1]])
v_new = v[dict(x=1)]
assert v_new.dims == ()
assert_array_equal(v_new, data[1])
# tuple argument
v_new = v[slice(None)]
assert v_new.dims == ("x",)
assert_array_equal(v_new, data)
def test_getitem_1d_fancy(self):
v = self.cls(["x"], [0, 1, 2])
# 1d-variable should be indexable by multi-dimensional Variable
ind = Variable(("a", "b"), [[0, 1], [0, 1]])
v_new = v[ind]
assert v_new.dims == ("a", "b")
expected = np.array(v._data)[([0, 1], [0, 1]), ...]
assert_array_equal(v_new, expected)
# boolean indexing
ind = Variable(("x",), [True, False, True])
v_new = v[ind]
assert_identical(v[[0, 2]], v_new)
v_new = v[[True, False, True]]
assert_identical(v[[0, 2]], v_new)
with raises_regex(IndexError, "Boolean indexer should"):
ind = Variable(("a",), [True, False, True])
v[ind]
def test_getitem_with_mask(self):
v = self.cls(["x"], [0, 1, 2])
assert_identical(v._getitem_with_mask(-1), Variable((), np.nan))
assert_identical(
v._getitem_with_mask([0, -1, 1]), self.cls(["x"], [0, np.nan, 1])
)
assert_identical(v._getitem_with_mask(slice(2)), self.cls(["x"], [0, 1]))
assert_identical(
v._getitem_with_mask([0, -1, 1], fill_value=-99),
self.cls(["x"], [0, -99, 1]),
)
def test_getitem_with_mask_size_zero(self):
v = self.cls(["x"], [])
assert_identical(v._getitem_with_mask(-1), Variable((), np.nan))
assert_identical(
v._getitem_with_mask([-1, -1, -1]),
self.cls(["x"], [np.nan, np.nan, np.nan]),
)
def test_getitem_with_mask_nd_indexer(self):
v = self.cls(["x"], [0, 1, 2])
indexer = Variable(("x", "y"), [[0, -1], [-1, 2]])
assert_identical(v._getitem_with_mask(indexer, fill_value=-1), indexer)
def _assertIndexedLikeNDArray(self, variable, expected_value0, expected_dtype=None):
"""Given a 1-dimensional variable, verify that the variable is indexed
like a numpy.ndarray.
"""
assert variable[0].shape == ()
assert variable[0].ndim == 0
assert variable[0].size == 1
# test identity
assert variable.equals(variable.copy())
assert variable.identical(variable.copy())
# check value is equal for both ndarray and Variable
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "In the future, 'NAT == x'")
np.testing.assert_equal(variable.values[0], expected_value0)
np.testing.assert_equal(variable[0].values, expected_value0)
# check type or dtype is consistent for both ndarray and Variable
if expected_dtype is None:
# check output type instead of array dtype
assert type(variable.values[0]) == type(expected_value0)
assert type(variable[0].values) == type(expected_value0)
elif expected_dtype is not False:
assert variable.values[0].dtype == expected_dtype
assert variable[0].values.dtype == expected_dtype
def test_index_0d_int(self):
for value, dtype in [(0, np.int_), (np.int32(0), np.int32)]:
x = self.cls(["x"], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_float(self):
for value, dtype in [(0.5, np.float_), (np.float32(0.5), np.float32)]:
x = self.cls(["x"], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_string(self):
value = "foo"
dtype = np.dtype("U3")
x = self.cls(["x"], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_datetime(self):
d = datetime(2000, 1, 1)
x = self.cls(["x"], [d])
self._assertIndexedLikeNDArray(x, np.datetime64(d))
x = self.cls(["x"], [np.datetime64(d)])
self._assertIndexedLikeNDArray(x, np.datetime64(d), "datetime64[ns]")
x = self.cls(["x"], pd.DatetimeIndex([d]))
self._assertIndexedLikeNDArray(x, np.datetime64(d), "datetime64[ns]")
def test_index_0d_timedelta64(self):
td = timedelta(hours=1)
x = self.cls(["x"], [np.timedelta64(td)])
self._assertIndexedLikeNDArray(x, np.timedelta64(td), "timedelta64[ns]")
x = self.cls(["x"], pd.to_timedelta([td]))
self._assertIndexedLikeNDArray(x, np.timedelta64(td), "timedelta64[ns]")
def test_index_0d_not_a_time(self):
d = np.datetime64("NaT", "ns")
x = self.cls(["x"], [d])
self._assertIndexedLikeNDArray(x, d)
def test_index_0d_object(self):
class HashableItemWrapper:
def __init__(self, item):
self.item = item
def __eq__(self, other):
return self.item == other.item
def __hash__(self):
return hash(self.item)
def __repr__(self):
return "{}(item={!r})".format(type(self).__name__, self.item)
item = HashableItemWrapper((1, 2, 3))
x = self.cls("x", [item])
self._assertIndexedLikeNDArray(x, item, expected_dtype=False)
def test_0d_object_array_with_list(self):
listarray = np.empty((1,), dtype=object)
listarray[0] = [1, 2, 3]
x = self.cls("x", listarray)
assert_array_equal(x.data, listarray)
assert_array_equal(x[0].data, listarray.squeeze())
assert_array_equal(x.squeeze().data, listarray.squeeze())
def test_index_and_concat_datetime(self):
# regression test for #125
date_range = pd.date_range("2011-09-01", periods=10)
for dates in [date_range, date_range.values, date_range.to_pydatetime()]:
expected = self.cls("t", dates)
for times in [
[expected[i] for i in range(10)],
[expected[i : (i + 1)] for i in range(10)],
[expected[[i]] for i in range(10)],
]:
actual = Variable.concat(times, "t")
assert expected.dtype == actual.dtype
assert_array_equal(expected, actual)
def test_0d_time_data(self):
# regression test for #105
x = self.cls("time", pd.date_range("2000-01-01", periods=5))
expected = np.datetime64("2000-01-01", "ns")
assert x[0].values == expected
def test_datetime64_conversion(self):
times = pd.date_range("2000-01-01", periods=3)
for values, preserve_source in [
(times, True),
(times.values, True),
(times.values.astype("datetime64[s]"), False),
(times.to_pydatetime(), False),
]:
v = self.cls(["t"], values)
assert v.dtype == np.dtype("datetime64[ns]")
assert_array_equal(v.values, times.values)
assert v.values.dtype == np.dtype("datetime64[ns]")
same_source = source_ndarray(v.values) is source_ndarray(values)
assert preserve_source == same_source
def test_timedelta64_conversion(self):
times = pd.timedelta_range(start=0, periods=3)
for values, preserve_source in [
(times, True),
(times.values, True),
(times.values.astype("timedelta64[s]"), False),
(times.to_pytimedelta(), False),
]:
v = self.cls(["t"], values)
assert v.dtype == np.dtype("timedelta64[ns]")
assert_array_equal(v.values, times.values)
assert v.values.dtype == np.dtype("timedelta64[ns]")
same_source = source_ndarray(v.values) is source_ndarray(values)
assert preserve_source == same_source
def test_object_conversion(self):
data = np.arange(5).astype(str).astype(object)
actual = self.cls("x", data)
assert actual.dtype == data.dtype
def test_pandas_data(self):
v = self.cls(["x"], pd.Series([0, 1, 2], index=[3, 2, 1]))
assert_identical(v, v[[0, 1, 2]])
v = self.cls(["x"], pd.Index([0, 1, 2]))
assert v[0].values == v.values[0]
def test_pandas_period_index(self):
v = self.cls(["x"], pd.period_range(start="2000", periods=20, freq="B"))
v = v.load() # for dask-based Variable
assert v[0] == pd.Period("2000", freq="B")
assert "Period('2000-01-03', 'B')" in repr(v)
def test_1d_math(self):
x = 1.0 * np.arange(5)
y = np.ones(5)
# should we need `.to_base_variable()`?
# probably a break that `+v` changes type?
v = self.cls(["x"], x)
base_v = v.to_base_variable()
# unary ops
assert_identical(base_v, +v)
assert_identical(base_v, abs(v))
assert_array_equal((-v).values, -x)
# binary ops with numbers
assert_identical(base_v, v + 0)
assert_identical(base_v, 0 + v)
assert_identical(base_v, v * 1)
# binary ops with numpy arrays
assert_array_equal((v * x).values, x ** 2)
assert_array_equal((x * v).values, x ** 2)
assert_array_equal(v - y, v - 1)
assert_array_equal(y - v, 1 - v)
# verify attributes are dropped
v2 = self.cls(["x"], x, {"units": "meters"})
assert_identical(base_v, +v2)
# binary ops with all variables
assert_array_equal(v + v, 2 * v)
w = self.cls(["x"], y, {"foo": "bar"})
assert_identical(v + w, self.cls(["x"], x + y).to_base_variable())
assert_array_equal((v * w).values, x * y)
# something complicated
assert_array_equal((v ** 2 * w - 1 + x).values, x ** 2 * y - 1 + x)
# make sure dtype is preserved (for Index objects)
assert float == (+v).dtype
assert float == (+v).values.dtype
assert float == (0 + v).dtype
assert float == (0 + v).values.dtype
# check types of returned data
assert isinstance(+v, Variable)
assert not isinstance(+v, IndexVariable)
assert isinstance(0 + v, Variable)
assert not isinstance(0 + v, IndexVariable)
def test_1d_reduce(self):
x = np.arange(5)
v = self.cls(["x"], x)
actual = v.sum()
expected = Variable((), 10)
assert_identical(expected, actual)
assert type(actual) is Variable
def test_array_interface(self):
x = np.arange(5)
v = self.cls(["x"], x)
assert_array_equal(np.asarray(v), x)
# test patched in methods
assert_array_equal(v.astype(float), x.astype(float))
# think this is a break, that argsort changes the type
assert_identical(v.argsort(), v.to_base_variable())
assert_identical(v.clip(2, 3), self.cls("x", x.clip(2, 3)).to_base_variable())
# test ufuncs
assert_identical(np.sin(v), self.cls(["x"], np.sin(x)).to_base_variable())
assert isinstance(np.sin(v), Variable)
assert not isinstance(np.sin(v), IndexVariable)
def example_1d_objects(self):
for data in [
range(3),
0.5 * np.arange(3),
0.5 * np.arange(3, dtype=np.float32),
pd.date_range("2000-01-01", periods=3),
np.array(["a", "b", "c"], dtype=object),
]:
yield (self.cls("x", data), data)
def test___array__(self):
for v, data in self.example_1d_objects():
assert_array_equal(v.values, np.asarray(data))
assert_array_equal(np.asarray(v), np.asarray(data))
assert v[0].values == np.asarray(data)[0]
assert np.asarray(v[0]) == np.asarray(data)[0]
def test_equals_all_dtypes(self):
for v, _ in self.example_1d_objects():
v2 = v.copy()
assert v.equals(v2)
assert v.identical(v2)
assert v.no_conflicts(v2)
assert v[0].equals(v2[0])
assert v[0].identical(v2[0])
assert v[0].no_conflicts(v2[0])
assert v[:2].equals(v2[:2])
assert v[:2].identical(v2[:2])
assert v[:2].no_conflicts(v2[:2])
def test_eq_all_dtypes(self):
# ensure that we don't choke on comparisons for which numpy returns
# scalars
expected = Variable("x", 3 * [False])
for v, _ in self.example_1d_objects():
actual = "z" == v
assert_identical(expected, actual)
actual = ~("z" != v)
assert_identical(expected, actual)
def test_encoding_preserved(self):
expected = self.cls("x", range(3), {"foo": 1}, {"bar": 2})
for actual in [
expected.T,
expected[...],
expected.squeeze(),
expected.isel(x=slice(None)),
expected.set_dims({"x": 3}),
expected.copy(deep=True),
expected.copy(deep=False),
]:
assert_identical(expected.to_base_variable(), actual.to_base_variable())
assert expected.encoding == actual.encoding
def test_concat(self):
x = np.arange(5)
y = np.arange(5, 10)
v = self.cls(["a"], x)
w = self.cls(["a"], y)
assert_identical(
Variable(["b", "a"], np.array([x, y])), Variable.concat([v, w], "b")
)
assert_identical(
Variable(["b", "a"], np.array([x, y])), Variable.concat((v, w), "b")
)
assert_identical(
Variable(["b", "a"], np.array([x, y])), Variable.concat((v, w), "b")
)
with raises_regex(ValueError, "Variable has dimensions"):
Variable.concat([v, Variable(["c"], y)], "b")
# test indexers
actual = Variable.concat(
[v, w], positions=[np.arange(0, 10, 2), np.arange(1, 10, 2)], dim="a"
)
expected = Variable("a", np.array([x, y]).ravel(order="F"))
assert_identical(expected, actual)
# test concatenating along a dimension
v = Variable(["time", "x"], np.random.random((10, 8)))
assert_identical(v, Variable.concat([v[:5], v[5:]], "time"))
assert_identical(v, Variable.concat([v[:5], v[5:6], v[6:]], "time"))
assert_identical(v, Variable.concat([v[:1], v[1:]], "time"))
# test dimension order
assert_identical(v, Variable.concat([v[:, :5], v[:, 5:]], "x"))
with raises_regex(ValueError, "all input arrays must have"):
Variable.concat([v[:, 0], v[:, 1:]], "x")
def test_concat_attrs(self):
# always keep attrs from first variable
v = self.cls("a", np.arange(5), {"foo": "bar"})
w = self.cls("a", np.ones(5))
expected = self.cls(
"a", np.concatenate([np.arange(5), np.ones(5)])
).to_base_variable()
expected.attrs["foo"] = "bar"
assert_identical(expected, Variable.concat([v, w], "a"))
def test_concat_fixed_len_str(self):
# regression test for #217
for kind in ["S", "U"]:
x = self.cls("animal", np.array(["horse"], dtype=kind))
y = self.cls("animal", np.array(["aardvark"], dtype=kind))
actual = Variable.concat([x, y], "animal")
expected = Variable("animal", np.array(["horse", "aardvark"], dtype=kind))
assert_equal(expected, actual)
def test_concat_number_strings(self):
# regression test for #305
a = self.cls("x", ["0", "1", "2"])
b = self.cls("x", ["3", "4"])
actual = Variable.concat([a, b], dim="x")
expected = Variable("x", np.arange(5).astype(str))
assert_identical(expected, actual)
assert actual.dtype.kind == expected.dtype.kind
def test_concat_mixed_dtypes(self):
a = self.cls("x", [0, 1])
b = self.cls("x", ["two"])
actual = Variable.concat([a, b], dim="x")
expected = Variable("x", np.array([0, 1, "two"], dtype=object))
assert_identical(expected, actual)
assert actual.dtype == object
@pytest.mark.parametrize("deep", [True, False])
@pytest.mark.parametrize("astype", [float, int, str])
def test_copy(self, deep, astype):
v = self.cls("x", (0.5 * np.arange(10)).astype(astype), {"foo": "bar"})
w = v.copy(deep=deep)
assert type(v) is type(w)
assert_identical(v, w)
assert v.dtype == w.dtype
if self.cls is Variable:
if deep:
assert source_ndarray(v.values) is not source_ndarray(w.values)
else:
assert source_ndarray(v.values) is source_ndarray(w.values)
assert_identical(v, copy(v))
def test_copy_index(self):
midx = pd.MultiIndex.from_product(
[["a", "b"], [1, 2], [-1, -2]], names=("one", "two", "three")
)
v = self.cls("x", midx)
for deep in [True, False]:
w = v.copy(deep=deep)
assert isinstance(w._data, PandasIndexAdapter)
assert isinstance(w.to_index(), pd.MultiIndex)
assert_array_equal(v._data.array, w._data.array)
def test_copy_with_data(self):
orig = Variable(("x", "y"), [[1.5, 2.0], [3.1, 4.3]], {"foo": "bar"})
new_data = np.array([[2.5, 5.0], [7.1, 43]])
actual = orig.copy(data=new_data)
expected = orig.copy()
expected.data = new_data
assert_identical(expected, actual)
def test_copy_with_data_errors(self):
orig = Variable(("x", "y"), [[1.5, 2.0], [3.1, 4.3]], {"foo": "bar"})
new_data = [2.5, 5.0]
with raises_regex(ValueError, "must match shape of object"):
orig.copy(data=new_data)
def test_copy_index_with_data(self):
orig = IndexVariable("x", np.arange(5))
new_data = np.arange(5, 10)
actual = orig.copy(data=new_data)
expected = IndexVariable("x", np.arange(5, 10))
assert_identical(expected, actual)
def test_copy_index_with_data_errors(self):
orig = IndexVariable("x", np.arange(5))
new_data = np.arange(5, 20)
with raises_regex(ValueError, "must match shape of object"):
orig.copy(data=new_data)
with raises_regex(ValueError, "Cannot assign to the .data"):
orig.data = new_data
with raises_regex(ValueError, "Cannot assign to the .values"):
orig.values = new_data
def test_replace(self):
var = Variable(("x", "y"), [[1.5, 2.0], [3.1, 4.3]], {"foo": "bar"})
result = var._replace()
assert_identical(result, var)
new_data = np.arange(4).reshape(2, 2)
result = var._replace(data=new_data)
assert_array_equal(result.data, new_data)
def test_real_and_imag(self):
v = self.cls("x", np.arange(3) - 1j * np.arange(3), {"foo": "bar"})
expected_re = self.cls("x", np.arange(3), {"foo": "bar"})
assert_identical(v.real, expected_re)
expected_im = self.cls("x", -np.arange(3), {"foo": "bar"})
assert_identical(v.imag, expected_im)
expected_abs = self.cls("x", np.sqrt(2 * np.arange(3) ** 2)).to_base_variable()
assert_allclose(abs(v), expected_abs)
def test_aggregate_complex(self):
# should skip NaNs
v = self.cls("x", [1, 2j, np.nan])
expected = Variable((), 0.5 + 1j)
assert_allclose(v.mean(), expected)
    def test_pandas_categorical_dtype(self):
data = pd.Categorical(np.arange(10, dtype="int64"))
v = self.cls("x", data)
print(v) # should not error
assert v.dtype == "int64"
def test_pandas_datetime64_with_tz(self):
data = pd.date_range(
start="2000-01-01",
tz=pytz.timezone("America/New_York"),
periods=10,
freq="1h",
)
v = self.cls("x", data)
print(v) # should not error
if "America/New_York" in str(data.dtype):
# pandas is new enough that it has datetime64 with timezone dtype
assert v.dtype == "object"
def test_multiindex(self):
idx = pd.MultiIndex.from_product([list("abc"), [0, 1]])
v = self.cls("x", idx)
assert_identical(Variable((), ("a", 0)), v[0])
assert_identical(v, v[:])
def test_load(self):
array = self.cls("x", np.arange(5))
orig_data = array._data
copied = array.copy(deep=True)
if array.chunks is None:
array.load()
assert type(array._data) is type(orig_data)
assert type(copied._data) is type(orig_data)
assert_identical(array, copied)
def test_getitem_advanced(self):
v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]])
v_data = v.compute().data
# orthogonal indexing
v_new = v[([0, 1], [1, 0])]
assert v_new.dims == ("x", "y")
assert_array_equal(v_new, v_data[[0, 1]][:, [1, 0]])
v_new = v[[0, 1]]
assert v_new.dims == ("x", "y")
assert_array_equal(v_new, v_data[[0, 1]])
# with mixed arguments
ind = Variable(["a"], [0, 1])
v_new = v[dict(x=[0, 1], y=ind)]
assert v_new.dims == ("x", "a")
assert_array_equal(v_new, v_data[[0, 1]][:, [0, 1]])
# boolean indexing
v_new = v[dict(x=[True, False], y=[False, True, False])]
assert v_new.dims == ("x", "y")
assert_array_equal(v_new, v_data[0][1])
# with scalar variable
ind = Variable((), 2)
v_new = v[dict(y=ind)]
expected = v[dict(y=2)]
assert_array_equal(v_new, expected)
# with boolean variable with wrong shape
ind = np.array([True, False])
with raises_regex(IndexError, "Boolean array size 2 is "):
v[Variable(("a", "b"), [[0, 1]]), ind]
# boolean indexing with different dimension
ind = Variable(["a"], [True, False, False])
with raises_regex(IndexError, "Boolean indexer should be"):
v[dict(y=ind)]
def test_getitem_uint_1d(self):
# regression test for #1405
v = self.cls(["x"], [0, 1, 2])
v_data = v.compute().data
v_new = v[np.array([0])]
assert_array_equal(v_new, v_data[0])
v_new = v[np.array([0], dtype="uint64")]
assert_array_equal(v_new, v_data[0])
def test_getitem_uint(self):
# regression test for #1405
v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]])
v_data = v.compute().data
v_new = v[np.array([0])]
assert_array_equal(v_new, v_data[[0], :])
v_new = v[np.array([0], dtype="uint64")]
assert_array_equal(v_new, v_data[[0], :])
v_new = v[np.uint64(0)]
assert_array_equal(v_new, v_data[0, :])
def test_getitem_0d_array(self):
# make sure 0d-np.array can be used as an indexer
v = self.cls(["x"], [0, 1, 2])
v_data = v.compute().data
v_new = v[np.array([0])[0]]
assert_array_equal(v_new, v_data[0])
v_new = v[np.array(0)]
assert_array_equal(v_new, v_data[0])
v_new = v[Variable((), np.array(0))]
assert_array_equal(v_new, v_data[0])
def test_getitem_fancy(self):
v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]])
v_data = v.compute().data
ind = Variable(["a", "b"], [[0, 1, 1], [1, 1, 0]])
v_new = v[ind]
assert v_new.dims == ("a", "b", "y")
assert_array_equal(v_new, v_data[[[0, 1, 1], [1, 1, 0]], :])
# It would be ok if indexed with the multi-dimensional array including
# the same name
ind = Variable(["x", "b"], [[0, 1, 1], [1, 1, 0]])
v_new = v[ind]
assert v_new.dims == ("x", "b", "y")
assert_array_equal(v_new, v_data[[[0, 1, 1], [1, 1, 0]], :])
ind = Variable(["a", "b"], [[0, 1, 2], [2, 1, 0]])
v_new = v[dict(y=ind)]
assert v_new.dims == ("x", "a", "b")
assert_array_equal(v_new, v_data[:, ([0, 1, 2], [2, 1, 0])])
ind = Variable(["a", "b"], [[0, 0], [1, 1]])
v_new = v[dict(x=[1, 0], y=ind)]
assert v_new.dims == ("x", "a", "b")
assert_array_equal(v_new, v_data[[1, 0]][:, ind])
# along diagonal
ind = Variable(["a"], [0, 1])
v_new = v[ind, ind]
assert v_new.dims == ("a",)
assert_array_equal(v_new, v_data[[0, 1], [0, 1]])
# with integer
ind = Variable(["a", "b"], [[0, 0], [1, 1]])
v_new = v[dict(x=0, y=ind)]
assert v_new.dims == ("a", "b")
assert_array_equal(v_new[0], v_data[0][[0, 0]])
assert_array_equal(v_new[1], v_data[0][[1, 1]])
# with slice
ind = Variable(["a", "b"], [[0, 0], [1, 1]])
v_new = v[dict(x=slice(None), y=ind)]
assert v_new.dims == ("x", "a", "b")
assert_array_equal(v_new, v_data[:, [[0, 0], [1, 1]]])
ind = Variable(["a", "b"], [[0, 0], [1, 1]])
v_new = v[dict(x=ind, y=slice(None))]
assert v_new.dims == ("a", "b", "y")
assert_array_equal(v_new, v_data[[[0, 0], [1, 1]], :])
ind = Variable(["a", "b"], [[0, 0], [1, 1]])
v_new = v[dict(x=ind, y=slice(None, 1))]
assert v_new.dims == ("a", "b", "y")
assert_array_equal(v_new, v_data[[[0, 0], [1, 1]], slice(None, 1)])
# slice matches explicit dimension
ind = Variable(["y"], [0, 1])
v_new = v[ind, :2]
assert v_new.dims == ("y",)
assert_array_equal(v_new, v_data[[0, 1], [0, 1]])
# with multiple slices
v = self.cls(["x", "y", "z"], [[[1, 2, 3], [4, 5, 6]]])
ind = Variable(["a", "b"], [[0]])
v_new = v[ind, :, :]
expected = Variable(["a", "b", "y", "z"], v.data[np.newaxis, ...])
assert_identical(v_new, expected)
v = Variable(["w", "x", "y", "z"], [[[[1, 2, 3], [4, 5, 6]]]])
ind = Variable(["y"], [0])
v_new = v[ind, :, 1:2, 2]
expected = Variable(["y", "x"], [[6]])
assert_identical(v_new, expected)
# slice and vector mixed indexing resulting in the same dimension
v = Variable(["x", "y", "z"], np.arange(60).reshape(3, 4, 5))
ind = Variable(["x"], [0, 1, 2])
v_new = v[:, ind]
expected = Variable(("x", "z"), np.zeros((3, 5)))
expected[0] = v.data[0, 0]
expected[1] = v.data[1, 1]
expected[2] = v.data[2, 2]
assert_identical(v_new, expected)
v_new = v[:, ind.data]
assert v_new.shape == (3, 3, 5)
def test_getitem_error(self):
v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]])
with raises_regex(IndexError, "labeled multi-"):
v[[[0, 1], [1, 2]]]
ind_x = Variable(["a"], [0, 1, 1])
ind_y = Variable(["a"], [0, 1])
with raises_regex(IndexError, "Dimensions of indexers "):
v[ind_x, ind_y]
ind = Variable(["a", "b"], [[True, False], [False, True]])
with raises_regex(IndexError, "2-dimensional boolean"):
v[dict(x=ind)]
v = Variable(["x", "y", "z"], np.arange(60).reshape(3, 4, 5))
ind = Variable(["x"], [0, 1])
with raises_regex(IndexError, "Dimensions of indexers mis"):
v[:, ind]
@pytest.mark.parametrize(
"mode",
[
"mean",
pytest.param(
"median",
marks=pytest.mark.xfail(reason="median is not implemented by Dask"),
),
pytest.param(
"reflect", marks=pytest.mark.xfail(reason="dask.array.pad bug")
),
"edge",
pytest.param(
"linear_ramp",
marks=pytest.mark.xfail(
reason="pint bug: https://github.com/hgrecco/pint/issues/1026"
),
),
"maximum",
"minimum",
"symmetric",
"wrap",
],
)
@pytest.mark.parametrize("xr_arg, np_arg", _PAD_XR_NP_ARGS)
def test_pad(self, mode, xr_arg, np_arg):
data = np.arange(4 * 3 * 2).reshape(4, 3, 2)
v = self.cls(["x", "y", "z"], data)
actual = v.pad(mode=mode, **xr_arg)
expected = np.pad(data, np_arg, mode=mode)
assert_array_equal(actual, expected)
assert isinstance(actual._data, type(v._data))
@pytest.mark.parametrize("xr_arg, np_arg", _PAD_XR_NP_ARGS)
def test_pad_constant_values(self, xr_arg, np_arg):
data = np.arange(4 * 3 * 2).reshape(4, 3, 2)
v = self.cls(["x", "y", "z"], data)
actual = v.pad(**xr_arg)
expected = np.pad(
np.array(v.data.astype(float)),
np_arg,
mode="constant",
constant_values=np.nan,
)
assert_array_equal(actual, expected)
assert isinstance(actual._data, type(v._data))
# for the boolean array, we pad False
data = np.full_like(data, False, dtype=bool).reshape(4, 3, 2)
v = self.cls(["x", "y", "z"], data)
actual = v.pad(mode="constant", constant_values=False, **xr_arg)
expected = np.pad(
np.array(v.data), np_arg, mode="constant", constant_values=False
)
assert_array_equal(actual, expected)
def test_rolling_window(self):
# Just a working test. See test_nputils for the algorithm validation
v = self.cls(["x", "y", "z"], np.arange(40 * 30 * 2).reshape(40, 30, 2))
for (d, w) in [("x", 3), ("y", 5)]:
v_rolling = v.rolling_window(d, w, d + "_window")
assert v_rolling.dims == ("x", "y", "z", d + "_window")
assert v_rolling.shape == v.shape + (w,)
v_rolling = v.rolling_window(d, w, d + "_window", center=True)
assert v_rolling.dims == ("x", "y", "z", d + "_window")
assert v_rolling.shape == v.shape + (w,)
# dask and numpy result should be the same
v_loaded = v.load().rolling_window(d, w, d + "_window", center=True)
assert_array_equal(v_rolling, v_loaded)
# numpy backend should not be over-written
if isinstance(v._data, np.ndarray):
with pytest.raises(ValueError):
v_loaded[0] = 1.0
class TestVariable(VariableSubclassobjects):
cls = staticmethod(Variable)
@pytest.fixture(autouse=True)
def setup(self):
self.d = np.random.random((10, 3)).astype(np.float64)
def test_data_and_values(self):
v = Variable(["time", "x"], self.d)
assert_array_equal(v.data, self.d)
assert_array_equal(v.values, self.d)
assert source_ndarray(v.values) is self.d
with pytest.raises(ValueError):
# wrong size
v.values = np.random.random(5)
d2 = np.random.random((10, 3))
v.values = d2
assert source_ndarray(v.values) is d2
d3 = np.random.random((10, 3))
v.data = d3
assert source_ndarray(v.data) is d3
def test_numpy_same_methods(self):
v = Variable([], np.float32(0.0))
assert v.item() == 0
assert type(v.item()) is float
v = IndexVariable("x", np.arange(5))
assert 2 == v.searchsorted(2)
def test_datetime64_conversion_scalar(self):
expected = np.datetime64("2000-01-01", "ns")
for values in [
np.datetime64("2000-01-01"),
pd.Timestamp("2000-01-01T00"),
datetime(2000, 1, 1),
]:
v = Variable([], values)
assert v.dtype == np.dtype("datetime64[ns]")
assert v.values == expected
assert v.values.dtype == np.dtype("datetime64[ns]")
def test_timedelta64_conversion_scalar(self):
expected = np.timedelta64(24 * 60 * 60 * 10 ** 9, "ns")
for values in [
np.timedelta64(1, "D"),
pd.Timedelta("1 day"),
timedelta(days=1),
]:
v = Variable([], values)
assert v.dtype == np.dtype("timedelta64[ns]")
assert v.values == expected
assert v.values.dtype == np.dtype("timedelta64[ns]")
def test_0d_str(self):
v = Variable([], "foo")
assert v.dtype == np.dtype("U3")
assert v.values == "foo"
v = Variable([], np.string_("foo"))
assert v.dtype == np.dtype("S3")
assert v.values == bytes("foo", "ascii")
def test_0d_datetime(self):
v = Variable([], pd.Timestamp("2000-01-01"))
assert v.dtype == np.dtype("datetime64[ns]")
assert v.values == np.datetime64("2000-01-01", "ns")
def test_0d_timedelta(self):
for td in [pd.to_timedelta("1s"), np.timedelta64(1, "s")]:
v = Variable([], td)
assert v.dtype == np.dtype("timedelta64[ns]")
assert v.values == np.timedelta64(10 ** 9, "ns")
def test_equals_and_identical(self):
d = np.random.rand(10, 3)
d[0, 0] = np.nan
v1 = Variable(("dim1", "dim2"), data=d, attrs={"att1": 3, "att2": [1, 2, 3]})
v2 = Variable(("dim1", "dim2"), data=d, attrs={"att1": 3, "att2": [1, 2, 3]})
assert v1.equals(v2)
assert v1.identical(v2)
v3 = Variable(("dim1", "dim3"), data=d)
assert not v1.equals(v3)
v4 = Variable(("dim1", "dim2"), data=d)
assert v1.equals(v4)
assert not v1.identical(v4)
v5 = deepcopy(v1)
v5.values[:] = np.random.rand(10, 3)
assert not v1.equals(v5)
assert not v1.equals(None)
assert not v1.equals(d)
assert not v1.identical(None)
assert not v1.identical(d)
def test_broadcast_equals(self):
v1 = Variable((), np.nan)
v2 = Variable(("x"), [np.nan, np.nan])
assert v1.broadcast_equals(v2)
assert not v1.equals(v2)
assert not v1.identical(v2)
v3 = Variable(("x"), [np.nan])
assert v1.broadcast_equals(v3)
assert not v1.equals(v3)
assert not v1.identical(v3)
assert not v1.broadcast_equals(None)
v4 = Variable(("x"), [np.nan] * 3)
assert not v2.broadcast_equals(v4)
def test_no_conflicts(self):
v1 = Variable(("x"), [1, 2, np.nan, np.nan])
v2 = Variable(("x"), [np.nan, 2, 3, np.nan])
assert v1.no_conflicts(v2)
assert not v1.equals(v2)
assert not v1.broadcast_equals(v2)
assert not v1.identical(v2)
assert not v1.no_conflicts(None)
v3 = Variable(("y"), [np.nan, 2, 3, np.nan])
assert not v3.no_conflicts(v1)
d = np.array([1, 2, np.nan, np.nan])
assert not v1.no_conflicts(d)
assert not v2.no_conflicts(d)
v4 = Variable(("w", "x"), [d])
assert v1.no_conflicts(v4)
def test_as_variable(self):
data = np.arange(10)
expected = Variable("x", data)
expected_extra = Variable(
"x", data, attrs={"myattr": "val"}, encoding={"scale_factor": 1}
)
assert_identical(expected, as_variable(expected))
ds = Dataset({"x": expected})
var = as_variable(ds["x"]).to_base_variable()
assert_identical(expected, var)
assert not isinstance(ds["x"], Variable)
assert isinstance(as_variable(ds["x"]), Variable)
xarray_tuple = (
expected_extra.dims,
expected_extra.values,
expected_extra.attrs,
expected_extra.encoding,
)
assert_identical(expected_extra, as_variable(xarray_tuple))
with raises_regex(TypeError, "tuple of form"):
as_variable(tuple(data))
with raises_regex(ValueError, "tuple of form"): # GH1016
as_variable(("five", "six", "seven"))
with raises_regex(TypeError, "without an explicit list of dimensions"):
as_variable(data)
actual = as_variable(data, name="x")
assert_identical(expected.to_index_variable(), actual)
actual = as_variable(0)
expected = Variable([], 0)
assert_identical(expected, actual)
data = np.arange(9).reshape((3, 3))
expected = Variable(("x", "y"), data)
with raises_regex(ValueError, "without explicit dimension names"):
as_variable(data, name="x")
with raises_regex(ValueError, "has more than 1-dimension"):
as_variable(expected, name="x")
# test datetime, timedelta conversion
dt = np.array([datetime(1999, 1, 1) + timedelta(days=x) for x in range(10)])
assert as_variable(dt, "time").dtype.kind == "M"
td = np.array([timedelta(days=x) for x in range(10)])
assert as_variable(td, "time").dtype.kind == "m"
def test_repr(self):
v = Variable(["time", "x"], [[1, 2, 3], [4, 5, 6]], {"foo": "bar"})
expected = dedent(
"""
<xarray.Variable (time: 2, x: 3)>
array([[1, 2, 3],
[4, 5, 6]])
Attributes:
foo: bar
"""
).strip()
assert expected == repr(v)
def test_repr_lazy_data(self):
v = Variable("x", LazilyOuterIndexedArray(np.arange(2e5)))
assert "200000 values with dtype" in repr(v)
assert isinstance(v._data, LazilyOuterIndexedArray)
def test_detect_indexer_type(self):
""" Tests indexer type was correctly detected. """
data = np.random.random((10, 11))
v = Variable(["x", "y"], data)
_, ind, _ = v._broadcast_indexes((0, 1))
assert type(ind) == indexing.BasicIndexer
_, ind, _ = v._broadcast_indexes((0, slice(0, 8, 2)))
assert type(ind) == indexing.BasicIndexer
_, ind, _ = v._broadcast_indexes((0, [0, 1]))
assert type(ind) == indexing.OuterIndexer
_, ind, _ = v._broadcast_indexes(([0, 1], 1))
assert type(ind) == indexing.OuterIndexer
_, ind, _ = v._broadcast_indexes(([0, 1], [1, 2]))
assert type(ind) == indexing.OuterIndexer
_, ind, _ = v._broadcast_indexes(([0, 1], slice(0, 8, 2)))
assert type(ind) == indexing.OuterIndexer
vind = Variable(("a",), [0, 1])
_, ind, _ = v._broadcast_indexes((vind, slice(0, 8, 2)))
assert type(ind) == indexing.OuterIndexer
vind = Variable(("y",), [0, 1])
_, ind, _ = v._broadcast_indexes((vind, 3))
assert type(ind) == indexing.OuterIndexer
vind = Variable(("a",), [0, 1])
_, ind, _ = v._broadcast_indexes((vind, vind))
assert type(ind) == indexing.VectorizedIndexer
vind = Variable(("a", "b"), [[0, 2], [1, 3]])
_, ind, _ = v._broadcast_indexes((vind, 3))
assert type(ind) == indexing.VectorizedIndexer
def test_indexer_type(self):
# GH:issue:1688. Wrong indexer type induces NotImplementedError
data = np.random.random((10, 11))
v = Variable(["x", "y"], data)
def assert_indexer_type(key, object_type):
dims, index_tuple, new_order = v._broadcast_indexes(key)
assert isinstance(index_tuple, object_type)
# should return BasicIndexer
assert_indexer_type((0, 1), BasicIndexer)
assert_indexer_type((0, slice(None, None)), BasicIndexer)
assert_indexer_type((Variable([], 3), slice(None, None)), BasicIndexer)
assert_indexer_type((Variable([], 3), (Variable([], 6))), BasicIndexer)
# should return OuterIndexer
assert_indexer_type(([0, 1], 1), OuterIndexer)
assert_indexer_type(([0, 1], [1, 2]), OuterIndexer)
assert_indexer_type((Variable(("x"), [0, 1]), 1), OuterIndexer)
assert_indexer_type((Variable(("x"), [0, 1]), slice(None, None)), OuterIndexer)
assert_indexer_type(
(Variable(("x"), [0, 1]), Variable(("y"), [0, 1])), OuterIndexer
)
# should return VectorizedIndexer
assert_indexer_type((Variable(("y"), [0, 1]), [0, 1]), VectorizedIndexer)
assert_indexer_type(
(Variable(("z"), [0, 1]), Variable(("z"), [0, 1])), VectorizedIndexer
)
assert_indexer_type(
(
Variable(("a", "b"), [[0, 1], [1, 2]]),
Variable(("a", "b"), [[0, 1], [1, 2]]),
),
VectorizedIndexer,
)
def test_items(self):
data = np.random.random((10, 11))
v = Variable(["x", "y"], data)
# test slicing
assert_identical(v, v[:])
assert_identical(v, v[...])
assert_identical(Variable(["y"], data[0]), v[0])
assert_identical(Variable(["x"], data[:, 0]), v[:, 0])
assert_identical(Variable(["x", "y"], data[:3, :2]), v[:3, :2])
# test array indexing
x = Variable(["x"], np.arange(10))
y = Variable(["y"], np.arange(11))
assert_identical(v, v[x.values])
assert_identical(v, v[x])
assert_identical(v[:3], v[x < 3])
assert_identical(v[:, 3:], v[:, y >= 3])
assert_identical(v[:3, 3:], v[x < 3, y >= 3])
assert_identical(v[:3, :2], v[x[:3], y[:2]])
assert_identical(v[:3, :2], v[range(3), range(2)])
# test iteration
for n, item in enumerate(v):
assert_identical(Variable(["y"], data[n]), item)
with raises_regex(TypeError, "iteration over a 0-d"):
iter(Variable([], 0))
# test setting
v.values[:] = 0
assert np.all(v.values == 0)
# test orthogonal setting
v[range(10), range(11)] = 1
assert_array_equal(v.values, np.ones((10, 11)))
def test_getitem_basic(self):
v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]])
# int argument
v_new = v[0]
assert v_new.dims == ("y",)
assert_array_equal(v_new, v._data[0])
# slice argument
v_new = v[:2]
assert v_new.dims == ("x", "y")
assert_array_equal(v_new, v._data[:2])
# list arguments
v_new = v[[0]]
assert v_new.dims == ("x", "y")
assert_array_equal(v_new, v._data[[0]])
v_new = v[[]]
assert v_new.dims == ("x", "y")
assert_array_equal(v_new, v._data[[]])
# dict arguments
v_new = v[dict(x=0)]
assert v_new.dims == ("y",)
assert_array_equal(v_new, v._data[0])
v_new = v[dict(x=0, y=slice(None))]
assert v_new.dims == ("y",)
assert_array_equal(v_new, v._data[0])
v_new = v[dict(x=0, y=1)]
assert v_new.dims == ()
assert_array_equal(v_new, v._data[0, 1])
v_new = v[dict(y=1)]
assert v_new.dims == ("x",)
assert_array_equal(v_new, v._data[:, 1])
# tuple argument
v_new = v[(slice(None), 1)]
assert v_new.dims == ("x",)
assert_array_equal(v_new, v._data[:, 1])
# test that we obtain a modifiable view when taking a 0d slice
v_new = v[0, 0]
v_new[...] += 99
assert_array_equal(v_new, v._data[0, 0])
def test_getitem_with_mask_2d_input(self):
v = Variable(("x", "y"), [[0, 1, 2], [3, 4, 5]])
assert_identical(
v._getitem_with_mask(([-1, 0], [1, -1])),
Variable(("x", "y"), [[np.nan, np.nan], [1, np.nan]]),
)
assert_identical(v._getitem_with_mask((slice(2), [0, 1, 2])), v)
def test_isel(self):
v = Variable(["time", "x"], self.d)
assert_identical(v.isel(time=slice(None)), v)
assert_identical(v.isel(time=0), v[0])
assert_identical(v.isel(time=slice(0, 3)), v[:3])
assert_identical(v.isel(x=0), v[:, 0])
assert_identical(v.isel(x=[0, 2]), v[:, [0, 2]])
assert_identical(v.isel(time=[]), v[[]])
with raises_regex(
ValueError,
r"dimensions {'not_a_dim'} do not exist. Expected one or more of "
r"\('time', 'x'\)",
):
v.isel(not_a_dim=0)
with pytest.warns(
UserWarning,
match=r"dimensions {'not_a_dim'} do not exist. Expected one or more of "
r"\('time', 'x'\)",
):
v.isel(not_a_dim=0, missing_dims="warn")
assert_identical(v, v.isel(not_a_dim=0, missing_dims="ignore"))
def test_index_0d_numpy_string(self):
        # regression test to verify our workaround for indexing 0d strings
v = Variable([], np.string_("asdf"))
assert_identical(v[()], v)
v = Variable([], np.unicode_("asdf"))
assert_identical(v[()], v)
def test_indexing_0d_unicode(self):
# regression test for GH568
actual = Variable(("x"), ["tmax"])[0][()]
expected = Variable((), "tmax")
assert_identical(actual, expected)
@pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0])
def test_shift(self, fill_value):
v = Variable("x", [1, 2, 3, 4, 5])
assert_identical(v, v.shift(x=0))
assert v is not v.shift(x=0)
expected = Variable("x", [np.nan, np.nan, 1, 2, 3])
assert_identical(expected, v.shift(x=2))
if fill_value == dtypes.NA:
# if we supply the default, we expect the missing value for a
# float array
fill_value_exp = np.nan
else:
fill_value_exp = fill_value
expected = Variable("x", [fill_value_exp, 1, 2, 3, 4])
assert_identical(expected, v.shift(x=1, fill_value=fill_value))
expected = Variable("x", [2, 3, 4, 5, fill_value_exp])
assert_identical(expected, v.shift(x=-1, fill_value=fill_value))
expected = Variable("x", [fill_value_exp] * 5)
assert_identical(expected, v.shift(x=5, fill_value=fill_value))
assert_identical(expected, v.shift(x=6, fill_value=fill_value))
with raises_regex(ValueError, "dimension"):
v.shift(z=0)
v = Variable("x", [1, 2, 3, 4, 5], {"foo": "bar"})
assert_identical(v, v.shift(x=0))
expected = Variable("x", [fill_value_exp, 1, 2, 3, 4], {"foo": "bar"})
assert_identical(expected, v.shift(x=1, fill_value=fill_value))
def test_shift2d(self):
v = Variable(("x", "y"), [[1, 2], [3, 4]])
expected = Variable(("x", "y"), [[np.nan, np.nan], [np.nan, 1]])
assert_identical(expected, v.shift(x=1, y=1))
def test_roll(self):
v = Variable("x", [1, 2, 3, 4, 5])
assert_identical(v, v.roll(x=0))
assert v is not v.roll(x=0)
expected = Variable("x", [5, 1, 2, 3, 4])
assert_identical(expected, v.roll(x=1))
assert_identical(expected, v.roll(x=-4))
assert_identical(expected, v.roll(x=6))
expected = Variable("x", [4, 5, 1, 2, 3])
assert_identical(expected, v.roll(x=2))
assert_identical(expected, v.roll(x=-3))
with raises_regex(ValueError, "dimension"):
v.roll(z=0)
def test_roll_consistency(self):
v = Variable(("x", "y"), np.random.randn(5, 6))
for axis, dim in [(0, "x"), (1, "y")]:
for shift in [-3, 0, 1, 7, 11]:
expected = np.roll(v.values, shift, axis=axis)
actual = v.roll(**{dim: shift}).values
assert_array_equal(expected, actual)
def test_transpose(self):
v = Variable(["time", "x"], self.d)
v2 = Variable(["x", "time"], self.d.T)
assert_identical(v, v2.transpose())
assert_identical(v.transpose(), v.T)
x = np.random.randn(2, 3, 4, 5)
w = Variable(["a", "b", "c", "d"], x)
w2 = Variable(["d", "b", "c", "a"], np.einsum("abcd->dbca", x))
assert w2.shape == (5, 3, 4, 2)
assert_identical(w2, w.transpose("d", "b", "c", "a"))
assert_identical(w2, w.transpose("d", ..., "a"))
assert_identical(w2, w.transpose("d", "b", "c", ...))
assert_identical(w2, w.transpose(..., "b", "c", "a"))
assert_identical(w, w2.transpose("a", "b", "c", "d"))
w3 = Variable(["b", "c", "d", "a"], np.einsum("abcd->bcda", x))
assert_identical(w, w3.transpose("a", "b", "c", "d"))
def test_transpose_0d(self):
for value in [
3.5,
("a", 1),
np.datetime64("2000-01-01"),
np.timedelta64(1, "h"),
None,
object(),
]:
variable = Variable([], value)
actual = variable.transpose()
assert actual.identical(variable)
def test_squeeze(self):
v = Variable(["x", "y"], [[1]])
assert_identical(Variable([], 1), v.squeeze())
assert_identical(Variable(["y"], [1]), v.squeeze("x"))
assert_identical(Variable(["y"], [1]), v.squeeze(["x"]))
assert_identical(Variable(["x"], [1]), v.squeeze("y"))
assert_identical(Variable([], 1), v.squeeze(["x", "y"]))
v = Variable(["x", "y"], [[1, 2]])
assert_identical(Variable(["y"], [1, 2]), v.squeeze())
assert_identical(Variable(["y"], [1, 2]), v.squeeze("x"))
with raises_regex(ValueError, "cannot select a dimension"):
v.squeeze("y")
def test_get_axis_num(self):
v = Variable(["x", "y", "z"], np.random.randn(2, 3, 4))
assert v.get_axis_num("x") == 0
assert v.get_axis_num(["x"]) == (0,)
assert v.get_axis_num(["x", "y"]) == (0, 1)
assert v.get_axis_num(["z", "y", "x"]) == (2, 1, 0)
with raises_regex(ValueError, "not found in array dim"):
v.get_axis_num("foobar")
def test_set_dims(self):
v = Variable(["x"], [0, 1])
actual = v.set_dims(["x", "y"])
expected = Variable(["x", "y"], [[0], [1]])
assert_identical(actual, expected)
actual = v.set_dims(["y", "x"])
assert_identical(actual, expected.T)
actual = v.set_dims({"x": 2, "y": 2})
expected = Variable(["x", "y"], [[0, 0], [1, 1]])
assert_identical(actual, expected)
v = Variable(["foo"], [0, 1])
actual = v.set_dims("foo")
expected = v
assert_identical(actual, expected)
with raises_regex(ValueError, "must be a superset"):
v.set_dims(["z"])
def test_set_dims_object_dtype(self):
v = Variable([], ("a", 1))
actual = v.set_dims(("x",), (3,))
exp_values = np.empty((3,), dtype=object)
for i in range(3):
exp_values[i] = ("a", 1)
expected = Variable(["x"], exp_values)
assert actual.identical(expected)
def test_stack(self):
v = Variable(["x", "y"], [[0, 1], [2, 3]], {"foo": "bar"})
actual = v.stack(z=("x", "y"))
expected = Variable("z", [0, 1, 2, 3], v.attrs)
assert_identical(actual, expected)
actual = v.stack(z=("x",))
expected = Variable(("y", "z"), v.data.T, v.attrs)
assert_identical(actual, expected)
actual = v.stack(z=())
assert_identical(actual, v)
actual = v.stack(X=("x",), Y=("y",)).transpose("X", "Y")
expected = Variable(("X", "Y"), v.data, v.attrs)
assert_identical(actual, expected)
def test_stack_errors(self):
v = Variable(["x", "y"], [[0, 1], [2, 3]], {"foo": "bar"})
with raises_regex(ValueError, "invalid existing dim"):
v.stack(z=("x1",))
with raises_regex(ValueError, "cannot create a new dim"):
v.stack(x=("x",))
def test_unstack(self):
v = Variable("z", [0, 1, 2, 3], {"foo": "bar"})
actual = v.unstack(z={"x": 2, "y": 2})
expected = Variable(("x", "y"), [[0, 1], [2, 3]], v.attrs)
assert_identical(actual, expected)
actual = v.unstack(z={"x": 4, "y": 1})
expected = Variable(("x", "y"), [[0], [1], [2], [3]], v.attrs)
assert_identical(actual, expected)
actual = v.unstack(z={"x": 4})
expected = Variable("x", [0, 1, 2, 3], v.attrs)
assert_identical(actual, expected)
def test_unstack_errors(self):
v = Variable("z", [0, 1, 2, 3])
with raises_regex(ValueError, "invalid existing dim"):
v.unstack(foo={"x": 4})
with raises_regex(ValueError, "cannot create a new dim"):
v.stack(z=("z",))
with raises_regex(ValueError, "the product of the new dim"):
v.unstack(z={"x": 5})
def test_unstack_2d(self):
v = Variable(["x", "y"], [[0, 1], [2, 3]])
actual = v.unstack(y={"z": 2})
expected = Variable(["x", "z"], v.data)
assert_identical(actual, expected)
actual = v.unstack(x={"z": 2})
expected = Variable(["y", "z"], v.data.T)
assert_identical(actual, expected)
def test_stack_unstack_consistency(self):
v = Variable(["x", "y"], [[0, 1], [2, 3]])
actual = v.stack(z=("x", "y")).unstack(z={"x": 2, "y": 2})
assert_identical(actual, v)
def test_broadcasting_math(self):
x = np.random.randn(2, 3)
v = Variable(["a", "b"], x)
# 1d to 2d broadcasting
assert_identical(v * v, Variable(["a", "b"], np.einsum("ab,ab->ab", x, x)))
assert_identical(v * v[0], Variable(["a", "b"], np.einsum("ab,b->ab", x, x[0])))
assert_identical(v[0] * v, Variable(["b", "a"], np.einsum("b,ab->ba", x[0], x)))
assert_identical(
v[0] * v[:, 0], Variable(["b", "a"], np.einsum("b,a->ba", x[0], x[:, 0]))
)
# higher dim broadcasting
y = np.random.randn(3, 4, 5)
w = Variable(["b", "c", "d"], y)
assert_identical(
v * w, Variable(["a", "b", "c", "d"], np.einsum("ab,bcd->abcd", x, y))
)
assert_identical(
w * v, Variable(["b", "c", "d", "a"], np.einsum("bcd,ab->bcda", y, x))
)
assert_identical(
v * w[0], Variable(["a", "b", "c", "d"], np.einsum("ab,cd->abcd", x, y[0]))
)
def test_broadcasting_failures(self):
a = Variable(["x"], np.arange(10))
b = Variable(["x"], np.arange(5))
c = Variable(["x", "x"], np.arange(100).reshape(10, 10))
with raises_regex(ValueError, "mismatched lengths"):
a + b
with raises_regex(ValueError, "duplicate dimensions"):
a + c
def test_inplace_math(self):
x = np.arange(5)
v = Variable(["x"], x)
v2 = v
v2 += 1
assert v is v2
# since we provided an ndarray for data, it is also modified in-place
assert source_ndarray(v.values) is x
assert_array_equal(v.values, np.arange(5) + 1)
with raises_regex(ValueError, "dimensions cannot change"):
v += Variable("y", np.arange(5))
def test_reduce(self):
v = Variable(["x", "y"], self.d, {"ignored": "attributes"})
assert_identical(v.reduce(np.std, "x"), Variable(["y"], self.d.std(axis=0)))
assert_identical(v.reduce(np.std, axis=0), v.reduce(np.std, dim="x"))
assert_identical(
v.reduce(np.std, ["y", "x"]), Variable([], self.d.std(axis=(0, 1)))
)
assert_identical(v.reduce(np.std), Variable([], self.d.std()))
assert_identical(
v.reduce(np.mean, "x").reduce(np.std, "y"),
Variable([], self.d.mean(axis=0).std()),
)
assert_allclose(v.mean("x"), v.reduce(np.mean, "x"))
with raises_regex(ValueError, "cannot supply both"):
v.mean(dim="x", axis=0)
with pytest.warns(DeprecationWarning, match="allow_lazy is deprecated"):
v.mean(dim="x", allow_lazy=True)
with pytest.warns(DeprecationWarning, match="allow_lazy is deprecated"):
v.mean(dim="x", allow_lazy=False)
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("q", [0.25, [0.50], [0.25, 0.75]])
@pytest.mark.parametrize(
"axis, dim", zip([None, 0, [0], [0, 1]], [None, "x", ["x"], ["x", "y"]])
)
def test_quantile(self, q, axis, dim, skipna):
v = Variable(["x", "y"], self.d)
actual = v.quantile(q, dim=dim, skipna=skipna)
_percentile_func = np.nanpercentile if skipna else np.percentile
expected = _percentile_func(self.d, np.array(q) * 100, axis=axis)
np.testing.assert_allclose(actual.values, expected)
@requires_dask
@pytest.mark.parametrize("q", [0.25, [0.50], [0.25, 0.75]])
@pytest.mark.parametrize("axis, dim", [[1, "y"], [[1], ["y"]]])
def test_quantile_dask(self, q, axis, dim):
v = Variable(["x", "y"], self.d).chunk({"x": 2})
actual = v.quantile(q, dim=dim)
assert isinstance(actual.data, dask_array_type)
expected = np.nanpercentile(self.d, np.array(q) * 100, axis=axis)
np.testing.assert_allclose(actual.values, expected)
@requires_dask
def test_quantile_chunked_dim_error(self):
v = Variable(["x", "y"], self.d).chunk({"x": 2})
with raises_regex(ValueError, "dimension 'x'"):
v.quantile(0.5, dim="x")
@pytest.mark.parametrize("q", [-0.1, 1.1, [2], [0.25, 2]])
def test_quantile_out_of_bounds(self, q):
v = Variable(["x", "y"], self.d)
# escape special characters
with raises_regex(ValueError, r"Quantiles must be in the range \[0, 1\]"):
v.quantile(q, dim="x")
@requires_dask
@requires_bottleneck
def test_rank_dask_raises(self):
v = Variable(["x"], [3.0, 1.0, np.nan, 2.0, 4.0]).chunk(2)
with raises_regex(TypeError, "arrays stored as dask"):
v.rank("x")
@requires_bottleneck
def test_rank(self):
import bottleneck as bn
# floats
v = Variable(["x", "y"], [[3, 4, np.nan, 1]])
expect_0 = bn.nanrankdata(v.data, axis=0)
expect_1 = bn.nanrankdata(v.data, axis=1)
np.testing.assert_allclose(v.rank("x").values, expect_0)
np.testing.assert_allclose(v.rank("y").values, expect_1)
# int
v = Variable(["x"], [3, 2, 1])
expect = bn.rankdata(v.data, axis=0)
np.testing.assert_allclose(v.rank("x").values, expect)
# str
v = Variable(["x"], ["c", "b", "a"])
expect = bn.rankdata(v.data, axis=0)
np.testing.assert_allclose(v.rank("x").values, expect)
# pct
v = Variable(["x"], [3.0, 1.0, np.nan, 2.0, 4.0])
v_expect = Variable(["x"], [0.75, 0.25, np.nan, 0.5, 1.0])
assert_equal(v.rank("x", pct=True), v_expect)
# invalid dim
with raises_regex(ValueError, "not found"):
v.rank("y")
def test_big_endian_reduce(self):
# regression test for GH489
data = np.ones(5, dtype=">f4")
v = Variable(["x"], data)
expected = Variable([], 5)
assert_identical(expected, v.sum())
def test_reduce_funcs(self):
v = Variable("x", np.array([1, np.nan, 2, 3]))
assert_identical(v.mean(), Variable([], 2))
assert_identical(v.mean(skipna=True), Variable([], 2))
assert_identical(v.mean(skipna=False), Variable([], np.nan))
assert_identical(np.mean(v), Variable([], 2))
assert_identical(v.prod(), Variable([], 6))
assert_identical(v.cumsum(axis=0), Variable("x", np.array([1, 1, 3, 6])))
assert_identical(v.cumprod(axis=0), Variable("x", np.array([1, 1, 2, 6])))
assert_identical(v.var(), Variable([], 2.0 / 3))
assert_identical(v.median(), Variable([], 2))
v = Variable("x", [True, False, False])
assert_identical(v.any(), Variable([], True))
assert_identical(v.all(dim="x"), Variable([], False))
v = Variable("t", pd.date_range("2000-01-01", periods=3))
assert v.argmax(skipna=True) == 2
assert_identical(v.max(), Variable([], pd.Timestamp("2000-01-03")))
def test_reduce_keepdims(self):
v = Variable(["x", "y"], self.d)
assert_identical(
v.mean(keepdims=True), Variable(v.dims, np.mean(self.d, keepdims=True))
)
assert_identical(
v.mean(dim="x", keepdims=True),
Variable(v.dims, np.mean(self.d, axis=0, keepdims=True)),
)
assert_identical(
v.mean(dim="y", keepdims=True),
Variable(v.dims, np.mean(self.d, axis=1, keepdims=True)),
)
assert_identical(
v.mean(dim=["y", "x"], keepdims=True),
Variable(v.dims, np.mean(self.d, axis=(1, 0), keepdims=True)),
)
v = Variable([], 1.0)
assert_identical(
v.mean(keepdims=True), Variable([], np.mean(v.data, keepdims=True))
)
@requires_dask
def test_reduce_keepdims_dask(self):
import dask.array
v = Variable(["x", "y"], self.d).chunk()
actual = v.mean(keepdims=True)
assert isinstance(actual.data, dask.array.Array)
expected = Variable(v.dims, np.mean(self.d, keepdims=True))
assert_identical(actual, expected)
actual = v.mean(dim="y", keepdims=True)
assert isinstance(actual.data, dask.array.Array)
expected = Variable(v.dims, np.mean(self.d, axis=1, keepdims=True))
assert_identical(actual, expected)
def test_reduce_keep_attrs(self):
_attrs = {"units": "test", "long_name": "testing"}
v = Variable(["x", "y"], self.d, _attrs)
# Test dropped attrs
vm = v.mean()
assert len(vm.attrs) == 0
assert vm.attrs == {}
# Test kept attrs
vm = v.mean(keep_attrs=True)
assert len(vm.attrs) == len(_attrs)
assert vm.attrs == _attrs
def test_binary_ops_keep_attrs(self):
_attrs = {"units": "test", "long_name": "testing"}
a = Variable(["x", "y"], np.random.randn(3, 3), _attrs)
b = Variable(["x", "y"], np.random.randn(3, 3), _attrs)
# Test dropped attrs
d = a - b # just one operation
assert d.attrs == {}
# Test kept attrs
with set_options(keep_attrs=True):
d = a - b
assert d.attrs == _attrs
def test_count(self):
expected = Variable([], 3)
actual = Variable(["x"], [1, 2, 3, np.nan]).count()
assert_identical(expected, actual)
v = Variable(["x"], np.array(["1", "2", "3", np.nan], dtype=object))
actual = v.count()
assert_identical(expected, actual)
actual = Variable(["x"], [True, False, True]).count()
assert_identical(expected, actual)
assert actual.dtype == int
expected = Variable(["x"], [2, 3])
actual = Variable(["x", "y"], [[1, 0, np.nan], [1, 1, 1]]).count("y")
assert_identical(expected, actual)
def test_setitem(self):
v = Variable(["x", "y"], [[0, 3, 2], [3, 4, 5]])
v[0, 1] = 1
assert v[0, 1] == 1
v = Variable(["x", "y"], [[0, 3, 2], [3, 4, 5]])
v[dict(x=[0, 1])] = 1
assert_array_equal(v[[0, 1]], np.ones_like(v[[0, 1]]))
# boolean indexing
v = Variable(["x", "y"], [[0, 3, 2], [3, 4, 5]])
v[dict(x=[True, False])] = 1
assert_array_equal(v[0], np.ones_like(v[0]))
v = Variable(["x", "y"], [[0, 3, 2], [3, 4, 5]])
v[dict(x=[True, False], y=[False, True, False])] = 1
assert v[0, 1] == 1
def test_setitem_fancy(self):
# assignment which should work as np.ndarray does
def assert_assigned_2d(array, key_x, key_y, values):
expected = array.copy()
expected[key_x, key_y] = values
v = Variable(["x", "y"], array)
v[dict(x=key_x, y=key_y)] = values
assert_array_equal(expected, v)
# 1d vectorized indexing
assert_assigned_2d(
np.random.randn(4, 3),
key_x=Variable(["a"], [0, 1]),
key_y=Variable(["a"], [0, 1]),
values=0,
)
assert_assigned_2d(
np.random.randn(4, 3),
key_x=Variable(["a"], [0, 1]),
key_y=Variable(["a"], [0, 1]),
values=Variable((), 0),
)
assert_assigned_2d(
np.random.randn(4, 3),
key_x=Variable(["a"], [0, 1]),
key_y=Variable(["a"], [0, 1]),
values=Variable(("a"), [3, 2]),
)
assert_assigned_2d(
np.random.randn(4, 3),
key_x=slice(None),
key_y=Variable(["a"], [0, 1]),
values=Variable(("a"), [3, 2]),
)
# 2d-vectorized indexing
assert_assigned_2d(
np.random.randn(4, 3),
key_x=Variable(["a", "b"], [[0, 1]]),
key_y=Variable(["a", "b"], [[1, 0]]),
values=0,
)
assert_assigned_2d(
np.random.randn(4, 3),
key_x=Variable(["a", "b"], [[0, 1]]),
key_y=Variable(["a", "b"], [[1, 0]]),
values=[0],
)
assert_assigned_2d(
np.random.randn(5, 4),
key_x=Variable(["a", "b"], [[0, 1], [2, 3]]),
key_y=Variable(["a", "b"], [[1, 0], [3, 3]]),
values=[2, 3],
)
# vindex with slice
v = Variable(["x", "y", "z"], np.ones((4, 3, 2)))
ind = Variable(["a"], [0, 1])
v[dict(x=ind, z=ind)] = 0
expected = Variable(["x", "y", "z"], np.ones((4, 3, 2)))
expected[0, :, 0] = 0
expected[1, :, 1] = 0
assert_identical(expected, v)
# dimension broadcast
v = Variable(["x", "y"], np.ones((3, 2)))
ind = Variable(["a", "b"], [[0, 1]])
v[ind, :] = 0
expected = Variable(["x", "y"], [[0, 0], [0, 0], [1, 1]])
assert_identical(expected, v)
with raises_regex(ValueError, "shape mismatch"):
v[ind, ind] = np.zeros((1, 2, 1))
v = Variable(["x", "y"], [[0, 3, 2], [3, 4, 5]])
ind = Variable(["a"], [0, 1])
v[dict(x=ind)] = Variable(["a", "y"], np.ones((2, 3), dtype=int) * 10)
assert_array_equal(v[0], np.ones_like(v[0]) * 10)
assert_array_equal(v[1], np.ones_like(v[1]) * 10)
assert v.dims == ("x", "y") # dimension should not change
# increment
v = Variable(["x", "y"], np.arange(6).reshape(3, 2))
ind = Variable(["a"], [0, 1])
v[dict(x=ind)] += 1
expected = Variable(["x", "y"], [[1, 2], [3, 4], [4, 5]])
assert_identical(v, expected)
ind = Variable(["a"], [0, 0])
v[dict(x=ind)] += 1
expected = Variable(["x", "y"], [[2, 3], [3, 4], [4, 5]])
assert_identical(v, expected)
def test_coarsen(self):
v = self.cls(["x"], [0, 1, 2, 3, 4])
actual = v.coarsen({"x": 2}, boundary="pad", func="mean")
expected = self.cls(["x"], [0.5, 2.5, 4])
assert_identical(actual, expected)
actual = v.coarsen({"x": 2}, func="mean", boundary="pad", side="right")
expected = self.cls(["x"], [0, 1.5, 3.5])
assert_identical(actual, expected)
actual = v.coarsen({"x": 2}, func=np.mean, side="right", boundary="trim")
expected = self.cls(["x"], [1.5, 3.5])
assert_identical(actual, expected)
        # smoke test: these window/func/side/boundary combinations should run without error
v = self.cls(["x", "y", "z"], np.arange(40 * 30 * 2).reshape(40, 30, 2))
for windows, func, side, boundary in [
({"x": 2}, np.mean, "left", "trim"),
({"x": 2}, np.median, {"x": "left"}, "pad"),
({"x": 2, "y": 3}, np.max, "left", {"x": "pad", "y": "trim"}),
]:
v.coarsen(windows, func, boundary, side)
def test_coarsen_2d(self):
        # a 2d-mean should give the same result as two successive 1d-means
v = self.cls(["x", "y"], np.arange(6 * 12).reshape(6, 12))
actual = v.coarsen({"x": 3, "y": 4}, func="mean")
expected = v.coarsen({"x": 3}, func="mean").coarsen({"y": 4}, func="mean")
assert_equal(actual, expected)
v = self.cls(["x", "y"], np.arange(7 * 12).reshape(7, 12))
actual = v.coarsen({"x": 3, "y": 4}, func="mean", boundary="trim")
expected = v.coarsen({"x": 3}, func="mean", boundary="trim").coarsen(
{"y": 4}, func="mean", boundary="trim"
)
assert_equal(actual, expected)
        # if there is a NaN, the two results should differ
v = self.cls(["x", "y"], 1.0 * np.arange(6 * 12).reshape(6, 12))
v[2, 4] = np.nan
v[3, 5] = np.nan
actual = v.coarsen({"x": 3, "y": 4}, func="mean", boundary="trim")
expected = (
v.coarsen({"x": 3}, func="sum", boundary="trim").coarsen(
{"y": 4}, func="sum", boundary="trim"
)
/ 12
)
assert not actual.equals(expected)
        # adjust for the NaN count: the affected 3x4 windows contain 11 valid values instead of 12
expected[0, 1] *= 12 / 11
expected[1, 1] *= 12 / 11
assert_allclose(actual, expected)
v = self.cls(("x", "y"), np.arange(4 * 4, dtype=np.float32).reshape(4, 4))
actual = v.coarsen(dict(x=2, y=2), func="count", boundary="exact")
expected = self.cls(("x", "y"), 4 * np.ones((2, 2)))
assert_equal(actual, expected)
v[0, 0] = np.nan
v[-1, -1] = np.nan
expected[0, 0] = 3
expected[-1, -1] = 3
actual = v.coarsen(dict(x=2, y=2), func="count", boundary="exact")
assert_equal(actual, expected)
actual = v.coarsen(dict(x=2, y=2), func="sum", boundary="exact", skipna=False)
expected = self.cls(("x", "y"), [[np.nan, 18], [42, np.nan]])
assert_equal(actual, expected)
actual = v.coarsen(dict(x=2, y=2), func="sum", boundary="exact", skipna=True)
expected = self.cls(("x", "y"), [[10, 18], [42, 35]])
assert_equal(actual, expected)
# perhaps @pytest.mark.parametrize("operation", [f for f in duck_array_ops])
def test_coarsen_keep_attrs(self, operation="mean"):
_attrs = {"units": "test", "long_name": "testing"}
test_func = getattr(duck_array_ops, operation, None)
# Test dropped attrs
with set_options(keep_attrs=False):
new = Variable(["coord"], np.linspace(1, 10, 100), attrs=_attrs).coarsen(
windows={"coord": 1}, func=test_func, boundary="exact", side="left"
)
assert new.attrs == {}
# Test kept attrs
with set_options(keep_attrs=True):
new = Variable(["coord"], np.linspace(1, 10, 100), attrs=_attrs).coarsen(
windows={"coord": 1}, func=test_func, boundary="exact", side="left"
)
assert new.attrs == _attrs
@requires_dask
class TestVariableWithDask(VariableSubclassobjects):
cls = staticmethod(lambda *args: Variable(*args).chunk())
@pytest.mark.xfail
def test_0d_object_array_with_list(self):
super().test_0d_object_array_with_list()
@pytest.mark.xfail
def test_array_interface(self):
# dask array does not have `argsort`
super().test_array_interface()
@pytest.mark.xfail
def test_copy_index(self):
super().test_copy_index()
@pytest.mark.xfail
def test_eq_all_dtypes(self):
super().test_eq_all_dtypes()
def test_getitem_fancy(self):
super().test_getitem_fancy()
def test_getitem_1d_fancy(self):
super().test_getitem_1d_fancy()
def test_getitem_with_mask_nd_indexer(self):
import dask.array as da
v = Variable(["x"], da.arange(3, chunks=3))
indexer = Variable(("x", "y"), [[0, -1], [-1, 2]])
assert_identical(
v._getitem_with_mask(indexer, fill_value=-1),
self.cls(("x", "y"), [[0, -1], [-1, 2]]),
)
@requires_sparse
class TestVariableWithSparse:
# TODO inherit VariableSubclassobjects to cover more tests
def test_as_sparse(self):
data = np.arange(12).reshape(3, 4)
var = Variable(("x", "y"), data)._as_sparse(fill_value=-1)
actual = var._to_dense()
assert_identical(var, actual)
class TestIndexVariable(VariableSubclassobjects):
cls = staticmethod(IndexVariable)
def test_init(self):
with raises_regex(ValueError, "must be 1-dimensional"):
IndexVariable((), 0)
def test_to_index(self):
data = 0.5 * np.arange(10)
v = IndexVariable(["time"], data, {"foo": "bar"})
assert pd.Index(data, name="time").identical(v.to_index())
def test_multiindex_default_level_names(self):
midx = pd.MultiIndex.from_product([["a", "b"], [1, 2]])
v = IndexVariable(["x"], midx, {"foo": "bar"})
assert v.to_index().names == ("x_level_0", "x_level_1")
def test_data(self):
x = IndexVariable("x", np.arange(3.0))
assert isinstance(x._data, PandasIndexAdapter)
assert isinstance(x.data, np.ndarray)
assert float == x.dtype
assert_array_equal(np.arange(3), x)
assert float == x.values.dtype
with raises_regex(TypeError, "cannot be modified"):
x[:] = 0
def test_name(self):
coord = IndexVariable("x", [10.0])
assert coord.name == "x"
with pytest.raises(AttributeError):
coord.name = "y"
def test_level_names(self):
midx = pd.MultiIndex.from_product(
[["a", "b"], [1, 2]], names=["level_1", "level_2"]
)
x = IndexVariable("x", midx)
assert x.level_names == midx.names
assert IndexVariable("y", [10.0]).level_names is None
def test_get_level_variable(self):
midx = pd.MultiIndex.from_product(
[["a", "b"], [1, 2]], names=["level_1", "level_2"]
)
x = IndexVariable("x", midx)
level_1 = IndexVariable("x", midx.get_level_values("level_1"))
assert_identical(x.get_level_variable("level_1"), level_1)
with raises_regex(ValueError, "has no MultiIndex"):
IndexVariable("y", [10.0]).get_level_variable("level")
def test_concat_periods(self):
periods = pd.period_range("2000-01-01", periods=10)
coords = [IndexVariable("t", periods[:5]), IndexVariable("t", periods[5:])]
expected = IndexVariable("t", periods)
actual = IndexVariable.concat(coords, dim="t")
assert actual.identical(expected)
assert isinstance(actual.to_index(), pd.PeriodIndex)
positions = [list(range(5)), list(range(5, 10))]
actual = IndexVariable.concat(coords, dim="t", positions=positions)
assert actual.identical(expected)
assert isinstance(actual.to_index(), pd.PeriodIndex)
def test_concat_multiindex(self):
idx = pd.MultiIndex.from_product([[0, 1, 2], ["a", "b"]])
coords = [IndexVariable("x", idx[:2]), IndexVariable("x", idx[2:])]
expected = IndexVariable("x", idx)
actual = IndexVariable.concat(coords, dim="x")
assert actual.identical(expected)
assert isinstance(actual.to_index(), pd.MultiIndex)
def test_coordinate_alias(self):
with pytest.warns(Warning, match="deprecated"):
x = Coordinate("x", [1, 2, 3])
assert isinstance(x, IndexVariable)
def test_datetime64(self):
# GH:1932 Make sure indexing keeps precision
t = np.array([1518418799999986560, 1518418799999996560], dtype="datetime64[ns]")
v = IndexVariable("t", t)
assert v[0].data == t[0]
# These tests make use of multi-dimensional variables, which are not valid
# IndexVariable objects:
@pytest.mark.xfail
def test_getitem_error(self):
super().test_getitem_error()
@pytest.mark.xfail
def test_getitem_advanced(self):
super().test_getitem_advanced()
@pytest.mark.xfail
def test_getitem_fancy(self):
super().test_getitem_fancy()
@pytest.mark.xfail
def test_getitem_uint(self):
        super().test_getitem_uint()
@pytest.mark.xfail
@pytest.mark.parametrize(
"mode",
[
"mean",
"median",
"reflect",
"edge",
"linear_ramp",
"maximum",
"minimum",
"symmetric",
"wrap",
],
)
@pytest.mark.parametrize("xr_arg, np_arg", _PAD_XR_NP_ARGS)
def test_pad(self, mode, xr_arg, np_arg):
super().test_pad(mode, xr_arg, np_arg)
@pytest.mark.xfail
@pytest.mark.parametrize("xr_arg, np_arg", _PAD_XR_NP_ARGS)
def test_pad_constant_values(self, xr_arg, np_arg):
super().test_pad_constant_values(xr_arg, np_arg)
@pytest.mark.xfail
def test_rolling_window(self):
super().test_rolling_window()
@pytest.mark.xfail
def test_coarsen_2d(self):
super().test_coarsen_2d()
class TestAsCompatibleData:
def test_unchanged_types(self):
types = (np.asarray, PandasIndexAdapter, LazilyOuterIndexedArray)
for t in types:
for data in [
np.arange(3),
pd.date_range("2000-01-01", periods=3),
pd.date_range("2000-01-01", periods=3).values,
]:
x = t(data)
assert source_ndarray(x) is source_ndarray(as_compatible_data(x))
def test_converted_types(self):
for input_array in [[[0, 1, 2]], pd.DataFrame([[0, 1, 2]])]:
actual = as_compatible_data(input_array)
assert_array_equal(np.asarray(input_array), actual)
assert np.ndarray == type(actual)
assert np.asarray(input_array).dtype == actual.dtype
def test_masked_array(self):
original = np.ma.MaskedArray(np.arange(5))
expected = np.arange(5)
actual = as_compatible_data(original)
assert_array_equal(expected, actual)
assert np.dtype(int) == actual.dtype
original = np.ma.MaskedArray(np.arange(5), mask=4 * [False] + [True])
expected = np.arange(5.0)
expected[-1] = np.nan
actual = as_compatible_data(original)
assert_array_equal(expected, actual)
assert np.dtype(float) == actual.dtype
def test_datetime(self):
expected = np.datetime64("2000-01-01")
actual = as_compatible_data(expected)
assert expected == actual
assert np.ndarray == type(actual)
assert np.dtype("datetime64[ns]") == actual.dtype
expected = np.array([np.datetime64("2000-01-01")])
actual = as_compatible_data(expected)
assert np.asarray(expected) == actual
assert np.ndarray == type(actual)
assert np.dtype("datetime64[ns]") == actual.dtype
expected = np.array([np.datetime64("2000-01-01", "ns")])
actual = as_compatible_data(expected)
assert np.asarray(expected) == actual
assert np.ndarray == type(actual)
assert np.dtype("datetime64[ns]") == actual.dtype
assert expected is source_ndarray(np.asarray(actual))
expected = np.datetime64("2000-01-01", "ns")
actual = as_compatible_data(datetime(2000, 1, 1))
assert np.asarray(expected) == actual
assert np.ndarray == type(actual)
assert np.dtype("datetime64[ns]") == actual.dtype
def test_full_like(self):
# For more thorough tests, see test_variable.py
orig = Variable(
dims=("x", "y"), data=[[1.5, 2.0], [3.1, 4.3]], attrs={"foo": "bar"}
)
expect = orig.copy(deep=True)
expect.values = [[2.0, 2.0], [2.0, 2.0]]
assert_identical(expect, full_like(orig, 2))
# override dtype
expect.values = [[True, True], [True, True]]
assert expect.dtype == bool
assert_identical(expect, full_like(orig, True, dtype=bool))
# raise error on non-scalar fill_value
with raises_regex(ValueError, "must be scalar"):
full_like(orig, [1.0, 2.0])
@requires_dask
def test_full_like_dask(self):
orig = Variable(
dims=("x", "y"), data=[[1.5, 2.0], [3.1, 4.3]], attrs={"foo": "bar"}
).chunk(((1, 1), (2,)))
def check(actual, expect_dtype, expect_values):
assert actual.dtype == expect_dtype
assert actual.shape == orig.shape
assert actual.dims == orig.dims
assert actual.attrs == orig.attrs
assert actual.chunks == orig.chunks
assert_array_equal(actual.values, expect_values)
check(full_like(orig, 2), orig.dtype, np.full_like(orig.values, 2))
# override dtype
check(
full_like(orig, True, dtype=bool),
bool,
np.full_like(orig.values, True, dtype=bool),
)
# Check that there's no array stored inside dask
# (e.g. we didn't create a numpy array and then we chunked it!)
dsk = full_like(orig, 1).data.dask
for v in dsk.values():
if isinstance(v, tuple):
for vi in v:
assert not isinstance(vi, np.ndarray)
else:
assert not isinstance(v, np.ndarray)
def test_zeros_like(self):
orig = Variable(
dims=("x", "y"), data=[[1.5, 2.0], [3.1, 4.3]], attrs={"foo": "bar"}
)
assert_identical(zeros_like(orig), full_like(orig, 0))
assert_identical(zeros_like(orig, dtype=int), full_like(orig, 0, dtype=int))
def test_ones_like(self):
orig = Variable(
dims=("x", "y"), data=[[1.5, 2.0], [3.1, 4.3]], attrs={"foo": "bar"}
)
assert_identical(ones_like(orig), full_like(orig, 1))
assert_identical(ones_like(orig, dtype=int), full_like(orig, 1, dtype=int))
def test_unsupported_type(self):
# Non indexable type
class CustomArray(NDArrayMixin):
def __init__(self, array):
self.array = array
class CustomIndexable(CustomArray, indexing.ExplicitlyIndexed):
pass
array = CustomArray(np.arange(3))
orig = Variable(dims=("x"), data=array, attrs={"foo": "bar"})
assert isinstance(orig._data, np.ndarray) # should not be CustomArray
array = CustomIndexable(np.arange(3))
orig = Variable(dims=("x"), data=array, attrs={"foo": "bar"})
assert isinstance(orig._data, CustomIndexable)
def test_raise_no_warning_for_nan_in_binary_ops():
with pytest.warns(None) as record:
Variable("x", [1, 2, np.NaN]) > 0
assert len(record) == 0
class TestBackendIndexing:
""" Make sure all the array wrappers can be indexed. """
@pytest.fixture(autouse=True)
def setUp(self):
self.d = np.random.random((10, 3)).astype(np.float64)
def check_orthogonal_indexing(self, v):
assert np.allclose(v.isel(x=[8, 3], y=[2, 1]), self.d[[8, 3]][:, [2, 1]])
def check_vectorized_indexing(self, v):
ind_x = Variable("z", [0, 2])
ind_y = Variable("z", [2, 1])
assert np.allclose(v.isel(x=ind_x, y=ind_y), self.d[ind_x, ind_y])
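    # Orthogonal indexing above selects the outer product of the x/y indexers,
    # while vectorized indexing pairs the indexers elementwise along their shared
    # "z" dimension, mirroring numpy's pointwise fancy indexing.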
def test_NumpyIndexingAdapter(self):
v = Variable(dims=("x", "y"), data=NumpyIndexingAdapter(self.d))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
        # doubly wrapping is not allowed: NumpyIndexingAdapter only wraps a bare np.ndarray
with raises_regex(TypeError, "NumpyIndexingAdapter only wraps "):
v = Variable(
dims=("x", "y"), data=NumpyIndexingAdapter(NumpyIndexingAdapter(self.d))
)
def test_LazilyOuterIndexedArray(self):
v = Variable(dims=("x", "y"), data=LazilyOuterIndexedArray(self.d))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
# doubly wrapping
v = Variable(
dims=("x", "y"),
data=LazilyOuterIndexedArray(LazilyOuterIndexedArray(self.d)),
)
self.check_orthogonal_indexing(v)
# hierarchical wrapping
v = Variable(
dims=("x", "y"), data=LazilyOuterIndexedArray(NumpyIndexingAdapter(self.d))
)
self.check_orthogonal_indexing(v)
def test_CopyOnWriteArray(self):
v = Variable(dims=("x", "y"), data=CopyOnWriteArray(self.d))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
# doubly wrapping
v = Variable(
dims=("x", "y"), data=CopyOnWriteArray(LazilyOuterIndexedArray(self.d))
)
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
def test_MemoryCachedArray(self):
v = Variable(dims=("x", "y"), data=MemoryCachedArray(self.d))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
# doubly wrapping
v = Variable(dims=("x", "y"), data=CopyOnWriteArray(MemoryCachedArray(self.d)))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
@requires_dask
def test_DaskIndexingAdapter(self):
import dask.array as da
da = da.asarray(self.d)
v = Variable(dims=("x", "y"), data=DaskIndexingAdapter(da))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
# doubly wrapping
v = Variable(dims=("x", "y"), data=CopyOnWriteArray(DaskIndexingAdapter(da)))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
| apache-2.0 | -3,690,393,615,737,424,000 | 36.1105 | 88 | 0.53123 | false |
bountyful/bountyfulcoins | bountyfulcoinsapp/migrations/0001_initial.py | 1 | 9626 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Link'
db.create_table(u'bountyfulcoinsapp_link', (
(u'id', self.gf('django.db.models.fields.AutoField')
(primary_key=True)),
('url', self.gf('django.db.models.fields.URLField')
(unique=True, max_length=200)),
))
db.send_create_signal(u'bountyfulcoinsapp', ['Link'])
# Adding model 'Bounty'
db.create_table(u'bountyfulcoinsapp_bounty', (
(u'id', self.gf('django.db.models.fields.AutoField')
(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')
(max_length=200)),
('user', self.gf('django.db.models.fields.related.ForeignKey')
(to=orm['auth.User'])),
('link', self.gf('django.db.models.fields.related.ForeignKey')
(to=orm['bountyfulcoinsapp.Link'])),
('amount', self.gf('django.db.models.fields.DecimalField')
(default=0.0, max_digits=20, decimal_places=2)),
('currency', self.gf('django.db.models.fields.CharField')
(default='BTC', max_length=15)),
))
db.send_create_signal(u'bountyfulcoinsapp', ['Bounty'])
# Adding model 'Tag'
db.create_table(u'bountyfulcoinsapp_tag', (
(u'id', self.gf('django.db.models.fields.AutoField')
(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')
(unique=True, max_length=64)),
))
db.send_create_signal(u'bountyfulcoinsapp', ['Tag'])
# Adding M2M table for field bounties on 'Tag'
m2m_table_name = db.shorten_name(u'bountyfulcoinsapp_tag_bounties')
db.create_table(m2m_table_name, (
('id', models.AutoField(
verbose_name='ID', primary_key=True, auto_created=True)),
('tag', models.ForeignKey(
orm[u'bountyfulcoinsapp.tag'], null=False)),
('bounty', models.ForeignKey(
orm[u'bountyfulcoinsapp.bounty'], null=False))
))
db.create_unique(m2m_table_name, ['tag_id', 'bounty_id'])
# Adding model 'SharedBounty'
db.create_table(u'bountyfulcoinsapp_sharedbounty', (
(u'id', self.gf('django.db.models.fields.AutoField')
(primary_key=True)),
('bounty', self.gf('django.db.models.fields.related.ForeignKey')
(to=orm['bountyfulcoinsapp.Bounty'], unique=True)),
('date', self.gf('django.db.models.fields.DateTimeField')
(auto_now_add=True, blank=True)),
('votes', self.gf(
'django.db.models.fields.IntegerField')(default=1)),
))
db.send_create_signal(u'bountyfulcoinsapp', ['SharedBounty'])
# Adding M2M table for field users_voted on 'SharedBounty'
m2m_table_name = db.shorten_name(
u'bountyfulcoinsapp_sharedbounty_users_voted')
db.create_table(m2m_table_name, (
('id', models.AutoField(
verbose_name='ID', primary_key=True, auto_created=True)),
('sharedbounty', models.ForeignKey(
orm[u'bountyfulcoinsapp.sharedbounty'], null=False)),
('user', models.ForeignKey(orm[u'auth.user'], null=False))
))
db.create_unique(m2m_table_name, ['sharedbounty_id', 'user_id'])
def backwards(self, orm):
# Deleting model 'Link'
db.delete_table(u'bountyfulcoinsapp_link')
# Deleting model 'Bounty'
db.delete_table(u'bountyfulcoinsapp_bounty')
# Deleting model 'Tag'
db.delete_table(u'bountyfulcoinsapp_tag')
# Removing M2M table for field bounties on 'Tag'
db.delete_table(db.shorten_name(u'bountyfulcoinsapp_tag_bounties'))
# Deleting model 'SharedBounty'
db.delete_table(u'bountyfulcoinsapp_sharedbounty')
# Removing M2M table for field users_voted on 'SharedBounty'
db.delete_table(
db.shorten_name(u'bountyfulcoinsapp_sharedbounty_users_voted'))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'bountyfulcoinsapp.bounty': {
'Meta': {'object_name': 'Bounty'},
'amount': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '20', 'decimal_places': '2'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'BTC'", 'max_length': '15'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bountyfulcoinsapp.Link']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'bountyfulcoinsapp.link': {
'Meta': {'object_name': 'Link'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200'})
},
u'bountyfulcoinsapp.sharedbounty': {
'Meta': {'object_name': 'SharedBounty'},
'bounty': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bountyfulcoinsapp.Bounty']", 'unique': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'users_voted': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False'}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
u'bountyfulcoinsapp.tag': {
'Meta': {'object_name': 'Tag'},
'bounties': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['bountyfulcoinsapp.Bounty']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['bountyfulcoinsapp']
| mit | -6,591,234,244,855,706,000 | 55.623529 | 195 | 0.566487 | false |
HengeSense/website | apps/news/widgets.py | 1 | 2559 | ############################################################################
# This file is part of the Maui Web site.
#
# Copyright (c) 2012 Pier Luigi Fiorini
# Copyright (c) 2009-2010 Krzysztof Grodzicki
#
# Author(s):
# Pier Luigi Fiorini <[email protected]>
#
# $BEGIN_LICENSE:AGPL3+$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# $END_LICENSE$
############################################################################
from django import forms
from django.conf import settings
from django.utils.safestring import mark_safe
from django.template.loader import render_to_string
class TinyMCEEditor(forms.Textarea):
class Media:
js = (
"js/jquery-1.8.1.min.js",
"js/jquery.tinymce.js",
)
def __init__(self, language=None):
self.language = language or settings.LANGUAGE_CODE[:2]
super(TinyMCEEditor, self).__init__()
def render(self, name, value, attrs=None):
rendered = super(TinyMCEEditor, self).render(name, value, attrs)
context = {
"name": name,
"lang": self.language[:2],
"language": self.language,
"STATIC_URL": settings.STATIC_URL,
}
return rendered + mark_safe(render_to_string(
"admin/news/widgets/tinymce.html", context))
class WYMEditor(forms.Textarea):
class Media:
js = (
"js/jquery-1.8.1.min.js",
"cms/wymeditor/jquery.wymeditor.pack.js",
)
def __init__(self, language=None, attrs=None):
self.language = language or settings.LANGUAGE_CODE[:2]
self.attrs = {"class": "wymeditor"}
if attrs:
self.attrs.update(attrs)
super(WYMEditor, self).__init__(attrs)
def render(self, name, value, attrs=None):
rendered = super(WYMEditor, self).render(name, value, attrs)
context = {
"name": name,
"lang": self.language[:2],
"language": self.language,
"STATIC_URL": settings.STATIC_URL,
"page_link_wymeditor": 0,
"filebrowser": 0,
}
return rendered + mark_safe(render_to_string(
"admin/news/widgets/wymeditor.html", context))
| agpl-3.0 | 3,561,484,379,450,850,000 | 30.207317 | 76 | 0.664322 | false |
vine-comment/live_portal | live_portal/urls.py | 1 | 1677 | from live_portal import views
from views import *
from django.conf.urls import patterns, include, url
from django.contrib.auth.decorators import login_required
from registration.backends.simple.views import RegistrationView
from forms import *
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# admin
url(r'^admin/', include(admin.site.urls)),
# favicon
url(r'^favicon\.ico$', RedirectView.as_view(url=settings.STATIC_URL + 'favicon/1.ico')),
# auth & accounts
url(r'^auth', TemplateView.as_view(template_name='registration/auth.html'), name='auth'),
url(r'^accounts/register/$',
RegistrationView.as_view(form_class=LivePortalRegistrationForm),
name='registration_register'),
url(r'^accounts/', include('registration.urls')),
url(r'^resetpassword/passwordsent/$', 'django.contrib.auth.views.password_reset', name='auth_password_reset'),
url(r'^changepassword/passwordsent/$', 'django.contrib.auth.views.password_change', name='auth_password_change'),
url(r'^accounts/logout/$', 'django.contrib.auth.views.logout', name='auth_logout'),
url(r'^users/(?P<name>.*?)/?$', HomeView.as_view(), name='user'),
# follows
url(r'^user/follows$', login_required(UserFollowsView.as_view()), name='user_follows'),
# show anchors
url(r'^show/(?P<tag>.*?)/?$', ShowView.as_view(), name='show'),
url(r'^/?$', HomeView.as_view(), name='home'),
# ajax
url(r'^ajax/enter_room/(?P<room>.*?)/?$', views.enter_room),
url(r'^ajax/follow_room/(?P<room>.*?)/?$', views.follow_room),
url(r'^ajax/unfollow_room/(?P<room>.*?)/?$', views.unfollow_room),
)
| gpl-3.0 | 2,427,552,201,301,461,000 | 39.902439 | 117 | 0.66607 | false |
karimbahgat/Pure-Python-Greiner-Hormann-Polygon-Clipping | GreinerHorman_Algo/KimKim/puremidpoints_v16(k&k,tryfixcrosschange).py | 1 | 36981 | # -*- coding: UTF-8 -*-
# Efficient Clipping of Arbitrary Polygons
#
# Copyright (c) 2011, 2012 Helder Correia <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# FINAL BEST IDEA, IMPLEMENTED NOW BUT DOESN'T WORK BECAUSE THE INTERSECTION STAGE IS NOT FINDING ALL INTERSECTIONS:
# USE THE PREV AND NEXT MIDPOINT LOCATIONS FOR DETERMINING THE ENTRY FLAG.
# NORMAL RULES, EXCEPT IN INTERSECTION MODE: TURN OFF THE INTERSECTION FLAGS FOR OUT-ON-ON AND ON-ON-OUT BECAUSE THEY ARE JUST TANGENT AND NOT RELATED TO THE INSIDES.
# FINALLY, WHEN TRAVERSING, AFTER COMPLETING ONE POLY, SEARCH FOR THE NEXT INTERSECTION THAT IS UNCHECKED IN BOTH THE CURRENT VERTEX AND ITS NEIGHBOUR.
"""
# Greiner-Hormann Polygon Clipping with support for degenerates
This is a fork aimed to improve Helder Correia's pure-Python Greiner-Hormann implementation for polygon clipping. Partly for educational purposes and partly for portable pure-Python clipping.
Status: Incomplete/unstable.
Fork author: Karim Bahgat <[email protected]>
-----------------------------------------------------------
# Efficient Clipping of Arbitrary Polygons
Based on the paper "Efficient Clipping of Arbitrary Polygons" by Günther
Greiner (greiner[at]informatik.uni-erlangen.de) and Kai Hormann
(hormann[at]informatik.tu-clausthal.de), ACM Transactions on Graphics
1998;17(2):71-83.
Available at: http://www.inf.usi.ch/hormann/papers/Greiner.1998.ECO.pdf
You should have received the README file along with this program.
If not, see <https://github.com/helderco/polyclip>
"""
DEBUG = False
class Vertex(object):
"""Node in a circular doubly linked list.
    This class is almost exactly as described in the paper by Greiner/Hormann.
"""
def __init__(self, vertex, alpha=0.0, intersect=False, entry=None, checked=False, degen=False):
if isinstance(vertex, Vertex):
vertex = (vertex.x, vertex.y)
# checked = True
self.x, self.y = vertex # point coordinates of the vertex
self.next = None # reference to the next vertex of the polygon
self.prev = None # reference to the previous vertex of the polygon
self.neighbour = None # reference to the corresponding intersection vertex in the other polygon
self.entry = entry # True if intersection is an entry point, False if exit
self.alpha = alpha # intersection point's relative distance from previous vertex
self.intersect = intersect # True if vertex is an intersection
self.checked = checked # True if the vertex has been checked (last phase)
self.couple = None
self.cross_change = None
@property
def xy(self):
return self.x, self.y
def isInside(self, poly):
if testLocation(self, poly) in ("in","on"):
return True
else: return False
def setChecked(self):
self.checked = True
if self.neighbour and not self.neighbour.checked:
self.neighbour.setChecked()
def copy(self):
copy = Vertex(self) # point coordinates of the vertex
copy.next = self.next # reference to the next vertex of the polygon
copy.prev = self.prev # reference to the previous vertex of the polygon
copy.neighbour = self.neighbour # reference to the corresponding intersection vertex in the other polygon
copy.entry = self.entry # True if intersection is an entry point, False if exit
copy.alpha = self.alpha # intersection point's relative distance from previous vertex
copy.intersect = self.intersect # True if vertex is an intersection
copy.couple = self.couple
copy.cross_change = self.cross_change
copy.checked = self.checked
return copy
def __repr__(self):
"""String representation of the vertex for debugging purposes."""
return "(%.2f, %.2f) <-> %s(%.2f, %.2f)%s <-> (%.2f, %.2f) %s" % (
self.prev.x, self.prev.y,
'i' if self.intersect else ' ',
self.x, self.y,
('e' if self.entry else 'x') if self.intersect else ' ',
self.next.x, self.next.y,
' !' if self.intersect and not self.checked else ''
)
class Polygon(object):
"""Manages a circular doubly linked list of Vertex objects that represents a polygon."""
first = None
def add(self, vertex):
"""Add a vertex object to the polygon (vertex is added at the 'end' of the list")."""
if not self.first:
self.first = vertex
self.first.next = vertex
self.first.prev = vertex
else:
next = self.first
prev = next.prev
next.prev = vertex
vertex.next = next
vertex.prev = prev
prev.next = vertex
def replace(self, old, new):
# when replacing old normal vertice with new intersection vertice at same xy
# only changes the attributes in place
old.intersect = new.intersect
old.x,old.y = new.x,new.y
old.neighbour = new.neighbour
old.neighbour.neighbour = old
old.entry = new.entry
old.alpha = new.alpha
## new.next = old.next
## new.prev = old.prev
## if old == self.first:
## #print "replaced first", self.first, new
## self.first = new
## old.prev.next = new
## old.next.prev = new
def insert(self, vertex, start, end):
"""Insert and sort a vertex between a specified pair of vertices.
This function inserts a vertex (most likely an intersection point)
between two other vertices (start and end). These other vertices
cannot be intersections (that is, they must be actual vertices of
the original polygon). If there are multiple intersection points
between the two vertices, then the new vertex is inserted based on
its alpha value.
"""
if vertex.xy == start.xy:
copy = vertex.copy()
self.replace(start, copy)
return # dont process further
elif vertex.xy == end.xy:
copy = vertex.copy()
self.replace(end, copy)
return # dont process further
# position based on alpha
curr = start
while curr != end and curr.alpha < vertex.alpha:
curr = curr.next
if vertex.xy == curr.prev.xy:
## if vertex.xy == curr.xy: self.replace(curr, vertex)
## elif vertex.xy == curr.prev.xy: self.replace(curr, vertex.prev)
vertex.neighbour.neighbour = curr.prev
return # dont do it if same as a previously inserted intersection
if vertex.xy == curr.xy:
## if vertex.xy == curr.xy: self.replace(curr, vertex)
## elif vertex.xy == curr.prev.xy: self.replace(curr, vertex.prev)
vertex.neighbour.neighbour = curr
return # dont do it if same as a previously inserted intersection
vertex.next = curr
vertex.prev = curr.prev
vertex.next.prev = vertex
vertex.prev.next = vertex
#print "inserted",vertex
def next(self, v):
"""Return the next non intersecting vertex after the one specified."""
c = v
while c.intersect:
c = c.next
return c
@property
def first_intersect(self):
"""Return the first unchecked intersection point in the polygon."""
for v in self.iter():
if v.intersect and not v.checked:
break
return v
@property
def points(self):
"""Return the polygon's points as a list of tuples (ordered coordinates pair)."""
p = []
for v in self.iter():
p.append((v.x, v.y))
return p
def unprocessed(self):
"""Check if any unchecked intersections remain in the polygon."""
for v in self.iter():
if v.intersect and not v.checked:
yield True
def union(self, clip):
return self.clip(clip, False, False)
def intersect(self, clip):
return self.clip(clip, True, True)
def difference(self, clip):
return self.clip(clip, False, True)
def clip(self, clip, s_entry, c_entry):
"""Clip this polygon using another one as a clipper.
This is where the algorithm is executed. It allows you to make
a UNION, INTERSECT or DIFFERENCE operation between two polygons.
Given two polygons A, B the following operations may be performed:
A|B ... A OR B (Union of A and B)
A&B ... A AND B (Intersection of A and B)
A\B ... A - B
B\A ... B - A
The entry records store the direction the algorithm should take when
it arrives at that entry point in an intersection. Depending on the
operation requested, the direction is set as follows for entry points
(f=forward, b=backward; exit points are always set to the opposite):
Entry
A B
-----
A|B b b
A&B f f
A\B b f
B\A f b
f = True, b = False when stored in the entry record
"""
# detect clip mode
unionmode = not s_entry and not c_entry
intersectionmode = s_entry and c_entry
differencemode = not s_entry and c_entry
# prep by removing repeat of startpoint at end
first = self.first
last = first.prev
if last.x == first.x and last.y == first.y:
first.prev = last.prev
last.prev.next = first
first = clip.first
last = first.prev
if last.x == first.x and last.y == first.y:
first.prev = last.prev
last.prev.next = first
# TODO: maybe also remove repeat points anywhere?
# ...
# phase one - find intersections
# ------------------------------
anyintersection = False
s_intsecs = []
c_intsecs = []
for s in self.iter(): # for each vertex Si of subject polygon do
for c in clip.iter(): # for each vertex Cj of clip polygon do
try:
#print "find isect %s - %s and %s - %s" %(s.xy, self.next(s.next).xy, c.xy, clip.next(c.next).xy )
i, alphaS, alphaC = intersect_or_on(s, self.next(s.next),
c, clip.next(c.next))
iS = Vertex(i, alphaS, intersect=True, entry=False)
iC = Vertex(i, alphaC, intersect=True, entry=False)
iS.neighbour = iC
iC.neighbour = iS
s_intsecs.append( (iS, alphaS, s, self.next(s.next)) )
c_intsecs.append( (iC, alphaC, c, clip.next(c.next)) )
anyintersection = True
except TypeError:
pass # this simply means intersect() returned None
# insert intersections into originals
for iS,a,s,s_next in reversed(s_intsecs):
if a == 0:
self.replace(s, iS)
elif a == 1:
self.replace(s_next, iS)
else:
self.insert(iS, s, s_next)
for iC,a,c,c_next in reversed(c_intsecs):
if a == 0:
self.replace(c, iC)
elif a == 1:
self.replace(c_next, iC)
else:
clip.insert(iC, c, c_next)
#print "testing if insert was done correctly"
for s in self.iter():
#print s
pass
#print "and"
for c in clip.iter():
#print c
pass
# phase one and a half - no intersections between subject and clip, so correctly return results
# --------------------
def specialcase_insidetest():
resultpolys = []
if unionmode: # union
if clip.first.isInside(self):
# clip polygon is entirely inside subject, so just return subject shell
clipped = Polygon()
for s in self.iter():
clipped.add(Vertex(s))
polytuple = (clipped, [])
resultpolys.append(polytuple)
elif self.first.isInside(clip):
# subject polygon is entirely inside clip, so just return clip shell
clipped = Polygon()
for c in clip.iter():
clipped.add(Vertex(c))
polytuple = (clipped, [])
resultpolys.append(polytuple)
else:
#clip polygon is entirely outside subject, so return both
clipped = Polygon()
for s in self.iter():
clipped.add(Vertex(s))
polytuple = (clipped, [])
resultpolys.append(polytuple)
clipped = Polygon()
for c in clip.iter():
clipped.add(Vertex(c))
polytuple = (clipped, [])
resultpolys.append(polytuple)
elif intersectionmode: # intersection
if clip.first.isInside(self):
# clip polygon is entirely inside subject, so the intersection is only the clip polygon
clipped = Polygon()
for c in clip.iter():
clipped.add(Vertex(c))
polytuple = (clipped, [])
resultpolys.append(polytuple)
elif self.first.isInside(clip):
# subject polygon is entirely inside clip, so the intersection is only the subject polygon
clipped = Polygon()
for s in self.iter():
clipped.add(Vertex(s))
polytuple = (clipped, [])
resultpolys.append(polytuple)
else:
#clip polygon is entirely outside subject, so no intersection to return
pass
elif differencemode: # difference
if clip.first.isInside(self):
# clip polygon is entirely inside subject, so the difference is subject with clip as a hole
clipped = Polygon()
for s in self.iter():
clipped.add(Vertex(s))
hole = Polygon()
for c in clip.iter():
hole.add(Vertex(c))
polytuple = (clipped, [hole])
resultpolys.append(polytuple)
elif self.first.isInside(clip):
# subject polygon is entirely inside clip, so there is no difference
pass
else:
#clip polygon is entirely outside subject, so difference is simply the subject
clipped = Polygon()
for s in self.iter():
clipped.add(Vertex(s))
polytuple = (clipped, [])
resultpolys.append(polytuple)
# no need to continue so just return result
return resultpolys
if not anyintersection:
return specialcase_insidetest()
# phase two - identify entry/exit points
# --------------------------------------
# From K&K
def mark_flags(poly, c, c_entry):
"c and c_entry are not actually the clip, can be for both s and c, just too lazy to change."
#print "intersection"
#print "\t",c
# intersection is degenerate, is the start/endpoint of a line
# so maybe delete intersection flag based on prev/next locations
prevloc = testLocation(c.prev, poly)
nextloc = testLocation(c.next, poly)
if prevloc == "on" or nextloc == "on":
prevmid = Vertex(((c.x+c.prev.x)/2.0,(c.y+c.prev.y)/2.0))
prevloc = testLocation(prevmid, poly)
nextmid = Vertex(((c.x+c.next.x)/2.0,(c.y+c.next.y)/2.0))
nextloc = testLocation(nextmid, poly)
if prevloc == "in" or nextloc == "in":
poly.anyinside = True
#print "\t %s -> degenintsec -> %s" %(prevloc,nextloc)
if prevloc == "out":
if nextloc == "out":
#just touching
c.entry = "en/ex" if c_entry else "ex/en"
elif nextloc == "in":
c.entry = "en" if c_entry else "ex"
elif nextloc == "on":
c.entry = "en" if c_entry else "ex"
elif prevloc == "in":
#union and difference should never go inside the other polygon
#so this should only happen for intersectmode...
if nextloc == "in":
#just touching
c.entry = "ex/en" if c_entry else "en/ex"
elif nextloc == "out":
c.entry = "ex" if c_entry else "en"
elif nextloc == "on":
c.entry = "ex" if c_entry else "en"
elif prevloc == "on":
if nextloc == "on":
c.entry = None
elif nextloc == "out":
c.entry = "ex" if c_entry else "en"
elif nextloc == "in":
c.entry = "en" if c_entry else "ex"
self.anyinside = False
# set clip
prevsingle = None
for c in clip.iter():
if c.intersect:
mark_flags(self, c, c_entry)
# set couple
if c.entry in ("ex","en"):
if prevsingle and c.entry == prevsingle.entry:
c.couple = prevsingle
prevsingle.couple = c
prevsingle = c
# set crosschange
# some modifications based on implementation in qt clipper source code
#if c.entry == "en/ex" == c.neighbour.entry or c.entry == "ex/en" == c.neighbour.entry:
if False: #c.entry == "en/ex" or c.entry == "ex/en":
print "Maybe crosschange..."
# tri1
#a,b,c = c.neighbour.prev, c.prev, c.neighbour.next
a,b,c = c.neighbour.next, c.prev, c.neighbour.prev
dir1 = 0.5 * (a.x * (b.y-c.y) +
b.x * (c.y-a.y) +
c.x * (a.y-b.y))
# tri2
#a,b,c = c.neighbour.prev, c.prev, c.next
a,b,c = c.next, c.prev, c.neighbour.prev
dir2 = 0.5 * (a.x * (b.y-c.y) +
b.x * (c.y-a.y) +
c.x * (a.y-b.y))
print dir1,dir2
#if dir1 < 0 != dir2 < 0: # different orientation
if (dir1 * dir2) < 0: # different orientation means at least one negative, making the results less than 0
print "CROSSCHANGE!!!"
c.cross_change = True
c.neighbour.cross_change = True # not sure if should set neighbour too
# maybe early abort
if not self.anyinside and intersectionmode:
return []
# what about perfect overlap???
# ...
if False: #DEBUG:
print "view clip entries"
for c in clip.iter():
print c, c.entry
# find first isect where both neighbours have valid flag
for c in clip.iter():
if c.entry:
s = c.neighbour
mark_flags(clip, s, s_entry)
if s.entry:
first_c = c
first_s = s
# print 777,s.entry
break
else:
return specialcase_insidetest()
#raise Exception("weird special case, no neighbours that both have flag left")
# autoset subj, if neighbour of first is different, then set all as opposite
# TODO: how deal with s_entry in case of different modes...?
print "view first"
print first_c, first_c.entry
print first_s, first_s.entry
if first_c.entry != first_s.entry: # and s_entry: # this is the behaviour for standard intersect mode, otherwise flip, hence the s_entry
for c in clip.iter():
if c.entry:
if c.entry == "en": c.neighbour.entry = "ex"
elif c.entry == "ex": c.neighbour.entry = "en"
elif c.entry == "en/ex": c.neighbour.entry = "ex/en"
elif c.entry == "ex/en": c.neighbour.entry = "en/ex"
# else set all same
else:
for c in clip.iter():
if c.entry:
c.neighbour.entry = c.entry
# set couple for subj (not sure if needed)
prevsingle = None
for s in self.iter():
if s.entry:
if s.entry in ("ex","en"):
if prevsingle and s.entry == prevsingle.entry:
s.couple = prevsingle
prevsingle.couple = s
prevsingle = s
if False: #DEBUG:
print "view subj entries"
for s in self.iter():
print s, s.entry
# phase three - construct a list of clipped polygons
# --------------------------------------------------
######
# Defs
def next_unprocessed(vert):
origvert = vert
while vert:
if vert.entry and not (vert.checked or vert.neighbour.checked):
#print "vert, found next unproc", vert, vert.checked, vert.neighbour.checked
if vert.couple:
# rule 1
if vert.couple.entry and vert.entry:
# rule 2
if vert.couple.entry == "en" and vert.entry == "en":
return vert.couple
elif vert.couple.entry == "ex" and vert.entry == "ex":
return vert
# rule 3
else:
return vert
vert = vert.next
if vert == origvert:
# if returned to first, return None
return None
def DeleteFlag1(cur, stat):
if cur.entry == "en/ex":
cur.entry = None
if cur.cross_change:
if stat == "D3":
return "D3"
else:
return "D4"
if stat == "D3":
return "D4"
else:
return "D3"
if cur.entry == "ex/en":
if stat == "D3":
cur.entry = "en"
return "D2"
else:
cur.entry = "ex"
return "D1"
if cur.entry == "en":
cur.entry = None
return "D1"
if cur.entry == "ex":
cur.entry = None
return "D2"
def DeleteFlag2(cur, prev, stat):
if cur.entry == "en/ex":
if stat == "D1":
cur.entry = "ex"
else:
cur.entry = "en"
if cur.cross_change:
if stat == "D1":
return "D4"
else:
return "D3"
if stat == "D1":
return "D3"
else:
return "D4"
if cur.entry == "ex/en":
if stat == "D1":
cur.entry = "en"
else:
cur.entry = "ex"
if cur.cross_change:
if stat == "D1":
return "D4"
else:
return "D3"
if stat == "D1":
return "D3"
else:
return "D4"
if cur.entry == "en":
cur.entry = None
if stat == "D1" and cur.couple and prev.couple == cur:
return "D1"
if stat == "D1":
return "D3"
else:
return "D4"
if cur.entry == "ex":
cur.entry = None
if stat != "D1" and cur.couple and prev.couple == cur:
return "D2"
else:
if stat == "D1":
return "D3"
else:
return "D4"
def proceed(cur, stat):
cur.checked = True
if stat == "D1":
clipped.add(Vertex(cur))
return cur.next
elif stat == "D2":
clipped.add(Vertex(cur))
return cur.prev
else:
return cur.neighbour
####
resultpolys = []
self.first.checked = True
cur = prev = start = next_unprocessed(self.first)
while cur:
# each new polygon
print "new poly"
stat = DeleteFlag1(cur, "D3")
if DEBUG: print "v", cur, cur.entry, stat
clipped = Polygon()
cur = proceed(cur, stat)
# collect vertexes
while cur != start:
if DEBUG: print "v", cur, cur.entry, stat
if cur.entry:
if stat == "D1" or stat == "D2":
stat = DeleteFlag2(cur, prev, stat)
else:
stat = DeleteFlag1(cur, stat)
prev = cur
cur = proceed(cur, stat)
# return to first vertex
clipped.add(Vertex(clipped.first))
print clipped
resultpolys.append((clipped,[]))
cur = prev = start = next_unprocessed(self.first)
# finally, sort into exteriors and holes
for pindex,(polyext,polyholes) in enumerate(resultpolys):
for otherext,otherholes in resultpolys:
if polyext == otherext:
continue # don't compare to self
if polyext.first.isInside(otherext):
otherholes.append(polyext) #poly is within other so make into a hole
del resultpolys[pindex] #and delete poly from being an independent poly
return resultpolys
def __repr__(self):
"""String representation of the polygon for debugging purposes."""
count, out = 1, "\n"
for s in self.iter():
out += "%02d: %s\n" % (count, str(s))
count += 1
return out
def iter(self):
"""Iterator generator for this doubly linked list."""
s = self.first
while True:
yield s
s = s.next
if s == self.first:
return
def intersect_or_on(s1, s2, c1, c2):
"""Same as intersect(), except returns
intersection even if degenerate.
"""
den = float( (c2.y - c1.y) * (s2.x - s1.x) - (c2.x - c1.x) * (s2.y - s1.y) )
if not den:
return None
us = ((c2.x - c1.x) * (s1.y - c1.y) - (c2.y - c1.y) * (s1.x - c1.x)) / den
uc = ((s2.x - s1.x) * (s1.y - c1.y) - (s2.y - s1.y) * (s1.x - c1.x)) / den
if (0 <= us <= 1) and (0 <= uc <= 1):
#subj and clip line intersect eachother somewhere in the middle
#this includes the possibility of degenerates (edge intersections)
x = s1.x + us * (s2.x - s1.x)
y = s1.y + us * (s2.y - s1.y)
return (x, y), us, uc
else:
return None
def testLocation(point, polygon):
"""
Effective scanline test for the location of a point vis a vis a polygon.
Returns either "in","on",or "out".
Based on algorithm 7 from:
Kai Hormann and Alexander Agathos,
"The point in polygon problem for arbitrary polygons".
Computational Geometry: Theory and Applications,
Volume 20 Issue 3, November 2001
"""
# begin
if polygon.first.y == point.y and polygon.first.x == point.x:
return "on" # vertex
w = 0
for v in polygon.iter():
if v.next.y == point.y:
if v.next.x == point.x:
return "on" # vertex
else:
if v.y == point.y and (v.next.x > point.x) == (v.x < point.x):
return "on" # edge
# if crossing horizontal line
if (v.y < point.y and v.next.y >= point.y)\
or (v.y >= point.y and v.next.y < point.y):
if v.x >= point.x:
if v.next.x > point.x:
# modify w
if v.next.y > v.y: w += 1
else: w -= 1
else:
det = (v.x - point.x) * (v.next.y - point.y) \
- (v.next.x - point.x) * (v.y - point.y)
if det == 0: return "on" # edge
# if right crossing
if (det > 0 and v.next.y > v.y)\
or (det < 0 and v.next.y < v.y):
# modify w
if v.next.y > v.y: w += 1
else: w -= 1
else:
if v.next.x > point.x:
det = (v.x - point.x) * (v.next.y - point.y) \
- (v.next.x - point.x) * (v.y - point.y)
if det == 0: return "on" # edge
# if right crossing
if (det > 0 and v.next.y > v.y)\
or (det < 0 and v.next.y < v.y):
# modify w
if v.next.y > v.y: w += 1
else: w -= 1
if (w % 2) != 0:
return "in"
else:
return "out"
def clip_polygon(subject, clipper, operation = 'difference'):
"""
Higher level function for clipping two polygons (from a list of points).
Since input polygons are lists of points, output is also in list format.
Each polygon in the resultlist is a tuple of: (polygon exterior, list of polygon holes)
"""
Subject = Polygon()
Clipper = Polygon()
for s in subject:
Subject.add(Vertex(s))
for c in clipper:
Clipper.add(Vertex(c))
clipped = Clipper.difference(Subject)\
if operation == 'reversed-diff'\
else Subject.__getattribute__(operation)(Clipper)
clipped = [(ext.points,[hole.points for hole in holes]) for ext,holes in clipped]
return clipped
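# Usage sketch (added illustration, using only this module): each element of the
# result is an (exterior, holes) pair of point lists.
#
#   subject = [(0, 0), (6, 0), (6, 6), (0, 6), (0, 0)]
#   clipper = [(4, 4), (10, 4), (10, 10), (4, 10), (4, 4)]
#   for exterior, holes in clip_polygon(subject, clipper, 'intersect'):
#       print exterior, holes   # Python 2 print, as used elsewhere in this file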
if __name__ == "__main__":
"""
Test and visualize various polygon overlap scenarios.
Visualization requires the pure-Python PyDraw library from
https://github.com/karimbahgat/PyDraw
"""
subjpoly = [(0,0),(6,0),(6,6),(0,6),(0,0)]
# normal intersections
testpolys_normal = {"simple overlap":
[(4,4),(10,4),(10,10),(4,10),(4,4)],
"jigzaw overlap":
[(1,4),(3,8),(5,4),(5,10),(1,10),(1,4)],
## "smaller, outside":
## [(7,7),(7,9),(9,9),(9,7),(7,7)],
## "smaller, inside":
## [(2,2),(2,4),(4,4),(4,2),(2,2)],
## "larger, covering all":
## [(-1,-1),(-1,7),(7,7),(7,-1),(-1,-1)],
## "larger, outside":
## [(-10,-10),(-10,-70),(-70,-70),(-70,-10),(-10,-10)]
}
# degenerate intersections
testpolys_degens = {"degenerate, starts on edge intersection and goes inside":
[(0,5),(6,4),(10,4),(10,10),(4,10),(0,5)],
## "degenerate, starts on edge intersection and goes outside":
## [(5,6),(5.2,5.5),(5,5.4),(4.8,5.5)],
"degenerate, hesitating to enter and exit":
[(1,5),(6,4),(6,5),(10,4),(10,10),(4,10),(2,6),(1,6),(1,5)],
"degenerate, also multiple degens along shared line":
[(1,5),(6,4),(6,5),(10,4),(10,10),(4,10),(2,6),(1.3,6),(1.6,6),(1,6),(1,5)],
"degenerate, back and forth on-out along shared line":
[(1,5),(6,4),(6,5),(10,4),(10,10),(4,10),(2,6),(1.5,5.7),(1,6),(0,6),(1,5)]
}
# nextto/almost copy special cases
testpolys_nextto_almostsame = {"degenerate, perfect overlap":
[(0,0),(6,0),(6,6),(0,6),(0,0)],
"degenerate, partial inside overlap":
[(1,0),(6,0),(6,6),(1,6),(1,0)],
"degenerate, right next to eachother":
[(0,6),(6,6),(6,10),(0,10),(0,6)],
"degenerate, partial right next to eachother":
[(2,6),(6,6),(6,10),(2,10),(2,6)]
}
#run operation
import os
import time
import pydraw
DEBUG = False
# test geo
## def test_draw(testname, subjpoly, clippoly, mode):
## t = time.time()
## #print testname, mode
## resultpolys = clip_polygon(subjpoly,clippoly,mode)
## print "finished:",len(resultpolys),time.time()-t
## print "start",str(resultpolys)[:100]
## print "end",str(resultpolys)[-100:]
## crs = pydraw.CoordinateSystem([0,80,45,50])
## img = pydraw.Image(300,300, crs=crs)
## img.drawpolygon(subjpoly, fillcolor=(222,0,0,111))
## img.drawpolygon(clippoly, fillcolor=(0,222,0,111))
## for ext,holes in resultpolys:
## img.drawpolygon(ext,holes)
## img.drawgridticks(10,10)
## img.save("test_output/"+testname+"-"+mode+".png")
##
## import pygeoj
## world = pygeoj.load("cshapes.geo.json")
## norw = next(cntr.geometry.coordinates[0][0] for cntr in world if cntr.properties["CNTRY_NAME"] == "Norway")
## swed = next(cntr.geometry.coordinates[0][0] for cntr in world if cntr.properties["CNTRY_NAME"] == "Sweden")
## test_draw("norway-sweden", norw, swed, "difference")
##
## breakonpurpose
# test basics
def test_draw(testname, subjpoly, clippoly, mode):
t = time.time()
#print testname, mode
resultpolys = clip_polygon(subjpoly,clippoly,mode)
#print "finished:",resultpolys,time.time()-t
crs = pydraw.CoordinateSystem([-1,-1,11,11])
img = pydraw.Image(300,300, crs=crs)
img.drawpolygon(subjpoly, fillcolor=(222,0,0))
img.drawpolygon(clippoly, fillcolor=(0,222,0))
for ext,holes in resultpolys:
img.drawpolygon(ext,holes)
img.drawgridticks(1,1)
img.save("test_output/"+testname+"-"+mode+".png")
if not os.path.lexists("test_output"): os.mkdir("test_output")
for testname,testclip in testpolys_normal.items():
print testname
for mode in ("intersect","union","difference"):
print mode
test_draw(testname, subjpoly, testclip, mode)
for testname,testclip in testpolys_degens.items():
print testname
for mode in ("intersect","union","difference"):
print mode
test_draw(testname, subjpoly, testclip, mode)
for testname,testclip in testpolys_nextto_almostsame.items():
print testname
for mode in ("intersect","union","difference"):
print mode
test_draw(testname, subjpoly, testclip, mode)
| gpl-3.0 | 691,236,750,043,469,000 | 37.399792 | 191 | 0.495308 | false |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/PyKDE4/kdecore/KEncodingDetector.py | 1 | 2735 | # encoding: utf-8
# module PyKDE4.kdecore
# from /usr/lib/python2.7/dist-packages/PyKDE4/kdecore.so
# by generator 1.135
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtNetwork as __PyQt4_QtNetwork
class KEncodingDetector(): # skipped bases: <type 'sip.wrapper'>
# no doc
def analyze(self, *args, **kwargs): # real signature unknown
pass
def autoDetectLanguage(self, *args, **kwargs): # real signature unknown
pass
def decode(self, *args, **kwargs): # real signature unknown
pass
def decodedInvalidCharacters(self, *args, **kwargs): # real signature unknown
pass
def decoder(self, *args, **kwargs): # real signature unknown
pass
def decodeWithBuffering(self, *args, **kwargs): # real signature unknown
pass
def encoding(self, *args, **kwargs): # real signature unknown
pass
def encodingChoiceSource(self, *args, **kwargs): # real signature unknown
pass
def errorsIfUtf8(self, *args, **kwargs): # real signature unknown
pass
def flush(self, *args, **kwargs): # real signature unknown
pass
def hasAutoDetectionForScript(self, *args, **kwargs): # real signature unknown
pass
def nameForScript(self, *args, **kwargs): # real signature unknown
pass
def processNull(self, *args, **kwargs): # real signature unknown
pass
def resetDecoder(self, *args, **kwargs): # real signature unknown
pass
def scriptForName(self, *args, **kwargs): # real signature unknown
pass
def setAutoDetectLanguage(self, *args, **kwargs): # real signature unknown
pass
def setEncoding(self, *args, **kwargs): # real signature unknown
pass
def visuallyOrdered(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
Arabic = 2
AutoDetectedEncoding = 1
AutoDetectScript = None # (!) real value is ''
Baltic = 3
BOM = 2
CentralEuropean = 4
ChineseSimplified = 5
ChineseTraditional = 6
Cyrillic = 7
DefaultEncoding = 0
EncodingChoiceSource = None # (!) real value is ''
EncodingFromHTTPHeader = 5
EncodingFromMetaTag = 4
EncodingFromXMLHeader = 3
Greek = 8
Hebrew = 9
Japanese = 10
Korean = 11
None = 0
NorthernSaami = 12
SemiautomaticDetection = 1
SouthEasternEurope = 13
Thai = 14
Turkish = 15
Unicode = 16
UserChosenEncoding = 6
WesternEuropean = 17
| gpl-2.0 | 1,633,415,797,107,800,600 | 25.553398 | 101 | 0.643876 | false |
tamland/wimpy | docs/conf.py | 1 | 7997 | # -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this docs directory.
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'tidalapi'
copyright = u'2014, Thomas Amland'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = '0.5.0'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'pyramid'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'tidalapidoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
#latex_documents = []
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
#man_pages = []
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
#texinfo_documents = []
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| lgpl-3.0 | 3,739,961,892,431,767,000 | 30.116732 | 76 | 0.709391 | false |
mph-/lcapy | lcapy/nexpr.py | 1 | 7914 | """This module provides the DiscreteTimeDomainExpression class to
represent discrete-time expressions.
Copyright 2020--2021 Michael Hayes, UCECE
"""
from __future__ import division
from .domains import DiscreteTimeDomain
from .sequence import Sequence
from .functions import exp
from .sym import j, oo, pi, fsym, oo
from .dsym import nsym, ksym, zsym, dt
from .ztransform import ztransform
from .dft import DFT
from .seqexpr import SequenceExpression
from .nseq import DiscreteTimeDomainSequence, nseq
from sympy import Sum, summation, limit, DiracDelta
__all__ = ('nexpr', )
class DiscreteTimeDomainExpression(DiscreteTimeDomain, SequenceExpression):
"""Discrete-time expression or symbol."""
var = nsym
seqcls = DiscreteTimeDomainSequence
def __init__(self, val, **assumptions):
check = assumptions.pop('check', True)
if 'integer' not in assumptions:
assumptions['real'] = True
super(DiscreteTimeDomainExpression, self).__init__(val, **assumptions)
expr = self.expr
if check and expr.has(zsym) and not expr.has(Sum):
raise ValueError(
'n-domain expression %s cannot depend on z' % expr)
if check and expr.has(ksym) and not expr.has(Sum):
raise ValueError(
'n-domain expression %s cannot depend on k' % expr)
def _mul_compatible_domains(self, x):
if self.domain == x.domain:
return True
return x.is_constant_domain
def _div_compatible_domains(self, x):
if self.domain == x.domain:
return True
return x.is_constant_domain
def as_expr(self):
return DiscreteTimeDomainExpression(self)
def differentiate(self):
"""First order difference."""
result = (self.expr - self.subs(n - 1).expr) / dt
return self.__class__(result, **self.assumptions)
def integrate(self):
"""First order integration."""
from .sym import symsymbol
from .utils import factor_const
from .extrafunctions import UnitImpulse
from .functions import u
# TODO, get SymPy to optimize this case.
expr = self.expr
const, expr = factor_const(expr, nsym)
if expr.is_Function and expr.func == UnitImpulse:
return dt * u(expr.args[0]) * const
msym = symsymbol('m', integer=True)
result = dt * summation(self.subs(msym).expr, (msym, -oo, nsym))
return self.__class__(result, **self.assumptions)
def ztransform(self, evaluate=True, **assumptions):
"""Determine one-sided z-transform."""
assumptions = self.assumptions.merge_and_infer(self, **assumptions)
result = ztransform(self.expr, self.var, zsym, evaluate)
return self.change(result, domain='Z', **assumptions)
def ZT(self, **assumptions):
return self.ztransform(**assumptions)
def plot(self, ni=None, **kwargs):
"""Plot the sequence. If `ni` is not specified, it defaults to the
range (-20, 20). `ni` can be a vector of specified sequence
indices, a tuple specifing the range, or a constant specifying
the maximum value with the minimum value set to 0.
kwargs include:
axes - the plot axes to use otherwise a new figure is created
xlabel - the x-axis label
ylabel - the y-axis label
xscale - the x-axis scaling, say for plotting as ms
yscale - the y-axis scaling, say for plotting mV
in addition to those supported by the matplotlib plot command.
The plot axes are returned.
"""
if ni is None:
ni = (-20, 20)
from .plot import plot_sequence
return plot_sequence(self, ni, **kwargs)
def initial_value(self):
"""Determine value at n = 0."""
return self.subs(0)
def final_value(self):
"""Determine value at n = oo."""
return self.__class__(limit(self.expr, self.var, oo))
def DFT(self, N=None, evaluate=True):
if N is None:
from .sym import symsymbol
N = symsymbol('N', integer=True, positive=True)
result = DFT(self.expr, nsym, ksym, N, evaluate=evaluate)
return self.change(result, domain='discrete fourier')
def delay(self, m):
"""Delay signal by m samples."""
return self.subs(n - m)
def extent(self, n1=-100, n2=100):
"""Determine extent of the signal.
For example, nexpr([1, 1]).extent() = 2
nexpr([1, 0, 1]).extent() = 3
nexpr([0, 1, 0, 1]).extent() = 3
This performs a search between n=n1 and n=n2."""
return self.seq((n1, n2)).extent()
def discrete_time_fourier_transform(self, var=None,
images=oo, **assumptions):
"""Convert to Fourier domain using discrete time Fourier transform.
Use `images = 0` to avoid the infinite number of spectral images.
"""
return self.DTFT(var, images, **assumptions)
def DTFT(self, var=None, images=oo, **assumptions):
"""Convert to Fourier domain using discrete time Fourier transform.
By default this returns the DTFT in terms of `f`. Use
`.DTFT(w)` to get the angular frequency form, `.DTFT(F)` to
get the normalised frequency form, or `.DTFT(W)` to get the
normalised angular frequency form.
Use `images = 0` to avoid the infinite number of spectral images.
"""
from .extrafunctions import UnitStep
from .symbols import f, omega, Omega, F
from .fexpr import fexpr
from .dtft import DTFT
if var is None:
var = f
if id(var) not in (id(f), id(F), id(omega), id(Omega)):
raise ValueError('DTFT requires var to be f, F, omega, or Omega, not %s' % var)
dtft = DTFT(self.expr, self.var, fsym, images=images)
result = fexpr(dtft)(var)
result = result.simplify_dirac_delta()
result = result.simplify_heaviside()
result = result.simplify_rect()
# There is a bug in SymPy when simplifying Sum('X(n - m)', (m, -oo, oo))
# result = result.simplify()
result = result.cancel_terms()
return result
def norm_angular_fourier(self, **assumptions):
from .normomegaexpr import Omega
return self.DTFT()(Omega)
def difference_equation(self, inputsym='x', outputsym='y', form='iir'):
"""Create difference equation from impulse response.
`form` can be 'fir' or 'iir' ('direct form I').
"""
H = self.ZT()
return H.difference_equation(inputsym, outputsym, form)
def remove_condition(self):
"""Remove the piecewise condition from the expression."""
if not self.is_conditional:
return self
expr = self.expr
expr = expr.args[0].args[0]
return self.__class__(expr)
def nexpr(arg, **assumptions):
"""Create nExpr object. If `arg` is nsym return n"""
from .expr import Expr
from .seq import seq
if arg is nsym:
return n
if isinstance(arg, Expr):
if assumptions == {}:
return arg
return arg.__class__(arg, **assumptions)
if isinstance(arg, str) and arg.startswith('{'):
return nseq(arg)
from numpy import ndarray
if isinstance(arg, (list, ndarray)):
return DiscreteTimeDomainSequence(arg, var=n).as_impulses()
return DiscreteTimeDomainExpression(arg, **assumptions)
from .expressionclasses import expressionclasses
expressionclasses.register('discrete time', DiscreteTimeDomainExpression)
n = DiscreteTimeDomainExpression('n', integer=True)
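# Minimal usage sketch (added illustration; relies only on names defined above):
#
#   x = 2 * n         # a DiscreteTimeDomainExpression in the integer symbol n
#   X = x.ZT()        # one-sided z-transform, giving a Z-domain expression
#   x.seq((0, 4))     # the corresponding sequence evaluated for n = 0..4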
| lgpl-2.1 | 6,243,116,347,179,922,000 | 30.15748 | 92 | 0.604372 | false |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_09_01/aio/operations/_default_security_rules_operations.py | 1 | 8844 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DefaultSecurityRulesOperations:
"""DefaultSecurityRulesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
network_security_group_name: str,
**kwargs
) -> AsyncIterable["_models.SecurityRuleListResult"]:
"""Gets all default security rules in a network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SecurityRuleListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_09_01.models.SecurityRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('SecurityRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/defaultSecurityRules'} # type: ignore
async def get(
self,
resource_group_name: str,
network_security_group_name: str,
default_security_rule_name: str,
**kwargs
) -> "_models.SecurityRule":
"""Get the specified default network security rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param default_security_rule_name: The name of the default security rule.
:type default_security_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SecurityRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_09_01.models.SecurityRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'defaultSecurityRuleName': self._serialize.url("default_security_rule_name", default_security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SecurityRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/defaultSecurityRules/{defaultSecurityRuleName}'} # type: ignore
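# Usage sketch (added illustration; assumes the usual async NetworkManagementClient
# wiring, which lives outside this file):
#
#   async for rule in client.default_security_rules.list(
#           resource_group_name="rg", network_security_group_name="nsg"):
#       print(rule.name)
#
#   rule = await client.default_security_rules.get("rg", "nsg", "AllowVnetInBound")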
| mit | 489,527,905,690,203,970 | 48.685393 | 236 | 0.655133 | false |
idiap/rgbd | Processing/Processor.py | 1 | 3672 | """
Copyright (c) 2014 Idiap Research Institute, http://www.idiap.ch/
Written by Kenneth Funes <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
"""
from rgbd.Streaming.RGBDStreamer import RGBDStreamer
from rgbd.Rendering.RGBDViewer import RGBDViewer
from PySide import QtCore, QtGui
import sys
class Processor(QtCore.QObject):
"""
Class to handle a stream of RGB-D data and to display it
"""
newFrameSignal = QtCore.Signal()
def __init__(self, rendering=True):
super(Processor, self).__init__()
self.connected=False
self.viewer=None
self.streamer=None
self.frame_callback=None
self.rendering = rendering
self.count = 0
self.app = None
def __del__(self):
self.stop()
def createGUI(self):
# Creates the widget where to visualize the RGB-D data
self.viewer = RGBDViewer(self.app, render3D=self.rendering, close_callback=self.stop)
self.viewer.show()
self.connect(self.viewer.pauseButton, QtCore.SIGNAL("clicked()"), self.pause)
self.newFrameSignal.connect(self.processFrame)
def pause(self):
"""
Toggle the pause status
"""
self.streamer.pause(not self.streamer.paused)
def run(self, source=0, calibrationFile=None, frame_callback =None):
# Sets the function to be called each time a new frame data is available
self.frame_callback=frame_callback
# Creates the RGB-D data streaming class
self.streamer=RGBDStreamer(frame_callback=self.newFrameSignal.emit, connection_callback=self.connectionUpdate, calibrate = True) # frame_callback=self.newFrame
self.streamer.connect(source, calibrationFile)
# Now create the Qt Application (basically for the Qt events loop)
self.app = QtGui.QApplication(sys.argv)
# Creates the necessary GUI
self.createGUI()
# Puts the streamer to run freely
self.streamer.pause(False)
# Runs the Qt Loop
sys.exit(self.app.exec_())
def stop(self):
"""
Stops the process of data generation
"""
if self.streamer is not None:
self.streamer.pause(True)
self.streamer.disconnect()
self.streamer.kill()
self.streamer=None
def connectionUpdate(self, state=False):
self.connected = state
def processFrame(self):
"""
This function is called within the Qt event loop, in response to the newFrameSignal activation.
"""
if self.streamer is None:
return
data = self.streamer.popFrame()
if data is not None:
self.frame, self.frameIndex = data
self.newFrameAvailable = False
if self.frame is not None:
if self.frame_callback is not None:
self.frame_callback(self.frame, self.frameIndex)
self.viewer.setNewData(self.frame, self.frameIndex)
else:
self.frameMesh, self.state = None, None
| lgpl-3.0 | 6,560,452,943,276,382,000 | 35 | 168 | 0.659858 | false |
jhartnett/ipCounter | src/ipCounter.py | 1 | 1779 | #!/bin/python
#/**************************************************************************
#* File: ipCounter.py
#*
#* This is a basic program to count the total number of IPs
#* in a given range. Input is a txt formatted file similar
#* to the sample provided
#*
#* This updated version uses Python to make it more agnostic.
#* Author: Hon1nbo
#***************************************************************************/
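# Example input file (added illustration; any file name works), one entry per
# line, using either CIDR notation or a dash-separated range:
#
#   192.168.1.0/24
#   10.0.0.0-10.0.0.255
#
# Usage: python ipCounter.py ipList.txt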
import math
import sys
fileName = sys.argv[1]
ipCount = 0
ipList = ""
# Open the file & read contents in
with open(fileName) as ipListFile:
ipList = ipListFile.read()
tmpTuple = ('not','\n','null')
tmpTupleSmall = ('not',' ','null')
tmpCIDR = 0
tmpTuple = ipList.split("\n")
entriesCount = len(tmpTuple)
x = 0
while (entriesCount - x) != 0:
tmpTupleSmall = tmpTuple[x].partition("/")
if tmpTupleSmall[2] != "":
tmpCount = math.pow(2, (32-int(tmpTupleSmall[2])))
print(tmpTuple[x],": ",int(tmpCount))
ipCount += tmpCount
else:
tmpTupleSmall = tmpTuple[x].partition("-");
if tmpTupleSmall[1] == "-":
startIP = tmpTupleSmall[0].split(".") # start IP
endIP = tmpTupleSmall[2].split(".") # end IP
tmpCount = 0
for octet in range (0,4):
# Compare each octet one by one based on iteration
difference = int(endIP[octet]) - int(startIP[octet])
# account for the inclusion of smaller number
# ex) 192.168.1.0-192.168.1.255
if difference != 0:
difference += 1
# 256 addresses in each octet, raise to power to do maths
tmpCount += (difference * pow(256, (3 - octet)))
print(tmpTuple[x],": ",int(tmpCount))
ipCount += tmpCount
else:
print(tmpTuple[x],": 1")
ipCount += 1
x += 1
print('iterated through ',int(x),' entries')
print('Total IPs Counted: ',int(ipCount))
| apache-2.0 | 33,192,525,953,773,740 | 23.708333 | 77 | 0.588533 | false |
dunkhong/grr | grr/test_lib/db_test_lib.py | 1 | 2046 | #!/usr/bin/env python
"""Test utilities for RELDB-related testing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import functools
import sys
import mock
from grr_response_core.lib.util import compatibility
from grr_response_server import data_store
from grr_response_server.databases import db as abstract_db
from grr_response_server.databases import db_test_mixin
from grr_response_server.databases import mem
from grr_response_server.databases import mysql_test
def TestDatabases(mysql=True):
"""Decorator that creates additional RELDB-enabled test classes."""
def _TestDatabasesDecorator(cls):
"""Decorator that creates additional RELDB-enabled test classes."""
module = sys.modules[cls.__module__]
cls_name = compatibility.GetName(cls)
# Prevent MRO issues caused by inheriting the same Mixin multiple times.
base_classes = ()
if not issubclass(cls, db_test_mixin.GlobalDatabaseTestMixin):
base_classes += (db_test_mixin.GlobalDatabaseTestMixin,)
if mysql:
db_test_cls_name = "{}_MySQLEnabled".format(cls_name)
db_test_cls = compatibility.MakeType(
name=db_test_cls_name,
base_classes=base_classes +
(mysql_test.MySQLDatabaseProviderMixin, cls),
namespace={})
setattr(module, db_test_cls_name, db_test_cls)
return cls
return _TestDatabasesDecorator
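# Usage sketch (added illustration; the test base class is hypothetical):
#
#   @TestDatabases(mysql=True)
#   class MyStorageTest(SomeDatabaseTestBase):
#     ...
#
# This registers an additional "MyStorageTest_MySQLEnabled" class in the
# defining module, mixed with the MySQL test-database provider.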
def WithDatabase(func):
"""A decorator for database-dependent test methods.
This decorator is intended for tests that need to access the database in their
code. It will also augment the test function signature so that the database
object is provided and can be manipulated.
Args:
func: A test method to be decorated.
Returns:
A database-aware function.
"""
@functools.wraps(func)
def Wrapper(*args, **kwargs):
db = abstract_db.DatabaseValidationWrapper(mem.InMemoryDB())
with mock.patch.object(data_store, "REL_DB", db):
func(*(args + (db,)), **kwargs)
return Wrapper
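# Usage sketch (added illustration; the surrounding test class is hypothetical):
#
#   class MyTest(SomeTestCase):
#
#     @WithDatabase
#     def testSomething(self, db):
#       # db is the in-memory database patched in as data_store.REL_DB
#       self.assertIsNotNone(db)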
| apache-2.0 | 5,616,811,757,687,994,000 | 29.537313 | 77 | 0.721896 | false |
fja05680/pinkfish | examples/310.cryptocurrencies/strategy.py | 1 | 6833 | """
The SMA-ROC-portfolio strategy.
This is SMA-ROC strategy applied to a portfolio.
SMA-ROC is a rate of change calculation smoothed by
a moving average.
This module allows us to examine this strategy and try different
period, stop loss percent, margin, and whether to use a regime filter
or not. We split up the total capital between the symbols in the
portfolio and allocate based on either equal weight or volatility
parity weight (inverse volatility).
"""
import datetime
import matplotlib.pyplot as plt
import pandas as pd
from talib.abstract import *
import pinkfish as pf
# A custom indicator to use in this strategy.
def SMA_ROC(ts, mom_lookback=1, sma_timeperiod=20, price='close'):
""" Returns a series which is an SMA with of a daily MOM. """
mom = pf.MOMENTUM(ts, lookback=mom_lookback, time_frame='daily', price=price)
sma_mom = SMA(mom, timeperiod=sma_timeperiod)
return sma_mom
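# Illustrative call (added note) mirroring how this indicator is attached to a
# timeseries later in this module:
#
#   ts['sma_roc'] = SMA_ROC(ts, mom_lookback=1, sma_timeperiod=20, price='close')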
default_options = {
'use_adj' : False,
'use_cache' : True,
'stock_market_calendar' : False,
'stop_loss_pct' : 1.0,
'margin' : 1,
'lookback' : 1,
'sma_timeperiod': 20,
'sma_pct_band': 0,
'use_regime_filter' : True,
'use_vola_weight' : False
}
class Strategy:
def __init__(self, symbols, capital, start, end, options=default_options):
self.symbols = symbols
self.capital = capital
self.start = start
self.end = end
self.options = options.copy()
self.ts = None
self.rlog = None
self.tlog = None
self.dbal = None
self.stats = None
def _algo(self):
pf.TradeLog.cash = self.capital
pf.TradeLog.margin = self.options['margin']
# Create a stop_loss dict for each symbol.
stop_loss = {symbol:0 for symbol in self.portfolio.symbols}
# stop loss pct should range between 0 and 1, user may have
# expressed this as a percentage 0-100
if self.options['stop_loss_pct'] > 1:
self.options['stop_loss_pct'] /= 100
upper_band = self.options['sma_pct_band']/1000
lower_band = -self.options['sma_pct_band']/1000
# Loop though timeseries.
for i, row in enumerate(self.ts.itertuples()):
date = row.Index.to_pydatetime()
end_flag = pf.is_last_row(self.ts, i)
# Get the prices for this row, put in dict p.
p = self.portfolio.get_prices(row,
fields=['close', 'regime', 'sma_roc', 'vola'])
# Sum the inverse volatility for each row.
inverse_vola_sum = 0
for symbol in self.portfolio.symbols:
inverse_vola_sum += 1 / p[symbol]['vola']
# Loop though each symbol in portfolio.
for symbol in self.portfolio.symbols:
# Use variables to make code cleaner.
close = p[symbol]['close']
regime = p[symbol]['regime']
sma_roc = p[symbol]['sma_roc']
inverse_vola = 1 / p[symbol]['vola']
# Sell Logic
# First we check if an existing position in symbol should be sold
# - sell if sma_roc falls below the lower band
# - sell if price closes below stop loss
# - sell if end of data, by adjusting the position percent to zero
if symbol in self.portfolio.positions:
if sma_roc < lower_band or close < stop_loss[symbol] or end_flag:
if close < stop_loss[symbol]: print('STOP LOSS!!!')
self.portfolio.adjust_percent(date, close, 0, symbol, row)
# Buy Logic
# First we check to see if there is an existing position, if so do nothing
# - Buy if (regime > 0 or not use_regime_filter) and sma_roc > 0
else:
if (regime > 0 or not self.options['use_regime_filter']) and sma_roc > upper_band:
# Use volatility weight.
if self.options['use_vola_weight']:
weight = inverse_vola / inverse_vola_sum
# Use equal weight.
else:
weight = 1 / len(self.portfolio.symbols)
self.portfolio.adjust_percent(date, close, weight, symbol, row)
# Set stop loss
stop_loss[symbol] = (1-self.options['stop_loss_pct'])*close
# record daily balance
self.portfolio.record_daily_balance(date, row)
def run(self):
self.portfolio = pf.Portfolio()
self.ts = self.portfolio.fetch_timeseries(self.symbols, self.start, self.end,
fields=['close'], use_cache=self.options['use_cache'],
use_adj=self.options['use_adj'],
dir_name='cryptocurrencies',
stock_market_calendar=self.options['stock_market_calendar'])
# Add technical indicator: 200 sma regime filter for each symbol.
def _crossover(ts, ta_param, input_column):
return pf.CROSSOVER(ts, timeperiod_fast=1, timeperiod_slow=200,
price=input_column, prevday=False)
self.ts = self.portfolio.add_technical_indicator(
self.ts, ta_func=_crossover, ta_param=None,
output_column_suffix='regime', input_column_suffix='close')
# Add technical indicator: volatility.
def _volatility(ts, ta_param, input_column):
return pf.VOLATILITY(ts, price=input_column)
self.ts = self.portfolio.add_technical_indicator(
self.ts, ta_func=_volatility, ta_param=None,
output_column_suffix='vola', input_column_suffix='close')
        # Add technical indicator: X day SMA_ROC.
def _sma_roc(ts, ta_param, input_column):
return SMA_ROC(ts, mom_lookback=self.options['lookback'],
sma_timeperiod=self.options['sma_timeperiod'],
price=input_column)
self.ts = self.portfolio.add_technical_indicator(
self.ts, ta_func=_sma_roc, ta_param=None,
output_column_suffix='sma_roc', input_column_suffix='close')
# Finalize timeseries.
self.ts, self.start = self.portfolio.finalize_timeseries(self.ts, self.start)
# Init trade log objects.
self.portfolio.init_trade_logs(self.ts)
self._algo()
self._get_logs()
self._get_stats()
def _get_logs(self):
self.rlog, self.tlog, self.dbal = self.portfolio.get_logs()
def _get_stats(self):
self.stats = pf.stats(self.ts, self.tlog, self.dbal, self.capital)
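# Editor's note: an illustrative sketch only (not part of the original
# module). It shows how this Strategy class is typically driven; the symbols,
# dates, capital, and option overrides below are assumptions.
def _example_run():
    symbols = ['SPY', 'QQQ', 'TLT']
    capital = 10000
    start = datetime.datetime(2015, 1, 1)
    end = datetime.datetime(2020, 1, 1)
    options = default_options.copy()
    options['use_vola_weight'] = True
    s = Strategy(symbols, capital, start, end, options=options)
    s.run()
    # s.stats is a pandas Series of summary statistics computed by pf.stats().
    return s.stats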
| mit | 4,744,775,123,107,647,000 | 36.543956 | 102 | 0.56915 | false |
tasoc/photometry | notes/halo_shift.py | 1 | 2629 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
.. codeauthor:: Rasmus Handberg <[email protected]>
"""
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
import sqlite3
import os.path
#------------------------------------------------------------------------------
def mag2flux(mag):
"""
Convert from magnitude to flux using scaling relation from
aperture photometry. This is an estimate.
Parameters:
mag (float): Magnitude in TESS band.
Returns:
float: Corresponding flux value
"""
return 10**(-0.4*(mag - 20.54))
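# Editor's note (illustrative, not part of the original script): for example,
# mag2flux(10.0) = 10**(-0.4*(10.0 - 20.54)), which is roughly 1.6e4, i.e. a
# Tmag of 10 corresponds to about 16,000 flux units under this scaling.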
if __name__ == '__main__':
pass
folder = r'C:\Users\au195407\Documents\tess_data_local\S01_DR01-2114872'
conn = sqlite3.connect(os.path.join(folder, 'todo.sqlite'))
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
cursor.execute("SELECT todolist.starid,tmag,onedge,edgeflux FROM todolist INNER JOIN diagnostics ON todolist.priority=diagnostics.priority;")
results = cursor.fetchall()
starid = np.array([row['starid'] for row in results], dtype='int64')
tmag = np.array([row['tmag'] for row in results])
OnEdge = np.array([np.NaN if row['onedge'] is None else row['onedge'] for row in results])
EdgeFlux = np.array([np.NaN if row['edgeflux'] is None else row['edgeflux'] for row in results])
cursor.close()
conn.close()
print(tmag)
print(OnEdge)
print(EdgeFlux)
tmag_limit = 3.0
flux_limit = 1e-3
indx = (OnEdge > 0)
indx_halo = (tmag <= tmag_limit) & (OnEdge > 0) & (EdgeFlux/mag2flux(tmag) > flux_limit)
indx_spec = (starid == 382420379)
print(starid[indx_halo])
fig = plt.figure()
ax = fig.add_subplot(111)
plt.scatter(tmag[indx], OnEdge[indx], alpha=0.5)
plt.scatter(tmag[indx_halo], OnEdge[indx_halo], marker='x', c='r')
plt.xlim(xmax=tmag_limit)
plt.ylim(ymin=0)
ax.set_xlabel('Tmag')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(tmag[indx], EdgeFlux[indx], alpha=0.5)
ax.set_xlim(xmax=5.0)
#ax.set_ylim(ymin=0.0)
ax.set_yscale('log')
ax.set_xlabel('Tmag')
fig = plt.figure()
ax = fig.add_subplot(111)
plt.scatter(tmag[indx], EdgeFlux[indx]/mag2flux(tmag[indx]), alpha=0.5)
plt.scatter(tmag[indx_halo], EdgeFlux[indx_halo]/mag2flux(tmag[indx_halo]), alpha=0.3, marker='x', c='r')
plt.scatter(tmag[indx_spec], EdgeFlux[indx_spec]/mag2flux(tmag[indx_spec]), alpha=0.3, marker='o', c='g', lw=2)
plt.plot([2.0, 6.0], [1e-3, 2e-2], 'r--')
plt.axhline(flux_limit, c='r', ls='--')
plt.axvline(tmag_limit, c='r', ls='--')
#plt.xlim(xmax=tmag_limit)
ax.set_ylim(ymin=1e-5, ymax=1)
ax.set_yscale('log')
ax.set_ylabel('Edge Flux / Expected Total Flux')
ax.set_xlabel('Tmag')
plt.show()
| gpl-3.0 | -1,066,074,733,466,643,000 | 26.103093 | 142 | 0.657284 | false |
CacaoMovil/guia-de-cacao-django | cacao_app/config/common.py | 1 | 11423 | # -*- coding: utf-8 -*-
"""
Django settings for cacao_app project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from os.path import join, dirname, abspath
from configurations import Configuration, values
BASE_DIR = dirname(dirname(abspath(__file__)))
class Common(Configuration):
# APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# redirects app
'django.contrib.redirects',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'suitlocale',
'suit',
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms',
'allauth',
# 'allauth.account',
'sorl.thumbnail',
'envelope',
'solo',
'django_perseus',
'rest_framework',
'ckeditor',
'widget_tweaks',
'wkhtmltopdf',
'taggit',
'google_cse',
)
# Apps specific for this project go here.
LOCAL_APPS = (
'pdf_kit',
'cacao',
'configuracion',
'event',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# END APP CONFIGURATION
# MIDDLEWARE CONFIGURATION
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
# 'djangosecure.middleware.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# redirect middleware
'django.contrib.redirects.middleware.RedirectFallbackMiddleware',
)
# END MIDDLEWARE CONFIGURATION
# MIGRATIONS CONFIGURATION
MIGRATION_MODULES = {
'sites': 'contrib.sites.migrations'
}
# END MIGRATIONS CONFIGURATION
# DEBUG
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = values.BooleanValue(False)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
# END DEBUG
# SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
    # Note: This key is only used for development and testing.
# In production, this is changed to a values.SecretValue() setting
SECRET_KEY = "CHANGEME!!!"
# END SECRET CONFIGURATION
# FIXTURE CONFIGURATION
# See:
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
join(BASE_DIR, 'fixtures'),
)
# END FIXTURE CONFIGURATION
# EMAIL CONFIGURATION
EMAIL_BACKEND = values.Value('django.core.mail.backends.smtp.EmailBackend')
# https://docs.djangoproject.com/en/1.10/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = values.SecretValue()
# END EMAIL CONFIGURATION
# MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = values.SingleNestedTupleValue((
('Alice', 'alice@localhost'),
('Bob', 'bob@localhost'),
))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# END MANAGER CONFIGURATION
# DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = values.DatabaseURLValue('postgres://localhost/cacao_app')
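    # Editor's note (illustrative assumption, not part of the original file):
    # DatabaseURLValue reads the DATABASE_URL environment variable, e.g.
    # DATABASE_URL=postgres://user:password@host:5432/cacao_app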
# END DATABASE CONFIGURATION
# CACHING
    # Caching is configured here because memcacheify (used on Heroku) is
    # painful to install on Windows, thanks to django-pylibmc-sasl and pylibmc.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# END CACHING
# GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'America/Los_Angeles'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'es-NI'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# END GENERAL CONFIGURATION
# TEMPLATE CONFIGURATION
# See:
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
"allauth.account.context_processors.account",
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
        # Your stuff: custom template context processors go here
'context.guia_items',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
join(BASE_DIR, 'templates'),
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = join(os.path.dirname(BASE_DIR), 'staticfiles')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See:
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
join(BASE_DIR, 'static'),
)
# See:
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# END STATIC FILE CONFIGURATION
# MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = join(BASE_DIR, 'media')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# END MEDIA CONFIGURATION
# URL Configuration
ROOT_URLCONF = 'urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'wsgi.application'
# End URL Configuration
# AUTHENTICATION CONFIGURATION
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = "username"
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
# END AUTHENTICATION CONFIGURATION
# Custom user app defaults
# Select the correct user model
LOGIN_REDIRECT_URL = "/"
LOGIN_URL = "account_login"
# END Custom user app defaults
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = "slugify.slugify"
# END SLUGLIFIER
# LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
    # Django REST Framework: hide the browsable API docs
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
)
}
# END LOGGING CONFIGURATION
# Your common stuff: Below this line define 3rd party library settings
SUIT_CONFIG = {
'ADMIN_NAME': 'Cacao',
'SHOW_REQUIRED_ASTERISK': True,
'CONFIRM_UNSAVED_CHANGES': True,
'MENU': (
{'app': 'cacao', 'label': 'Guias de Cacao', 'icon': 'icon-leaf'},
{'app': 'configuracion', 'icon': 'icon-cog'},
{'app': 'event', 'icon': 'icon-chevron-right'},
{'label': 'Archivos estaticos', 'icon': 'icon-globe', 'models': (
{'label': 'Generar archivos estaticos',
'url': '/admin/static-generator/'},
)},
{'app': 'auth', 'label': 'Usuarios y Grupos', 'icon': 'icon-lock'},
{'app': 'sites', 'icon': 'icon-chevron-right'},
{'app': 'redirects', 'icon': 'icon-repeat'},
),
# misc
'LIST_PER_PAGE': 15,
'HEADER_DATE_FORMAT': 'l, j, F Y',
}
# CKEditor
CKEDITOR_UPLOAD_PATH = "uploads/"
CKEDITOR_IMAGE_BACKEND = "pillow"
CKEDITOR_JQUERY_URL = '//ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js'
CKEDITOR_CONFIGS = {
'default': {
'toolbar': [
['Undo', 'Redo',
'-', 'Format', 'Bold', 'Italic', 'Underline', 'NumberedList', 'BulletedList', 'Blockquote',
'-', 'JustifyLeft', 'JustifyCenter', 'JustifyRight', 'JustifyBlock',
'-', 'Link', 'Unlink', 'Scayt',
'-', 'Cut', 'Copy', 'PasteText',
'-', 'Source', 'Image', 'Iframe',
],
],
'width': 'auto',
'allowedContent': True,
'removePlugins': 'stylesheetparser',
'extraAllowedContent': 'iframe[*]',
},
}
# FB App ID
FB_APP_ID = values.SecretValue()
# GA APP ID
GA_APP_ID = values.SecretValue()
CX_CODE = values.SecretValue()
# used for the views delete folders and open the guide folder
PROJECT_DIR = dirname(dirname(abspath(__file__)))
PERSEUS_BUILD_DIR = '/tmp/perseus/build'
PERSEUS_SOURCE_DIR = '/tmp/perseus/guia'
# config for create pdf's
PDF_KIT_MODEL = 'cacao.Content'
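# Editor's note: an illustrative sketch only, not part of the original
# settings module. django-configurations classes are usually specialised per
# environment by subclassing; the class name and values below are assumptions,
# and the active class is normally selected via the DJANGO_CONFIGURATION
# environment variable.
class ExampleProduction(Common):
    DEBUG = False
    TEMPLATE_DEBUG = DEBUG
    ALLOWED_HOSTS = ['example.com']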
| bsd-3-clause | 4,947,143,118,909,856,000 | 31.177465 | 108 | 0.617526 | false |
SergeyKubrak/django-rosetta | rosetta/polib.py | 1 | 59163 | # -* coding: utf-8 -*-
#
# License: MIT (see LICENSE file provided)
# vim: set expandtab tabstop=4 shiftwidth=4 softtabstop=4:
"""
**polib** allows you to manipulate, create, modify gettext files (pot, po and
mo files). You can load existing files, iterate through its entries, add,
modify entries, comments or metadata, etc. or create new po files from scratch.
**polib** provides a simple and pythonic API via the :func:`~polib.pofile` and
:func:`~polib.mofile` convenience functions.
"""
__author__ = 'David Jean Louis <[email protected]>'
__version__ = '1.0.3'
__all__ = ['pofile', 'POFile', 'POEntry', 'mofile', 'MOFile', 'MOEntry',
'default_encoding', 'escape', 'unescape', 'detect_encoding', ]
import array
import codecs
import os
import re
import struct
import sys
import textwrap
# the default encoding to use when encoding cannot be detected
default_encoding = 'utf-8'
# python 2/3 compatibility helpers {{{
if sys.version_info[:2] < (3, 0):
PY3 = False
text_type = unicode
def b(s):
return s
def u(s):
return unicode(s, "unicode_escape")
else:
PY3 = True
text_type = str
def b(s):
return s.encode("latin-1")
def u(s):
return s
# }}}
# _pofile_or_mofile {{{
def _pofile_or_mofile(f, type, **kwargs):
"""
Internal function used by :func:`polib.pofile` and :func:`polib.mofile` to
honor the DRY concept.
"""
# get the file encoding
enc = kwargs.get('encoding')
if enc is None:
enc = detect_encoding(f, type == 'mofile')
# parse the file
kls = type == 'pofile' and _POFileParser or _MOFileParser
parser = kls(
f,
encoding=enc,
check_for_duplicates=kwargs.get('check_for_duplicates', False),
klass=kwargs.get('klass')
)
instance = parser.parse()
instance.wrapwidth = kwargs.get('wrapwidth', 78)
return instance
# }}}
# function pofile() {{{
def pofile(pofile, **kwargs):
"""
Convenience function that parses the po or pot file ``pofile`` and returns
a :class:`~polib.POFile` instance.
Arguments:
``pofile``
string, full or relative path to the po/pot file or its content (data).
``wrapwidth``
integer, the wrap width, only useful when the ``-w`` option was passed
to xgettext (optional, default: ``78``).
``encoding``
string, the encoding to use (e.g. "utf-8") (default: ``None``, the
encoding will be auto-detected).
``check_for_duplicates``
whether to check for duplicate entries when adding entries to the
file (optional, default: ``False``).
``klass``
class which is used to instantiate the return value (optional,
        default: ``None``, the return value will be a :class:`~polib.POFile`
instance).
"""
return _pofile_or_mofile(pofile, 'pofile', **kwargs)
# }}}
# function mofile() {{{
def mofile(mofile, **kwargs):
"""
Convenience function that parses the mo file ``mofile`` and returns a
:class:`~polib.MOFile` instance.
Arguments:
``mofile``
string, full or relative path to the mo file or its content (data).
``wrapwidth``
integer, the wrap width, only useful when the ``-w`` option was passed
to xgettext to generate the po file that was used to format the mo file
(optional, default: ``78``).
``encoding``
string, the encoding to use (e.g. "utf-8") (default: ``None``, the
encoding will be auto-detected).
``check_for_duplicates``
whether to check for duplicate entries when adding entries to the
file (optional, default: ``False``).
``klass``
class which is used to instantiate the return value (optional,
        default: ``None``, the return value will be a :class:`~polib.MOFile`
instance).
"""
return _pofile_or_mofile(mofile, 'mofile', **kwargs)
# }}}
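# Editor's note: an illustrative usage sketch (not part of the original
# module); the file paths below are assumptions.
#
#     import polib
#     po = polib.pofile('locale/de/LC_MESSAGES/django.po')
#     print(po.percent_translated())
#     for entry in po.untranslated_entries():
#         print(entry.msgid)
#     po.save_as_mofile('locale/de/LC_MESSAGES/django.mo')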
# function detect_encoding() {{{
def detect_encoding(file, binary_mode=False):
"""
Try to detect the encoding used by the ``file``. The ``file`` argument can
be a PO or MO file path or a string containing the contents of the file.
If the encoding cannot be detected, the function will return the value of
``default_encoding``.
Arguments:
``file``
string, full or relative path to the po/mo file or its content.
``binary_mode``
boolean, set this to True if ``file`` is a mo file.
"""
PATTERN = r'"?Content-Type:.+? charset=([\w_\-:\.]+)'
rxt = re.compile(u(PATTERN))
rxb = re.compile(b(PATTERN))
def charset_exists(charset):
"""Check whether ``charset`` is valid or not."""
try:
codecs.lookup(charset)
except LookupError:
return False
return True
try:
is_file = os.path.exists(file)
except (ValueError, UnicodeEncodeError):
is_file = False
if not is_file:
match = rxt.search(file)
if match:
enc = match.group(1).strip()
if charset_exists(enc):
return enc
else:
# For PY3, always treat as binary
if binary_mode or PY3:
mode = 'rb'
rx = rxb
else:
mode = 'r'
rx = rxt
f = open(file, mode)
for l in f.readlines():
match = rx.search(l)
if match:
f.close()
enc = match.group(1).strip()
if not isinstance(enc, text_type):
enc = enc.decode('utf-8')
if charset_exists(enc):
return enc
f.close()
return default_encoding
# }}}
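# Editor's note (illustrative, not part of the original module): given a po
# file whose header contains e.g. 'Content-Type: text/plain; charset=UTF-8',
# detect_encoding() returns 'UTF-8'; when no charset can be found it falls
# back to the module-level ``default_encoding`` ('utf-8').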
# function escape() {{{
def escape(st):
"""
Escapes the characters ``\\\\``, ``\\t``, ``\\n``, ``\\r`` and ``"`` in
the given string ``st`` and returns it.
"""
return st.replace('\\', r'\\')\
.replace('\t', r'\t')\
.replace('\r', r'\r')\
.replace('\n', r'\n')\
.replace('\"', r'\"')
# }}}
# function unescape() {{{
def unescape(st):
"""
Unescapes the characters ``\\\\``, ``\\t``, ``\\n``, ``\\r`` and ``"`` in
the given string ``st`` and returns it.
"""
def unescape_repl(m):
m = m.group(1)
if m == 'n':
return '\n'
if m == 't':
return '\t'
if m == 'r':
return '\r'
if m == '\\':
return '\\'
return m # handles escaped double quote
return re.sub(r'\\(\\|n|t|r|")', unescape_repl, st)
# }}}
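# Editor's note (illustrative, not part of the original module): escape() and
# unescape() are inverses for the characters they handle, e.g.
#     escape('a\tb\n"c"')          -> 'a\\tb\\n\\"c\\"'
#     unescape('a\\tb\\n\\"c\\"')  -> 'a\tb\n"c"'
# where both sides are written as Python string literals.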
# class _BaseFile {{{
class _BaseFile(list):
"""
Common base class for the :class:`~polib.POFile` and :class:`~polib.MOFile`
    classes. This class should **not** be instantiated directly.
"""
def __init__(self, *args, **kwargs):
"""
Constructor, accepts the following keyword arguments:
``pofile``
string, the path to the po or mo file, or its content as a string.
``wrapwidth``
integer, the wrap width, only useful when the ``-w`` option was
passed to xgettext (optional, default: ``78``).
``encoding``
string, the encoding to use, defaults to ``default_encoding``
global variable (optional).
``check_for_duplicates``
whether to check for duplicate entries when adding entries to the
file, (optional, default: ``False``).
"""
list.__init__(self)
# the opened file handle
pofile = kwargs.get('pofile', None)
if pofile and os.path.exists(pofile):
self.fpath = pofile
else:
self.fpath = kwargs.get('fpath')
# the width at which lines should be wrapped
self.wrapwidth = kwargs.get('wrapwidth', 78)
# the file encoding
self.encoding = kwargs.get('encoding', default_encoding)
# whether to check for duplicate entries or not
self.check_for_duplicates = kwargs.get('check_for_duplicates', False)
# header
self.header = ''
# both po and mo files have metadata
self.metadata = {}
self.metadata_is_fuzzy = 0
def __unicode__(self):
"""
Returns the unicode representation of the file.
"""
ret = []
entries = [self.metadata_as_entry()] + \
[e for e in self if not e.obsolete]
for entry in entries:
ret.append(entry.__unicode__(self.wrapwidth))
for entry in self.obsolete_entries():
ret.append(entry.__unicode__(self.wrapwidth))
ret = u('\n').join(ret)
assert isinstance(ret, text_type)
#if type(ret) != text_type:
# return unicode(ret, self.encoding)
return ret
if PY3:
def __str__(self):
return self.__unicode__()
else:
def __str__(self):
"""
Returns the string representation of the file.
"""
return unicode(self).encode(self.encoding)
def __contains__(self, entry):
"""
        Overridden ``list`` method to implement the membership test (in and
not in).
The method considers that an entry is in the file if it finds an entry
that has the same msgid (the test is **case sensitive**) and the same
msgctxt (or none for both entries).
Argument:
``entry``
an instance of :class:`~polib._BaseEntry`.
"""
return self.find(entry.msgid, by='msgid', msgctxt=entry.msgctxt) \
is not None
def __eq__(self, other):
return str(self) == str(other)
def append(self, entry):
"""
        Overridden method to check for duplicate entries; if a user tries to
add an entry that is already in the file, the method will raise a
``ValueError`` exception.
Argument:
``entry``
an instance of :class:`~polib._BaseEntry`.
"""
if self.check_for_duplicates and entry in self:
raise ValueError('Entry "%s" already exists' % entry.msgid)
super(_BaseFile, self).append(entry)
def insert(self, index, entry):
"""
        Overridden method to check for duplicate entries; if a user tries to
add an entry that is already in the file, the method will raise a
``ValueError`` exception.
Arguments:
``index``
index at which the entry should be inserted.
``entry``
an instance of :class:`~polib._BaseEntry`.
"""
if self.check_for_duplicates and entry in self:
raise ValueError('Entry "%s" already exists' % entry.msgid)
super(_BaseFile, self).insert(index, entry)
def metadata_as_entry(self):
"""
        Returns the file metadata as a :class:`~polib.POEntry` instance.
"""
e = POEntry(msgid='')
mdata = self.ordered_metadata()
if mdata:
strs = []
for name, value in mdata:
# Strip whitespace off each line in a multi-line entry
strs.append('%s: %s' % (name, value))
e.msgstr = '\n'.join(strs) + '\n'
if self.metadata_is_fuzzy:
e.flags.append('fuzzy')
return e
def save(self, fpath=None, repr_method='__unicode__'):
"""
Saves the po file to ``fpath``.
If it is an existing file and no ``fpath`` is provided, then the
existing file is rewritten with the modified data.
Keyword arguments:
``fpath``
string, full or relative path to the file.
``repr_method``
string, the method to use for output.
"""
if self.fpath is None and fpath is None:
raise IOError('You must provide a file path to save() method')
contents = getattr(self, repr_method)()
if fpath is None:
fpath = self.fpath
if repr_method == 'to_binary':
fhandle = open(fpath, 'wb')
else:
fhandle = codecs.open(fpath, 'w', self.encoding)
if not isinstance(contents, text_type):
contents = contents.decode(self.encoding)
fhandle.write(contents)
fhandle.close()
# set the file path if not set
if self.fpath is None and fpath:
self.fpath = fpath
def find(self, st, by='msgid', include_obsolete_entries=False,
msgctxt=False):
"""
Find the entry which msgid (or property identified by the ``by``
argument) matches the string ``st``.
Keyword arguments:
``st``
string, the string to search for.
``by``
string, the property to use for comparison (default: ``msgid``).
``include_obsolete_entries``
boolean, whether to also search in entries that are obsolete.
``msgctxt``
string, allows to specify a specific message context for the
search.
"""
if include_obsolete_entries:
entries = self[:]
else:
entries = [e for e in self if not e.obsolete]
for e in entries:
if getattr(e, by) == st:
if msgctxt is not False and e.msgctxt != msgctxt:
continue
return e
return None
def ordered_metadata(self):
"""
Convenience method that returns an ordered version of the metadata
dictionary. The return value is list of tuples (metadata name,
metadata_value).
"""
# copy the dict first
metadata = self.metadata.copy()
data_order = [
'Project-Id-Version',
'Report-Msgid-Bugs-To',
'POT-Creation-Date',
'PO-Revision-Date',
'Last-Translator',
'Language-Team',
'MIME-Version',
'Content-Type',
'Content-Transfer-Encoding'
]
ordered_data = []
for data in data_order:
try:
value = metadata.pop(data)
ordered_data.append((data, value))
except KeyError:
pass
# the rest of the metadata will be alphabetically ordered since there
# are no specs for this AFAIK
for data in sorted(metadata.keys()):
value = metadata[data]
ordered_data.append((data, value))
return ordered_data
def to_binary(self):
"""
Return the binary representation of the file.
"""
offsets = []
entries = self.translated_entries()
# the keys are sorted in the .mo file
def cmp(_self, other):
# msgfmt compares entries with msgctxt if it exists
self_msgid = _self.msgctxt and _self.msgctxt or _self.msgid
other_msgid = other.msgctxt and other.msgctxt or other.msgid
if self_msgid > other_msgid:
return 1
elif self_msgid < other_msgid:
return -1
else:
return 0
# add metadata entry
entries.sort(key=lambda o: o.msgctxt or o.msgid)
mentry = self.metadata_as_entry()
#mentry.msgstr = mentry.msgstr.replace('\\n', '').lstrip()
entries = [mentry] + entries
entries_len = len(entries)
ids, strs = b(''), b('')
for e in entries:
# For each string, we need size and file offset. Each string is
# NUL terminated; the NUL does not count into the size.
msgid = b('')
if e.msgctxt:
# Contexts are stored by storing the concatenation of the
# context, a <EOT> byte, and the original string
msgid = self._encode(e.msgctxt + '\4')
if e.msgid_plural:
msgstr = []
for index in sorted(e.msgstr_plural.keys()):
msgstr.append(e.msgstr_plural[index])
msgid += self._encode(e.msgid + '\0' + e.msgid_plural)
msgstr = self._encode('\0'.join(msgstr))
else:
msgid += self._encode(e.msgid)
msgstr = self._encode(e.msgstr)
offsets.append((len(ids), len(msgid), len(strs), len(msgstr)))
ids += msgid + b('\0')
strs += msgstr + b('\0')
# The header is 7 32-bit unsigned integers.
keystart = 7 * 4 + 16 * entries_len
# and the values start after the keys
valuestart = keystart + len(ids)
koffsets = []
voffsets = []
# The string table first has the list of keys, then the list of values.
# Each entry has first the size of the string, then the file offset.
for o1, l1, o2, l2 in offsets:
koffsets += [l1, o1 + keystart]
voffsets += [l2, o2 + valuestart]
offsets = koffsets + voffsets
# check endianness for magic number
if struct.pack('@h', 1) == struct.pack('<h', 1):
magic_number = MOFile.LITTLE_ENDIAN
else:
magic_number = MOFile.BIG_ENDIAN
output = struct.pack(
"Iiiiiii",
# Magic number
magic_number,
# Version
0,
# number of entries
entries_len,
# start of key index
7 * 4,
# start of value index
7 * 4 + entries_len * 8,
# size and offset of hash table, we don't use hash tables
0, keystart
)
if PY3 and sys.version_info.minor > 1: # python 3.2 or superior
output += array.array("i", offsets).tobytes()
else:
output += array.array("i", offsets).tostring()
output += ids
output += strs
return output
def _encode(self, mixed):
"""
Encodes the given ``mixed`` argument with the file encoding if and
only if it's an unicode string and returns the encoded string.
"""
if isinstance(mixed, text_type):
mixed = mixed.encode(self.encoding)
return mixed
# }}}
# class POFile {{{
class POFile(_BaseFile):
"""
Po (or Pot) file reader/writer.
This class inherits the :class:`~polib._BaseFile` class and, by extension,
the python ``list`` type.
"""
def __unicode__(self):
"""
Returns the unicode representation of the po file.
"""
ret, headers = '', self.header.split('\n')
for header in headers:
if header[:1] in [',', ':']:
ret += '#%s\n' % header
else:
ret += '# %s\n' % header
if not isinstance(ret, text_type):
ret = ret.decode(self.encoding)
return ret + _BaseFile.__unicode__(self)
def save_as_mofile(self, fpath):
"""
Saves the binary representation of the file to given ``fpath``.
Keyword argument:
``fpath``
string, full or relative path to the mo file.
"""
_BaseFile.save(self, fpath, 'to_binary')
def percent_translated(self):
"""
Convenience method that returns the percentage of translated
messages.
"""
total = len([e for e in self if not e.obsolete])
if total == 0:
return 100
translated = len(self.translated_entries())
return int((100.00 / float(total)) * translated)
def translated_entries(self):
"""
Convenience method that returns the list of translated entries.
"""
return [e for e in self if e.translated()]
def untranslated_entries(self):
"""
Convenience method that returns the list of untranslated entries.
"""
return [e for e in self if not e.translated() and not e.obsolete
and not 'fuzzy' in e.flags]
def fuzzy_entries(self):
"""
Convenience method that returns the list of fuzzy entries.
"""
return [e for e in self if 'fuzzy' in e.flags]
def obsolete_entries(self):
"""
Convenience method that returns the list of obsolete entries.
"""
return [e for e in self if e.obsolete]
def merge(self, refpot):
"""
Convenience method that merges the current pofile with the pot file
provided. It behaves exactly as the gettext msgmerge utility:
* comments of this file will be preserved, but extracted comments and
occurrences will be discarded;
* any translations or comments in the file will be discarded, however,
dot comments and file positions will be preserved;
* the fuzzy flags are preserved.
Keyword argument:
``refpot``
object POFile, the reference catalog.
"""
# Store entries in dict/set for faster access
self_entries = dict((entry.msgid, entry) for entry in self)
refpot_msgids = set(entry.msgid for entry in refpot)
# Merge entries that are in the refpot
for entry in refpot:
e = self_entries.get(entry.msgid)
if e is None:
e = POEntry()
self.append(e)
e.merge(entry)
# ok, now we must "obsolete" entries that are not in the refpot anymore
for entry in self:
if entry.msgid not in refpot_msgids:
entry.obsolete = True
# }}}
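# Editor's note: an illustrative sketch of a typical msgmerge-style workflow
# using POFile.merge() (not part of the original module); file names are
# assumptions.
#
#     po = pofile('locale/de/LC_MESSAGES/django.po')
#     refpot = pofile('locale/django.pot')
#     po.merge(refpot)   # add new msgids, mark stale entries obsolete
#     po.save()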
# class MOFile {{{
class MOFile(_BaseFile):
"""
Mo file reader/writer.
This class inherits the :class:`~polib._BaseFile` class and, by
extension, the python ``list`` type.
"""
BIG_ENDIAN = 0xde120495
LITTLE_ENDIAN = 0x950412de
def __init__(self, *args, **kwargs):
"""
Constructor, accepts all keywords arguments accepted by
:class:`~polib._BaseFile` class.
"""
_BaseFile.__init__(self, *args, **kwargs)
self.magic_number = None
self.version = 0
def save_as_pofile(self, fpath):
"""
Saves the mofile as a pofile to ``fpath``.
Keyword argument:
``fpath``
string, full or relative path to the file.
"""
_BaseFile.save(self, fpath)
def save(self, fpath=None):
"""
Saves the mofile to ``fpath``.
Keyword argument:
``fpath``
string, full or relative path to the file.
"""
_BaseFile.save(self, fpath, 'to_binary')
def percent_translated(self):
"""
Convenience method to keep the same interface with POFile instances.
"""
return 100
def translated_entries(self):
"""
Convenience method to keep the same interface with POFile instances.
"""
return self
def untranslated_entries(self):
"""
Convenience method to keep the same interface with POFile instances.
"""
return []
def fuzzy_entries(self):
"""
Convenience method to keep the same interface with POFile instances.
"""
return []
def obsolete_entries(self):
"""
Convenience method to keep the same interface with POFile instances.
"""
return []
# }}}
# class _BaseEntry {{{
class _BaseEntry(object):
"""
Base class for :class:`~polib.POEntry` and :class:`~polib.MOEntry` classes.
    This class should **not** be instantiated directly.
"""
def __init__(self, *args, **kwargs):
"""
Constructor, accepts the following keyword arguments:
``msgid``
string, the entry msgid.
``msgstr``
string, the entry msgstr.
``msgid_plural``
string, the entry msgid_plural.
``msgstr_plural``
list, the entry msgstr_plural lines.
``msgctxt``
string, the entry context (msgctxt).
``obsolete``
bool, whether the entry is "obsolete" or not.
``encoding``
string, the encoding to use, defaults to ``default_encoding``
global variable (optional).
"""
self.msgid = kwargs.get('msgid', '')
self.msgstr = kwargs.get('msgstr', '')
self.msgid_plural = kwargs.get('msgid_plural', '')
self.msgstr_plural = kwargs.get('msgstr_plural', {})
self.msgctxt = kwargs.get('msgctxt', None)
self.obsolete = kwargs.get('obsolete', False)
self.encoding = kwargs.get('encoding', default_encoding)
def __unicode__(self, wrapwidth=78):
"""
Returns the unicode representation of the entry.
"""
if self.obsolete:
delflag = '#~ '
else:
delflag = ''
ret = []
# write the msgctxt if any
if self.msgctxt is not None:
ret += self._str_field("msgctxt", delflag, "", self.msgctxt,
wrapwidth)
# write the msgid
ret += self._str_field("msgid", delflag, "", self.msgid, wrapwidth)
# write the msgid_plural if any
if self.msgid_plural:
ret += self._str_field("msgid_plural", delflag, "",
self.msgid_plural, wrapwidth)
if self.msgstr_plural:
# write the msgstr_plural if any
msgstrs = self.msgstr_plural
keys = list(msgstrs)
keys.sort()
for index in keys:
msgstr = msgstrs[index]
plural_index = '[%s]' % index
ret += self._str_field("msgstr", delflag, plural_index, msgstr,
wrapwidth)
else:
# otherwise write the msgstr
ret += self._str_field("msgstr", delflag, "", self.msgstr,
wrapwidth)
ret.append('')
ret = u('\n').join(ret)
return ret
if PY3:
def __str__(self):
return self.__unicode__()
else:
def __str__(self):
"""
Returns the string representation of the entry.
"""
return unicode(self).encode(self.encoding)
def __eq__(self, other):
return str(self) == str(other)
def _str_field(self, fieldname, delflag, plural_index, field,
wrapwidth=78):
lines = field.splitlines(True)
if len(lines) > 1:
lines = [''] + lines # start with initial empty line
else:
escaped_field = escape(field)
specialchars_count = 0
for c in ['\\', '\n', '\r', '\t', '"']:
specialchars_count += field.count(c)
# comparison must take into account fieldname length + one space
# + 2 quotes (eg. msgid "<string>")
flength = len(fieldname) + 3
if plural_index:
flength += len(plural_index)
real_wrapwidth = wrapwidth - flength + specialchars_count
if wrapwidth > 0 and len(field) > real_wrapwidth:
# Wrap the line but take field name into account
lines = [''] + [unescape(item) for item in wrap(
escaped_field,
wrapwidth - 2, # 2 for quotes ""
drop_whitespace=False,
break_long_words=False
)]
else:
lines = [field]
if fieldname.startswith('previous_'):
# quick and dirty trick to get the real field name
fieldname = fieldname[9:]
ret = ['%s%s%s "%s"' % (delflag, fieldname, plural_index,
escape(lines.pop(0)))]
for mstr in lines:
#import pdb; pdb.set_trace()
ret.append('%s"%s"' % (delflag, escape(mstr)))
return ret
# }}}
# class POEntry {{{
class POEntry(_BaseEntry):
"""
Represents a po file entry.
"""
def __init__(self, *args, **kwargs):
"""
Constructor, accepts the following keyword arguments:
``comment``
string, the entry comment.
``tcomment``
string, the entry translator comment.
``occurrences``
list, the entry occurrences.
``flags``
list, the entry flags.
``previous_msgctxt``
string, the entry previous context.
``previous_msgid``
string, the entry previous msgid.
``previous_msgid_plural``
string, the entry previous msgid_plural.
"""
_BaseEntry.__init__(self, *args, **kwargs)
self.comment = kwargs.get('comment', '')
self.tcomment = kwargs.get('tcomment', '')
self.occurrences = kwargs.get('occurrences', [])
self.flags = kwargs.get('flags', [])
self.previous_msgctxt = kwargs.get('previous_msgctxt', None)
self.previous_msgid = kwargs.get('previous_msgid', None)
self.previous_msgid_plural = kwargs.get('previous_msgid_plural', None)
def __unicode__(self, wrapwidth=78):
"""
Returns the unicode representation of the entry.
"""
if self.obsolete:
return _BaseEntry.__unicode__(self, wrapwidth)
ret = []
# comments first, if any (with text wrapping as xgettext does)
comments = [('comment', '#. '), ('tcomment', '# ')]
for c in comments:
val = getattr(self, c[0])
if val:
for comment in val.split('\n'):
if wrapwidth > 0 and len(comment) + len(c[1]) > wrapwidth:
ret += wrap(
comment,
wrapwidth,
initial_indent=c[1],
subsequent_indent=c[1],
break_long_words=False
)
else:
ret.append('%s%s' % (c[1], comment))
# occurrences (with text wrapping as xgettext does)
if self.occurrences:
filelist = []
for fpath, lineno in self.occurrences:
if lineno:
filelist.append('%s:%s' % (fpath, lineno))
else:
filelist.append(fpath)
filestr = ' '.join(filelist)
if wrapwidth > 0 and len(filestr) + 3 > wrapwidth:
                # textwrap splits words that contain hyphens; this is not
                # what we want for filenames, so the dirty hack is to
                # temporarily replace hyphens with a char that a file cannot
                # contain, like "*"
ret += [l.replace('*', '-') for l in wrap(
filestr.replace('-', '*'),
wrapwidth,
initial_indent='#: ',
subsequent_indent='#: ',
break_long_words=False
)]
else:
ret.append('#: ' + filestr)
# flags (TODO: wrapping ?)
if self.flags:
ret.append('#, %s' % ', '.join(self.flags))
# previous context and previous msgid/msgid_plural
fields = ['previous_msgctxt', 'previous_msgid',
'previous_msgid_plural']
for f in fields:
val = getattr(self, f)
if val:
ret += self._str_field(f, "#| ", "", val, wrapwidth)
ret.append(_BaseEntry.__unicode__(self, wrapwidth))
ret = u('\n').join(ret)
assert isinstance(ret, text_type)
#if type(ret) != types.UnicodeType:
# return unicode(ret, self.encoding)
return ret
def __cmp__(self, other):
"""
Called by comparison operations if rich comparison is not defined.
"""
# First: Obsolete test
if self.obsolete != other.obsolete:
if self.obsolete:
return -1
else:
return 1
# Work on a copy to protect original
occ1 = sorted(self.occurrences[:])
occ2 = sorted(other.occurrences[:])
pos = 0
for entry1 in occ1:
try:
entry2 = occ2[pos]
except IndexError:
return 1
pos = pos + 1
if entry1[0] != entry2[0]:
if entry1[0] > entry2[0]:
return 1
else:
return -1
if entry1[1] != entry2[1]:
if entry1[1] > entry2[1]:
return 1
else:
return -1
# Finally: Compare message ID
if self.msgid > other.msgid:
return 1
elif self.msgid < other.msgid:
return -1
return 0
def __gt__(self, other):
return self.__cmp__(other) > 0
def __lt__(self, other):
return self.__cmp__(other) < 0
def __ge__(self, other):
return self.__cmp__(other) >= 0
def __le__(self, other):
return self.__cmp__(other) <= 0
def __eq__(self, other):
return self.__cmp__(other) == 0
def __ne__(self, other):
return self.__cmp__(other) != 0
def __hash__(self):
return hash((self.msgid, self.msgstr))
def translated(self):
"""
Returns ``True`` if the entry has been translated or ``False``
otherwise.
"""
if self.obsolete or 'fuzzy' in self.flags:
return False
if self.msgstr != '':
return True
if self.msgstr_plural:
for pos in self.msgstr_plural:
if self.msgstr_plural[pos] == '':
return False
return True
return False
def merge(self, other):
"""
Merge the current entry with the given pot entry.
"""
self.msgid = other.msgid
self.msgctxt = other.msgctxt
self.occurrences = other.occurrences
self.comment = other.comment
fuzzy = 'fuzzy' in self.flags
self.flags = other.flags[:] # clone flags
if fuzzy:
self.flags.append('fuzzy')
self.msgid_plural = other.msgid_plural
self.obsolete = other.obsolete
self.previous_msgctxt = other.previous_msgctxt
self.previous_msgid = other.previous_msgid
self.previous_msgid_plural = other.previous_msgid_plural
if other.msgstr_plural:
for pos in other.msgstr_plural:
try:
# keep existing translation at pos if any
self.msgstr_plural[pos]
except KeyError:
self.msgstr_plural[pos] = ''
# }}}
# class MOEntry {{{
class MOEntry(_BaseEntry):
"""
Represents a mo file entry.
"""
pass
# }}}
# class _POFileParser {{{
class _POFileParser(object):
"""
A finite state machine to parse efficiently and correctly po
file format.
"""
def __init__(self, pofile, *args, **kwargs):
"""
Constructor.
Keyword arguments:
``pofile``
string, path to the po file or its content
``encoding``
string, the encoding to use, defaults to ``default_encoding``
global variable (optional).
``check_for_duplicates``
whether to check for duplicate entries when adding entries to the
file (optional, default: ``False``).
"""
enc = kwargs.get('encoding', default_encoding)
if os.path.exists(pofile):
try:
self.fhandle = codecs.open(pofile, 'rU', enc)
except LookupError:
enc = default_encoding
self.fhandle = codecs.open(pofile, 'rU', enc)
else:
self.fhandle = pofile.splitlines()
klass = kwargs.get('klass')
if klass is None:
klass = POFile
self.instance = klass(
pofile=pofile,
encoding=enc,
check_for_duplicates=kwargs.get('check_for_duplicates', False)
)
self.transitions = {}
self.current_entry = POEntry()
self.current_state = 'ST'
self.current_token = None
# two memo flags used in handlers
self.msgstr_index = 0
self.entry_obsolete = 0
# Configure the state machine, by adding transitions.
# Signification of symbols:
# * ST: Beginning of the file (start)
# * HE: Header
# * TC: a translation comment
# * GC: a generated comment
        # * OC: a file/line occurrence
# * FL: a flags line
# * CT: a message context
# * PC: a previous msgctxt
# * PM: a previous msgid
# * PP: a previous msgid_plural
# * MI: a msgid
# * MP: a msgid plural
# * MS: a msgstr
# * MX: a msgstr plural
# * MC: a msgid or msgstr continuation line
all = ['ST', 'HE', 'GC', 'OC', 'FL', 'CT', 'PC', 'PM', 'PP', 'TC',
'MS', 'MP', 'MX', 'MI']
self.add('TC', ['ST', 'HE'], 'HE')
self.add('TC', ['GC', 'OC', 'FL', 'TC', 'PC', 'PM', 'PP', 'MS',
'MP', 'MX', 'MI'], 'TC')
self.add('GC', all, 'GC')
self.add('OC', all, 'OC')
self.add('FL', all, 'FL')
self.add('PC', all, 'PC')
self.add('PM', all, 'PM')
self.add('PP', all, 'PP')
self.add('CT', ['ST', 'HE', 'GC', 'OC', 'FL', 'TC', 'PC', 'PM',
'PP', 'MS', 'MX'], 'CT')
self.add('MI', ['ST', 'HE', 'GC', 'OC', 'FL', 'CT', 'TC', 'PC',
'PM', 'PP', 'MS', 'MX'], 'MI')
self.add('MP', ['TC', 'GC', 'PC', 'PM', 'PP', 'MI'], 'MP')
self.add('MS', ['MI', 'MP', 'TC'], 'MS')
self.add('MX', ['MI', 'MX', 'MP', 'TC'], 'MX')
self.add('MC', ['CT', 'MI', 'MP', 'MS', 'MX', 'PM', 'PP', 'PC'], 'MC')
def parse(self):
"""
Run the state machine, parse the file line by line and call process()
with the current matched symbol.
"""
i = 0
keywords = {
'msgctxt': 'CT',
'msgid': 'MI',
'msgstr': 'MS',
'msgid_plural': 'MP',
}
prev_keywords = {
'msgid_plural': 'PP',
'msgid': 'PM',
'msgctxt': 'PC',
}
for line in self.fhandle:
i += 1
line = line.strip()
if line == '':
continue
tokens = line.split(None, 2)
nb_tokens = len(tokens)
if tokens[0] == '#~|':
continue
if tokens[0] == '#~' and nb_tokens > 1:
line = line[3:].strip()
tokens = tokens[1:]
nb_tokens -= 1
self.entry_obsolete = 1
else:
self.entry_obsolete = 0
# Take care of keywords like
# msgid, msgid_plural, msgctxt & msgstr.
if tokens[0] in keywords and nb_tokens > 1:
line = line[len(tokens[0]):].lstrip()
if re.search(r'([^\\]|^)"', line[1:-1]):
raise IOError('Syntax error in po file %s (line %s): '
'unescaped double quote found' %
(self.instance.fpath, i))
self.current_token = line
self.process(keywords[tokens[0]], i)
continue
self.current_token = line
if tokens[0] == '#:':
if nb_tokens <= 1:
continue
# we are on a occurrences line
self.process('OC', i)
elif line[:1] == '"':
# we are on a continuation line
if re.search(r'([^\\]|^)"', line[1:-1]):
raise IOError('Syntax error in po file %s (line %s): '
'unescaped double quote found' %
(self.instance.fpath, i))
self.process('MC', i)
elif line[:7] == 'msgstr[':
# we are on a msgstr plural
self.process('MX', i)
elif tokens[0] == '#,':
if nb_tokens <= 1:
continue
# we are on a flags line
self.process('FL', i)
elif tokens[0] == '#' or tokens[0].startswith('##'):
if line == '#':
line += ' '
# we are on a translator comment line
self.process('TC', i)
elif tokens[0] == '#.':
if nb_tokens <= 1:
continue
# we are on a generated comment line
self.process('GC', i)
elif tokens[0] == '#|':
if nb_tokens <= 1:
raise IOError('Syntax error in po file %s (line %s)' %
(self.instance.fpath, i))
# Remove the marker and any whitespace right after that.
line = line[2:].lstrip()
self.current_token = line
if tokens[1].startswith('"'):
# Continuation of previous metadata.
self.process('MC', i)
continue
if nb_tokens == 2:
# Invalid continuation line.
raise IOError('Syntax error in po file %s (line %s): '
'invalid continuation line' %
(self.instance.fpath, i))
# we are on a "previous translation" comment line,
if tokens[1] not in prev_keywords:
# Unknown keyword in previous translation comment.
raise IOError('Syntax error in po file %s (line %s): '
'unknown keyword %s' %
(self.instance.fpath, i, tokens[1]))
# Remove the keyword and any whitespace
# between it and the starting quote.
line = line[len(tokens[1]):].lstrip()
self.current_token = line
self.process(prev_keywords[tokens[1]], i)
else:
raise IOError('Syntax error in po file %s (line %s)' %
(self.instance.fpath, i))
if self.current_entry:
# since entries are added when another entry is found, we must add
# the last entry here (only if there are lines)
self.instance.append(self.current_entry)
# before returning the instance, check if there's metadata and if
# so extract it in a dict
metadataentry = self.instance.find('')
if metadataentry: # metadata found
# remove the entry
self.instance.remove(metadataentry)
self.instance.metadata_is_fuzzy = metadataentry.flags
key = None
for msg in metadataentry.msgstr.splitlines():
try:
key, val = msg.split(':', 1)
self.instance.metadata[key] = val.strip()
except (ValueError, KeyError):
if key is not None:
self.instance.metadata[key] += '\n' + msg.strip()
# close opened file
if not isinstance(self.fhandle, list): # must be file
self.fhandle.close()
return self.instance
def add(self, symbol, states, next_state):
"""
Add a transition to the state machine.
Keywords arguments:
``symbol``
string, the matched token (two chars symbol).
``states``
list, a list of states (two chars symbols).
``next_state``
the next state the fsm will have after the action.
"""
for state in states:
action = getattr(self, 'handle_%s' % next_state.lower())
self.transitions[(symbol, state)] = (action, next_state)
def process(self, symbol, linenum):
"""
Process the transition corresponding to the current state and the
symbol provided.
Keywords arguments:
``symbol``
string, the matched token (two chars symbol).
``linenum``
integer, the current line number of the parsed file.
"""
try:
(action, state) = self.transitions[(symbol, self.current_state)]
if action():
self.current_state = state
except Exception:
raise IOError('Syntax error in po file (line %s)' % linenum)
# state handlers
def handle_he(self):
"""Handle a header comment."""
if self.instance.header != '':
self.instance.header += '\n'
self.instance.header += self.current_token[2:]
return 1
def handle_tc(self):
"""Handle a translator comment."""
if self.current_state in ['MC', 'MS', 'MX']:
self.instance.append(self.current_entry)
self.current_entry = POEntry()
if self.current_entry.tcomment != '':
self.current_entry.tcomment += '\n'
tcomment = self.current_token.lstrip('#')
if tcomment.startswith(' '):
tcomment = tcomment[1:]
self.current_entry.tcomment += tcomment
return True
def handle_gc(self):
"""Handle a generated comment."""
if self.current_state in ['MC', 'MS', 'MX']:
self.instance.append(self.current_entry)
self.current_entry = POEntry()
if self.current_entry.comment != '':
self.current_entry.comment += '\n'
self.current_entry.comment += self.current_token[3:]
return True
def handle_oc(self):
"""Handle a file:num occurence."""
if self.current_state in ['MC', 'MS', 'MX']:
self.instance.append(self.current_entry)
self.current_entry = POEntry()
occurrences = self.current_token[3:].split()
for occurrence in occurrences:
if occurrence != '':
try:
fil, line = occurrence.split(':')
if not line.isdigit():
fil = fil + line
line = ''
self.current_entry.occurrences.append((fil, line))
except (ValueError, AttributeError):
self.current_entry.occurrences.append((occurrence, ''))
return True
def handle_fl(self):
"""Handle a flags line."""
if self.current_state in ['MC', 'MS', 'MX']:
self.instance.append(self.current_entry)
self.current_entry = POEntry()
self.current_entry.flags += self.current_token[3:].split(', ')
return True
def handle_pp(self):
"""Handle a previous msgid_plural line."""
if self.current_state in ['MC', 'MS', 'MX']:
self.instance.append(self.current_entry)
self.current_entry = POEntry()
self.current_entry.previous_msgid_plural = \
unescape(self.current_token[1:-1])
return True
def handle_pm(self):
"""Handle a previous msgid line."""
if self.current_state in ['MC', 'MS', 'MX']:
self.instance.append(self.current_entry)
self.current_entry = POEntry()
self.current_entry.previous_msgid = \
unescape(self.current_token[1:-1])
return True
def handle_pc(self):
"""Handle a previous msgctxt line."""
if self.current_state in ['MC', 'MS', 'MX']:
self.instance.append(self.current_entry)
self.current_entry = POEntry()
self.current_entry.previous_msgctxt = \
unescape(self.current_token[1:-1])
return True
def handle_ct(self):
"""Handle a msgctxt."""
if self.current_state in ['MC', 'MS', 'MX']:
self.instance.append(self.current_entry)
self.current_entry = POEntry()
self.current_entry.msgctxt = unescape(self.current_token[1:-1])
return True
def handle_mi(self):
"""Handle a msgid."""
if self.current_state in ['MC', 'MS', 'MX']:
self.instance.append(self.current_entry)
self.current_entry = POEntry()
self.current_entry.obsolete = self.entry_obsolete
self.current_entry.msgid = unescape(self.current_token[1:-1])
return True
def handle_mp(self):
"""Handle a msgid plural."""
self.current_entry.msgid_plural = unescape(self.current_token[1:-1])
return True
def handle_ms(self):
"""Handle a msgstr."""
self.current_entry.msgstr = unescape(self.current_token[1:-1])
return True
def handle_mx(self):
"""Handle a msgstr plural."""
index, value = self.current_token[7], self.current_token[11:-1]
self.current_entry.msgstr_plural[index] = unescape(value)
self.msgstr_index = index
return True
def handle_mc(self):
"""Handle a msgid or msgstr continuation line."""
token = unescape(self.current_token[1:-1])
if self.current_state == 'CT':
self.current_entry.msgctxt += token
elif self.current_state == 'MI':
self.current_entry.msgid += token
elif self.current_state == 'MP':
self.current_entry.msgid_plural += token
elif self.current_state == 'MS':
self.current_entry.msgstr += token
elif self.current_state == 'MX':
self.current_entry.msgstr_plural[self.msgstr_index] += token
elif self.current_state == 'PP':
token = token[3:]
self.current_entry.previous_msgid_plural += token
elif self.current_state == 'PM':
token = token[3:]
self.current_entry.previous_msgid += token
elif self.current_state == 'PC':
token = token[3:]
self.current_entry.previous_msgctxt += token
# don't change the current state
return False
# }}}
# class _MOFileParser {{{
class _MOFileParser(object):
"""
A class to parse binary mo files.
"""
def __init__(self, mofile, *args, **kwargs):
"""
Constructor.
Keyword arguments:
``mofile``
string, path to the mo file or its content
``encoding``
string, the encoding to use, defaults to ``default_encoding``
global variable (optional).
``check_for_duplicates``
whether to check for duplicate entries when adding entries to the
file (optional, default: ``False``).
"""
self.fhandle = open(mofile, 'rb')
klass = kwargs.get('klass')
if klass is None:
klass = MOFile
self.instance = klass(
fpath=mofile,
encoding=kwargs.get('encoding', default_encoding),
check_for_duplicates=kwargs.get('check_for_duplicates', False)
)
def parse(self):
"""
Build the instance with the file handle provided in the
constructor.
"""
# parse magic number
magic_number = self._readbinary('<I', 4)
if magic_number == MOFile.LITTLE_ENDIAN:
ii = '<II'
elif magic_number == MOFile.BIG_ENDIAN:
ii = '>II'
else:
raise IOError('Invalid mo file, magic number is incorrect !')
self.instance.magic_number = magic_number
# parse the version number and the number of strings
self.instance.version, numofstrings = self._readbinary(ii, 8)
# original strings and translation strings hash table offset
msgids_hash_offset, msgstrs_hash_offset = self._readbinary(ii, 8)
# move to msgid hash table and read length and offset of msgids
self.fhandle.seek(msgids_hash_offset)
msgids_index = []
for i in range(numofstrings):
msgids_index.append(self._readbinary(ii, 8))
# move to msgstr hash table and read length and offset of msgstrs
self.fhandle.seek(msgstrs_hash_offset)
msgstrs_index = []
for i in range(numofstrings):
msgstrs_index.append(self._readbinary(ii, 8))
# build entries
encoding = self.instance.encoding
for i in range(numofstrings):
self.fhandle.seek(msgids_index[i][1])
msgid = self.fhandle.read(msgids_index[i][0])
self.fhandle.seek(msgstrs_index[i][1])
msgstr = self.fhandle.read(msgstrs_index[i][0])
if i == 0: # metadata
raw_metadata, metadata = msgstr.split(b('\n')), {}
for line in raw_metadata:
tokens = line.split(b(':'), 1)
if tokens[0] != b(''):
try:
k = tokens[0].decode(encoding)
v = tokens[1].decode(encoding)
metadata[k] = v.strip()
except IndexError:
metadata[k] = u('')
self.instance.metadata = metadata
continue
# test if we have a plural entry
msgid_tokens = msgid.split(b('\0'))
if len(msgid_tokens) > 1:
entry = self._build_entry(
msgid=msgid_tokens[0],
msgid_plural=msgid_tokens[1],
msgstr_plural=dict((k, v) for k, v in
enumerate(msgstr.split(b('\0'))))
)
else:
entry = self._build_entry(msgid=msgid, msgstr=msgstr)
self.instance.append(entry)
# close opened file
self.fhandle.close()
return self.instance
def _build_entry(self, msgid, msgstr=None, msgid_plural=None,
msgstr_plural=None):
msgctxt_msgid = msgid.split(b('\x04'))
encoding = self.instance.encoding
if len(msgctxt_msgid) > 1:
kwargs = {
'msgctxt': msgctxt_msgid[0].decode(encoding),
'msgid': msgctxt_msgid[1].decode(encoding),
}
else:
kwargs = {'msgid': msgid.decode(encoding)}
if msgstr:
kwargs['msgstr'] = msgstr.decode(encoding)
if msgid_plural:
kwargs['msgid_plural'] = msgid_plural.decode(encoding)
if msgstr_plural:
for k in msgstr_plural:
msgstr_plural[k] = msgstr_plural[k].decode(encoding)
kwargs['msgstr_plural'] = msgstr_plural
return MOEntry(**kwargs)
def _readbinary(self, fmt, numbytes):
"""
        Private method that unpacks n bytes of data using format <fmt>.
It returns a tuple or a mixed value if the tuple length is 1.
"""
bytes = self.fhandle.read(numbytes)
tup = struct.unpack(fmt, bytes)
if len(tup) == 1:
return tup[0]
return tup
# }}}
# class TextWrapper {{{
class TextWrapper(textwrap.TextWrapper):
"""
Subclass of textwrap.TextWrapper that backport the
drop_whitespace option.
"""
def __init__(self, *args, **kwargs):
drop_whitespace = kwargs.pop('drop_whitespace', True)
textwrap.TextWrapper.__init__(self, *args, **kwargs)
self.drop_whitespace = drop_whitespace
def _wrap_chunks(self, chunks):
"""_wrap_chunks(chunks : [string]) -> [string]
Wrap a sequence of text chunks and return a list of lines of
length 'self.width' or less. (If 'break_long_words' is false,
some lines may be longer than this.) Chunks correspond roughly
to words and the whitespace between them: each chunk is
indivisible (modulo 'break_long_words'), but a line break can
come between any two chunks. Chunks should not have internal
whitespace; ie. a chunk is either all whitespace or a "word".
Whitespace chunks will be removed from the beginning and end of
lines, but apart from that whitespace is preserved.
"""
lines = []
if self.width <= 0:
raise ValueError("invalid width %r (must be > 0)" % self.width)
# Arrange in reverse order so items can be efficiently popped
        # from a stack of chunks.
chunks.reverse()
while chunks:
# Start the list of chunks that will make up the current line.
# cur_len is just the length of all the chunks in cur_line.
cur_line = []
cur_len = 0
# Figure out which static string will prefix this line.
if lines:
indent = self.subsequent_indent
else:
indent = self.initial_indent
# Maximum width for this line.
width = self.width - len(indent)
# First chunk on line is whitespace -- drop it, unless this
# is the very beginning of the text (ie. no lines started yet).
if self.drop_whitespace and chunks[-1].strip() == '' and lines:
del chunks[-1]
while chunks:
l = len(chunks[-1])
# Can at least squeeze this chunk onto the current line.
if cur_len + l <= width:
cur_line.append(chunks.pop())
cur_len += l
# Nope, this line is full.
else:
break
# The current line is full, and the next chunk is too big to
# fit on *any* line (not just this one).
if chunks and len(chunks[-1]) > width:
self._handle_long_word(chunks, cur_line, cur_len, width)
# If the last chunk on this line is all whitespace, drop it.
if self.drop_whitespace and cur_line and not cur_line[-1].strip():
del cur_line[-1]
# Convert current line back to a string and store it in list
# of all lines (return value).
if cur_line:
lines.append(indent + ''.join(cur_line))
return lines
# }}}
# function wrap() {{{
def wrap(text, width=70, **kwargs):
"""
Wrap a single paragraph of text, returning a list of wrapped lines.
"""
if sys.version_info < (2, 6):
return TextWrapper(width=width, **kwargs).wrap(text)
return textwrap.wrap(text, width=width, **kwargs)
# }}}
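# Added usage sketch for the wrap() helper above (kept as a comment so the
# module's behaviour is unchanged):
#
#   wrap('A fairly long translation string that needs wrapping.', width=20)
#   # -> ['A fairly long', 'translation string', 'that needs wrapping.']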
| mit | -254,093,164,069,252,670 | 32.730331 | 79 | 0.520291 | false |
VladimirShe/in100gram | in100gram/in100gram/settings.py | 1 | 2137 | """
Django settings for in100gram project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '22q%is3ng$=h^cx8(8%&(9)@7e9yi(m^lk#w5a#j8ym5!-mlua'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'in100gram.urls'
WSGI_APPLICATION = 'in100gram.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'mysql.connector.django',
'NAME': 'in100gram',
'USER': 'in100gram',
'PASSWORD': 'in100gram',
'HOST': '127.0.0.1',
'PORT': '3306',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| apache-2.0 | -7,077,503,071,250,312,000 | 23.563218 | 71 | 0.709406 | false |
mtdx/ml-algorithms | neural-networks/cnn.py | 1 | 2621 | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
n_classes = 10
batch_size = 128
x = tf.placeholder('float', [None, 784])
y = tf.placeholder('float')
keep_rate = 0.8
keep_prob = tf.placeholder(tf.float32)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def maxpool2d(x):
    # ksize sets the pooling window size; strides sets how far the window moves
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
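# Added note (not in the original script): with 'SAME' padding and a stride of
# 2, each maxpool2d call halves the spatial size, so a 28x28 MNIST image goes
#   28x28x1 -> conv(5x5, 32) -> 28x28x32 -> pool -> 14x14x32
#           -> conv(5x5, 64) -> 14x14x64 -> pool -> 7x7x64
# which is why the fully connected layer below flattens to 7 * 7 * 64 = 3136.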
def convolutional_neural_network(x):
weights = {'W_conv1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
'W_conv2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
'W_fc': tf.Variable(tf.random_normal([7 * 7 * 64, 1024])),
'out': tf.Variable(tf.random_normal([1024, n_classes]))}
biases = {'b_conv1': tf.Variable(tf.random_normal([32])),
'b_conv2': tf.Variable(tf.random_normal([64])),
'b_fc': tf.Variable(tf.random_normal([1024])),
'out': tf.Variable(tf.random_normal([n_classes]))}
x = tf.reshape(x, shape=[-1, 28, 28, 1])
conv1 = tf.nn.relu(conv2d(x, weights['W_conv1']) + biases['b_conv1'])
conv1 = maxpool2d(conv1)
conv2 = tf.nn.relu(conv2d(conv1, weights['W_conv2']) + biases['b_conv2'])
conv2 = maxpool2d(conv2)
fc = tf.reshape(conv2, [-1, 7 * 7 * 64])
fc = tf.nn.relu(tf.matmul(fc, weights['W_fc']) + biases['b_fc'])
fc = tf.nn.dropout(fc, keep_rate)
output = tf.matmul(fc, weights['out']) + biases['out']
return output
def train_neural_network(x):
prediction = convolutional_neural_network(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
optimizer = tf.train.AdamOptimizer().minimize(cost)
hm_epochs = 10
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
for epoch in range(hm_epochs):
epoch_loss = 0
for _ in range(int(mnist.train.num_examples / batch_size)):
epoch_x, epoch_y = mnist.train.next_batch(batch_size)
_, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
epoch_loss += c
print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)
correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
print('Accuracy:', accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
train_neural_network(x)
| mit | -4,813,126,551,715,110,000 | 33.038961 | 87 | 0.595193 | false |
m4sth0/sauventory | tests/test_ranktransform.py | 1 | 4574 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2016
# Author(s):
# Thomas Leppelt <[email protected]>
# This file is part of sauventory.
# Spatial Autocorrelated Uncertainty of Inventories
# sauventory is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# sauventory is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# sauventory comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
# This is free software, and you are welcome to redistribute it
# under certain conditions; type `show c' for details.
"""
This module perform unittests for the rank transformation module.
Testing the module with an example from Iman & Conover 1981:
https://www.uio.no/studier/emner/matnat/math/STK4400/v05/undervisningsmateriale
/A%20distribution-free%20approach%20to%20rank%20correlation.pdf
"""
import numpy as np
import unittest
from sauventory import ranktransform
class RankTransformTest(unittest.TestCase):
def setUp(self):
# Matrix R with n independent sampeld columns k, R = kxn.
self.r = np.array([[1.534, 1.534, -1.534, -1.534, .489, -.319],
[-.887, -.489, .887, -.887, -.157, .674],
[-.489, .674, -.489, 1.150, 1.534, -.489],
[.887, 0.000, -.674, .319, 0.000, -1.534],
[1.150, -.319, .489, .674, .157, 1.150],
[.157, -1.534, -.887, -.674, -.319, .157],
[-1.150, -.674, -.157, .157, -1.534, -.157],
[0.000, -.887, .157, -.319, -.674, .887],
[.319, -.157, .674, .887, .574, 1.534],
[-.319, .157, -.319, -1.150, 1.150, -.887],
[-1.534, .887, 1.150, 1.534, -.489, -1.150],
[-.157, -1.150, 1.534, -.157, -1.150, -.674],
[.489, .489, -1.150, .489, -.887, 0.000],
[.674, .319, .319, 0.000, .887, .319],
[-.674, 1.150, 0.000, -.489, .319, .489]])
# Example target correlation matrix.
self.c_star = np.array([[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, .75, -.70],
[0, 0, 0, .75, 1, -.95],
[0, 0, 0, -.70, -.95, 1]])
# Result sample columns arranged to given correlation amtrix.
self.res = np.array([[1.534, 1.534, -1.534, -1.534, -0.887, 0.489],
[-0.887, -0.489, 0.887, -0.887, -0.674, 1.15],
[-0.489, 0.674, -0.489, 1.15, 1.534, -1.534],
[0.887, 0., -0.674, 0.319, 0.319, -0.887],
[1.15, -0.319, 0.489, 0.674, 0.574, -0.319],
[0.157, -1.534, -0.887, -0.674, -0.489, 0.674],
[-1.15, -0.674, -0.157, 0.157, -1.534, 0.887],
[0., -0.887, 0.157, -0.319, -0.319, 1.534],
[0.319, -0.157, 0.674, 0.887, 1.15, -0.674],
[-0.319, 0.157, -0.319, -1.15, 0.157, -0.157],
[-1.534, 0.887, 1.15, 1.534, 0.887, -1.15],
[-0.157, -1.15, 1.534, -0.157, -1.15, 0.319],
[0.489, 0.489, -1.15, 0.489, -0.157, 0.],
[0.674, 0.319, 0.319, 0., 0.489, -0.489],
[-0.674, 1.15, 0., -0.489, 0., 0.157]])
def tearDown(self):
pass
def test_conover(self):
r_cor = ranktransform.transform_by_corrmat(self.r, self.c_star)
compare = r_cor == self.res
self.assertEqual(compare.all(), True)
def suite():
loader = unittest.TestLoader()
suite = unittest.TestSuite()
suite.addTest(loader.loadTestsFromTestCase(RankTransformTest))
return suite
if __name__ == "__main__":
unittest.TextTestRunner(verbosity=2).run(suite())
| gpl-3.0 | -1,744,700,304,624,888,300 | 42.980769 | 79 | 0.49366 | false |
T2DREAM/t2dream-portal | src/encoded/upgrade/file.py | 1 | 22524 | from snovault import upgrade_step
from pyramid.traversal import find_root
from datetime import datetime, time
@upgrade_step('file', '', '2')
def file_0_2(value, system):
# http://redmine.encodedcc.org/issues/1295
# http://redmine.encodedcc.org/issues/1307
if 'status' in value:
value['status'] = value['status'].lower()
@upgrade_step('file', '2', '3')
def file_2_3(value, system):
# http://redmine.encodedcc.org/issues/1572
file_format = value.get('file_format')
file_name = value['download_path'].rsplit('/', 1)[-1]
file_ext = file_name[file_name.find('.'):]
# REJECTIONS
if file_ext in ['.gtf.bigBed', '.pdf', '.pdf.gz', '.gff.bigBed', '.spikeins']:
value['status'] = 'deleted'
# Find the miscatorgorized bedMethyls
if file_ext == '.bed.bigBed' and 'MethylRrbs' in value.get('submitted_file_name'):
value['file_format'] = 'bedMethyl'
if file_ext == '.bed.gz' and 'MethylRrbs' in value.get('submitted_file_name'):
value['file_format'] = 'bed_bedMethyl'
unknownDict = {'.CEL.gz': 'CEL',
'.bb': 'bedMethyl',
'.bed': 'bed',
'.bed.gz': 'bed',
'.bed.bigBed': 'bigBed',
'.bigBed': 'bigBed',
'.bed9': 'bedMethyl',
'.bed9.gz': 'bed_bedMethyl',
'.bedCluster.bigBed': 'bigBed',
'.bedLogR.bigBed': 'bedLogR',
'.bedRnaElements.bigBed': 'bedRnaElements',
'.bedRrbs.bigBed': 'bedMethyl',
'.broadPeak.gz': 'bed_broadPeak',
'.bigBed': 'bigBed',
'.hic': 'hic',
'.bedgraph': 'bedgraph',
'.csfasta.gz': 'csfasta',
'.csqual.gz': 'csqual',
'.fasta.gz': 'fasta',
'.gff.bigBed': 'bigBed',
'.gff.gz': 'gtf',
'.gp.bigBed': 'bigBed',
'.matrix.gz': 'tsv',
'.matrix.tgz': 'tar',
'.narrowPeak': 'bed_narrowPeak',
'.narrowPeak.gz': 'bed_narrowPeak',
'.pdf': 'tsv', # These are going to be obsolete
'.pdf.gz': 'tsv', # These are going to be obsolete
'.peaks.gz': 'tsv',
'.peptideMapping.bigBed': 'bigBed',
'.shortFrags.bigBed': 'bigBed',
'.sorted.bigBed': 'bigBed',
'.tab.gz': 'tsv',
'.tgz': 'tar',
'.txt': 'tsv',
'.xlsx': 'tsv', # These need to be converted to tsv
}
if file_format in ['unknown', 'customTrack']:
value['file_format'] = unknownDict[file_ext]
# http://redmine.encodedcc.org/issues/1429
context = system['context']
root = find_root(context)
dataset = root.get_by_uuid(value['dataset']).upgrade_properties()
dataset_status = dataset.get('status')
status = value.get('status')
if status == 'current':
if dataset_status == 'released':
value['status'] = 'released'
else:
value['status'] = 'in progress'
if status == 'obsolete':
if dataset_status in ['released', 'revoked']:
value['status'] = 'revoked'
else:
value['status'] = 'deleted'
# http://redmine.encodedcc.org/issues/568
output_type_dict = {
'': 'raw data',
'Alignments': 'alignments',
'bigBed': 'sites',
'bigWig': 'sites',
'bedgraph': 'sites',
'hic': 'sites',
'Clusters': 'clusters',
'Contigs': 'contigs',
'FastqRd1': 'reads',
'FastqRd2': 'reads',
'forebrain_enhancers': 'enhancers_forebrain',
'heart_enhancers': 'enhancers_heart',
'GreenIdat': 'idat green file',
'hotspot_broad_peaks': 'hotspots',
'hotspot_narrow_peaks': 'hotspots',
'hotspot_signal': 'hotspots',
'Hotspots': 'hotspots',
'Interactions': 'interactions',
'MinusRawSignal': 'raw minus signal',
'PlusRawSignal': 'raw plus signal',
'macs2_dnase_peaks': 'peaks',
'macs2_dnase_signal': 'signal',
'MinusSignal': 'minus signal',
'minusSignal': 'minus signal',
'MultiMinus': 'multi-read minus signal',
'MultiPlus': 'multi-read plus signal',
'MultiSignal': 'multi-read signal',
'MultiUnstranded': 'multi-read signal',
'RawData2': 'reads',
'RedIdat': 'idat red file',
'peak': 'peaks',
'PeakCalls': 'peaks',
'Peaks': 'peaks',
'PlusSignal': 'plus signal',
'plusSignal': 'plus signal',
'predicted_enhancers_heart': 'enhancers_heart',
'RawSignal': 'raw signal',
'RawData': 'raw data',
'rcc': 'raw data',
'Read': 'reads',
'read': 'reads',
'read1': 'reads',
'rejected_reads': 'rejected reads',
'RepPeaks': 'peaks',
'RepSignal': 'signal',
'Signal': 'signal',
'SimpleSignal': 'signal',
'Sites': 'sites',
'Spikeins': 'spike-ins',
'Spikes': 'spike-ins',
'Splices': 'splice junctions',
'uniqueReads': 'unique signal',
'UniqueSignal': 'unique signal',
'uniqueSignal': 'unique signal',
'UniqueMinus': 'unique minus signal',
'uniqueMinusSignal': 'unique minus signal',
'UniquePlus': 'unique plus signal',
'uniquePlusSignal': 'unique plus signal',
'UniqueUnstranded': 'unique signal',
'UnstrandedSignal': 'signal',
'dataset_used': 'enhancers',
'TRAINING_DATA_MOUSE_VISTA': 'enhancers',
'method_description': 'enhancers',
'unknown': 'enhancers',
'Protocol': 'raw data',
}
current_output_type = value['output_type']
if current_output_type in output_type_dict:
value['output_type'] = output_type_dict[current_output_type]
# Help the raw data problem
if value['output_type'] == 'raw data' and value['file_format'] == 'fastq':
value['output_type'] = 'reads'
@upgrade_step('file', '3', '4')
def file_3_4(value, system):
# http://redmine.encodedcc.org/issues/1714
context = system['context']
root = find_root(context)
dataset = root.get_by_uuid(value['dataset']).upgrade_properties()
if 'download_path' in value:
value.pop('download_path')
value['lab'] = dataset['lab']
value['award'] = dataset['award']
# EDW User
if value.get('submitted_by') == '0e04cd39-006b-4b4a-afb3-b6d76c4182ff':
value['lab'] = 'fb0af3d0-3a4c-4e96-b67a-f273fe527b04'
value['award'] = '8bafd685-aa17-43fe-95aa-37bc1c90074a'
@upgrade_step('file', '4', '5')
def file_4_5(value, system):
# http://redmine.encodedcc.org/issues/2566
# http://redmine.encodedcc.org/issues/2565
# we need to remeber bedRnaElements,
bed_files = {
'bed_bedLogR': 'bedLogR',
'bed_bedMethyl': 'bedMethyl',
'bed_broadPeak': 'broadPeak',
'bed_gappedPeak': 'gappedPeak',
'bed_narrowPeak': 'narrowPeak',
'bed_bedRnaElements': 'bedRnaElements'
}
bigBed_files = [
'bedLogR',
'bedMethyl',
'broadPeak',
'narrowPeak',
'gappedPeak',
'bedRnaElements'
]
current = value['file_format']
if current in ['bed', 'bigBed']:
value['file_format_type'] = 'unknown'
# we do not know what those formats were, wranglers will need to investigate
elif current in bigBed_files:
value['file_format_type'] = current
value['file_format'] = 'bigBed'
elif current in bed_files:
value['file_format_type'] = bed_files[current]
value['file_format'] = 'bed'
elif current in ['gff']:
value['file_format_type'] = 'unknown'
# all gffs todate were in gff3, but we wouldn't know without wranglers checking
# classify the peptide stuff
if value['output_type'] in ['mPepMapGcFt', 'mPepMapGcUnFt']:
value['file_format_type'] = 'modPepMap'
elif value['output_type'] in ['pepMapGcFt', 'pepMapGcUnFt']:
value['file_format_type'] = 'pepMap'
# http://redmine.encodedcc.org/issues/2565
output_mapping = {
# Category: Raw data
'idat green file': 'idat green channel',
'idat red file': 'idat red channel',
'reads': 'reads',
'rejected reads': 'rejected reads',
'rcc': 'reporter code counts',
'CEL': 'intensity values',
'raw data': 'raw data',
'alignments': 'alignments',
'transcriptome alignments': 'transcriptome alignments',
'spike-ins': 'spike-in alignments',
'multi-read minus signal': 'minus strand signal of multi-mapped reads',
'multi-read plus signal': 'plus strand signal of multi-mapped reads',
'multi-read signal': 'signal of multi-mapped reads',
'multi-read normalized signal': 'normalized signal of multi-mapped reads',
'raw minus signal': 'raw minus strand signal',
'raw plus signal': 'raw plus strand signal',
'raw signal': 'raw signal',
'raw normalized signal': 'raw normalized signal',
'unique minus signal': 'minus strand signal of unique reads',
'unique plus signal': 'plus strand signal of unique reads',
'unique signal': 'signal of unique reads',
'signal': 'signal',
'minus signal': 'minus strand signal',
'plus signal': 'plus strand signal',
'Base_Overlap_Signal': 'base overlap signal',
'PctSignal': 'percentage normalized signal',
'SumSignal': 'summed densities signal',
'WaveSignal': 'wavelet-smoothed signal',
'signal p-value': 'signal p-value',
'fold change over control': 'fold change over control',
'gene read counts': 'gene read counts',
'enrichment': 'enrichment',
'exon quantifications': 'exon quantifications',
'ExonsDeNovo': 'exon quantifications',
'ExonsEnsV65IAcuff': 'exon quantifications',
'ExonsGencV10': 'exon quantifications',
'ExonsGencV3c': 'exon quantifications',
'ExonsGencV7': 'exon quantifications',
'GeneDeNovo': 'gene quantifications',
'GeneEnsV65IAcuff': 'gene quantifications',
'GeneGencV10': 'gene quantifications',
'GeneGencV3c': 'gene quantifications',
'GeneGencV7': 'gene quantifications',
'genome quantifications': 'gene quantifications',
'library_fraction': 'library fraction',
'transcript quantifications': 'transcript quantifications',
'TranscriptDeNovo': 'transcript quantifications',
'TranscriptEnsV65IAcuff': 'transcript quantifications',
'TranscriptGencV10': 'transcript quantifications',
'TranscriptGencV3c': 'transcript quantifications',
'TranscriptGencV7': 'transcript quantifications',
'mPepMapGcFt': 'filtered modified peptide quantification',
'mPepMapGcUnFt': 'unfiltered modified peptide quantification',
'pepMapGcFt': 'filtered peptide quantification',
'pepMapGcUnFt': 'unfiltered peptide quantification',
'clusters': 'clusters',
'CNV': 'copy number variation',
'contigs': 'contigs',
'enhancer validation': 'enhancer validation',
'FiltTransfrags': 'filtered transcribed fragments',
'hotspots': 'hotspots',
'Junctions': 'splice junctions',
'interactions': 'long range chromatin interactions',
'Matrix': 'long range chromatin interactions',
'PrimerPeaks': 'long range chromatin interactions',
'sites': 'methylation state at CpG',
'methyl CG': 'methylation state at CpG',
'methyl CHG': 'methylation state at CHG',
'methyl CHH': 'methylation state at CHH',
'peaks': 'peaks',
'replicated peaks': 'replicated peaks',
'RbpAssocRna': 'RNA-binding protein associated mRNAs',
'splice junctions': 'splice junctions',
'Transfrags': 'transcribed fragments',
'TssGencV3c': 'transcription start sites',
'TssGencV7': 'transcription start sites',
'Valleys': 'valleys',
'Alignability': 'sequence alignability',
'Excludable': 'blacklisted regions',
'Uniqueness': 'sequence uniqueness',
'genome index': 'genome index',
'genome reference': 'genome reference',
'Primer': 'primer sequence',
'spike-in sequence': 'spike-in sequence',
'reference': 'reference',
'enhancers': 'predicted enhancers',
'enhancers_forebrain': 'predicted forebrain enhancers',
'enhancers_heart': 'predicted heart enhancers',
'enhancers_wholebrain': 'predicted whole brain enhancers',
'TssHmm': 'predicted transcription start sites',
'UniformlyProcessedPeakCalls': 'optimal idr thresholded peaks',
'Validation': 'validation',
'HMM': 'HMM predicted chromatin state'
}
old_output_type = value['output_type']
# The peptide mapping files from UCSC all assumed V10 hg19
if old_output_type in ['mPepMapGcFt', 'mPepMapGcUnFt', 'pepMapGcFt', 'pepMapGcUnFt']:
value['genome_annotation'] = 'V10'
value['assembly'] = 'hg19'
elif old_output_type in ['ExonsEnsV65IAcuff', 'GeneEnsV65IAcuff', 'TranscriptEnsV65IAcuff']:
value['genome_annotation'] = 'ENSEMBL V65'
elif old_output_type in ['ExonsGencV3c', 'GeneGencV3c', 'TranscriptGencV3c', 'TssGencV3c']:
value['genome_annotation'] = 'V3c'
elif old_output_type in ['ExonsGencV7', 'GeneGenc7', 'TranscriptGencV7', 'TssGencV7']:
value['genome_annotation'] = 'V7'
elif old_output_type in ['ExonsGencV10', 'GeneGenc10', 'TranscriptGencV10', 'TssGencV10']:
value['genome_annotation'] = 'V10'
elif old_output_type in ['spike-ins'] and value['file_format'] == 'fasta':
old_output_type = 'spike-in sequence'
elif old_output_type in ['raw data'] and value['file_format'] in ['fastq', 'csfasta', 'csqual', 'fasta']:
old_output_type = 'reads'
elif old_output_type in ['raw data'] and value['file_format'] in ['CEL', 'tar']:
old_output_type = 'CEL'
elif old_output_type in ['raw data'] and value['file_format'] in ['rcc']:
old_output_type = 'rcc'
elif old_output_type in ['raw data'] and value['lab'] == '/labs/timothy-hubbard/':
old_output_type = 'reference'
elif old_output_type in ['raw data']:
if 'These are protocol documents' in value.get('notes', ''):
old_output_type = 'reference'
elif old_output_type == 'sites' and value['file_format'] == 'tsv':
old_output_type = 'interactions'
elif old_output_type in ['Validation'] and value['file_format'] == '2bit':
old_output_type = 'genome reference'
value['output_type'] = output_mapping[old_output_type]
# label the lost bedRnaElements files #2940
bedRnaElements_files = [
'transcript quantifications',
'gene quantifications',
'exon quantifications'
]
if (
value['output_type'] in bedRnaElements_files
and value['status'] in ['deleted', 'replaced']
and value['file_format'] == 'bigBed'
and value['file_format_type'] == 'unknown'
):
value['file_format_type'] = 'bedRnaElements'
# Get the replicate information
if value.get('file_format') in ['fastq', 'fasta', 'csfasta']:
context = system['context']
root = find_root(context)
if 'replicate' in value:
replicate = root.get_by_uuid(value['replicate']).upgrade_properties()
if 'read_length' not in value:
value['read_length'] = replicate.get('read_length')
if value['read_length'] is None:
del value['read_length']
run_type_dict = {
True: 'paired-ended',
False: 'single-ended',
None: 'unknown'
}
if 'run_type' not in value:
value['run_type'] = run_type_dict[replicate.get('paired_ended')]
if value.get('paired_end') in ['2']:
value['run_type'] = 'paired-ended'
# Backfill content_md5sum #2683
if 'content_md5sum' not in value:
md5sum_content_md5sum = system['registry'].get('backfill_2683', {})
if value['md5sum'] in md5sum_content_md5sum:
value['content_md5sum'] = md5sum_content_md5sum[value['md5sum']]
@upgrade_step('file', '5', '6')
def file_5_6(value, system):
# http://redmine.encodedcc.org/issues/3019
import re
if value.get('output_type') in [
'minus strand signal of multi-mapped reads',
'plus strand signal of multi-mapped reads',
'signal of multi-mapped reads',
'normalized signal of multi-mapped reads'
]:
value['output_type'] = re.sub('multi-mapped', 'all', value['output_type'])
@upgrade_step('file', '6', '7')
def file_6_7(value, system):
# http://redmine.encodedcc.org/issues/3063
if 'file_format_specifications' in value:
value['file_format_specifications'] = list(set(value['file_format_specifications']))
if 'controlled_by' in value:
value['controlled_by'] = list(set(value['controlled_by']))
if 'derived_from' in value:
value['derived_from'] = list(set(value['derived_from']))
if 'supercedes' in value:
        value['supercedes'] = list(set(value['supercedes']))
if 'aliases' in value:
value['aliases'] = list(set(value['aliases']))
@upgrade_step('file', '7', '8')
def file_7_8(value, system):
return
@upgrade_step('file', '8', '9')
def file_8_9(value, system):
# http://redmine.encodedcc.org/issues/4183
if (value['file_format'] == 'fastq') and ('assembly' in value):
value.pop('assembly')
# http://redmine.encodedcc.org/issues/1859
if 'supercedes' in value:
value['supersedes'] = value['supercedes']
value.pop('supercedes', None)
def set_to_midnight(date_string):
release_date = datetime.strptime(date_string, '%Y-%m-%d')
min_pub_date_time = datetime.combine(release_date, time.min)
return '{:%Y-%m-%dT%H:%M:%S.%f+00:00}'.format(min_pub_date_time)
@upgrade_step('file', '10', '11')
def file_10_11(value, system):
# http://redmine.encodedcc.org/issues/5021
# http://redmine.encodedcc.org/issues/4929
# http://redmine.encodedcc.org/issues/4927
# http://redmine.encodedcc.org/issues/4903
# http://redmine.encodedcc.org/issues/4904
date_created = value.get('date_created')
if date_created.find('T') == -1:
value['date_created'] = set_to_midnight(date_created)
# http://redmine.encodedcc.org/issues/4748
aliases = []
if 'aliases' in value and value['aliases']:
aliases = value['aliases']
else:
return
aliases_to_remove = []
for i in range(0, len(aliases)):
new_alias = ''
if 'roadmap-epigenomics' in aliases[i]:
if '||' in aliases[i]:
scrub_parts = aliases[i].split('||')
date_split = scrub_parts[1].split(' ')
date = "-".join([date_split[1].strip(),
date_split[2].strip(),
date_split[5].strip()])
scrubbed_list = [scrub_parts[0].strip(), date.strip(), scrub_parts[2].strip()]
if len(scrub_parts) == 4:
scrubbed_list.append(scrub_parts[3].strip())
new_alias = '_'.join(scrubbed_list)
parts = aliases[i].split(':') if not new_alias else new_alias.split(':')
namespace = parts[0]
if namespace in ['ucsc_encode_db', 'UCSC_encode_db', 'versionof']:
# Remove the alias with the bad namespace
aliases_to_remove.append(aliases[i])
namespace = 'encode'
if namespace in ['CGC']:
namespace = namespace.lower()
rest = '_'.join(parts[1:]).strip()
# Remove or substitute bad characters and multiple whitespaces
import re
            if re.search(r'[\"#@!$^&|~;`\/\\]', rest):
                rest = re.sub(r'[\"#@!$^&|~;`\/\\]', '', rest)
                rest = ' '.join(rest.split())
            if '%' in rest:
                rest = re.sub(r'%', 'pct', rest)
            if '[' in rest or '{' in rest:
                rest = re.sub(r'[\[{]', '(', rest)
            if ']' in rest or '}' in rest:
                rest = re.sub(r'[\]}]', ')', rest)
new_alias = ':'.join([namespace, rest])
if new_alias not in aliases:
aliases[i] = new_alias
if aliases_to_remove and aliases:
for a in aliases_to_remove:
if a in aliases:
aliases.remove(a)
@upgrade_step('file', '10', '11')
def file_10_11(value, system):
# http://redmine.encodedcc.org/issues/5081
# http://redmine.encodedcc.org/issues/5049
# http://redmine.encodedcc.org/issues/4924
if not value.get('no_file_available'):
value['no_file_available'] = False
@upgrade_step('file', '11', '12')
def file_11_12(value, system):
# https://encodedcc.atlassian.net/browse/ENCD-3347
return
| mit | -7,386,508,491,949,481,000 | 38.93617 | 109 | 0.544752 | false |
tensorflow/ranking | tensorflow_ranking/python/keras/canned/dnn.py | 1 | 5354 | # Copyright 2021 The TensorFlow Ranking Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""DNN Ranking network in Keras."""
import tensorflow.compat.v2 as tf
from tensorflow_ranking.python.keras import network as network_lib
class DNNRankingNetwork(network_lib.UnivariateRankingNetwork):
"""Deep Neural Network (DNN) scoring based univariate ranking network."""
def __init__(self,
context_feature_columns=None,
example_feature_columns=None,
hidden_layer_dims=None,
activation=None,
use_batch_norm=True,
batch_norm_moment=0.999,
dropout=0.5,
name='dnn_ranking_network',
**kwargs):
"""Initializes an instance of DNN ranking network.
This network consists of feedforward linear units passed through a
non-linear
activation. The hidden size of the linear units and the activation are
specified by the user.
Args:
context_feature_columns: A dict containing all the context feature columns
used by the network. Keys are feature names, and values are instances of
classes derived from `_FeatureColumn`.
example_feature_columns: A dict containing all the example feature columns
used by the network. Keys are feature names, and values are instances of
classes derived from `_FeatureColumn`.
hidden_layer_dims: Iterable of number hidden units per layer. All layers
are fully connected. Ex. `[64, 32]` means first layer has 64 nodes and
second one has 32.
activation: Activation function applied to each layer. If `None`, will use
an identity activation, which is default behavior in Keras activations.
use_batch_norm: Whether to use batch normalization after each hidden
layer.
batch_norm_moment: Momentum for the moving average in batch normalization.
dropout: When not `None`, the probability we will drop out a given
coordinate.
name: name of Keras network.
**kwargs: keyword arguments.
Raises:
`ValueError` if `example_feature_columns` or `hidden_layer_dims` is empty.
"""
if not example_feature_columns or not hidden_layer_dims:
raise ValueError('example_feature_columns or hidden_layer_dims must not '
'be empty.')
super(DNNRankingNetwork, self).__init__(
context_feature_columns=context_feature_columns,
example_feature_columns=example_feature_columns,
name=name,
**kwargs)
self._hidden_layer_dims = [int(d) for d in hidden_layer_dims]
self._activation = activation
self._use_batch_norm = use_batch_norm
self._batch_norm_moment = batch_norm_moment
self._dropout = dropout
layers = []
if self._use_batch_norm:
layers.append(
tf.keras.layers.BatchNormalization(momentum=self._batch_norm_moment))
for _, layer_width in enumerate(self._hidden_layer_dims):
layers.append(tf.keras.layers.Dense(units=layer_width))
if self._use_batch_norm:
layers.append(
tf.keras.layers.BatchNormalization(
momentum=self._batch_norm_moment))
layers.append(tf.keras.layers.Activation(activation=self._activation))
layers.append(tf.keras.layers.Dropout(rate=self._dropout))
self._scoring_layers = layers
self._output_score_layer = tf.keras.layers.Dense(units=1)
def score(self, context_features=None, example_features=None, training=True):
"""Univariate scoring of context and one example to generate a score.
Args:
context_features: (dict) context feature names to 2D tensors of shape
[batch_size, ...].
example_features: (dict) example feature names to 2D tensors of shape
[batch_size, ...].
training: (bool) whether in training or inference mode.
Returns:
(tf.Tensor) A score tensor of shape [batch_size, 1].
"""
context_input = [
tf.keras.layers.Flatten()(context_features[name])
for name in sorted(self.context_feature_columns)
]
example_input = [
tf.keras.layers.Flatten()(example_features[name])
for name in sorted(self.example_feature_columns)
]
inputs = tf.concat(context_input + example_input, 1)
outputs = inputs
for layer in self._scoring_layers:
outputs = layer(outputs, training=training)
return self._output_score_layer(outputs, training=training)
def get_config(self):
config = super(DNNRankingNetwork, self).get_config()
config.update({
'hidden_layer_dims': self._hidden_layer_dims,
'activation': self._activation,
'use_batch_norm': self._use_batch_norm,
'batch_norm_moment': self._batch_norm_moment,
'dropout': self._dropout,
})
return config
| apache-2.0 | -1,788,232,522,490,547,200 | 38.080292 | 80 | 0.675196 | false |
berjc/aus-senate-audit | aus_senate_audit/audits/bayesian_audit.py | 1 | 5902 | # -*- coding: utf-8 -*-
""" Implements the Bayesian Audit. """
from collections import Counter
from itertools import chain
from random import gammavariate
from random import seed as set_seed
from time import time
def get_new_ballot_weights(election, r):
""" Returns new ballot weights for the given election.
The new ballot weights are constructed using Gamma Variates to draw from a Dirichlet distribution over existing
ballots, based on existing ballot weights. The sum of the new ballot weights should be equal to :param:`r`
(approximately). Note that ballot weights are rounded down.
:param :class:`BaseSenateElection` election: The senate election to generate new ballot weights for.
:param int r: The sum of the new ballot weights.
:returns: The new ballot weights generated using Gamma Variates.
:rtype: dict
"""
new_ballot_weights = {}
total = 0
for ballot in election.get_ballots():
weight = election.get_ballot_weight(ballot)
new_ballot_weights[ballot] = gammavariate(weight, 1) if weight else 0
total += new_ballot_weights[ballot]
for ballot in election.get_ballots():
new_ballot_weights[ballot] = int(r * new_ballot_weights[ballot] / total)
return new_ballot_weights
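# Added illustration (not part of the original audit code): the loop above is
# the standard Gamma-variate construction of a Dirichlet sample.  In isolation,
# for three hypothetical ballot types with weights 5, 3 and 2 and r = 100:
#
#   from random import gammavariate
#   raw = {b: gammavariate(w, 1) for b, w in {'b1': 5, 'b2': 3, 'b3': 2}.items()}
#   total = sum(raw.values())
#   new_weights = {b: int(100 * v / total) for b, v in raw.items()}
#
# The normalised draws follow Dirichlet(5, 3, 2), so the new weights sum to
# roughly 100 (integer truncation loses at most one unit per ballot type).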
def audit(election, seed, unpopular_freq_threshold, stage_counter=0, alpha=0.05, trials=100, quick=False):
""" Runs a Bayesian audit on the given senate election.
:param :class:`BaseSenateElection` election: The senate election to audit.
:param int seed: The seed for the random number generator.
:param float unpopular_freq_threshold: The upper bound on the frequency of trials a candidate is elected in order
for the candidate to be deemed unpopular.
:param int stage_counter: The current audit stage (default: 0).
:param float alpha: The error tolerance for the given audit (default: 0.05).
:param int trials: The number of trials performed per sample (default: 100).
:param bool quick: A boolean indicating whether the audit should run to completion (True) or only run one stage
(False) (default: False).
"""
print(
'Audit of {} election.\n'.format(election.get_type()),
' Election ID: {}\n'.format(election.get_election_id()),
' Candidates: {}\n'.format(election.get_candidates()),
' Number of ballots cast: {}\n'.format(election.get_num_cast_ballots()),
' Number of seats being contested: {}\n'.format(election.get_num_seats()),
' Number of trials per sample: {}\n'.format(trials),
' Random number seed: {}'.format(seed),
)
start_time = time()
set_seed(seed)
# Cast one "prior" ballot for each candidate to establish a Bayesian prior. The prior ballot is a length-one partial
# ballot with just a first choice vote for that candidate.
for cid in election.get_candidate_ids():
election.add_ballot((cid,), 1)
# Mapping from candidates to the set of ballots that elected them.
candidate_to_ballots_map = {}
candidate_outcomes = None
done = False
while True:
stage_counter += 1
election.draw_ballots() # Increase sample of cast ballots.
print(
'\nAudit stage number: {}\n'.format(stage_counter),
' Sample size (including prior ballots): {}\n'.format(election.get_num_ballots_drawn()),
)
# -- Run trials in a Bayesian manner --
# Each outcome is a tuple of candidates who have been elected in lexicographical order (NOT the order in which
# they were elected).
print(' Performing {} Bayesian trials (posterior-based election simulations) in this stage.'.format(trials))
outcomes = []
for _ in range(trials):
new_ballot_weights = get_new_ballot_weights(election, election.get_num_cast_ballots())
outcome = election.get_outcome(new_ballot_weights)
for cid in outcome:
if cid not in candidate_to_ballots_map:
candidate_to_ballots_map[cid] = new_ballot_weights
outcomes.append(outcome)
best, freq = Counter(outcomes).most_common(1)[0]
print(
' Most common outcome ({} seats):\n'.format(election.get_num_seats()),
' {}\n'.format(best),
' Frequency of most common outcome: {} / {}'.format(freq, trials),
)
candidate_outcomes = Counter(chain(*outcomes))
print(
' Fraction present in outcome by candidate:\n {}'.format(
', '.join([
'{}: {}'.format(str(cid), cid_freq / trials)
for cid, cid_freq in sorted(candidate_outcomes.items(), key=lambda x: (x[1], x[0]))
]),
),
)
if freq >= trials * (1 - alpha):
print(
'Stopping because audit confirmed outcome:\n',
' {}\n'.format(best),
'Total number of ballots examined: {}'.format(election.get_num_ballots_drawn()),
)
done = True
break
if election.get_num_ballots_drawn() >= election.get_num_cast_ballots():
print('Audit has looked at all ballots. Done.')
done = True
break
if not quick:
break
if candidate_outcomes is not None and done:
for cid, cid_freq in sorted(candidate_outcomes.items(), key=lambda x: (x[1], x[0])):
if cid_freq / trials < unpopular_freq_threshold:
print(
' One set of ballots that elected low frequency '
'candidate {} which occurred in {}% of outcomes\n'.format(str(cid), str(cid_freq)),
' {}'.format(candidate_to_ballots_map[cid]),
)
print('Elapsed time: {} seconds.'.format(time() - start_time))
return done
| apache-2.0 | 373,211,840,810,849,340 | 42.080292 | 120 | 0.619282 | false |
zenofewords/zenofewords | tests/utils.py | 1 | 1705 | from copy import copy
from django.conf import settings
from django.urls import reverse
def get_permissions(response_mapping, custom_mapping):
"""
Build permission mappings.
:param response_mapping: usually a predefined permission template (FORBIDDEN, NOT_FOUND, etc.)
:type response_mapping: dict
:param custom_mapping: key/value pairs which need to be customised
:type custom_mapping: dict
:returns: a new response method and status code mapping
:rtype: dict
"""
response_mapping = copy(response_mapping)
response_mapping.update(custom_mapping)
return response_mapping
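# Added usage sketch (the FORBIDDEN template below is hypothetical and not
# defined in this module): start from a template that denies everything and
# override single methods for the client type under test.
#
#   FORBIDDEN = {'get': 403, 'post': 403, 'put': 403, 'delete': 403}
#   get_permissions(FORBIDDEN, {'get': 200})
#   # -> {'get': 200, 'post': 403, 'put': 403, 'delete': 403}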
def assert_permissions(client_type, response_code_mapping, client_mapping, url_reverse):
"""
Test URL response depending on client type.
:param client_type: type of client (anonymous, user, admin, etc.)
:type client_type: string
:param response_code_mapping: request type with a matching response code
:type response_code_mapping: dict
:param client_mapping: a fixture that contains client types
:type client_mapping: dict
:param url_reverse: tuple of reverse strings for URLs which receive requests
:type url_reverse: tuple
"""
for method in response_code_mapping.keys():
for url in url_reverse:
response_code = getattr(
client_mapping[client_type], method
)(reverse(url), secure=not settings.DEBUG).status_code
            assert response_code == response_code_mapping[method], (
                'client: {}, method: {}, received: {}, expected: {}'.format(
                    client_type, method, response_code, response_code_mapping[method]
                )
            )
| mit | -4,056,573,595,075,421,000 | 35.276596 | 98 | 0.674487 | false |
lmazuel/azure-sdk-for-python | azure-servicefabric/azure/servicefabric/models/application_health_state_filter.py | 1 | 6307 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ApplicationHealthStateFilter(Model):
"""Defines matching criteria to determine whether a application should be
included in the cluster health chunk.
One filter can match zero, one or multiple applications, depending on its
properties.
.
:param application_name_filter: The name of the application that matches
the filter, as a fabric uri. The filter is applied only to the specified
application, if it exists.
If the application doesn't exist, no application is returned in the
cluster health chunk based on this filter.
If the application exists, it is included in the cluster health chunk if
it respects the other filter properties.
If not specified, all applications are matched against the other filter
members, like health state filter.
:type application_name_filter: str
:param application_type_name_filter: The name of the application type that
matches the filter.
If specified, the filter is applied only to applications of the selected
application type, if any exists.
If no applications of the specified application type exists, no
application is returned in the cluster health chunk based on this filter.
Each application of the specified application type is included in the
cluster health chunk if it respects the other filter properties.
If not specified, all applications are matched against the other filter
members, like health state filter.
:type application_type_name_filter: str
:param health_state_filter: The filter for the health state of the
applications. It allows selecting applications if they match the desired
health states.
The possible values are integer value of one of the following health
states. Only applications that match the filter are returned. All
applications are used to evaluate the cluster aggregated health state.
If not specified, default value is None, unless the application name or
the application type name are specified. If the filter has default value
and application name is specified, the matching application is returned.
The state values are flag based enumeration, so the value could be a
combination of these values obtained using bitwise 'OR' operator.
For example, if the provided value is 6, it matches applications with
HealthState value of OK (2) and Warning (4).
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in order to
return no results on a given collection of states. The value is 1.
- Ok - Filter that matches input with HealthState value Ok. The value is
2.
- Warning - Filter that matches input with HealthState value Warning. The
value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The value is
65535.
. Default value: 0 .
:type health_state_filter: int
:param service_filters: Defines a list of filters that specify which
services to be included in the returned cluster health chunk as children
of the application. The services are returned only if the parent
application matches a filter.
If the list is empty, no services are returned. All the services are used
to evaluate the parent application aggregated health state, regardless of
the input filters.
The application filter may specify multiple service filters.
For example, it can specify a filter to return all services with health
state Error and another filter to always include a service identified by
its service name.
:type service_filters:
list[~azure.servicefabric.models.ServiceHealthStateFilter]
:param deployed_application_filters: Defines a list of filters that
specify which deployed applications to be included in the returned cluster
health chunk as children of the application. The deployed applications are
returned only if the parent application matches a filter.
If the list is empty, no deployed applications are returned. All the
deployed applications are used to evaluate the parent application
aggregated health state, regardless of the input filters.
The application filter may specify multiple deployed application filters.
For example, it can specify a filter to return all deployed applications
with health state Error and another filter to always include a deployed
application on a specified node.
:type deployed_application_filters:
list[~azure.servicefabric.models.DeployedApplicationHealthStateFilter]
"""
_attribute_map = {
'application_name_filter': {'key': 'ApplicationNameFilter', 'type': 'str'},
'application_type_name_filter': {'key': 'ApplicationTypeNameFilter', 'type': 'str'},
'health_state_filter': {'key': 'HealthStateFilter', 'type': 'int'},
'service_filters': {'key': 'ServiceFilters', 'type': '[ServiceHealthStateFilter]'},
'deployed_application_filters': {'key': 'DeployedApplicationFilters', 'type': '[DeployedApplicationHealthStateFilter]'},
}
def __init__(self, application_name_filter=None, application_type_name_filter=None, health_state_filter=0, service_filters=None, deployed_application_filters=None):
super(ApplicationHealthStateFilter, self).__init__()
self.application_name_filter = application_name_filter
self.application_type_name_filter = application_type_name_filter
self.health_state_filter = health_state_filter
self.service_filters = service_filters
self.deployed_application_filters = deployed_application_filters
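# Added usage sketch (not part of the generated SDK code): HealthStateFilter
# values are bit flags, so Ok (2) | Warning (4) == 6 matches applications that
# are either Ok or Warning, and a nested ServiceHealthStateFilter can keep
# only services in Error (8):
#
#   app_filter = ApplicationHealthStateFilter(
#       application_name_filter='fabric:/MyApp',
#       health_state_filter=2 | 4,
#       service_filters=[ServiceHealthStateFilter(health_state_filter=8)],
#   )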
| mit | -3,039,962,441,478,437,400 | 55.81982 | 168 | 0.721896 | false |
guillaume-philippon/aquilon | lib/aquilon/worker/formats/chassis.py | 1 | 1715 | # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2013,2014,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Chassis formatter."""
from aquilon.aqdb.model import Chassis
from aquilon.worker.formats.formatters import ObjectFormatter
from aquilon.worker.formats.hardware_entity import HardwareEntityFormatter
class ChassisFormatter(HardwareEntityFormatter):
def format_raw(self, chassis, indent="", embedded=True,
indirect_attrs=True):
details = [super(ChassisFormatter, self).format_raw(chassis, indent)]
for slot in chassis.slots:
if slot.machine:
if slot.machine.primary_name:
hostname = slot.machine.primary_name
else:
hostname = "no hostname"
details.append(indent + " Slot #%d: %s (%s)" %
(slot.slot_number, slot.machine.label, hostname))
else:
details.append(indent + " Slot #%d: Empty" % slot.slot_number)
return "\n".join(details)
ObjectFormatter.handlers[Chassis] = ChassisFormatter()
| apache-2.0 | 9,082,157,566,535,196,000 | 38.883721 | 80 | 0.668222 | false |
bcarr092/CAHAL | test/test_driver.py | 1 | 1595 | try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
import cahal_tests
import unittest
import sys
from test_cahal import TestsCAHAL
from test_cahal_device import TestsCAHALDevice
from test_cahal_device_stream import TestsCAHALDeviceStream
from test_cahal_audio_format_description import \
TestsCAHALAudioFormatDescription
cahal_tests.cpc_log_set_log_level( cahal_tests.CPC_LOG_LEVEL_NO_LOGGING )
cahal_tests.python_cahal_initialize()
alltests = unittest.TestSuite ( [ \
unittest.TestLoader().loadTestsFromTestCase( TestsCAHAL ), \
unittest.TestLoader().loadTestsFromTestCase( TestsCAHALDevice ), \
unittest.TestLoader().loadTestsFromTestCase( TestsCAHALDeviceStream ), \
unittest.TestLoader().loadTestsFromTestCase ( \
TestsCAHALAudioFormatDescription \
) \
] )
result = unittest.TextTestRunner( verbosity=2 ).run( alltests )
cahal_tests.cahal_terminate()
if( len( result.errors ) == 0 and len( result.failures ) == 0 ):
sys.exit( 0 )
else:
sys.exit( -1 )
| apache-2.0 | -5,277,244,177,594,108,000 | 43.305556 | 85 | 0.497179 | false |
mlperf/training_results_v0.5 | v0.5.0/nvidia/submission/code/rnn_translator/pytorch/seq2seq/data/sampler.py | 1 | 5047 | import logging
import torch
from torch.utils.data.sampler import Sampler
from mlperf_compliance import mlperf_log
from seq2seq.utils import gnmt_print
from seq2seq.utils import get_world_size, get_rank
class BucketingSampler(Sampler):
"""
Distributed data sampler supporting bucketing by sequence length.
"""
def __init__(self, dataset, batch_size, seeds, bucketing=True,
world_size=None, rank=None):
"""
Constructor for the BucketingSampler.
:param dataset: dataset
:param batch_size: batch size
:param bucketing: if True enables bucketing by sequence length
:param world_size: number of processes participating in distributed
training
:param rank: rank of the current process within world_size
"""
if world_size is None:
world_size = get_world_size()
if rank is None:
rank = get_rank()
self.dataset = dataset
self.world_size = world_size
self.rank = rank
self.epoch = 0
self.bucketing = bucketing
self.seeds = seeds
self.batch_size = batch_size
self.global_batch_size = batch_size * world_size
self.data_len = len(self.dataset)
self.num_samples = self.data_len // self.global_batch_size \
* self.global_batch_size
def __iter__(self):
gnmt_print(key=mlperf_log.INPUT_ORDER)
# deterministically shuffle based on epoch
g = torch.Generator()
seed = self.seeds[self.epoch]
logging.info(f'Sampler for epoch {self.epoch} uses seed {seed}')
g.manual_seed(seed)
# generate permutation
indices = torch.randperm(self.data_len, generator=g)
# make indices evenly divisible by (batch_size * world_size)
indices = indices[:self.num_samples]
# splits the dataset into chunks of 'batches_in_shard' global batches
# each, sorts by (src + tgt) sequence length within each chunk,
# reshuffles all global batches
if self.bucketing:
batches_in_shard = 80
shard_size = self.global_batch_size * batches_in_shard
gnmt_print(key=mlperf_log.INPUT_SHARD, value=shard_size)
nshards = (self.num_samples + shard_size - 1) // shard_size
lengths = self.dataset.lengths[indices]
shards = [indices[i * shard_size:(i+1) * shard_size] for i in range(nshards)]
len_shards = [lengths[i * shard_size:(i+1) * shard_size] for i in range(nshards)]
indices = []
for len_shard in len_shards:
_, ind = len_shard.sort()
indices.append(ind)
output = tuple(shard[idx] for shard, idx in zip(shards, indices))
indices = torch.cat(output)
# global reshuffle
indices = indices.view(-1, self.global_batch_size)
order = torch.randperm(indices.shape[0], generator=g)
indices = indices[order, :]
indices = indices.view(-1)
assert len(indices) == self.num_samples
# build indices for each individual worker
# consecutive ranks are getting consecutive batches,
# default pytorch DistributedSampler assigns strided batches
# with offset = length / world_size
indices = indices.view(-1, self.batch_size)
indices = indices[self.rank::self.world_size].contiguous()
indices = indices.view(-1)
indices = indices.tolist()
assert len(indices) == self.num_samples // self.world_size
return iter(indices)
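    # Added illustration (not part of the original sampler): with world_size=2
    # and batch_size=2, the shuffled indices viewed as batches
    #   [[0, 1], [2, 3], [4, 5], [6, 7]]
    # are split so rank 0 gets [0, 1] and [4, 5] while rank 1 gets [2, 3] and
    # [6, 7]: consecutive ranks take consecutive batches of each global batch,
    # unlike the strided assignment used by the default DistributedSampler.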
def __len__(self):
return self.num_samples // self.world_size
def set_epoch(self, epoch):
"""
Sets current epoch index. This value is used to seed RNGs in __iter__()
function.
:param epoch: index of current epoch
"""
self.epoch = epoch
class StaticDistributedSampler(Sampler):
def __init__(self, dataset, batch_size, pad, world_size=None, rank=None):
if world_size is None:
world_size = get_world_size()
if rank is None:
rank = get_rank()
self.world_size = world_size
global_batch_size = batch_size * world_size
data_len = len(dataset)
num_samples = (data_len + global_batch_size - 1) \
// global_batch_size * global_batch_size
self.num_samples = num_samples
indices = list(range(data_len))
if pad:
indices += [0] * (num_samples - len(indices))
else:
indices += [-1] * (num_samples - len(indices))
indices = torch.tensor(indices)
indices = indices.view(-1, batch_size)
indices = indices[rank::world_size].contiguous()
indices = indices.view(-1)
indices = indices[indices != -1]
indices = indices.tolist()
self.indices = indices
def __iter__(self):
return iter(self.indices)
def __len__(self):
return len(self.indices)
| apache-2.0 | -4,972,239,737,936,623,000 | 32.646667 | 93 | 0.597781 | false |
smenon8/AlgDataStruct_practice | practice_problems/MorePractice.py | 1 | 2289 | # Divide and Conquer Algorithm for finding the maximum sub array sum
def maxSubArraySum(arr,h,t):
if h == t:
return arr[h]
m = (h+t)//2
# 1. find max in left subarray
leftSum = maxSubArraySum(arr,h,m)
# 2. find max in right subarray
rightSum = maxSubArraySum(arr,m+1,t)
# 3. find max in mid-point crossing
midPointSum = midPointCrossSum(arr,h,m,t)
return max(leftSum,rightSum,midPointSum)
def midPointCrossSum(arr,h,m,t):
    # Scan left from the mid-point towards the head, keeping the best running sum
sum = 0
leftSum = arr[m]
for i in range(m-1,h-1,-1):
sum += arr[i]
if sum > leftSum:
leftSum = sum
    # Scan right from the mid-point towards the tail, keeping the best running sum
sum = 0
rightSum = arr[m+1]
for i in range(m+2,t+1):
sum += arr[i]
if sum > rightSum:
rightSum = sum
return leftSum+rightSum
arr = [-2,-5,6,-2,-3,1,5,-6]
print("Maximum Sub Array Sum")
print(maxSubArraySum(arr,0,len(arr)-1))
print()
# Similar problem: given a target sum, find the pair of numbers which add up to it
def twoSumProblemSort(arr,n):
arr.sort()
head = 0
tail = len(arr)-1
print(arr)
while head <= tail:
s = arr[head] + arr[tail]
if s == n:
return arr[head],arr[tail]
elif s < n:
head += 1
else:
tail -= 1
return False
arr = [6,8,2,3,10,11]
print("Two sum problem")
print(twoSumProblemSort(arr,10))
print()
'''
Find a peak element (one strictly greater than both neighbours) by binary search:
1. Which peak is found depends on the pivot, i.e. the middle element.
2. If the middle element is smaller than its left neighbour, a peak must exist in the left half, so recurse left.
3. Otherwise, if it is smaller than its right neighbour, recurse into the right half.
'''
def findPeakEle(arr,low,high,n):
mid = (low+high) // 2
# Handling the boundary cases
if mid == 0 or mid == n-1: # reached the first or the last element - boundary case
return arr[mid],mid
else:
if arr[mid] > arr[mid-1] and arr[mid] > arr[mid+1]: # definition of peak element
return arr[mid],mid
else:
if arr[mid] < arr[mid-1]: # peak element will lie to the left
return findPeakEle(arr,low,mid-1,n)
else:
if arr[mid] < arr[mid+1]: # peak element will lie to the right
return findPeakEle(arr,mid+1,high,n)
arr = [2,20,19,21,23,90,67]
n = len(arr)
print("Find peak element")
print(findPeakEle(arr,0,n-1,n))
print() | mit | 6,559,211,954,744,387,000 | 23.105263 | 118 | 0.671472 | false |
argv-minus-one/obnam | obnamlib/encryption.py | 1 | 7876 | # Copyright 2011 Lars Wirzenius
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import shutil
import subprocess
import tempfile
import tracing
import obnamlib
class EncryptionError(obnamlib.ObnamError):
pass
class GpgError(EncryptionError):
msg = 'gpg failed with exit code {returncode}:\n{stderr}'
def generate_symmetric_key(numbits, filename='/dev/random'):
'''Generate a random key of at least numbits for symmetric encryption.'''
tracing.trace('numbits=%d', numbits)
bytes = (numbits + 7) / 8
f = open(filename, 'rb')
key = f.read(bytes)
f.close()
return key.encode('hex')
class SymmetricKeyCache(object):
'''Cache symmetric keys in memory.'''
def __init__(self):
self.clear()
def get(self, repo, toplevel):
if repo in self.repos and toplevel in self.repos[repo]:
return self.repos[repo][toplevel]
return None
def put(self, repo, toplevel, key):
if repo not in self.repos:
self.repos[repo] = {}
self.repos[repo][toplevel] = key
def clear(self):
self.repos = {}
def _gpg_pipe(args, data, passphrase):
'''Pipe things through gpg.
With the right args, this can be either an encryption or a decryption
operation.
For safety, we give the passphrase to gpg via a file descriptor.
The argument list is modified to include the relevant options for that.
The data is fed to gpg via a temporary file, readable only by
the owner, to avoid congested pipes.
'''
# Open pipe for passphrase, and write it there. If passphrase is
# very long (more than 4 KiB by default), this might block. A better
# implementation would be to have a loop around select(2) to do pipe
# I/O when it can be done without blocking. Patches most welcome.
keypipe = os.pipe()
os.write(keypipe[1], passphrase + '\n')
os.close(keypipe[1])
# Actually run gpg.
argv = ['gpg', '--passphrase-fd', str(keypipe[0]), '-q', '--batch',
'--no-textmode'] + args
tracing.trace('argv=%s', repr(argv))
p = subprocess.Popen(argv, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate(data)
os.close(keypipe[0])
# Return output data, or deal with errors.
if p.returncode: # pragma: no cover
raise GpgError(returncode=p.returncode, stderr=err)
return out
def encrypt_symmetric(cleartext, key):
'''Encrypt data with symmetric encryption.'''
return _gpg_pipe(['-c'], cleartext, key)
def decrypt_symmetric(encrypted, key):
'''Decrypt encrypted data with symmetric encryption.'''
return _gpg_pipe(['-d'], encrypted, key)
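# Added usage sketch (not part of the original module): a symmetric round
# trip with the helpers above.  generate_symmetric_key reads /dev/random by
# default, which may block waiting for entropy; /dev/urandom is shown purely
# for illustration.
#
#   key = generate_symmetric_key(256, filename='/dev/urandom')
#   blob = encrypt_symmetric('secret backup data', key)
#   assert decrypt_symmetric(blob, key) == 'secret backup data'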
def _gpg(args, stdin='', gpghome=None):
'''Run gpg and return its output.'''
env = dict()
env.update(os.environ)
if gpghome is not None:
env['GNUPGHOME'] = gpghome
tracing.trace('gpghome=%s' % gpghome)
argv = ['gpg', '-q', '--batch', '--no-textmode'] + args
tracing.trace('argv=%s', repr(argv))
p = subprocess.Popen(argv, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
out, err = p.communicate(stdin)
# Return output data, or deal with errors.
if p.returncode: # pragma: no cover
raise GpgError(returncode=p.returncode, stderr=err)
return out
def get_public_key(keyid, gpghome=None):
'''Return the ASCII armored export form of a given public key.'''
return _gpg(['--export', '--armor', keyid], gpghome=gpghome)
def get_public_key_user_ids(keyid, gpghome=None): # pragma: no cover
'''Return the ASCII armored export form of a given public key.'''
user_ids = []
output = _gpg(['--with-colons', '--list-keys', keyid], gpghome=gpghome)
for line in output.splitlines():
token = line.split(":")
if len(token) >= 10:
user_id = token[9].strip().replace(r'\x3a', ":")
if user_id:
user_ids.append(user_id)
return user_ids
class Keyring(object):
'''A simplistic representation of GnuPG keyrings.
Just enough functionality for obnam's purposes.
'''
_keyring_name = 'pubring.gpg'
def __init__(self, encoded=''):
self._encoded = encoded
self._gpghome = None
self._keyids = None
def _setup(self):
self._gpghome = tempfile.mkdtemp()
f = open(self._keyring, 'wb')
f.write(self._encoded)
f.close()
_gpg(['--import-ownertrust'], stdin='''\
# List of assigned trustvalues, created Sun 01 Dec 2013 19:13:26 GMT
# (Use "gpg --import-ownertrust" to restore them)
''', gpghome=self._gpghome)
def _cleanup(self):
shutil.rmtree(self._gpghome)
self._gpghome = None
@property
def _keyring(self):
return os.path.join(self._gpghome, self._keyring_name)
def _real_keyids(self):
output = self.gpg(False, ['--list-keys', '--with-colons'])
keyids = []
for line in output.splitlines():
fields = line.split(':')
if len(fields) >= 5 and fields[0] == 'pub':
keyids.append(fields[4])
return keyids
def keyids(self):
if self._keyids is None:
self._keyids = self._real_keyids()
return self._keyids
def __str__(self):
return self._encoded
def __contains__(self, keyid):
return keyid in self.keyids()
def _reread_keyring(self):
f = open(self._keyring, 'rb')
self._encoded = f.read()
f.close()
self._keyids = None
def add(self, key):
self.gpg(True, ['--import'], stdin=key)
def remove(self, keyid):
self.gpg(True, ['--delete-key', '--yes', keyid])
def gpg(self, reread, *args, **kwargs):
self._setup()
kwargs['gpghome'] = self._gpghome
try:
result = _gpg(*args, **kwargs)
except BaseException: # pragma: no cover
self._cleanup()
raise
else:
if reread:
self._reread_keyring()
self._cleanup()
return result
class SecretKeyring(Keyring):
'''Same as Keyring, but for secret keys.'''
_keyring_name = 'secring.gpg'
def _real_keyids(self):
output = self.gpg(False, ['--list-secret-keys', '--with-colons'])
keyids = []
for line in output.splitlines():
fields = line.split(':')
if len(fields) >= 5 and fields[0] == 'sec':
keyids.append(fields[4])
return keyids
def encrypt_with_keyring(cleartext, keyring):
'''Encrypt data with all keys in a keyring.'''
recipients = []
for keyid in keyring.keyids():
recipients += ['-r', keyid]
return keyring.gpg(False,
['-e',
'--trust-model', 'always',
'--no-encrypt-to',
'--no-default-recipient',
] + recipients,
stdin=cleartext)
def decrypt_with_secret_keys(encrypted, gpghome=None):
'''Decrypt data using secret keys GnuPG finds on its own.'''
return _gpg(['-d'], stdin=encrypted, gpghome=gpghome)
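# Illustrative sketch (assumes 'pubkey' holds an ASCII-armored public key,
# e.g. from get_public_key(keyid), and that the matching secret key is in the
# local GnuPG keyring). Ties the keyring helpers above together:
#
#   keyring = Keyring()
#   keyring.add(pubkey)
#   encrypted = encrypt_with_keyring('backup data', keyring)
#   cleartext = decrypt_with_secret_keys(encrypted)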
| gpl-3.0 | 1,874,813,231,981,704,400 | 27.536232 | 77 | 0.603225 | false |
tuxar-uk/Merlyn | Merlyn.py | 1 | 6041 | """ Merlyn Speech Control for PC
We load in commands (& spells) generated by lmtool.py
and also language files generated by the Sphinx lmtool
http://www.speech.cs.cmu.edu/tools/lmtool-new.html
then open up a stream of words from the mic via LiveSpeech
and try to parse it into commands and possibly some parameters.
If successful, hand off to the OS.
The parsing will need improving as the syntax evolves...
Copyright 2017 Alan Richmond @ AILinux.net
The MIT License https://opensource.org/licenses/MIT
"""
import os
import sys
from subprocess import call
from pocketsphinx import LiveSpeech, get_model_path
class Merlyn:
""" Merlyn Speech Control for PC"
"""
def __init__(self, num):
""" init with the number given by the lmtool
"""
self.num = str(num)
self.mer = os.path.expanduser("~/Merlyn")
cmds = os.path.join(self.mer, 'cmds/all.txt')
lang = os.path.join(self.mer, 'lang/')
self.lm = os.path.join(lang, self.num + '.lm')
self.dic = os.path.join(lang, self.num + '.dic')
# Read in and store commands
try:
lines = open(cmds)
except IOError:
sys.exit("Could not open file " + cmds)
count = 0
self.commands = {}
for line in lines:
line = line.strip()
if len(line) > 1 and line[0] != "#": # skip over empty lines & comments
(cmd, spell) = line.split(":",1)
self.commands[cmd.strip().lower()] = spell.strip()
count += 1
def parse_the(self, cmd):
""" Parse the text command supplied by the PocketSphinx listener.
"""
self.cmd = cmd
self.spell = None
self.params = []
# start with the whole phrase
while self.cmd not in self.commands: # if not recognised then
words = self.cmd.split() # split up phrase into words
if len(words) < 2: break
word = words[-1] # split off last word
del words[-1]
# This is probably temporary. I'm assuming only integer params for now...
if word == "to": # Sphinx thinks user said 'to'
word = "two" # but more likely they said 'two'
elif word == "for": # Sphinx thinks user said 'for'
word = "four" # you get the idea...
self.params.append(word) # save words not part of the command
self.cmd = ' '.join(words).strip() # re-join words for possible command
if self.cmd not in self.commands:
return None
self.params.reverse() # above loop picked off words from right
self.spell = self.commands[self.cmd] # this is the spell that Merlyn will utter
if self.params: # are there some params?
par = ' '.join(self.params).strip() # join them back into a string
try: # for now I'm assuming ints only
num = str(text2int(par))
except:
print("Not a good num:", par)
try:
self.spell = self.spell % num # substitute in the spell
except: ok = False
return self.spell
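    # Illustrative sketch with a hypothetical cmds/all.txt entry
    # "volume : amixer set Master %s%%": parse_the("volume fifty") strips the
    # trailing word, converts it via text2int() and returns
    # "amixer set Master 50%". Actual spells depend on the user's command file.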
def printcmd(self):
# print("<", self.cmd, self.params, ' {', self.spell, ' }')
print("<", self.cmd, self.params)
def parse_do(self, cmd):
""" Parse the command then do it.
"""
spell = self.parse_the(cmd)
if spell is None: return
self.printcmd()
try:
retcode = call(spell, shell=True) # here OS, do this!
if retcode < 0:
print("Child was terminated by signal", -retcode, file=sys.stderr)
except OSError as e:
print("Execution failed:", e, file=sys.stderr)
def do_demo(self):
""" Run Merlyn's self-demo.
"""
demo = os.path.join(self.mer, 'demo/demo.sh &')
print(demo)
call(demo, shell=True) # here OS, do this!
def listen(self):
""" Top-level loop to get text from the user's microphone,
check for some special commands; if we're expecting a command then do it.
"""
print( "| Say 'Merlyn' to make him/her listen.\n\
| Merlyn will obey the next command. If that is 'keep listening' then\n\
| Merlyn will continue to obey commands until you say 'stop listening'.\n\
| Say 'help' to see this message again, and to get further help.")
listening = obey = first = False
# https://pypi.python.org/pypi/pocketsphinx
speech = LiveSpeech(hmm=os.path.join(get_model_path(), 'en-us'), lm=self.lm, dic=self.dic)
for spoken in speech: # get user's command
cmd = str(spoken).lower()
if cmd == 'merlyn': # need to hear my name before doing stuff
obey = True # flag to obey next command
first = True # obey flag will be toggled off after first use
elif cmd == 'keep listening': # or be told to keep listening for commands
listening = True
elif cmd == 'stop listening': # until told to stop
listening = False
obey = True # need to acknowledge the stop
elif cmd == 'exit': # we're done...
break
elif cmd == '': # somehow got an empty command
continue
if obey or listening: # attempt to recognise the command and params
self.parse_do(cmd)
if not first:
obey = False
first = False
# http://stackoverflow.com/questions/493173/is-there-a-way-to-convert-number-words-to-integers
def text2int(textnum, numwords={}):
if not numwords:
units = [
"zero", "one", "two", "three", "four", "five", "six", "seven", "eight",
"nine", "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen",
"sixteen", "seventeen", "eighteen", "nineteen",
]
tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]
scales = ["hundred", "thousand", "million", "billion", "trillion"]
numwords["and"] = (1, 0)
for idx, word in enumerate(units): numwords[word] = (1, idx)
for idx, word in enumerate(tens): numwords[word] = (1, idx * 10)
for idx, word in enumerate(scales): numwords[word] = (10 ** (idx * 3 or 2), 0)
current = result = 0
for word in textnum.split():
if word == 'full': word = 'four' # kludge
if word == 'q': word = 'two' # kludge
if word not in numwords:
raise Exception("Illegal word: " + word)
scale, increment = numwords[word]
current = current * scale + increment
if scale > 100:
result += current
current = 0
return result + current
| mit | -4,345,104,676,021,986,300 | 30.463542 | 99 | 0.636484 | false |
DLR-SC/DataFinder | contrib/script_examples/command_line/items.py | 1 | 4139 | # $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Demonstrates different items API functions.
"""
from StringIO import StringIO
import sys
from datafinder.script_api.repository import connectRepository, \
getWorkingRepository, setWorkingRepository
from datafinder.script_api.item import item_support
__version__ = "$Revision-Id:$"
def unmanagedRepository(basePath):
""" Demonstrates the script API using the local file system as unmanaged repository. """
print "Connecting repository file:///..."
repository = connectRepository("file:///")
setWorkingRepository(repository)
assert repository == getWorkingRepository()
print "\nChecking base path and creating children..."
print item_support.itemDescription(basePath)
item_support.refresh(basePath)
print item_support.getChildren(basePath)
collectionPath = basePath + "/collection"
item_support.createCollection(collectionPath)
print item_support.itemDescription(collectionPath)
leafPath = basePath + "/leaf"
item_support.createLeaf(leafPath)
item_support.storeData(leafPath, StringIO("some data..."))
print item_support.itemDescription(leafPath)
print "Put in the following data:"
fileObject = item_support.retrieveData(leafPath)
print fileObject.read()
fileObject.close()
linkPath = basePath + "/link.lnk"
item_support.createLink(linkPath, collectionPath)
print item_support.itemDescription(linkPath)
print item_support.getChildren(basePath)
print "\nCopy and move some things..."
copyLeafPath = collectionPath + "/leaf_copy"
item_support.copy(leafPath, copyLeafPath)
print item_support.getChildren(collectionPath)
item_support.move(copyLeafPath, collectionPath + "/leaf")
print item_support.getChildren(collectionPath)
print "\nArchiving everything..."
item_support.createArchive(basePath, collectionPath)
print "\nWalking the base path..."
print item_support.walk(basePath)
print "\nCleaning up..."
for path in [collectionPath, leafPath, linkPath]:
item_support.delete(path)
print item_support.walk(basePath)
if __name__ == "__main__":
if len(sys.argv) < 2:
print "Call: items.py basePath"
else:
basePath_ = unicode(sys.argv[1])
unmanagedRepository(basePath_)
| bsd-3-clause | 7,515,495,566,930,876,000 | 35.627273 | 92 | 0.70476 | false |
visionegg/visionegg | docs/conf.py | 1 | 8232 | # -*- coding: utf-8 -*-
#
# Vision Egg documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 29 21:31:07 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Vision Egg'
copyright = u'2001-2014, Vision Egg Authors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.2.1'
# The full version, including alpha/beta/rc tags.
release = '1.2.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'VisionEggdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'VisionEgg.tex', u'Vision Egg Documentation',
u'Andrew Straw', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'visionegg', u'Vision Egg Documentation',
[u'Andrew Straw'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'VisionEgg', u'Vision Egg Documentation',
u'Andrew Straw', 'VisionEgg', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| lgpl-2.1 | -9,219,821,388,749,676,000 | 30.54023 | 79 | 0.706876 | false |
soybean217/lora-python | UServer/http_api_no_auth/api/api_gateway.py | 1 | 5817 | import json
from http_api_no_auth.api import api, root
from http_api_no_auth.api.decorators import gateway_belong_to_user
from userver.object.gateway import Gateway, Location
from utils.errors import KeyDuplicateError, PatchError
from .forms.form_gateway import AddGatewayForm, PatchGateway
from ..http_auth import auth
from flask import request, Response
from .forms import get_formdata_from_json_or_form
from userver.object.statistician_gateway import Statistician
from utils.log import logger
import time
@api.route(root + 'gateways', methods=['GET', 'POST'])
@auth.auth_required
def gateways(user):
if request.method == 'GET':
logger.info('TIMESTAMP \'gateways\' HTTP[GET]:%s' % time.time())
gateways_list = []
logger.info('TIMESTAMP \'gateways\' QueryBegin:%s' % time.time())
gateways = Gateway.query.filter_by(user_id=user.id)
logger.info('TIMESTAMP \'gateways\' QueryOver:%s' % time.time())
for gateway in gateways:
dict = gateway.obj_to_dict()
gateways_list.append(dict)
logger.info('TIMESTAMP \'gateways\' obj_to_dict_Over:%s' % time.time())
respond_data = json.dumps(gateways_list)
logger.info('TIMESTAMP \'gateways\' obj_to_dict_Over:%s' % time.time())
logger.info('TIMESTAMP \'gateways\' SendRespond:%s' % time.time())
return Response(status=200, response=respond_data)
elif request.method == 'POST':
formdata = get_formdata_from_json_or_form(request)
add_gateway = AddGatewayForm(formdata)
if add_gateway.validate():
try:
gateway = import_gateway(user, add_gateway)
gateway.save()
new_gateway = Gateway.query.get(gateway.id)
return Response(status=201, response=json.dumps(new_gateway.obj_to_dict()))
except KeyDuplicateError as error:
errors = {'mac_addr': str(error)}
return Response(status=406, response=json.dumps({"errors": errors}))
except AssertionError as error:
return Response(status=406, response=json.dumps({"errors": {"other": str(error)}}))
else:
errors = {}
for key, value in add_gateway.errors.items():
errors[key] = value[0]
return Response(status=406, response=json.dumps({"errors": errors}))
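# Illustrative sketch (hypothetical values): the POST branch above expects an
# AddGatewayForm payload. Judging from import_gateway() further down, a JSON
# body would look roughly like:
#
#   {
#       "mac_addr": "01:23:45:67:89:ab",
#       "name": "roof-gateway",
#       "platform": "rpi",
#       "model": "imst",
#       "freq_plan": "EU863-870",
#       "longitude": 116.3, "latitude": 39.9, "altitude": 50
#   }
#
# Field names come from the form accesses in import_gateway(); the values and
# accepted choices are made up here.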
@api.route(root + 'gateways/<gateway_id>/statistician/hourly', methods=['GET', 'POST'])
@auth.auth_required
@gateway_belong_to_user
def gateway_statistician_hourly(user, gateway):
"""
    :param gateway_id: gateway id
    :return: uplink/downlink statistics for this gateway, grouped by hour
"""
logger.info('TIMESTAMP \'gateways/<gateway_id>/statistician/hourly\' HTTP[GET]:%s' % time.time())
statistician = Statistician(gateway.id)
hourly = statistician.count_in_hour()
logger.info('TIMESTAMP \'gateways/<gateway_id>/statistician/hourly\' SendRespond:%s' % time.time())
return json.dumps(hourly), 200
@api.route(root + 'gateways/<gateway_id>/statistician/daily', methods=['GET', 'POST'])
@auth.auth_required
@gateway_belong_to_user
def gateway_statistician_daily(user, gateway):
"""
    :param gateway_id: gateway id
    :return: uplink/downlink statistics for this gateway, grouped by day
"""
logger.info('TIMESTAMP \'gateways/<gateway_id>/statistician/daily\' HTTP[GET]:%s' % time.time())
statistician = Statistician(gateway.id)
daily = statistician.count_in_daily()
logger.info('TIMESTAMP \'gateways/<gateway_id>/statistician/daily\' SendRespond:%s' % time.time())
return json.dumps(daily), 200
@api.route(root + 'gateways/<gateway_id>', methods=['GET', 'DELETE', 'PATCH', 'POST'])
@auth.auth_required
@gateway_belong_to_user
def gateway(user, gateway):
if request.method == 'GET':
return Response(status=200, response=json.dumps(gateway.obj_to_dict()))
elif request.method == 'PATCH':
try:
formdata = get_formdata_from_json_or_form(request)
PatchGateway.patch(gateway, formdata)
return json.dumps(gateway.obj_to_dict()), 200
except (AssertionError, PatchError, ValueError) as error:
return json.dumps({'errors': str(error)}), 406
elif request.method == 'DELETE':
gateway.delete()
return json.dumps({'success': True}), 200
elif request.method == 'POST':
formdata = get_formdata_from_json_or_form(request)
if formdata and formdata.get('cmd') is not None:
if formdata['cmd'] == 'restart':
gateway.send_restart_request()
return '', 204
else:
return 'Unknown cmd %s ' % formdata['cmd'], 406
else:
return '', 406
# def import_gateway(user, add_gateway):
# mac_addr = add_gateway['mac_addr'].data
# name = add_gateway['name'].data
# platform = add_gateway['platform'].data
# freq_plan = add_gateway['freq_plan'].data
# location = Location(add_gateway['longitude'].data, add_gateway['latitude'].data, add_gateway['altitude'].data)
# if platform == Platform.rpi:
# model = add_gateway['model'].data
# return RaspBerryPiGateway(user.id, mac_addr, name, model, freq_plan=freq_plan, location=location)
# elif platform == Platform.ll:
# return LinkLabsGateway(user.id, mac_addr, name, freq_plan=freq_plan, location=location)
def import_gateway(user, add_gateway):
mac_addr = add_gateway['mac_addr'].data
name = add_gateway['name'].data
platform = add_gateway['platform'].data
freq_plan = add_gateway['freq_plan'].data
model = add_gateway['model'].data
location = Location(add_gateway['longitude'].data, add_gateway['latitude'].data, add_gateway['altitude'].data)
return Gateway(user.id, mac_addr, name, platform, model, freq_plan=freq_plan, location=location) | mit | 255,426,881,030,014,140 | 43.10687 | 116 | 0.646702 | false |
valeriansaliou/django-gitlab-logging | gitlab_logging/handlers.py | 1 | 2294 | import logging
class GitlabIssuesHandler(logging.Handler):
"""
Handles logs as issues with GitLab API
"""
def __init__(self):
logging.Handler.__init__(self)
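    # Illustrative sketch (standard Django LOGGING syntax; the handler path is
    # assumed from this module's location). Wiring the handler into a project
    # would look roughly like:
    #
    #   LOGGING = {
    #       'version': 1,
    #       'handlers': {
    #           'gitlab': {
    #               'level': 'ERROR',
    #               'class': 'gitlab_logging.handlers.GitlabIssuesHandler',
    #           },
    #       },
    #       'loggers': {
    #           'django.request': {'handlers': ['gitlab'], 'level': 'ERROR'},
    #       },
    #   }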
def __open_issue(self, title, content, trace_raw):
"""
Open an issue on GitLab with given content
"""
from tasks import task_log_gitlab_issue_open
task_log_gitlab_issue_open.delay(title, content, trace_raw)
def __reopen_issue(self, issue_id):
"""
Re-open a given issue on GitLab
"""
from tasks import task_log_gitlab_issue_reopen
task_log_gitlab_issue_reopen.delay(issue_id)
def emit(self, record):
"""
Fired when an error is emitted
"""
from django.conf import settings
from django.views.debug import get_exception_reporter_filter
from helpers import GitlabIssuesHelper
try:
has_repr, request_repr = True, '\n{0}'.format(
get_exception_reporter_filter(record.request).get_request_repr(record.request)
)
except Exception:
has_repr, request_repr = False, ':warning: Request data unavailable.'
# Generate issue title
title = '[{level}@{environment}] {message}'.format(
level=record.levelname,
message=record.getMessage(),
environment=getattr(settings, 'ENVIRONMENT', 'default'),
)
# Generate issue content
trace_raw = self.format(record)
contents = {
'head': '#### :zap: Note: this issue has been automatically opened.',
'trace': '```python\n%s\n```' % trace_raw,
'repr': '```\n%s\n```' % request_repr if has_repr\
else ('*%s*' % request_repr),
}
issue_exists, issue_id = GitlabIssuesHelper.check_issue(settings.GITLAB_PROJECT_ID, trace_raw)
if not issue_exists:
content = '{head}\n\n---\n\n{trace}\n\n---\n\n{repr}'.format(
head=contents['head'],
trace=contents['trace'],
repr=contents['repr'],
)
self.__open_issue(title, content, trace_raw)
elif issue_id:
self.__reopen_issue(issue_id)
| mit | 2,486,562,706,917,107,700 | 30.424658 | 102 | 0.551003 | false |
cpieloth/GPGPU-on-Hadoop | hadoop_ocl_link_test/runTime/runtime.py | 1 | 1118 | #!/usr/bin/env python
# Python 3
import shlex
import subprocess
import re
import sys
import time
# read command line arguments
if len(sys.argv) < 4:  # script name plus three required arguments
print('Usage: <program> <outputfile> <value name>')
sys.exit(1)
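# Example invocation (program and file names are hypothetical):
#   ./runtime.py ./cl_benchmark results.dat localSize_128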
# Variables
PRG_NAME = sys.argv[1]
DATA_NAME = sys.argv[2]
VAL_NAME = sys.argv[3]
RUNS = 5 # TODO to set
SLEEP = 2
# Print information
print('Program:', PRG_NAME)
print('Run size:', RUNS)
print('Outputfile: ', DATA_NAME, sep='', end='\n')
# Open file
file = open(DATA_NAME, 'a')
# Run tests
print('Start:')
regEx = re.compile('.*time=(.*);.*')
# prepare command to start
command = PRG_NAME # TODO to set
print(' command:', command, end=' ')
args = shlex.split(command)
avgTime = 0
for run in range(0, RUNS):
p = subprocess.Popen(args, stdout=subprocess.PIPE)
p.wait()
t = regEx.match(str(p.stdout.read()))
avgTime = avgTime + float(t.group(1))
print('.', end='')
time.sleep(SLEEP)
avgTime = avgTime/RUNS
print('done! Average time:', avgTime)
file.write(VAL_NAME + "\t" + str(avgTime) + '\n') # TODO to set
# Close file
file.close()
| apache-2.0 | -4,649,629,103,920,189,000 | 19.5 | 63 | 0.63059 | false |
duyuan11/glumpy | glumpy/app/window/backends/backend_sdl2.py | 1 | 14801 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Nicolas P. Rougier
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
import sys, ctypes
from glumpy import gl
from glumpy.log import log
from glumpy.app import configuration
from glumpy.app.window import window
# Backend name
__name__ = "SDL2"
# Backend version (if available)
__version__ = ""
# Backend availability
__availability__ = False
# Whether the framework has been initialized
__initialized__ = False
# Active windows
__windows__ = {}
# ---------------------------------------------------- convenient functions ---
def name(): return __name__
def version(): return __version__
def available(): return __availability__
# --------------------------------------------------------------- init/exit ---
def __init__():
global __initialized__
if not __initialized__:
sdl2.SDL_Init(sdl2.SDL_INIT_VIDEO)
__initialized__ = True
def __exit__():
global __initialized__
sdl2.SDL_Quit()
__initialized__ = False
# ------------------------------------------------------------ availability ---
try:
import sdl2
if not __initialized__:
__init__()
__availability__ = True
__version__ = ("%d.%d.%d") % sdl2.version_info[:3]
__mouse_map__ = {sdl2.SDL_BUTTON_LEFT: window.mouse.LEFT,
sdl2.SDL_BUTTON_MIDDLE: window.mouse.MIDDLE,
sdl2.SDL_BUTTON_RIGHT: window.mouse.RIGHT }
__key_map__ = {
# sdl2.SDLK_LSHIFT: window.key.SHIFT,
# sdl2.SDLK_RSHIFT: window.key.SHIFT,
# sdl2.SDLK_LCTRL: window.key.CONTROL,
# sdl2.SDLK_RCTRL: window.key.CONTROL,
# sdl2.SDLK_LALT: window.key.ALT,
# sdl2.SDLK_RALT: window.key.ALT,
# sdl2.SDLK_LGUI: window.key.META,
# sdl2.SDLK_RGUI: window.key.META,
sdl2.SDLK_LEFT: window.key.LEFT,
sdl2.SDLK_UP: window.key.UP,
sdl2.SDLK_RIGHT: window.key.RIGHT,
sdl2.SDLK_DOWN: window.key.DOWN,
sdl2.SDLK_PAGEUP: window.key.PAGEUP,
sdl2.SDLK_PAGEDOWN: window.key.PAGEDOWN,
sdl2.SDLK_INSERT: window.key.INSERT,
sdl2.SDLK_DELETE: window.key.DELETE,
sdl2.SDLK_HOME: window.key.HOME,
sdl2.SDLK_END: window.key.END,
sdl2.SDLK_ESCAPE: window.key.ESCAPE,
sdl2.SDLK_BACKSPACE: window.key.BACKSPACE,
sdl2.SDLK_F1: window.key.F1,
sdl2.SDLK_F2: window.key.F2,
sdl2.SDLK_F3: window.key.F3,
sdl2.SDLK_F4: window.key.F4,
sdl2.SDLK_F5: window.key.F5,
sdl2.SDLK_F6: window.key.F6,
sdl2.SDLK_F7: window.key.F7,
sdl2.SDLK_F8: window.key.F8,
sdl2.SDLK_F9: window.key.F9,
sdl2.SDLK_F10: window.key.F10,
sdl2.SDLK_F11: window.key.F11,
sdl2.SDLK_F12: window.key.F12,
sdl2.SDLK_SPACE: window.key.SPACE,
sdl2.SDLK_RETURN: window.key.ENTER,
sdl2.SDLK_TAB: window.key.TAB }
except ImportError:
__availability__ = False
__version__ = None
# -------------------------------------------------------------- capability ---
capability = {
"Window position get/set" : True,
"Window size get/set" : True,
"Multiple windows" : True,
"Mouse scroll events" : True,
"Non-decorated window" : True,
"Non-sizeable window" : True,
"Fullscreen mode" : True,
"Unicode processing" : True,
"Set GL version" : True,
"Set GL profile" : True,
"Share GL context" : True,
}
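# Illustrative sketch (assumes the usual glumpy front-end API): scripts do not
# import this module directly but select the backend through the app layer,
# roughly like:
#
#   from glumpy import app
#   app.use("sdl2")
#   window = app.Window(width=512, height=512)
#   app.run()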
# ------------------------------------------------------- set_configuration ---
def set_configuration(config):
""" Set gl configuration """
sdl2.SDL_GL_SetAttribute( sdl2.SDL_GL_RED_SIZE, config.red_size)
sdl2.SDL_GL_SetAttribute( sdl2.SDL_GL_GREEN_SIZE, config.green_size)
sdl2.SDL_GL_SetAttribute( sdl2.SDL_GL_BLUE_SIZE, config.blue_size)
sdl2.SDL_GL_SetAttribute( sdl2.SDL_GL_ALPHA_SIZE, config.alpha_size)
sdl2.SDL_GL_SetAttribute( sdl2.SDL_GL_DEPTH_SIZE, config.depth_size)
sdl2.SDL_GL_SetAttribute( sdl2.SDL_GL_STENCIL_SIZE, config.stencil_size)
if config.samples:
sdl2.SDL_GL_SetAttribute( sdl2.SDL_GL_MULTISAMPLEBUFFERS, 1)
sdl2.SDL_GL_SetAttribute( sdl2.SDL_GL_MULTISAMPLESAMPLES, config.samples)
else:
sdl2.SDL_GL_SetAttribute( sdl2.SDL_GL_MULTISAMPLEBUFFERS, 0)
sdl2.SDL_GL_SetAttribute( sdl2.SDL_GL_MULTISAMPLESAMPLES, 0)
sdl2.SDL_GL_SetAttribute( sdl2.SDL_GL_STEREO, config.stereo)
sdl2.SDL_GL_SetAttribute( sdl2.SDL_GL_FRAMEBUFFER_SRGB_CAPABLE, config.srgb)
sdl2.SDL_GL_SetAttribute( sdl2.SDL_GL_CONTEXT_MAJOR_VERSION,
config.major_version)
sdl2.SDL_GL_SetAttribute( sdl2.SDL_GL_CONTEXT_MINOR_VERSION,
config.minor_version)
if config.profile == "core":
sdl2.SDL_GL_SetAttribute(sdl2.SDL_GL_CONTEXT_PROFILE_MASK,
sdl2.SDL_GL_CONTEXT_PROFILE_CORE)
elif config.profile == "compatibility":
sdl2.SDL_GL_SetAttribute(sdl2.SDL_GL_CONTEXT_PROFILE_MASK,
sdl2.SDL_GL_CONTEXT_PROFILE_COMPATIBILITY)
# elif configuration.profile == "es":
# sdl2.SDL_GL_SetAttribute(sdl2.SDL_GL_CONFIGURATION_PROFILE_MASK,
# sdl2.SDL_GL_CONFIGURATION_PROFILE_ES)
# ------------------------------------------------------------------ Window ---
class Window(window.Window):
""" """
def __init__( self, width=256, height=256, title=None, visible=True, aspect=None,
decoration=True, fullscreen=False, config=None, context=None, color=(0,0,0,1)):
""" """
window.Window.__init__(self, width=width,
height=height,
title=title,
visible=visible,
aspect=aspect,
decoration=decoration,
fullscreen=fullscreen,
config=config,
context=context,
color=color)
if config is None:
config = configuration.Configuration()
set_configuration(config)
flags = sdl2.SDL_WINDOW_SHOWN
# flags |= sdl2.SDL_WINDOW_ALLOW_HIGHDPI
flags |= sdl2.SDL_WINDOW_RESIZABLE
flags |= sdl2.SDL_WINDOW_OPENGL
if visible:
flags |= sdl2.SDL_WINDOW_SHOWN
else:
            flags |= sdl2.SDL_WINDOW_HIDDEN
if not decoration:
flags |= sdl2.SDL_WINDOW_BORDERLESS
self._native_window = sdl2.SDL_CreateWindow(self._title,
sdl2.SDL_WINDOWPOS_UNDEFINED,
sdl2.SDL_WINDOWPOS_UNDEFINED,
width, height, flags)
self._native_context = sdl2.SDL_GL_CreateContext(self._native_window)
self._native_id = sdl2.SDL_GetWindowID(self._native_window)
sdl2.SDL_GL_SetSwapInterval(0)
# OSX: check framebuffer size / window size. On retina display, they
# can be different so we try to correct window size such as having
# the framebuffer size of the right size
# w,h = ctypes.c_int(),ctypes.c_int()
# sdl2.SDL_GL_GetDrawableSize(self._native_window, w, h)
# w,h = w.value(), h.value()
# if w != width or h!= height:
# width = width/2
# height= height/2
# sdl2.SDL_SetWindowSize(self._native_window, int(width), int(height))
self._height = height
self._width = width
__windows__[self._native_id] = self
def process_event(self, event):
if event.type == sdl2.SDL_WINDOWEVENT:
if event.window.event == sdl2.SDL_WINDOWEVENT_RESIZED:
width = event.window.data1
height = event.window.data2
self.dispatch_event('on_resize', width, height)
elif event.window.event == sdl2.SDL_WINDOWEVENT_SHOWN:
self.dispatch_event('on_show')
elif event.window.event == sdl2.SDL_WINDOWEVENT_HIDDEN:
self.dispatch_event('on_hide')
elif event.window.event == sdl2.SDL_WINDOWEVENT_ENTER:
self.dispatch_event('on_enter')
elif event.window.event == sdl2.SDL_WINDOWEVENT_LEAVE:
self.dispatch_event('on_leave')
#elif event.window.event == sdl2.SDL_WINDOWEVENT_MOVED:
# self.dispatch_event('on_move')
elif event.window.event == sdl2.SDL_WINDOWEVENT_CLOSE:
self.close()
elif event.type == sdl2.SDL_QUIT:
self.close()
elif event.type == sdl2.SDL_MOUSEMOTION:
x = event.motion.x
y = event.motion.y
buttons = event.motion.state
dx = x - self._mouse_x
dy = y - self._mouse_y
self._mouse_x = x
self._mouse_y = y
if buttons & sdl2.SDL_BUTTON_LMASK:
self.dispatch_event("on_mouse_drag", x, y, dx, dy, window.mouse.LEFT)
elif buttons & sdl2.SDL_BUTTON_MMASK:
self.dispatch_event("on_mouse_drag", x, y, dx, dy, window.mouse.MIDDLE)
elif buttons & sdl2.SDL_BUTTON_RMASK:
self.dispatch_event("on_mouse_drag", x, y, dx, dy, window.mouse.RIGHT)
else:
self.dispatch_event("on_mouse_motion", x, y, dx, dy)
elif event.type == sdl2.SDL_MOUSEBUTTONDOWN:
x = event.button.x
y = event.button.y
button = event.button.button
self._mouse_x = x
self._mouse_y = y
if button == sdl2.SDL_BUTTON_LEFT:
self.dispatch_event("on_mouse_press", x, y, window.mouse.LEFT)
elif button == sdl2.SDL_BUTTON_MIDDLE:
self.dispatch_event("on_mouse_press", x, y, window.mouse.MIDDLE)
elif button == sdl2.SDL_BUTTON_RIGHT:
self.dispatch_event("on_mouse_press", x, y, window.mouse.RIGHT)
elif event.type == sdl2.SDL_MOUSEBUTTONUP:
x = event.button.x
y = event.button.y
button = event.button.button
self._mouse_x = x
self._mouse_y = y
if button == sdl2.SDL_BUTTON_LEFT:
self.dispatch_event("on_mouse_release", x, y, window.mouse.LEFT)
elif button == sdl2.SDL_BUTTON_MIDDLE:
self.dispatch_event("on_mouse_release", x, y, window.mouse.MIDDLE)
elif button == sdl2.SDL_BUTTON_RIGHT:
self.dispatch_event("on_mouse_release", x, y, window.mouse.RIGHT)
elif event.type == sdl2.SDL_MOUSEWHEEL:
offset_x = event.wheel.x
offset_y = event.wheel.y
self.dispatch_event("on_mouse_scroll",
self._mouse_x, self._mouse_y, offset_x, offset_y)
        elif event.type == sdl2.SDL_KEYDOWN:
            keysym = event.key.keysym
            modifiers = self._modifiers_translate(keysym.mod)
            symbol = self._keyboard_translate(keysym.sym)
            self.dispatch_event("on_key_press", symbol, modifiers)
        elif event.type == sdl2.SDL_KEYUP:
            keysym = event.key.keysym
            modifiers = self._modifiers_translate(keysym.mod)
            symbol = self._keyboard_translate(keysym.sym)
            self.dispatch_event("on_key_release", symbol, modifiers)
def _modifiers_translate( self, modifiers ):
_modifiers = 0
        # keysym.mod carries KMOD_* bitmasks, so test against those constants
        if modifiers & (sdl2.KMOD_LSHIFT | sdl2.KMOD_RSHIFT):
            _modifiers |= window.key.MOD_SHIFT
        if modifiers & (sdl2.KMOD_LCTRL | sdl2.KMOD_RCTRL):
            _modifiers |= window.key.MOD_CTRL
        if modifiers & (sdl2.KMOD_LALT | sdl2.KMOD_RALT):
            _modifiers |= window.key.MOD_ALT
return _modifiers
def _keyboard_translate(self, code):
ascii = code
if (0x020 <= ascii <= 0x040) or (0x05b <= ascii <= 0x07e):
return ascii
elif ascii <= 0x020:
code = ascii
return __key_map__.get(code, window.key.UNKNOWN)
def show(self):
sdl2.SDL_ShowWindow(self._native_window)
self.dispatch_event('on_show')
def hide(self):
sdl2.SDL_HideWindow(self._native_window)
self.dispatch_event('on_hide')
def close(self):
sdl2.SDL_DestroyWindow(self._native_window)
del __windows__[self._native_id]
for i in range(len(self._timer_stack)):
handler, interval = self._timer_stack[i]
self._clock.unschedule(handler)
self.dispatch_event('on_close')
def set_title(self, title):
log.warn('%s backend cannot set window title' % __name__)
def get_title(self):
log.warn('%s backend cannot get window title' % __name__)
def set_size(self, width, height):
log.warn('%s backend cannot set window size' % __name__)
def get_size(self):
log.warn('%s backend cannot get window size' % __name__)
def set_position(self, x, y):
log.warn('%s backend cannot set window position' % __name__)
def get_position(self):
log.warn('%s backend cannot get position' % __name__)
def swap(self):
sdl2.SDL_GL_SwapWindow(self._native_window)
def activate(self):
sdl2.SDL_GL_MakeCurrent(self._native_window, self._native_context)
# ----------------------------------------------------------------- windows ---
def windows():
return __windows__.values()
# ----------------------------------------------------------------- process ---
def process(dt):
# Poll for and process events
event = sdl2.SDL_Event()
while sdl2.SDL_PollEvent(ctypes.byref(event)) != 0:
win_id = event.window.windowID
if win_id in __windows__.keys():
win = __windows__[win_id]
win.process_event(event)
for window in windows():
# Make window active
window.activate()
# Dispatch the main draw event
window.dispatch_event('on_draw', dt)
# Dispatch the idle event
window.dispatch_event('on_idle', dt)
# Swap buffers
window.swap()
return len(__windows__.values())
| bsd-3-clause | -7,783,163,129,631,949,000 | 36.85422 | 97 | 0.539018 | false |
NeurodataWithoutBorders/api-python | examples/create_scripts/general-e.py | 1 | 2091 | #!/usr/bin/python
import sys
from nwb import nwb_file
from nwb import nwb_utils as utils
"""
Example using extension to add metadata to group /general
Group /general contains general metadata, i.e. metadata that
applies to the entire session.
This example uses the extension defined in extensions/e-general.py
to define new metadata and add it to section /general.
"""
# create a new NWB file
OUTPUT_DIR = "../created_nwb_files/"
file_name = __file__[0:-3] + ".nwb"
settings = {}
settings["file_name"] = OUTPUT_DIR + file_name
settings["identifier"] = utils.create_identifier("add metadata to general")
settings["mode"] = "w"
settings["start_time"] = "2016-04-07T03:16:03.604121"
settings["description"] = "Test file demonstrating use of an extension for general"
# specify the extension (Could be more than one. Only one used now).
settings['extensions'] = ["extensions/e-general.py"]
f = nwb_file.open(**settings)
########################################################################
# Specify experimenter (this dataset is part of the core NWB format)
eds = f.set_dataset('experimenter', "Joseline Doe")
# specify attribute to experimenter, this defined in extension file.
# it is not part of the core NWB format
eds.set_attr("orcid_id", "7012023")
# Now add metadata that is defined by the extension
gri = f.make_group("rctn_info")
gri.set_dataset("seminars", ["Thom Smith", "Dwight Keenan", "Sue Trimble"])
gri.set_dataset("attendance", [23, 45, 33])
f.set_dataset("rctn:activity_level", '7')
f.set_dataset("rctn:time_since_fed", '6 hours 20 minutes')
f.set_dataset("notes", "some notes")
# also set extra metadata about subject
# these datasets are also defined in the extension
# dataset names and values are from a file in the AIBS cell types database
f.set_dataset("aibs_specimen_id",313862134)
f.set_dataset("aibs_specimen_name","Sst-IRES-Cre;Ai14(IVSCC)-167638.03.01.01")
f.set_dataset("aibs_dendrite_state","NA")
f.set_dataset("aibs_dendrite_type","aspiny")
f.set_dataset("aibs_cre_line","Sst-IRES-Cre")
# All done. Close the file
f.close()
| bsd-3-clause | 7,030,291,989,708,587,000 | 33.278689 | 83 | 0.704926 | false |
guildai/guild | guild/remotes/gist.py | 1 | 20430 | # Copyright 2017-2021 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import logging
import os
import pprint
import shutil
import subprocess
import sys
import zipfile
from guild import remote as remotelib
from guild import remote_util
from guild import util
from . import meta_sync
log = logging.getLogger("guild.remotes.gist")
class NoSuchGist(remotelib.OperationError):
pass
class MissingRequiredEnv(remotelib.OperationError):
pass
class GistRemoteType(remotelib.RemoteType):
def __init__(self, _ep):
pass
def remote_for_config(self, name, config):
return GistRemote(name, config)
def remote_for_spec(self, spec):
name = "gist:%s" % spec
user, gist_name = _parse_spec(spec)
config = remotelib.RemoteConfig(
{
"user": user,
"gist-name": gist_name,
}
)
return GistRemote(name, config)
def _parse_spec(spec):
parts = spec.split("/", 1)
if len(parts) == 1:
try:
return _required_gist_user_env({}), parts[0]
except MissingRequiredEnv as e:
raise remotelib.InvalidRemoteSpec(str(e))
return parts
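# Illustrative sketch of the two spec forms handled above (user and gist name
# are hypothetical):
#
#   guild pull gist:maria/experiment-1             -> user "maria", gist "experiment-1"
#   GIST_USER=maria guild pull gist:experiment-1   -> same, user taken from the env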
def _required_gist_user_env(env):
try:
return _required_env("GIST_USER", [env, os.environ])
except KeyError:
raise MissingRequiredEnv(
"gist remotes must be specified as USER/GIST_NAME if GIST_USER "
"environment variable is not defined"
)
def _required_env(name, sources):
for src in sources:
try:
return src[name]
except KeyError:
pass
raise KeyError(name)
class GistRemote(meta_sync.MetaSyncRemote):
def __init__(self, name, config):
self.name = name
self.user = config["user"]
self.gist_name = config["gist-name"]
self._gist_readme_name = _gist_readme_name(self.gist_name)
self.local_env = remote_util.init_env(config.get("local-env"))
self.local_sync_dir = meta_sync.local_meta_dir(
_remote_full_name(self.user, self.gist_name), ""
)
self._local_gist_repo = os.path.join(self.local_sync_dir, "gist")
runs_dir = os.path.join(self.local_sync_dir, "runs")
super(GistRemote, self).__init__(runs_dir, None)
def status(self, verbose=False):
remote_util.remote_activity("Getting %s status", self.name)
gist = self._repo_gist()
sys.stdout.write("%s (gist %s) is available\n" % (self.name, gist["id"]))
if verbose:
sys.stdout.write(pprint.pformat(gist))
sys.stdout.write("\n")
def start(self):
remote_util.remote_activity("Getting %s status", self.name)
try:
gist = self._repo_gist()
except NoSuchGist:
log.info("Creating gist")
gist = self._create_gist()
log.info(
"Created %s (gist %s) for user %s",
self.name,
gist["id"],
self.user,
)
self._sync_runs_meta()
else:
raise remotelib.OperationError(
"%s (gist %s) already exists for user %s"
% (self.name, gist["id"], self.user)
)
def stop(self):
self._delete_gist()
self._clear_gist_cache()
def _delete_gist(self):
gist = self._repo_gist()
log.info("Deleting gist %s", gist["id"])
_delete_gist(gist, self.local_env)
def _clear_gist_cache(self):
log.info("Clearning local cache")
log.debug("deleting %s", self.local_sync_dir)
util.ensure_safe_rmtree(self.local_sync_dir)
def stop_details(self):
remote_util.remote_activity("Getting %s status", self.name)
try:
gist = self._repo_gist()
except NoSuchGist:
return None
else:
return "gist %s will be deleted - THIS CANNOT BE UNDONE!" % gist["id"]
def _sync_runs_meta(self, force=False):
remote_util.remote_activity("Refreshing run info for %s" % self.name)
self._ensure_local_gist_repo()
self._sync_runs_meta_for_gist(force)
def _ensure_local_gist_repo(self):
if _is_git_repo(self._local_gist_repo):
log.debug("gist local repo found at %s", self._local_gist_repo)
return
log.debug("initializing gist local repo at %s", self._local_gist_repo)
gist = self._repo_gist()
_sync_gist_repo(gist, self._local_gist_repo, self.local_env)
def _repo_gist(self):
gist = _find_gist_with_file(self.user, self._gist_readme_name, self.local_env)
if not gist:
raise NoSuchGist(
"cannot find gist remote '%s' (denoted by the file '%s') for user %s\n"
"If the gist is private, you must specify a valid access token with "
"GIST_ACCESS_TOKEN.\nFor more information see "
"https://my.guild.ai/docs/gists."
% (self.gist_name, self._gist_readme_name, self.user)
)
return gist
def _sync_runs_meta_for_gist(self, force):
try:
_pull_gist_repo(self._local_gist_repo, self.local_env)
except NoSuchGist:
self._clear_gist_cache()
else:
git_commit = self._gist_repo_current_commit()
if not force and self._meta_current(git_commit):
return
_refresh_runs_meta(
self._local_gist_repo,
self._runs_dir,
git_commit,
self.local_sync_dir,
)
def _meta_current(self, git_commit):
return meta_sync.meta_current(self.local_sync_dir, lambda: git_commit)
def _gist_repo_current_commit(self):
return _git_current_commit(self._local_gist_repo)
def _delete_runs(self, runs, permanent):
assert permanent # gist remotes only support permanent delete
_delete_gist_runs(runs, self._local_gist_repo, self._runs_dir)
_commit_and_push_gist_repo_for_delete(
self._local_gist_repo,
_delete_commit_msg(),
self.local_env,
self.name,
)
def _restore_runs(self, runs):
raise NotImplementedError()
def _purge_runs(self, runs):
raise NotImplementedError()
def push(self, runs, delete=False):
self._ensure_synced_gist_repo()
_export_runs_to_gist_archives(runs, self._local_gist_repo)
_commit_and_push_gist_repo_for_push(
self._local_gist_repo,
_push_commit_msg(),
self.local_env,
self.name,
)
self._sync_runs_meta_for_gist(True)
def _ensure_synced_gist_repo(self):
try:
self._sync_runs_meta()
except NoSuchGist:
self._init_gist_repo()
def _init_gist_repo(self):
gist = self._create_gist()
_sync_gist_repo(gist, self._local_gist_repo, self.local_env)
def _create_gist(self):
return _create_gist(
self.user, self.gist_name, self._gist_readme_name, self.local_env
)
def pull(self, runs, delete=False):
from guild import var
# That we have `runs` means we've sync'd runs meta. "Meta" in
# this case also contains the runs themselves as zip
# archives. At this point we need only extract the run
# archives to the runs dir.
_extract_runs(runs, self._local_gist_repo, var.runs_dir(), self.name)
def _remote_full_name(user, gist_name):
return "gist-%s-%s" % (user, gist_name)
def _gist_readme_name(gist_name):
return "[Guild AI] %s" % _ensure_md_ext(gist_name)
def _ensure_md_ext(s):
if s.lower().endswith(".md"):
return s
return s + ".md"
def _find_gist_with_file(user, filename, env):
import requests # expensive
page = 1
url = "https://api.github.com/users/%s/gists" % user
while True:
resp = requests.get(
url,
params={"page": page, "per_page": 100},
headers=_github_auth_headers(env),
)
gists = resp.json()
if not gists:
return None
for gist in gists:
for name in gist["files"]:
if name == filename:
return gist
page += 1
def _github_auth_headers(env):
try:
access_token = _required_gist_access_token(env)
except MissingRequiredEnv:
return {}
else:
return {"Authorization": "token %s" % access_token}
def _sync_gist_repo(gist, local_repo, env):
repo_url = _gist_repo_url(gist, env)
if _is_git_repo(local_repo):
_pull_gist_repo(local_repo, env)
else:
_clone_gist_repo(repo_url, local_repo, env)
def _gist_repo_url(gist, env):
if _gist_urltype(env) == "ssh":
return "[email protected]:%s.git" % gist["id"]
else:
return gist["git_pull_url"]
def _gist_urltype(env):
try:
return _required_env("GIST_URLTYPE", [env, os.environ])
except KeyError:
return None
def _clone_gist_repo(repo_url, local_repo, env):
cmd = [_git_cmd(), "clone", "--quiet", repo_url, local_repo]
log.debug("cloning %s to %s", repo_url, local_repo)
_subprocess_tty(cmd, extra_env=env)
def _git_cmd():
cmd = util.which("git")
if not cmd:
raise remotelib.OperationError(
"git command is not available\n"
"Refer to https://git-scm.com/book/en/v2/Getting-Started-Installing-Git "
"for help installing it."
)
return cmd
def _pull_gist_repo(local_repo, env):
cmd = [_git_cmd(), "-C", local_repo, "pull", "--quiet", "--rebase"]
log.debug("pulling for %s", local_repo)
code = _subprocess_tty(cmd, extra_env=env, allowed_returncodes=(0, 1))
if code == 1:
raise NoSuchGist()
def _refresh_runs_meta(gist_repo, runs_dir, meta_id, local_sync_dir):
for archive in _run_archives(gist_repo):
_unpack_meta(archive, runs_dir)
meta_sync.write_local_meta_id(meta_id, local_sync_dir)
def _run_archives(dir):
for name in os.listdir(dir):
if _is_guild_run(name):
yield os.path.join(dir, name)
def _is_guild_run(name):
return name.startswith("guildai-run-") and name.endswith(".zip")
def _unpack_meta(archive, runs_dir):
log.debug("unpacking %s meta to %s", archive, runs_dir)
with zipfile.ZipFile(archive, "r") as zf:
for name in zf.namelist():
if _is_meta_file(name):
zf.extract(name, runs_dir)
def _is_meta_file(name):
return (
name.endswith(".guild/opref")
or "/.guild/attrs/" in name
or "/.guild/LOCK" in name
)
def _is_git_repo(dir):
return os.path.exists(os.path.join(dir, ".git"))
def _git_current_commit(git_repo):
if not _is_git_repo(git_repo):
return None
cmd = [_git_cmd(), "-C", git_repo, "log", "-1", "--format=%H"]
out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
return out.decode("utf-8").strip()
def _extract_runs(runs, archive_dir, dest_dir, gist_name):
for run in runs:
archive = os.path.join(archive_dir, _run_archive_filename(run.id))
if not os.path.exists(archive):
log.error(
"%s archive for gist does not exist (%s), skipping", run.id, archive
)
continue
log.info("Copying %s from %s", run.id, gist_name)
_replace_run(archive, run.id, dest_dir)
def _run_archive_filename(run_id):
return "guildai-run-%s.zip" % run_id
def _replace_run(archive, run_id, dest_dir):
with util.TempDir("guild-gist-run-") as tmp:
_extract_archive(archive, tmp.path)
extracted_run_dir = _validate_extracted_run(tmp.path, run_id, archive)
dest_run_dir = os.path.join(dest_dir, run_id)
_replace_run_dir(dest_run_dir, extracted_run_dir)
def _extract_archive(archive, dest_dir):
with zipfile.ZipFile(archive, "r") as zf:
for name in zf.namelist():
zf.extract(name, dest_dir)
def _validate_extracted_run(dir, run_id, archive):
# RUN_DIR/.guild/opref is required for a run.
extracted_run_dir = os.path.join(dir, run_id)
opref_path = os.path.join(extracted_run_dir, ".guild", "opref")
if not os.path.exists(opref_path):
log.error("%s does not contain expected run %s", archive, run_id)
raise remotelib.OperationError("invalid run archive in gist")
return extracted_run_dir
def _replace_run_dir(run_dir, src_dir):
log.debug("moving %s to %s", src_dir, run_dir)
util.ensure_safe_rmtree(run_dir)
shutil.move(src_dir, run_dir)
def _create_gist(gist_remote_user, gist_remote_name, gist_readme_name, env):
import requests
access_token = _required_gist_access_token(env)
content = _gist_readme_content(gist_remote_user, gist_remote_name)
data = {
"accept": "application/vnd.github.v3+json",
"description": "Guild AI Repository",
"public": True,
"files": {
gist_readme_name: {
"filename": gist_readme_name,
"type": "text/markdown",
"language": "Markdown",
"content": content,
}
},
}
headers = {
"Authorization": "token %s" % access_token,
}
resp = requests.post("https://api.github.com/gists", json=data, headers=headers)
if resp.status_code not in (200, 201):
raise remotelib.OperationError(
"error creating gist: (%i) %s" % (resp.status_code, resp.text)
)
return resp.json()
def _required_gist_access_token(env):
try:
return _required_env("GIST_ACCESS_TOKEN", [env, os.environ])
except KeyError:
raise MissingRequiredEnv(
"missing required environment variable GIST_ACCESS_TOKEN\n"
"This operation requires a GitHub personal access token for "
"creating gists.\n"
"See https://my.guild.ai/docs/gists for more information."
)
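# Illustrative sketch (assumes a GitHub personal access token with gist
# permissions; the values are made up): operations that create or delete gists
# expect the token in the environment, e.g.
#
#   export GIST_ACCESS_TOKEN=xxxxxxxxxxxxxxxx
#   export GIST_USER=maria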
def _gist_readme_content(user, remote_name):
return (
"This is a Guild AI runs repository. To access runs, "
"[install Guild AI](https://guild.ai/install) and run `guild pull gist:%s/%s`. "
"For more information about Guild AI Gist based repositories, see "
"[Guild AI - Gists](https://my.guild.ai/docs/gists)." % (user, remote_name)
)
def _export_runs_to_gist_archives(runs, gist_repo):
with util.TempDir("guild-runs-export-") as tmp:
archives = [_run_export_archive(run, tmp.path) for run in runs]
_export_runs(zip(runs, archives))
for archive_src in archives:
archive_dest = os.path.join(gist_repo, os.path.basename(archive_src))
util.ensure_deleted(archive_dest)
shutil.move(archive_src, archive_dest)
def _run_export_archive(run, export_dir):
return os.path.join(export_dir, _run_archive_filename(run.id))
def _export_runs(runs_with_dest):
from guild import run_util
for run, dest in runs_with_dest:
log.info("Compressing %s", run.id)
run_util.export_runs([run], dest, copy_resources=False, quiet=True)
def _push_commit_msg():
import guild
return "`guild push` by %s@%s with version %s" % (
util.user(),
util.hostname(),
guild.version(),
)
def _commit_and_push_gist_repo_for_push(repo, commit_msg, env, remote_name):
_git_add_all(repo, env)
try:
_git_commit(repo, commit_msg, env)
except _NoChanges:
pass
log.info("Copying runs to %s", remote_name)
_git_push(repo, env)
def _git_add_all(local_repo, env, update=False):
cmd = [_git_cmd(), "-C", local_repo, "add", "."]
if update:
cmd.append("-u")
log.debug("adding files for %s", local_repo)
_subprocess_quiet(cmd, extra_env=env)
class _NoChanges(Exception):
pass
def _git_commit(local_repo, msg, env):
cmd = [_git_cmd(), "-C", local_repo, "commit", "-m", msg]
log.debug("commiting for %s", local_repo)
result = _subprocess_quiet(cmd, extra_env=env, allowed_returncodes=(0, 1))
if result == 1:
raise _NoChanges()
def _git_push(local_repo, env):
cmd = [_git_cmd(), "-C", local_repo, "push", "--quiet"]
env = _maybe_askpass(env, local_repo)
log.debug("pushing for %s", local_repo)
_subprocess_tty(cmd, extra_env=env)
def _maybe_askpass(env, local_repo):
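    # If a GIST_ACCESS_TOKEN is available, point GIT_ASKPASS at a helper
    # script that echoes the token so git can authenticate without prompting
    # (skipped on Windows, where the helper script is not written).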
    if not _gist_access_token_defined(env):
        # No token to inject - return the caller's env unchanged
        return env
askpass_path = _maybe_gist_access_token_script(local_repo)
if not askpass_path:
return env
env = dict(env)
env["GIT_ASKPASS"] = askpass_path
return env
def _gist_access_token_defined(env):
try:
_required_env("GIST_ACCESS_TOKEN", [env, os.environ])
except KeyError:
return False
else:
return True
def _maybe_gist_access_token_script(local_repo):
if util.get_platform() == "Windows":
return None
script_path = _gist_access_token_script(local_repo)
if os.path.exists(script_path):
return script_path
_write_gist_access_token_script(script_path)
return script_path
def _gist_access_token_script(local_repo):
return os.path.join(local_repo, ".git", "gist-access-token")
def _write_gist_access_token_script(path):
with open(path, "w") as f:
f.write("echo $GIST_ACCESS_TOKEN\n")
util.make_executable(path)
def _delete_gist(gist, env):
import requests
access_token = _required_gist_access_token(env)
data = {
"accept": "application/vnd.github.v3+json",
"gist_id": gist["id"],
}
headers = {
"Authorization": "token %s" % access_token,
}
resp = requests.delete(
"https://api.github.com/gists/%s" % gist["id"], json=data, headers=headers
)
if resp.status_code not in (200, 204):
raise remotelib.OperationError(
"error creating gist: (%i) %s" % (resp.status_code, resp.text)
)
def _delete_gist_runs(runs, gist_repo, runs_dir):
for run in runs:
log.info("Deleting %s", run.id)
_delete_gist_repo_run_archive(gist_repo, run.id)
_delete_run(run, runs_dir)
def _delete_gist_repo_run_archive(gist_repo, run_id):
run_archive = os.path.join(gist_repo, _run_archive_filename(run_id))
log.debug("deleting %s", run_archive)
util.ensure_deleted(run_archive)
def _delete_run(run, runs_dir):
run_dir = os.path.join(runs_dir, run.id)
log.debug("deleting %s", run_dir)
util.ensure_safe_rmtree(run_dir)
def _commit_and_push_gist_repo_for_delete(repo, commit_msg, env, remote_name):
_git_add_all(repo, env, update=True)
try:
_git_commit(repo, commit_msg, env)
except _NoChanges:
log.info("Nothing to update for %s - gist is up-to-date", remote_name)
else:
log.info("Updating runs on %s", remote_name)
_git_push(repo, env)
def _delete_commit_msg():
import guild
return "`guild runs rm` by %s@%s with version %s" % (
util.user(),
util.hostname(),
guild.version(),
)
def _subprocess_tty(cmd, extra_env, allowed_returncodes=(0,)):
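    # Run cmd attached to the current terminal so git can prompt the user;
    # unexpected return codes abort with SystemExit.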
env = dict(os.environ)
if extra_env:
env.update(extra_env)
log.debug("%r", cmd)
p = subprocess.Popen(cmd, env=env)
p.wait()
if p.returncode not in allowed_returncodes:
log.debug("exit code for %r is %i", cmd, p.returncode)
raise SystemExit("error running %s - see above for details" % cmd[0])
return p.returncode
def _subprocess_quiet(cmd, extra_env, allowed_returncodes=(0,)):
log.debug("%r", cmd)
return remote_util.subprocess_call(
cmd,
extra_env=extra_env,
quiet=True,
allowed_returncodes=allowed_returncodes,
)
| apache-2.0 | 8,576,580,919,284,401,000 | 29.266667 | 88 | 0.598776 | false |
adamcandy/Gaia | FileTodo.py | 1 | 56922 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##########################################################################
#
# Gaia, task list organiser in with Caldav server sync.
#
# Copyright (C) 2013-2014 Dr Adam S. Candy.
# Dr Adam S. Candy, [email protected]
#
# This file is part of the Gaia project.
#
# Gaia is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Gaia is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gaia. If not, see <http://www.gnu.org/licenses/>.
#
##########################################################################
from Universe import universe, colour
import sys
import os
from datetime import datetime, timedelta
import re
from uuid import uuid4
from Support import error, report
from Support import generate_mono
from Support import repo_add, repo_remove, repo_update
from Parsers import is_relative_date, calculate_delta, prioritystring, is_same_time, timedelta_to_human, do_avoid_weekend, next_weekday, next_increment
def indentation(s, tabsize=2):
sx = s.expandtabs(tabsize)
return (len(sx) - len(sx.lstrip()))/tabsize
#return 0 if sx.isspace() else (len(sx) - len(sx.lstrip()))/tabsize
def parsedate(string, reference=None, alarm=False, allday=False, forward=False):
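  # Parse a date token: YYMMDD (treated as all-day), YYMMDDHHMM, or a relative
  # expression handled by calculate_delta (e.g. '2weeks'); an empty alarm
  # string falls back to the default alarm offset from the reference due date.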
date = None
if (string is None or len(string) == 0):
if alarm:
if reference is not None:
if allday:
# Warning for day events 1800 - 1000 = 8 hours
date = reference + universe.defaulttime.alldaydiff
else:
# Default warning of an hour
date = reference + universe.defaulttime.diff
else:
string = string.strip()
# Deal with tasks due on a day, not specific time
if len(string) == 6:
allday = True
if alarm:
string = string + universe.defaulttime.alarm.strftime('%H%M')
else:
string = string + universe.defaulttime.due.strftime('%H%M')
try:
if re.match('^\d{6}$', string):
date = datetime.strptime(string, '%y%m%d')
elif re.match('^\d{10}$', string):
try:
date = universe.timezone.localize(datetime.strptime(string, '%y%m%d%H%M'))
#date = datetime.strptime(string, '%y%m%d%H%M')
except Exception, e:
date = None
error('Date parse error [' + string + ']' + ' Exception: ' + str(e))
if universe.debug: raise
pass
elif is_relative_date(string):
d = calculate_delta(string)
if d is not None:
if reference is not None:
if forward:
date = reference + d
else:
date = reference - d
else:
date = universe.timezone.localize(datetime.strptime(string))
#date = datetime.strptime(string)
except Exception, e:
date = None
error('Date parse error [' + string + ']' + ' Exception: ' + str(e))
if universe.debug: raise
pass
return date, allday
def tasklist_read(name, category=None):
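  # Read one task file from the data root into a FileTodos tree, e.g.
  # tasklist_read('inbox', 'home') reads <dataroot>/home/inbox (illustrative
  # names); lists that turn out empty are removed from disk and the repo.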
if category is None:
filename = universe.dataroot + name
else:
filename = universe.dataroot + category + '/' + name
if not os.path.exists(filename):
return None
f = open(filename, 'r')
level = 0
taskline = ''
notes = ''
lines = (f.read().decode('utf8') + os.linesep).splitlines()
f.close()
#end = len(lines)
#blank = False
#for i in range(len(lines)):
# if len(lines[i]) > 0:
# blank = False
# continue
# if not blank:
# blank = True
# continue
# end = i
# break
# Temp
#end = len(lines)
#root = FileTodos(lines[:end], title=name, parents=[category], filenotes=lines[end+1:])
root = FileTodos(lines, title=name, parents=[category])
root.check_for_modified_children()
if root.is_empty():
report(' ' + colour.grey + 'Removing EMPTY ' + colour.blue + category + colour.grey + '/' + colour.yellowbright + root.name + colour.end + ' ' + colour.grey + '(' + colour.grey + filename + colour.grey + ')' + colour.end)
if not universe.dry:
root.set_modified()
try:
if os.path.exists(filename):
os.remove(filename)
repo_remove(filename)
except:
pass
return root
class FileTodos(object):
def __init__(self, lines=None, filenotes=None, parents=[], parent=None, title=None, flow='parallel', translate=None, number=1, level=None, uid=None, caldav=False, next_action=None):
self.lines = None
self.filenotes = filenotes
if self.filenotes is None:
self.filenotes = []
self.block = []
self.level = -2
# top level modified flag for file updates
self.modified = False
# task level update flag for caldav
self.updated = False
self.sequence = 0
if lines is not None:
self.lines = lines
self.block = [ 0, len(self.lines) ]
if title is not None:
self.level = 0
else:
self.level = indentation(self.lines[0]) + 1
title = self.lines[0].lstrip()
if level is not None:
self.level = level
self.name = None
self.duetext = None
self.alarmtext = None
self.is_checklist = False
self.flowtext = None
self.flow = flow
self.is_header = False
self.is_completed = False
#if caldav:
# self.is_onhold = None
# self.starttext = None
# self.repeat = None
#else:
#self.is_everpresent = False
self.is_onhold = False
self.starttext = None
self.repeat = None
self.expiretext = None
self.wait = ''
self.waitonrepeat = False
self.priority = None
self.is_permanent = False
self.avoidweekends = False
self.current = False
self.error = False
self.sublist = None
self.parents = parents
self.parent = parent
self.number = number
self.uid = uid
self.translate = ''
if translate is not None:
self.translate = translate
self.interpret_task(title)
#if len(self.translate) > 0:
# print self.name, self.translate
self.note = self.find_note()
self.childblocks = self.identify_blocks()
self.children = []
self.due, allday = parsedate(self.duetext)
self.alarm, allday = parsedate(self.alarmtext, reference=self.due, alarm=True, allday=allday)
self.start, allday = parsedate(self.starttext, reference=self.due)
self.expire, allday = parsedate(self.expiretext, reference=self.due, forward=True)
self.active = False
self.titleoptions = ''
self.type = 'file'
self.next_action = next_action
if self.next_action is not None:
self.next_action = next_action.lstrip()
# Need to add next action, in case of checklist, main header is first??
if lines is not None:
if len(self.childblocks) > 0:
filenotesstart = self.childblocks[-1][-1]
else:
filenotesstart = 0
i = filenotesstart
for i in range(filenotesstart, len(lines)):
if len(lines[i]) > 0:
filenotesstart = i
break
if self.level == 0:
#print self.name, filenotesstart
if filenotesstart < len(lines):
if lines[filenotesstart] is not None:
if len(lines[filenotesstart]) > 0:
self.filenotes = lines[filenotesstart:]
if len(self.childblocks) > 0:
self.find_children()
def child_is_task(self, task):
found = False
for child in self.children:
if child.is_same_task(task):
found = True
break
return found
def is_empty(self):
return (not self.has_children() and len(self.filenotes) == 0)
def is_same_task(self, task):
if (len(self.parents) == 0 or len(task.parents) == 0):
return self.name == task.name
else:
return (self.name == task.name and self.parents[0] == task.parents[0])
def is_translate_header(self):
if self.has_children():
if self.is_translate():
if self.parent is None:
return True
else:
if not self.parent.is_translate():
return True
return False
def group(self, masked=True):
if self.is_wait() and masked:
group = 'wait'
elif (self.is_translate() and (not self.is_translate_header()) and masked):
group = self.translate
elif len(self.parents) > 0:
group = self.parents[0]
else:
# Either root of tree, or an un-tied task!
group = 'home'
return group
def allday(self):
return (is_same_time(self.due, universe.defaulttime.due) and is_same_time(self.alarm, universe.defaulttime.alarm) )
def do_repeat(self):
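    # Advance a repeating task to its next occurrence: 'every...' repeats step
    # on from the previous due date, 'after...' and 'random' are measured from
    # now; start and alarm times are shifted to stay consistent with the due.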
avoid_weekends = (self.group(masked=False) in universe.skipweekendlists or self.avoidweekends)
# Deal with permanent task
if self.is_permanent:
#self.is_onhold = True
detail = ''
if self.waitonrepeat:
self.wait = 'wait'
detail = ' and moved to wait status'
self.set_updated()
      report(colour.yellow + 'Permanent task' + detail + colour.end + ' ' + colour.yellowbright + '|'.join(self.parents) + colour.yellow + ':' + colour.end + ' ' + self.name + colour.end)
return
if (self.repeat is None or len(self.repeat) == 0): return
if (self.due is None): return
d = None
if self.waitonrepeat:
self.wait = 'wait'
self.set_updated()
every = False
after = False
random = False
string = self.repeat
if string in ['decennially', 'biennially', 'annually', 'monthly', 'fortnightly', 'weekly', 'daily']:
every = True
if string == 'decennially':
string = '10years'
elif string == 'biennially':
string = '2years'
elif string == 'annually':
string = 'year'
elif string == 'monthly':
string = 'month'
elif string == 'fortnightly':
string = '2weeks'
elif string == 'weekly':
string = 'week'
elif string == 'daily':
string = 'day'
elif re.match('^every\w+$', string):
every = True
string = string[5:]
elif re.match('^after\w+$', string):
after = True
string = string[5:]
elif re.match('^random$', string):
random = True
if every or after or random:
d = calculate_delta(string)
if d is not None:
# Including case of absolute date
new_due = None
new_start = None
new_alarm = None
detail = ''
if every:
# Ensure at least advanced by one d delta
multi = 1
while (self.due + d * multi) < universe.now:
multi += 1
if multi > 1000:
multi = 1
            error('Could not determine multiple of "every" recur delta (>1000 iterations) for ' + self.name)
break
#print 'A', d * multi
#print 'B', self.due
#print 'C', self.due + d * multi
#multi = 0
#d = d * multi
#dmulti = int((universe.now - self.due).total_seconds() // d.total_seconds())
#if dmulti > 0:
# # Event very overdue, such that subsequent repeats missed
# d = (dmulti + 1) * d
# #print "Multi d event", d, dmulti
new_due = self.due + d * multi
if self.start is not None:
if is_relative_date(self.starttext):
new_start = self.start + d * multi
elif (after or random):
if after:
# Use .replace on datetime object instead?
#shift = ((self.due.hour - universe.now.hour) * 60 + (self.due.minute - universe.now.minute)) * 60 + self.due.second - universe.now.second
#new_due = universe.now + d + timedelta(seconds=shift) + timedelta(microseconds=-universe.now.microsecond)
#
new_due = universe.now.replace(second=0, microsecond=0)
shift = (self.due.hour - new_due.hour) * 60 + self.due.minute - new_due.minute
new_due = new_due + d + timedelta(minutes=shift)
#
elif random:
new_due = universe.now.replace(second=0, microsecond=0) + d
new_due = do_avoid_weekend(new_due, avoid_weekends=avoid_weekends)
if (self.starttext is not None and len(self.starttext) > 0):
string = self.starttext
if is_relative_date(string):
d = calculate_delta(string)
if d is not None:
new_start = new_due - d
if self.alarm is not None:
if self.alarmtext is not None:
self.alarm, allday = parsedate(self.alarmtext, reference=new_due, alarm=True, allday=self.allday())
elif self.allday():
# Warning for day events 1800 - 1000 = 8 hours
new_alarm = new_due + universe.defaulttime.alldaydiff
else:
# Default warning of an hour
new_alarm = new_due + universe.defaulttime.diff
if new_due is not None:
detail = detail + ' due: %(old)s -> %(new)s' % {
'old': '[empty]' if self.due is None else self.due.strftime('%y%m%d%H%M%z'),
'new': '[empty]' if new_due is None else new_due.strftime('%y%m%d%H%M%z')
}
self.due = new_due
if new_start is not None:
detail = detail + ' start: %(old)s -> %(new)s' % {
'old': '[empty]' if self.start is None else self.start.strftime('%y%m%d%H%M%z'),
'new': '[empty]' if new_start is None else new_start.strftime('%y%m%d%H%M%z')
}
self.start = new_start
if new_alarm is not None:
detail = detail + ' alarm: %(old)s -> %(new)s' % {
'old': '[empty]' if self.alarm is None else self.alarm.strftime('%y%m%d%H%M%z'),
'new': '[empty]' if new_alarm is None else new_alarm.strftime('%y%m%d%H%M%z')
}
self.alarm = new_alarm
report(colour.yellow + 'Recur task in' + colour.end + ' ' + colour.yellowbright + '|'.join(self.parents) + colour.yellow + ':' + colour.end + ' ' + self.name + colour.grey + detail + colour.end)
else:
      error('Could not determine recur time delta for ' + self.name + ' string[' + string + ']')
return
def add(self, task):
if len(task.parents) == 1:
lists = []
for c in self.children:
if c.name == task.parents[0]:
lists = c.child_names()
break
if (task.sublist is None) or not (task.sublist in lists):
if (task.sublist is not None) and not (task.sublist in lists):
report(colour.red + 'Selected sublist ' + task.sublist + ' not present, adding to the inbox' + colour.end)
task.sublist = 'inbox'
task.parents.append(task.sublist)
task.sublist = None
match = self
for group in task.parents:
found = False
for child in match.children:
if child.name == group:
found = True
match = child
break
if not found:
inbox = FileTodos(title='inbox', parents=match.parents + [match.name], parent=match, translate=self.translate, level=match.level + 1)
match.add_child(inbox)
match = inbox
found = True
match.set_modified(task)
new = FileTodos(lines=task.reformat().splitlines(), parents=match.parents + [match.name], parent=match)
report(colour.green + 'Adding task to ' + colour.greenbright + 'file' + colour.green + ' in ' + '|'.join(new.parents) + colour.green + ':' + colour.end + ' ' + new.name)
match.add_child(new)
def find_task(self, task):
match = None
if self.is_same_task(task):
return self
for child in self.children:
match = child.find_task(task)
if match is not None:
match = match.find_task(task)
break
return match
def find_tasks_by_name(self, task=None, name=None, matches=None, check_is_wait=False):
if matches is None:
matches = []
if task is not None:
name = task.name
if name == self.name:
if (not check_is_wait or (check_is_wait and self.is_wait()) ):
matches.append(self)
for child in self.children:
matches = child.find_tasks_by_name(name=name, matches=matches)
return matches
def find_task_parent(self, task):
#if task.name in self.child_names():
if self.child_is_task(task):
return self
for child in self.children:
parents = child.find_task_parent(task)
if parents is not None:
return parents
return None
def children_all_completed(self):
allcomplete = True
for child in self.children:
if not child.is_completed:
allcomplete = False
return allcomplete
def uncomplete_childen(self):
self.is_completed = False
for child in self.children:
child.uncomplete_childen()
def unwait_childen(self):
# Assumes working just after uncompleted (for waitonrepeat test)
if self.waitonrepeat:
self.wait = 'wait'
else:
self.wait = ''
for child in self.children:
child.unwait_childen()
def is_repeat(self):
if self.repeat is not None:
if len(self.repeat) > 0:
if self.due is not None:
return True
if self.is_permanent:
return True
return False
def recur(self, task, root=None, recursive=False):
if root is None:
root = self
match = None
removed = False
#if task.name in self.child_names():
if self.child_is_task(task):
for child in self.children:
#if child.name == task.name:
if child.is_same_task(task):
match = child
break
# Should complete/remove any children here - otherwise need to wait for next run
match.uncomplete_childen()
match.unwait_childen()
if ((match.repeat is not None and match.due is not None) or match.is_permanent):
match.do_repeat()
#match.update()
else:
root.remove(task)
removed = True
else:
for child in self.children:
match = child.recur(task, root=root, recursive=True)
if match is not None:
break
if not recursive:
if match is not None:
self.make_modified(match)
if removed: return None
return match
def remove(self, task, root=None, repeats=False, recursive=False):
if root is None:
root = self
match = None
if self.child_is_task(task):
# Check if new tasks become active
if self.is_repeat():
repeats = True
new_children = []
for child in self.children:
#if child.name == task.name:
if child.is_same_task(task):
match = child
if repeats:
match.is_completed = True
else:
new_children.append(child)
if not match.is_header:
if repeats:
action = 'Completing'
else:
self.children = new_children
action = 'Removing'
stat = colour.greenbright + 'OK' + colour.end if match is not None else colour.redbright + 'FAIL' + colour.end
report(colour.red + action + ' task from full tree in' + colour.end + ' ' + colour.redbright + 'file' + '|' + '|'.join(match.parents) + colour.red + ':' + colour.end + ' ' + match.name + ' ' + stat)
else:
if self.is_repeat():
repeats = True
for child in self.children:
match = child.remove(task, root=root, repeats=repeats, recursive=True)
if match is not None:
break
# Check if parent requires removal
if match is not None:
# removed: child, parent: self X actually match?
if child.level > 0:
if child.name == match.parents[-1]:
if (child.is_repeat() or repeats):
if child.children_all_completed():
report(colour.red + ' need to complete parent also, ' + colour.redbright + child.name + colour.end)
# Uncomplete all children of child
child.uncomplete_childen()
child.unwait_childen()
if child.is_repeat():
# Apply repeat to child
child.do_repeat()
else:
self.remove(child, repeats=repeats, recursive=True)
match = child
else:
if not child.has_children():
if not child.is_header:
report(colour.red + ' need to remove parent also, ' + colour.redbright + child.name + colour.end)
self.remove(child, recursive=True)
match = child
if not recursive:
if match is not None:
self.make_modified(match)
return match
def clear_titleoptions(self):
self.starttext = None
self.repeat = None
#self.is_onhold = False
def is_equal(self, other, caldav=False):
if (self.due != other.due):
return False
if (self.alarm != other.alarm):
return False
if (self.note != other.note):
return False
if (self.priority != other.priority):
return False
if (self.wait != other.wait):
return False
if (self.next_action != other.next_action):
return False
#print self.name, '|', self.group(), other.group()
# Don't compare translate if either task is waiting
if (not self.is_wait() and not other.is_wait()):
if (self.translate != other.translate):
#print self.name, '|', self.group(), other.group()
return False
if caldav:
return True
# Optional checks:
# Note not possible for caldav
# start, starttext
#if (self.starttext is not None and other.starttext is not None):
if (self.starttext != other.starttext):
return False
# repeat
#if (self.repeat is not None and other.repeat is not None):
if (self.repeat != other.repeat):
return False
# is_onhold
#if (self.is_onhold is not None and other.is_onhold is not None):
if (self.is_onhold != other.is_onhold):
return False
# flow (no access, add later?)
# is_permanent (no access - add later?)
# is_header (no access from Caldav?)
# is_checklist (not used)
return True
def __eq__(self, other):
if isinstance(other, FileTodos):
return self.is_equal(other)
return NotImplemented
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented:
return result
return not result
def __lt__(self, other):
# Check due
if (self.due is None and other.due is not None):
return False
if (self.due is not None and other.due is None):
return True
if ((self.due is not None and other.due is not None) and self.due != other.due):
return self.due < other.due
# Check priorities
if (self.priority is None and other.priority is not None):
return False
if (self.priority is not None and other.priority is None):
return True
if ((self.priority is not None and other.priority is not None) and self.priority != other.priority):
# Note priroties in reverse
return self.priority < other.priority
# Check wait
if (self.is_wait() and not other.is_wait):
return False
if (not self.is_wait() and other.is_wait):
return True
return self.name < other.name
def update(self, task, due=False, note=False, priority=False, wait=False, recursive=False, caldav=False, previous=None, caldavsource=False):
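    # Copy changed fields from 'task' onto the matching task in this tree;
    # 'previous' supplies the old values so only fields that actually differ
    # are rewritten (and pushed to the CalDAV event when caldav=True).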
# Also update FileTodo.__eq__
# To stop passing all of the above around...:
if previous is not None:
due = (task.due != previous.due) or (task.alarm != previous.alarm) or due
note = (task.note != previous.note) or note
next_action = (task.next_action != previous.next_action)
#next_action = True
#print '['+previous.next_action+']', '['+task.next_action+']'
priority = (task.priority != previous.priority) or priority
wait = (task.wait != previous.wait) or wait
# new:
#starttext = (task.starttext is not None and previous.starttext is not None) and (task.starttext != previous.starttext)
#repeat = (task.repeat is not None and previous.repeat is not None) and (task.repeat != previous.repeat)
#is_onhold = (task.is_onhold is not None and previous.is_onhold is not None) and (task.is_onhold != previous.is_onhold)
translate = False
if (not task.is_wait() and not previous.is_wait()):
translate = (task.translate != previous.translate)
# Deal with updates on tasks from caldav data (i.e. ensure below are False)
starttext = (task.starttext != previous.starttext) and (not caldavsource)
repeat = (task.repeat != previous.repeat) and (not caldavsource)
is_onhold = (task.is_onhold != previous.is_onhold) and (not caldavsource)
#print 'caldavsource', caldavsource, starttext, repeat, is_onhold, task.name
found = None
#if self.name == task.name:
if self.is_same_task(task):
detail = ''
if priority:
detail = detail + ' priority: %(old)s -> %(new)s' % {
'old': prioritystring(self.priority, shownone=True),
'new': prioritystring(task.priority, shownone=True),
}
self.priority = task.priority
if due:
detail = detail + ' due: %(old)s -> %(new)s, alarm: %(aold)s -> %(anew)s' % {
'old': '[empty]' if self.due is None else self.due.strftime('%y%m%d%H%M%z'),
'new': '[empty]' if task.due is None else task.due.strftime('%y%m%d%H%M%z'),
'aold': '[empty]' if self.alarm is None else self.alarm.strftime('%y%m%d%H%M%z'),
'anew': '[empty]' if task.alarm is None else task.alarm.strftime('%y%m%d%H%M%z'),
}
self.due = task.due
self.alarm = task.alarm
# If due becomes None any start is now no longer relevant so ensure it is also cleared
# Might need to do this for alarm too? bit complicated...
if (self.due is None and self.starttext is not None):
detail = detail + ' start: %(old)s -> [empty] (enforced)' % {
'old': '[empty:'+str(self.starttext)+']' if (self.starttext is None or self.starttext == '') else ' + '.join(self.starttext.splitlines()),
}
self.starttext = None
if wait:
detail = detail + ' wait: %(old)s -> %(new)s' % {
'old': '[empty:'+str(self.wait)+']' if (self.wait is None or self.wait == '') else self.wait,
'new': '[empty:'+str(task.wait)+']' if (task.wait is None or task.wait == '') else task.wait
}
self.wait = task.wait
# asc 131203
# if translate:
# detail = detail + ' translate: %(old)s -> %(new)s' % {
# 'old': '[empty:'+str(self.translate)+']' if (self.translate is None or self.translate == '') else self.translate,
# 'new': '[empty:'+str(task.translate)+']' if (task.translate is None or task.translate == '') else task.translate
# }
# self.translate = task.translate
if note:
detail = detail + ' note: %(old)s -> %(new)s' % {
'old': '[empty:'+str(self.note)+']' if (self.note is None or self.note == '') else ' + '.join(self.note.splitlines()),
'new': '[empty:'+str(task.note)+']' if (task.note is None or task.note == '') else ' + '.join(task.note.splitlines()),
}
self.note = task.note
# new
if is_onhold:
detail = detail + ' hold: %(old)s -> %(new)s' % {
'old': '[empty:'+str(self.is_onhold)+']' if (self.is_onhold is None or self.is_onhold == '') else self.is_onhold,
'new': '[empty:'+str(task.is_onhold)+']' if (task.is_onhold is None or task.is_onhold == '') else task.is_onhold
}
self.is_onhold = task.is_onhold
if starttext:
detail = detail + ' start: %(old)s -> %(new)s' % {
'old': '[empty:'+str(self.starttext)+']' if (self.starttext is None or self.starttext == '') else ' + '.join(self.starttext.splitlines()),
'new': '[empty:'+str(task.starttext)+']' if (task.starttext is None or task.starttext == '') else ' + '.join(task.starttext.splitlines()),
}
self.starttext = task.starttext
if repeat:
detail = detail + ' repeat: %(old)s -> %(new)s' % {
'old': '[empty:'+str(self.repeat)+']' if (self.repeat is None or self.repeat == '') else ' + '.join(self.repeat.splitlines()),
'new': '[empty:'+str(task.repeat)+']' if (task.repeat is None or task.repeat == '') else ' + '.join(task.repeat.splitlines()),
}
self.repeat = task.repeat
if next_action:
detail = detail + ' next action: %(old)s -> %(new)s' % {
'old': '[empty:'+str(self.next_action)+']' if (self.next_action is None or self.next_action == '') else ' + '.join(self.next_action.splitlines()),
'new': '[empty:'+str(task.next_action)+']' if (task.next_action is None or task.next_action == '') else ' + '.join(task.next_action.splitlines()),
}
self.next_action = task.next_action
#self.sequence_increment()
if caldav:
caltype = 'caldav'
elif recursive:
caltype = 'file'
else:
caltype = 'active'
updated = False
if caldav:
# Assumes have previous
if (due or note or priority or wait or translate or next_action):
from CaldavClient import ical_event_update
ical_event_update(self, due=due, note=note, priority=priority, wait=wait, translate=translate, previous=previous, next_action=next_action)
updated = True
else:
updated = True
if updated:
report(colour.yellow + 'Updating task in' + colour.end + ' ' + colour.yellowbright + caltype + '|' + '|'.join(self.parents) + colour.yellow + ':' + colour.end + ' ' + self.name + colour.grey + detail + colour.end)
else:
report(colour.yellow + 'Updating task in' + colour.end + ' ' + colour.yellowbright + caltype + '|' + '|'.join(self.parents) + colour.yellow + ' not required and '+ colour.yellowbright +'skipped' + colour.end + ' ' + self.name + colour.grey + detail + colour.end)
found = self
else:
for child in self.children:
found = child.update(task, due=due, note=note, priority=priority, wait=wait, recursive=True, caldav=caldav, previous=previous, caldavsource=caldavsource)
if found is not None:
break
if ((not recursive) and (not caldav)):
self.make_modified(found)
return found
def make_modified_parents(self, task=None):
if task is None:
task = self
if len(self.parents) > 1:
self.parent.make_modified_parents(task=task)
elif len(self.parents) == 1:
self.make_modified(task=task)
return
def check_for_modified_children(self, root=True):
modified = False
if self.modified:
modified = True
for child in self.children:
modified = modified or child.check_for_modified_children(root=False)
if root and modified:
self.set_modified()
return modified
def set_modified(self, task=None):
if task is not None:
name = task.name
else:
name = '[not provided]'
if len(self.parents) > 0:
parentstr = self.parents[-1]
else:
parentstr = '[parent unknown]'
report(colour.magenta+'Marking modified ' + parentstr + '|' + self.name + ' for task ' + name + colour.end)
self.modified = True
def make_modified(self, task):
def to_mark(current, task):
if len(current.parents) == 0:
return False
return (task.parents[1] == current.name and task.parents[0] == current.parents[0])
if len(task.parents) < 2:
return
if to_mark(self, task):
if not self.modified:
self.set_modified(task)
else:
for child in self.children:
child.make_modified(task)
def child_names(self):
names = []
for child in self.children:
names.append(child.name)
return names
def has_children(self):
if len(self.children) > 0:
return True
return False
def is_sequential(self):
return self.flow == 'sequential'
def set_wait(self, string=None):
if string is None:
string = 'wait'
self.wait = string
for child in self.children:
child.set_wait(string)
def set_updated(self, follow=True):
self.updated = True
if follow:
for child in self.children:
child.set_updated(follow=follow)
def is_translate(self):
if self.translate is not None:
if len(self.translate) > 0:
return True
return False
def is_wait(self):
if self.wait is not None:
if len(self.wait) > 0:
return True
return False
def is_available(self):
if self.is_onhold:
return False
if self.error:
return False
#if self.is_wait():
# return False
if self.start is not None:
if self.start > universe.now:
return False
return True
def is_expired(self):
if self.expire is not None:
if self.expire <= universe.now:
return True
return False
def is_active(self):
# Exclude the root and projects
if self.level <= 0:
return False
if self.is_header:
return False
if not self.is_available():
return False
if self.parent.is_wait():
# Only include highest wait
return False
#if (self.parent.is_translate_header() and self.parent.is_wait()):
# # Note onhold wipes out children anyway - here wait is special case
# return False
#if ( len(self.translate) > 0 and len(self.parent.translate) == 0 ):
if self.is_translate_header():
# Header of aux list
# Not great returning True here
return True
# Clause for grouped / lists
if ((not self.is_checklist) and (self.has_children())):
return False
# Restricted to next actions, when sequential
return True
def find_all_names(self, todos=None):
if todos == None:
todos = []
if not self.error:
if self.level >= 1:
todos.append(self.name)
for child in self.children:
todos = child.find_all_names(todos)
return todos
def find_all_tasks(self, todos=None):
if todos == None:
todos = []
if not self.error:
if self.level >= 1:
todos.append(self)
for child in self.children:
todos = child.find_all_tasks(todos)
return todos
def find_all_task_occurances(self, task, occurances=None):
if occurances == None:
occurances = 0
if self.is_same_task(task):
occurances +=1
#report(' DUPLICATE CALDAV: ' + str(occurances) + ' ' + task.name)
for child in self.children:
occurances = child.find_all_task_occurances(task, occurances)
return occurances
def find_active(self, active=None):
if active == None:
active = []
if self.is_active():
active.append(self)
self.active = True
is_sequential = self.is_sequential()
for child in self.children:
if child.is_completed:
continue
if not child.is_available():
if is_sequential:
break
continue
active = child.find_active(active)
if is_sequential:
break
return active
def is_valid_task(self):
if self.level <= 0:
return False
if self.is_header:
return False
if self.is_onhold:
return False
if self.error:
return False
return True
def find_next_actions(self, set_updated=True, updated=None):
#if 'Meshing ' in self.name:
# verb=True
#else:
# verb=False
if updated is None:
updated = []
next_action = self.find_next_action()
#if verb: print self.name + ': ['+str(self.next_action)+']', '['+str(next_action)+']'
if self.next_action != next_action:
self.next_action = next_action
if set_updated:
self.set_updated(follow=False)
updated.append(self.name)
#print ' UPDATED', self.name
#print self.name + ': ['+str(self.next_action)+']', '['+str(next_action)+']'
for child in self.children:
child.find_next_actions(set_updated=set_updated, updated=updated)
return updated
def find_next_action(self):
next_action = None
if self.level <= 0:
return None
if self.parent.is_sequential():
neighbours = self.parent.children
found = False
for neighbour in neighbours:
if found:
if neighbour.is_valid_task():
next_action = neighbour
break
elif neighbour.name == self.name:
found = True
if next_action is None:
return self.parent.find_next_action()
else:
return next_action.name
# next_actions = []
# if len(self.parents) == 0:
# return next_actions
# p = self.parents[-1]
# if not p.is_sequential():
# return next_actions
def find_error(self, error=None):
if error == None:
error = []
if self.error:
error.append(self)
for child in self.children:
error = child.find_error(error)
return error
def show_error(self, show_notes=False):
errors = self.find_error()
if len(errors) == 0: return
report(colour.redbright + 'ERROR' + colour.end)
for task in errors:
report(task.to_string(indentnone=True, notes=show_notes, show_where=True), forced=True)
def is_important(self):
return (self.priority is not None)
def is_due_on_day(self, day):
if self.due is None:
return False
if self.due.year != day.year:
return False
if self.due.month != day.month:
return False
if self.due.day != day.day:
return False
return True
def is_overdue(self):
if self.due is None:
return False
return universe.now > self.due
def is_due_today(self):
return self.is_due_on_day(universe.now)
def is_due_tomorrow(self):
return self.is_due_on_day(universe.now + timedelta(days=1))
def is_overdue_yesterday_or_past(self):
return (self.is_overdue() and (not self.is_due_today()))
def is_overdue_today_tomorrow_important(self):
return (self.is_overdue() or self.is_due_today() or self.is_due_tomorrow() or self.is_important())
def make_due_today(self, displacement=0, avoid_weekends=False):
new_due = None
new_start = None
new_alarm = None
detail = ''
# shift from now time to due time, all today
#shift = ((self.due.hour - universe.now.hour) * 60 + (self.due.minute - universe.now.minute)) * 60 + self.due.second - universe.now.second
#new_due = universe.now + timedelta(seconds=shift)
if self.repeat == 'random':
new_due = universe.now.replace(second=0, microsecond=0) + calculate_delta('random')
else:
new_due = universe.now.replace(hour=self.due.hour, minute=self.due.minute, second=0, microsecond=0)
# Apply displacement days
new_due = new_due + timedelta(days=displacement)
new_due = do_avoid_weekend(new_due, avoid_weekends=avoid_weekends)
# Update start time
if (self.starttext is not None and len(self.starttext) > 0):
string = self.starttext
if is_relative_date(string):
d = calculate_delta(string)
if d is not None:
new_start = new_due - d
# Update alarm
if self.alarm is not None:
if self.alarmtext is not None:
self.alarm, allday = parsedate(self.alarmtext, reference=new_due, alarm=True, allday=self.allday())
elif self.allday():
# Warning for day events 1800 - 1000 = 8 hours
new_alarm = new_due + universe.defaulttime.alldaydiff
else:
# Default warning of an hour
new_alarm = new_due + universe.defaulttime.diff
detail = detail + ' due: %(old)s -> %(new)s' % {
'old': '[empty]' if self.due is None else self.due.strftime('%y%m%d%H%M%z'),
'new': '[empty]' if new_due is None else new_due.strftime('%y%m%d%H%M%z')
}
self.due = new_due
if new_start is not None:
detail = detail + ' start: %(old)s -> %(new)s' % {
'old': '[empty]' if self.start is None else self.start.strftime('%y%m%d%H%M%z'),
'new': '[empty]' if new_start is None else new_start.strftime('%y%m%d%H%M%z')
}
self.start = new_start
if new_alarm is not None:
detail = detail + ' alarm: %(old)s -> %(new)s' % {
'old': '[empty]' if self.alarm is None else self.alarm.strftime('%y%m%d%H%M%z'),
'new': '[empty]' if new_alarm is None else new_alarm.strftime('%y%m%d%H%M%z')
}
self.alarm = new_alarm
report(colour.yellow + 'Update due to today for important task in' + colour.end + ' ' + colour.yellowbright + '|'.join(self.parents) + colour.yellow + ':' + colour.end + ' ' + self.name + colour.grey + detail + colour.end)
self.make_modified_parents()
return
def prioritycurrent(self, caldav=False):
# Make tasks with a priority that have a due time in the previous days or past,
# due today at the same time
# Only applied to current active list?
#print self.name
if ((self.is_important() or self.current) and self.is_overdue_yesterday_or_past()):
#print 'HERE', self.name
try:
# Check here if in skipweekendlists
avoid_weekends = ((self.group(masked=False) in universe.skipweekendlists) or self.avoidweekends)
# self.make_due_next_work_day()
self.make_due_today(avoid_weekends=avoid_weekends)
# state.aiyo.make_modified(self)
if caldav:
from CaldavClient import ical_event_update
ical_event_update(self, due=True)
else:
self.set_modified()
except Exception, e:
out = os.linesep + ' Task: ' + self.name + ' ' + self.due.strftime('%y%m%d%H%M')
error('Error in making a priority task current, exception: ' + str(e) + out)
pass
def to_string(self, reformat=False, indentfull=False, indentnone=False, notes=True, show_where=False, show_next_action=False, show_translate_inheritance=False):
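    # Render this task as a single aiyo-format line (name plus option tokens)
    # followed by its indented note; generate_mono(reformat) is assumed to
    # strip colour codes when the text is written back to file.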
iro = generate_mono(reformat)
contentlist = []
if self.lines is not None:
for i in range(len(self.lines)):
contentlist.append('%(num)6d %(indent)2d %(content)s' % { 'num':i, 'indent':indentation(self.lines[i]), 'content':self.lines[i] })
content = os.linesep.join(contentlist)
if not notes:
note = ''
elif self.note is None:
note = ''
elif len(self.note) == 0:
note = ''
else:
note = os.linesep + os.linesep.join([ ' ' * 4 + notelines for notelines in self.note.splitlines() ])
note = iro.grey + note + iro.end
out_due = ''
out_due_date = None
if self.due is not None:
out_due_date = self.due
elif self.alarm is not None:
out_due_date = self.alarm
else:
out_due = ''
if out_due_date is not None:
if self.allday():
out_due = out_due_date.strftime('%y%m%d')
else:
out_due = out_due_date.strftime('%y%m%d%H%M')
# Work out diff
if self.alarm is not None:
out_alarm = self.alarm.strftime('%y%m%d%H%M')
if self.due is not None:
d = self.alarm - self.due
if (self.allday() and d == universe.defaulttime.alldaydiff):
out_alarm = ''
elif (not self.allday() and d == universe.defaulttime.diff):
out_alarm = ''
else:
dh = timedelta_to_human(d)
if dh is not None:
out_alarm = dh
else:
out_alarm = ''
if len(out_alarm) > 0:
out_alarm = ' !' + out_alarm
out_priority = prioritystring(self.priority, spacer=True)
translate = ''
if self.translate is not None:
if self.is_translate():
if (self.parent is None or show_translate_inheritance):
translate = ' =' + self.translate
else:
if not self.parent.is_translate():
translate = ' =' + self.translate
#print self.name, self.translate, translate, self.parent
if show_where:
parents = ' ' + (iro.grey+':'+iro.end).join([ iro.grey + x + iro.end for x in self.parents ])
else:
parents = ''
if show_next_action and (self.next_action is not None) and (len(str(self.next_action)) > 0):
next_action = ' ' + iro.green + universe.next_char + str(self.next_action) + iro.end
else:
next_action = ''
if self.is_overdue():
highlight_name = iro.redbright
elif self.is_due_today():
highlight_name = iro.red
elif self.is_due_tomorrow():
highlight_name = iro.yellow
elif self.priority is not None:
highlight_name = iro.yellow
else:
highlight_name = ''
options = '''\
%(spacer)s%(start)s%(divider)s%(due)s%(expire)s%(alarm)s%(priority)s%(repeat)s%(translate)s%(checklist)s%(flow)s%(header)s%(waitonrepeat)s%(permanent)s%(current)s%(avoidweekends)s%(wait)s%(paused)s%(completed)s%(parents)s%(next)s%(error)s''' \
% {
'start': '' if (self.starttext is None or len(self.starttext) == 0) else iro.cyan + self.starttext + iro.end,
'due': iro.blue + out_due + iro.blue,
'alarm': iro.red + out_alarm + iro.end,
'priority': iro.redbright + out_priority + iro.end,
'divider': '' if (self.starttext is None or len(self.starttext) == 0 ) else iro.grey + ':' + iro.end,
'repeat': '' if (self.repeat is None or len(self.repeat) == 0) else ' ' + iro.magenta + self.repeat + iro.end,
'expire': '' if (self.expiretext is None or len(self.expiretext) == 0) else ' ' + iro.magenta + self.expiretext + iro.end,
'spacer': '' if ((self.starttext is None or len(self.starttext) == 0) and (len(out_due) == 0)) else ' ',
'translate': iro.yellow + translate + iro.end,
'checklist': iro.yellow+' checklist'+iro.end if self.is_checklist else '',
'header': iro.yellow+' header'+iro.end if self.is_header else '',
'completed': iro.green+' completed'+iro.end if self.is_completed else '',
'paused': iro.blue+' hold'+iro.end if self.is_onhold else '',
'permanent': iro.magenta+' permanent'+iro.end if self.is_permanent else '',
'current': iro.magenta+' current'+iro.end if self.current else '',
'avoidweekends': iro.magenta+' avoidweekends'+iro.end if self.avoidweekends else '',
'wait': ' ' + iro.blue+self.wait+iro.end if self.is_wait() else '',
'waitonrepeat': iro.blue+' waitonrepeat'+iro.end if self.waitonrepeat else '',
'error': iro.redbright+' ERROR'+iro.end if self.error else '',
'flow': iro.magenta+' ' + self.flowtext+iro.end if self.flowtext is not None else '',
'parents': parents,
'next': next_action,
}
text = '''%(name)s%(spacer)s%(options)s%(note)s''' \
% {
'name': highlight_name + self.name + iro.end,
'spacer': '' if len(options) == 0 else ' ',
'options': options,
'note': note,
}
if indentnone:
indent = 2
else:
indentmod = 0
if indentfull:
indentmod = 2
if reformat:
indentmod = -1
indent = (self.level + indentmod) * 2
text = os.linesep.join([ ' ' * indent + notelines for notelines in text.splitlines() ])
return text
def __str__(self):
return self.to_string()
def find_children(self):
for i in range(len(self.childblocks)):
block = self.childblocks[i]
parents = []
for p in self.parents + [self.name]:
parents.append(p)
child = FileTodos(self.lines[block[0]:block[1]], parents = parents, number=i+1, parent=self, translate=self.translate)
self.add_child(child)
def find_note(self):
if self.lines is None: return ''
if len(self.lines) == 0: return ''
if self.level == 0:
if indentation(self.lines[0]) < self.level + 1: return ''
else:
if len(self.lines) == 1: return ''
if indentation(self.lines[1]) < self.level + 1: return ''
note = []
for i in range(len(self.lines)):
if ((self.level > 0) and (i == 0)): continue
if indentation(self.lines[i]) < self.level + 1: break
note.append(re.sub('^'+ ' ' * (self.level + 1) * 2, '', self.lines[i]))
if len(note) == 0:
return ''
return os.linesep.join(note)
def set_note(self, obj):
self.note = obj
def add_child(self, obj):
obj.parent = self
self.children.append(obj)
def set_block(self, obj):
self.block = obj
def set_childblocks(self, obj):
self.childblocks = obj
def show_tree(self, indentfull=True, notes=True, activeonly=False, availableonly=False):
if ((activeonly or availableonly) and not self.is_available()): return
if (activeonly and not self.is_active()): return
report(self.to_string(indentfull=indentfull, notes=notes), forced=True)
for child in self.children:
child.show_tree(indentfull=indentfull, notes=notes, activeonly=activeonly, availableonly=availableonly)
def reformat(self):
output = ''
if self.level > 0:
output = self.to_string(reformat=True) + os.linesep
for child in self.children:
output = output + child.reformat()
if (self.level == 0 and self.filenotes is not None):
output = output + os.linesep.join(['',''] + self.filenotes)
return output
def write(self, name=None, category=None):
if not self.modified: return False
if name is None:
name = self.name
if len(self.parents) > 0:
category = self.parents[0]
if category is None:
filename = universe.dataroot + name
else:
filename = universe.dataroot + category + '/'
if not os.path.exists(filename):
# Could be case here where file exists in place of foldername, this will cause trouble!
os.mkdir(filename)
filename = filename + name
repo_in = os.path.exists(filename)
report(colour.grey + 'Writing ' + colour.blue + category + colour.grey + '/' + colour.yellowbright + name + colour.end + ' ' + colour.grey + '(to' + colour.grey + ' ' + filename + colour.grey + ')' + colour.end)
if not universe.dry:
f = open(filename, 'w')
f.write(self.reformat().encode('utf-8'))
f.close()
if not repo_in:
repo_add(filename)
if self.is_empty():
report(' ' + colour.grey + 'Removing ' + colour.blue + category + colour.grey + '/' + colour.yellowbright + name + colour.end + ' ' + colour.grey + '(' + colour.grey + filename + colour.grey + ')' + colour.end)
if not universe.dry:
try:
if os.path.exists(filename):
os.remove(filename)
repo_remove(filename)
except:
pass
return True
def identify_blocks(self, start=None, end=None):
lines_to_excluded_section = 2
debug = False
#debug = (self.name == 'finance')
if self.lines is None:
return []
def add_block(r):
blocks.append(r)
if debug: print ' ', r
blocks = []
if start is None:
start = 0
if end is None:
end = len(self.lines)
if len(self.lines) <= 1: return blocks
r = [ -1, -1 ]
blanks = 0
for i in range(start, end):
line = self.lines[i]
indent = indentation(line)
if debug: print i, blanks, r, indent, line
if len(line) == 0:
blanks += 1
continue
# Indent is of current level
if indent == self.level:
# Existing block
if (r[0] > -1 and r[1] == -1):
if debug: print 'complete', blanks, blanks >= 2
r[1] = i
add_block(r)
r = [ -1, -1 ]
if r[0] == -1:
if debug: print 'new'
# If 2 or more previous blanks AND now indent = level
if blanks >= lines_to_excluded_section: break
# Start new block
if len(line.strip()) > 0:
r[0] = i
blanks = 0
# Add concluding block, if one has begun
if ((r[0] > -1) and (r[1] == -1)):
r[1] = i + 1
add_block(r)
if debug: print self.name, blocks
if debug:
report('XXXX'+ self.name)
print blocks
if len(blocks) > 0: print os.linesep.join(self.lines[blocks[-1][0]:blocks[-1][1]])
sys.exit(1)
return blocks
def interpret_task(self, title):
sections = title.split(' ', 1)
if len(sections) == 2:
# Check if len(sections[1]) > 0?
self.name = sections[0]
title = sections[1]
else:
self.name = title
title = ''
words = title.split(' ')
titlelist = []
for word in words:
# NLP not working here, as cannot apply set_modified at this early point of parsing,
# would need to mark to update aiyo at a later stage, once the FileTodo object
# has been set up.
if re.match('^today$', word):
self.duetext = universe.now.strftime('%y%m%d')
self.set_modified()
elif re.match('^tomorrow$', word):
self.duetext = (universe.now + timedelta(days=1)).strftime('%y%m%d')
self.set_modified()
elif word in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday'] \
+ ['mon', 'tues', 'tue', 'wed', 'thurs', 'thu', 'thur', 'fri', 'sat', 'sun']:
self.duetext = next_weekday(word)
self.set_modified()
elif re.match('^\d*(day|week|month|year)s*$', word):
self.duetext = next_increment(word)
self.set_modified()
elif re.match('^\w+:today$', word):
self.starttext, self.duetext = word.rsplit(':', 1)
self.duetext = universe.now.strftime('%y%m%d')
self.set_modified()
elif re.match('^\w+:tomorrow$', word):
self.starttext, self.duetext = word.rsplit(':', 1)
self.duetext = (universe.now + timedelta(days=1)).strftime('%y%m%d')
self.set_modified()
elif re.match('^\w+:(monday|tuesday|wednesday|thursday|friday|saturday|sunday|mon|tues|tue|wed|thurs|thu|thur|fri|sat|sun)$', word):
self.starttext, self.duetext = word.rsplit(':', 1)
self.duetext = next_weekday(self.duetext)
self.set_modified()
elif re.match('^\w+:\d*(day|week|month|year)s*$', word):
self.starttext, self.duetext = word.rsplit(':', 1)
self.duetext = next_increment(self.duetext)
self.set_modified()
elif re.match('^\d{6}$', word):
self.duetext = word
elif re.match('^\d{10}$', word):
self.duetext = word
elif re.match('^\d{6}:$', word):
self.starttext = word[:-1]
elif re.match('^\d{10}:$', word):
self.starttext = word[:-1]
elif re.match('^\w+:\d{6}$', word):
self.starttext, self.duetext = word.rsplit(':', 1)
elif re.match('^\w+:\d{10}$', word):
self.starttext, self.duetext = word.rsplit(':', 1)
elif re.match('^\w+:$', word):
self.starttext = word[:-1]
elif re.match('^!\d{6}$', word):
self.alarmtext = word[1:]
elif re.match('^!\d{10}$', word):
self.alarmtext = word[1:]
elif (re.match('^!\w+$', word) and is_relative_date(word)):
self.alarmtext = word[1:]
elif re.match('^!$', word):
self.priority = 9
elif re.match('^!!$', word):
self.priority = 5
elif re.match('^!!!$', word):
self.priority = 1
elif re.match('^every\w+$', word):
self.repeat = word
elif re.match('^after\w+$', word):
self.repeat = word
elif re.match('^random$', word):
self.repeat = word
elif word in ['decennially', 'biennially', 'annually', 'monthly', 'fortnightly', 'weekly', 'daily']:
self.repeat = word
elif re.match('^expire\w+$', word):
self.expiretext = word
elif re.match('^checklist$', word):
self.is_checklist = True
elif re.match('^sequential$', word):
self.flowtext = 'sequential'
elif re.match('^parallel$', word):
self.flowtext = 'parallel'
elif re.match('^header$', word):
self.is_header = True
elif re.match('^completed$', word):
self.is_completed = True
elif re.match('^paused$', word):
self.is_onhold = True
elif re.match('^onhold$', word):
self.is_onhold = True
elif re.match('^hold$', word):
self.is_onhold = True
elif re.match('^permanent$', word):
self.is_permanent = True
elif re.match('^avoidweekends$', word):
self.avoidweekends = True
elif re.match('^current$', word):
self.current = True
#elif re.match('^everpresent$', word):
# self.is_everpresent = True
elif re.match('^waitonrepeat$', word):
self.waitonrepeat = True
#self.wait = 'wait'
elif re.match('^wait$', word):
self.wait = word
elif re.match('^ERROR$', word):
self.error = True
# asc
elif re.match('^=\w+$', word):
self.translate = word[1:]
elif re.match('^@\w+$', word):
self.sublist = word[1:]
else:
titlelist.append(word)
if self.flowtext is not None:
self.flow = self.flowtext
| gpl-3.0 | -4,814,139,854,392,383,000 | 34.845088 | 271 | 0.593531 | false |
Azure/azure-sdk-for-python | sdk/cognitiveservices/azure-cognitiveservices-search-visualsearch/azure/cognitiveservices/search/visualsearch/models/__init__.py | 1 | 5495 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
try:
from .point2_d_py3 import Point2D
from .normalized_quadrilateral_py3 import NormalizedQuadrilateral
from .image_tag_region_py3 import ImageTagRegion
from .image_action_py3 import ImageAction
from .image_tag_py3 import ImageTag
from .organization_py3 import Organization
from .aggregate_rating_py3 import AggregateRating
from .offer_py3 import Offer
from .aggregate_offer_py3 import AggregateOffer
from .images_image_metadata_py3 import ImagesImageMetadata
from .image_object_py3 import ImageObject
from .image_knowledge_py3 import ImageKnowledge
from .response_py3 import Response
from .identifiable_py3 import Identifiable
from .error_py3 import Error
from .error_response_py3 import ErrorResponse, ErrorResponseException
from .thing_py3 import Thing
from .action_py3 import Action
from .media_object_py3 import MediaObject
from .response_base_py3 import ResponseBase
from .creative_work_py3 import CreativeWork
from .person_py3 import Person
from .intangible_py3 import Intangible
from .image_entity_action_py3 import ImageEntityAction
from .images_module_py3 import ImagesModule
from .image_module_action_py3 import ImageModuleAction
from .recipe_py3 import Recipe
from .recipes_module_py3 import RecipesModule
from .image_recipes_action_py3 import ImageRecipesAction
from .query_py3 import Query
from .related_searches_module_py3 import RelatedSearchesModule
from .image_related_searches_action_py3 import ImageRelatedSearchesAction
from .image_shopping_sources_action_py3 import ImageShoppingSourcesAction
from .structured_value_py3 import StructuredValue
from .properties_item_py3 import PropertiesItem
from .rating_py3 import Rating
from .crop_area_py3 import CropArea
from .image_info_py3 import ImageInfo
from .filters_py3 import Filters
from .knowledge_request_py3 import KnowledgeRequest
from .visual_search_request_py3 import VisualSearchRequest
except (SyntaxError, ImportError):
from .point2_d import Point2D
from .normalized_quadrilateral import NormalizedQuadrilateral
from .image_tag_region import ImageTagRegion
from .image_action import ImageAction
from .image_tag import ImageTag
from .organization import Organization
from .aggregate_rating import AggregateRating
from .offer import Offer
from .aggregate_offer import AggregateOffer
from .images_image_metadata import ImagesImageMetadata
from .image_object import ImageObject
from .image_knowledge import ImageKnowledge
from .response import Response
from .identifiable import Identifiable
from .error import Error
from .error_response import ErrorResponse, ErrorResponseException
from .thing import Thing
from .action import Action
from .media_object import MediaObject
from .response_base import ResponseBase
from .creative_work import CreativeWork
from .person import Person
from .intangible import Intangible
from .image_entity_action import ImageEntityAction
from .images_module import ImagesModule
from .image_module_action import ImageModuleAction
from .recipe import Recipe
from .recipes_module import RecipesModule
from .image_recipes_action import ImageRecipesAction
from .query import Query
from .related_searches_module import RelatedSearchesModule
from .image_related_searches_action import ImageRelatedSearchesAction
from .image_shopping_sources_action import ImageShoppingSourcesAction
from .structured_value import StructuredValue
from .properties_item import PropertiesItem
from .rating import Rating
from .crop_area import CropArea
from .image_info import ImageInfo
from .filters import Filters
from .knowledge_request import KnowledgeRequest
from .visual_search_request import VisualSearchRequest
from .visual_search_client_enums import (
Currency,
ItemAvailability,
ErrorCode,
ErrorSubCode,
SafeSearch,
)
__all__ = [
'Point2D',
'NormalizedQuadrilateral',
'ImageTagRegion',
'ImageAction',
'ImageTag',
'Organization',
'AggregateRating',
'Offer',
'AggregateOffer',
'ImagesImageMetadata',
'ImageObject',
'ImageKnowledge',
'Response',
'Identifiable',
'Error',
'ErrorResponse', 'ErrorResponseException',
'Thing',
'Action',
'MediaObject',
'ResponseBase',
'CreativeWork',
'Person',
'Intangible',
'ImageEntityAction',
'ImagesModule',
'ImageModuleAction',
'Recipe',
'RecipesModule',
'ImageRecipesAction',
'Query',
'RelatedSearchesModule',
'ImageRelatedSearchesAction',
'ImageShoppingSourcesAction',
'StructuredValue',
'PropertiesItem',
'Rating',
'CropArea',
'ImageInfo',
'Filters',
'KnowledgeRequest',
'VisualSearchRequest',
'Currency',
'ItemAvailability',
'ErrorCode',
'ErrorSubCode',
'SafeSearch',
]
| mit | -5,166,224,929,300,869,000 | 35.390728 | 77 | 0.722839 | false |
mlk/thefuck | tests/rules/test_dirty_unzip.py | 1 | 2175 | # -*- coding: utf-8 -*-
import os
import pytest
import zipfile
from thefuck.rules.dirty_unzip import match, get_new_command, side_effect
from tests.utils import Command
from unicodedata import normalize
@pytest.fixture
def zip_error(tmpdir):
def zip_error_inner(filename):
path = os.path.join(str(tmpdir), filename)
def reset(path):
with zipfile.ZipFile(path, 'w') as archive:
archive.writestr('a', '1')
archive.writestr('b', '2')
archive.writestr('c', '3')
archive.writestr('d/e', '4')
archive.extractall()
os.chdir(str(tmpdir))
reset(path)
dir_list = os.listdir(u'.')
if filename not in dir_list:
filename = normalize('NFD', filename)
assert set(dir_list) == {filename, 'a', 'b', 'c', 'd'}
assert set(os.listdir('./d')) == {'e'}
return zip_error_inner
@pytest.mark.parametrize('script,filename', [
(u'unzip café', u'café.zip'),
(u'unzip café.zip', u'café.zip'),
(u'unzip foo', u'foo.zip'),
(u'unzip foo.zip', u'foo.zip')])
def test_match(zip_error, script, filename):
zip_error(filename)
assert match(Command(script=script))
@pytest.mark.parametrize('script,filename', [
(u'unzip café', u'café.zip'),
(u'unzip café.zip', u'café.zip'),
(u'unzip foo', u'foo.zip'),
(u'unzip foo.zip', u'foo.zip')])
def test_side_effect(zip_error, script, filename):
zip_error(filename)
side_effect(Command(script=script), None)
dir_list = os.listdir(u'.')
if filename not in set(dir_list):
filename = normalize('NFD', filename)
assert set(dir_list) == {filename, 'd'}
@pytest.mark.parametrize('script,fixed,filename', [
(u'unzip café', u"unzip café -d 'café'", u'café.zip'),
(u'unzip foo', u'unzip foo -d foo', u'foo.zip'),
(u"unzip 'foo bar.zip'", u"unzip 'foo bar.zip' -d 'foo bar'", u'foo.zip'),
(u'unzip foo.zip', u'unzip foo.zip -d foo', u'foo.zip')])
def test_get_new_command(zip_error, script, fixed, filename):
zip_error(filename)
assert get_new_command(Command(script=script)) == fixed
| mit | 3,121,337,484,747,704,300 | 29.464789 | 78 | 0.601942 | false |
Thortoise/Super-Snake | Blender/animation_nodes-master/sockets/shape_key.py | 1 | 2668 | import bpy
from bpy.props import *
from bpy.types import ShapeKey
from .. events import propertyChanged
from .. base_types.socket import AnimationNodeSocket
from .. utils.id_reference import tryToFindObjectReference
class ShapeKeySocket(bpy.types.NodeSocket, AnimationNodeSocket):
bl_idname = "an_ShapeKeySocket"
bl_label = "Shape Key Socket"
dataType = "Shape Key"
allowedInputTypes = ["Shape Key"]
drawColor = (1.0, 0.6, 0.5, 1)
storable = False
comparable = True
objectName = StringProperty(update = propertyChanged,
description = "Load the second shape key of this object (the first that is not the reference key)")
def drawProperty(self, layout, text, node):
row = layout.row(align = True)
row.prop_search(self, "objectName", bpy.context.scene, "objects", icon = "NONE", text = text)
self.invokeFunction(row, node, "assignActiveObject", icon = "EYEDROPPER")
def getValue(self):
object = self.getObject()
if object is None: return None
if object.type not in ("MESH", "CURVE", "LATTICE"): return None
if object.data.shape_keys is None: return None
try: return object.data.shape_keys.key_blocks[1]
except: return None
def getObject(self):
if self.objectName == "": return None
object = tryToFindObjectReference(self.objectName)
name = getattr(object, "name", "")
if name != self.objectName: self.objectName = name
return object
def updateProperty(self):
self.getObject()
def assignActiveObject(self):
object = bpy.context.active_object
if object:
self.objectName = object.name
@classmethod
def getDefaultValue(cls):
return None
@classmethod
def correctValue(cls, value):
if isinstance(value, ShapeKey) or value is None:
return value, 0
return cls.getDefaultValue(), 2
class ShapeKeyListSocket(bpy.types.NodeSocket, AnimationNodeSocket):
bl_idname = "an_ShapeKeyListSocket"
bl_label = "Shape Key List Socket"
dataType = "Shape Key List"
baseDataType = "Shape Key"
allowedInputTypes = ["Shape Key List"]
drawColor = (1.0, 0.6, 0.5, 0.5)
storable = False
comparable = False
@classmethod
def getDefaultValue(cls):
return []
@classmethod
def getDefaultValueCode(cls):
return "[]"
@classmethod
def correctValue(cls, value):
if isinstance(value, list):
if all(isinstance(element, ShapeKey) or element is None for element in value):
return value, 0
return cls.getDefaultValue(), 2
| gpl-3.0 | -1,726,559,515,112,783,400 | 30.761905 | 107 | 0.652174 | false |
stanfordnmbl/osim-rl | tests/test.round2.py | 1 | 1833 | from osim.env import ProstheticsEnv, rect
import numpy as np
import unittest
import math
class SimulationTest(unittest.TestCase):
def test_reset(self):
env = ProstheticsEnv(visualize=False, difficulty=0)
o = env.reset()
self.assertEqual(type(o), list)
o = env.reset(project = False)
self.assertEqual(type(o), dict)
o = env.reset(project = True)
self.assertEqual(type(o), list)
action = env.action_space.sample()
o,r,d,i = env.step(action, project = False)
self.assertRaises(KeyError, lambda : o["target_vel"])
env = ProstheticsEnv(visualize=False, difficulty=1)
env.reset()
o,r,d,i = env.step(action, project = False)
self.assertEqual(len(o["target_vel"]), 3)
env.generate_new_targets(10)
for i in range(20):
o,r,d,i = env.step(action, project = False)
self.assertGreater(rect([2, 0])[0], 1.99)
self.assertLess(rect([2, math.pi/2.0])[0], 0.01)
env.reset()
env.generate_new_targets(10)
# After 300 steps we should be far
self.assertGreater(np.sum( (env.targets[300,:] - np.array([1.25,0,0]))**2 ), 0.01)
state = env.osim_model.get_state()
env.osim_model.get_joint("ground_pelvis").get_coordinates(0).setSpeedValue(state, 5)
env.osim_model.set_state(state)
o1,r1,d,i = env.step(action, project = False)
env.osim_model.get_joint("ground_pelvis").get_coordinates(0).setSpeedValue(state, 1.25)
env.osim_model.set_state(state)
o2,r2,d,i = env.step(action, project = False)
self.assertGreater(o1["joint_vel"]["ground_pelvis"],o2["joint_vel"]["ground_pelvis"])
self.assertGreater(r2,r1)
if __name__ == '__main__':
unittest.main()
| mit | 8,473,579,778,601,105,000 | 34.941176 | 95 | 0.599018 | false |
almostearthling/when-wizard | share/when-wizard/plugins/cond-event-batterylow.py | 1 | 1402 | # file: share/when-wizard/templates/cond-event-batterylow.py
# -*- coding: utf-8 -*-
#
# Condition plugin for the low battery event
# Copyright (c) 2015-2018 Francesco Garosi
# Released under the BSD License (see LICENSE file)
import locale
from plugin import EventConditionPlugin, PLUGIN_CONST, plugin_name
# setup i18n for both applet text and dialogs
locale.setlocale(locale.LC_ALL, locale.getlocale())
locale.bindtextdomain(APP_NAME, APP_LOCALE_FOLDER)
locale.textdomain(APP_NAME)
_ = locale.gettext
HELP = _("""\
This event will occur when the battery is considered critically low by the
system: use this only if the event is not caught by the system itself, for
example by hibernating the computer.
""")
EVENT_SYSTEM_BATTERY_LOW = 'battery_low'
class Plugin(EventConditionPlugin):
def __init__(self):
EventConditionPlugin.__init__(
self,
basename=plugin_name(__file__),
name=_("Low Battery"),
description=_("The Battery is Critically Low"),
author=APP_AUTHOR,
copyright=APP_COPYRIGHT,
icon='low_battery',
help_string=HELP,
version=APP_VERSION,
)
self.category = PLUGIN_CONST.CATEGORY_COND_POWER
self.stock = True
self.event = EVENT_SYSTEM_BATTERY_LOW
self.summary_description = _("When the battery is critically low")
# end.
| bsd-3-clause | 3,910,088,821,444,205,000 | 28.208333 | 74 | 0.669044 | false |
siddartha1992/cloud-custodian | tools/c7n_mailer/tests/test_email.py | 1 | 9174 | # Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import boto3
import copy
import os
import unittest
import six
from c7n_mailer.email_delivery import EmailDelivery
from common import logger, get_ldap_lookup
from common import MAILER_CONFIG, RESOURCE_1, SQS_MESSAGE_1
from mock import patch, call
# Note: principalId is very org/domain specific for federated users; it would be good
# to get confirmation from Capital One on this event / test.
CLOUDTRAIL_EVENT = {
'detail': {
'userIdentity': {
"type": "IAMUser",
"principalId": "AIDAJ45Q7YFFAREXAMPLE",
"arn": "arn:aws:iam::123456789012:user/michael_bolton",
"accountId": "123456789012",
"accessKeyId": "AKIAIOSFODNN7EXAMPLE",
"userName": "michael_bolton"
}
}
}
class MockEmailDelivery(EmailDelivery):
def get_ldap_connection(self):
return get_ldap_lookup(cache_engine='redis')
class EmailTest(unittest.TestCase):
def setUp(self):
self.aws_session = boto3.Session()
self.email_delivery = MockEmailDelivery(MAILER_CONFIG, self.aws_session, logger)
self.email_delivery.ldap_lookup.uid_regex = ''
tests_dir = '/tools/c7n_mailer/tests/'
template_abs_filename = '%s%sexample.jinja' % (os.path.abspath(os.curdir), tests_dir)
SQS_MESSAGE_1['action']['template'] = template_abs_filename
def test_valid_email(self):
self.assertFalse(self.email_delivery.target_is_email('foobar'))
self.assertFalse(self.email_delivery.target_is_email('foo@bar'))
self.assertTrue(self.email_delivery.target_is_email('[email protected]'))
def test_priority_header_is_valid(self):
self.assertFalse(self.email_delivery.priority_header_is_valid('0'))
self.assertFalse(self.email_delivery.priority_header_is_valid('-1'))
self.assertFalse(self.email_delivery.priority_header_is_valid('6'))
self.assertFalse(self.email_delivery.priority_header_is_valid('sd'))
self.assertTrue(self.email_delivery.priority_header_is_valid('1'))
self.assertTrue(self.email_delivery.priority_header_is_valid('5'))
def test_get_valid_emails_from_list(self):
list_1 = [
'[email protected]',
'lsdk',
'resource-owner',
'event-owner',
'[email protected]'
]
valid_emails = self.email_delivery.get_valid_emails_from_list(list_1)
self.assertEqual(valid_emails, ['[email protected]', '[email protected]'])
def test_event_owner_ldap_flow(self):
targets = ['event-owner']
username = self.email_delivery.get_aws_username_from_event(CLOUDTRAIL_EVENT)
self.assertEqual(username, 'michael_bolton')
michael_bolton_email = self.email_delivery.get_event_owner_email(targets, CLOUDTRAIL_EVENT)
self.assertEqual(michael_bolton_email, ['[email protected]'])
def test_get_ldap_emails_from_resource(self):
SQS_MESSAGE_1['action']['email_ldap_username_manager'] = False
ldap_emails = self.email_delivery.get_ldap_emails_from_resource(
SQS_MESSAGE_1,
RESOURCE_1
)
self.assertEqual(ldap_emails, ['[email protected]'])
SQS_MESSAGE_1['action']['email_ldap_username_manager'] = True
ldap_emails = self.email_delivery.get_ldap_emails_from_resource(
SQS_MESSAGE_1,
RESOURCE_1
)
self.assertEqual(ldap_emails, ['[email protected]', '[email protected]'])
def test_email_to_resources_map_with_ldap_manager(self):
emails_to_resources_map = self.email_delivery.get_email_to_addrs_to_resources_map(
SQS_MESSAGE_1
)
# make sure only 1 email is queued to go out
self.assertEqual(len(emails_to_resources_map.items()), 1)
to_emails = ('[email protected]', '[email protected]', '[email protected]')
self.assertEqual(emails_to_resources_map, {to_emails: [RESOURCE_1]})
def test_email_to_email_message_map_without_ldap_manager(self):
SQS_MESSAGE = copy.deepcopy(SQS_MESSAGE_1)
SQS_MESSAGE['policy']['actions'][1].pop('email_ldap_username_manager', None)
email_addrs_to_email_message_map = self.email_delivery.get_to_addrs_email_messages_map(
SQS_MESSAGE
)
to_emails = ('[email protected]', '[email protected]', '[email protected]')
items = list(email_addrs_to_email_message_map.items())
self.assertEqual(items[0][0], to_emails)
self.assertEqual(items[0][1]['to'], ', '.join(to_emails))
def test_smtp_called_once(self):
SQS_MESSAGE = copy.deepcopy(SQS_MESSAGE_1)
to_addrs_to_email_messages_map = self.email_delivery.get_to_addrs_email_messages_map(
SQS_MESSAGE
)
with patch("smtplib.SMTP") as mock_smtp:
for email_addrs, mimetext_msg in six.iteritems(to_addrs_to_email_messages_map):
self.email_delivery.send_c7n_email(SQS_MESSAGE, list(email_addrs), mimetext_msg)
self.assertEqual(mimetext_msg['X-Priority'], '1')
# Get instance of mocked SMTP object
smtp_instance = mock_smtp.return_value
# Checks the mock has been called at least one time
self.assertTrue(smtp_instance.sendmail.called)
# Check the mock has been called only once
self.assertEqual(smtp_instance.sendmail.call_count, 1)
# Check the mock' calls are equal to a specific list of calls in a
# specific order
to_addrs = ['[email protected]', '[email protected]', '[email protected]']
self.assertEqual(
smtp_instance.sendmail.mock_calls,
[call(MAILER_CONFIG['from_address'], to_addrs, mimetext_msg.as_string())]
)
def test_smtp_called_multiple_times(self):
SQS_MESSAGE = copy.deepcopy(SQS_MESSAGE_1)
SQS_MESSAGE['action'].pop('priority_header', None)
RESOURCE_2 = {
'AvailabilityZone': 'us-east-1a',
'Attachments': [],
'Tags': [
{
'Value': '[email protected]',
'Key': 'SupportEmail'
}
],
'VolumeId': 'vol-01a0e6ea6b8lsdkj93'
}
SQS_MESSAGE['resources'].append(RESOURCE_2)
to_addrs_to_email_messages_map = self.email_delivery.get_to_addrs_email_messages_map(
SQS_MESSAGE
)
with patch("smtplib.SMTP") as mock_smtp:
for email_addrs, mimetext_msg in six.iteritems(to_addrs_to_email_messages_map):
self.email_delivery.send_c7n_email(SQS_MESSAGE, list(email_addrs), mimetext_msg)
self.assertEqual(mimetext_msg.get('X-Priority'), None)
# self.assertEqual(mimetext_msg.get('X-Priority'), None)
# Get instance of mocked SMTP object
smtp_instance = mock_smtp.return_value
# Checks the mock has been called at least one time
self.assertTrue(smtp_instance.sendmail.called)
# Check the mock has been called only once
self.assertEqual(smtp_instance.sendmail.call_count, 2)
def test_emails_resource_mapping_multiples(self):
SQS_MESSAGE = copy.deepcopy(SQS_MESSAGE_1)
SQS_MESSAGE['action'].pop('priority_header', None)
RESOURCE_2 = {
'AvailabilityZone': 'us-east-1a',
'Attachments': [],
'Tags': [
{
'Value': '[email protected]',
'Key': 'SupportEmail'
}
],
'VolumeId': 'vol-01a0e6ea6b8lsdkj93'
}
SQS_MESSAGE['resources'].append(RESOURCE_2)
emails_to_resources_map = self.email_delivery.get_email_to_addrs_to_resources_map(
SQS_MESSAGE
)
email_1_to_addrs = ('[email protected]', '[email protected]', '[email protected]')
email_2_to_addrs = ('[email protected]',)
self.assertEqual(emails_to_resources_map[email_1_to_addrs], [RESOURCE_1])
self.assertEqual(emails_to_resources_map[email_2_to_addrs], [RESOURCE_2])
def test_no_mapping_if_no_valid_emails(self):
SQS_MESSAGE = copy.deepcopy(SQS_MESSAGE_1)
SQS_MESSAGE['action']['to'].remove('ldap_uid_tags')
SQS_MESSAGE['resources'][0].pop('Tags', None)
emails_to_resources_map = self.email_delivery.get_email_to_addrs_to_resources_map(
SQS_MESSAGE
)
self.assertEqual(emails_to_resources_map, {})
| apache-2.0 | -7,470,777,053,981,479,000 | 43.75122 | 99 | 0.629823 | false |
ghackebeil/PyORAM | src/pyoram/storage/block_storage.py | 1 | 3293 | __all__ = ('BlockStorageTypeFactory',)
import logging
log = logging.getLogger("pyoram")
def BlockStorageTypeFactory(storage_type_name):
if storage_type_name in BlockStorageTypeFactory._registered_devices:
return BlockStorageTypeFactory.\
_registered_devices[storage_type_name]
else:
raise ValueError(
"BlockStorageTypeFactory: Unsupported storage "
"type: %s" % (storage_type_name))
BlockStorageTypeFactory._registered_devices = {}
def _register_device(name, type_):
if name in BlockStorageTypeFactory._registered_devices:
raise ValueError("Can not register block storage device type "
"with name '%s'. A device type is already "
"registered with that name." % (name))
if not issubclass(type_, BlockStorageInterface):
raise TypeError("Can not register block storage device type "
"'%s'. The device must be a subclass of "
"BlockStorageInterface" % (type_))
BlockStorageTypeFactory._registered_devices[name] = type_
BlockStorageTypeFactory.register_device = _register_device
class BlockStorageInterface(object):
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
#
# Abstract Interface
#
def clone_device(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
@classmethod
def compute_storage_size(cls, *args, **kwds):
raise NotImplementedError # pragma: no cover
@classmethod
def setup(cls, *args, **kwds):
raise NotImplementedError # pragma: no cover
@property
def header_data(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
@property
def block_count(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
@property
def block_size(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
@property
def storage_name(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
def update_header_data(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
def close(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
def read_blocks(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
def yield_blocks(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
def read_block(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
def write_blocks(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
def write_block(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
@property
def bytes_sent(self):
raise NotImplementedError # pragma: no cover
@property
def bytes_received(self):
raise NotImplementedError # pragma: no cover
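# Editor's note: a minimal usage sketch (an assumption for illustration, not part of
# PyORAM's shipped API surface): registering a toy in-memory device with the factory
# and looking it up again by name. The class below exists only for this sketch.
if __name__ == "__main__":
    class _RAMBlockStorage(BlockStorageInterface):
        """Toy in-memory device used only for this sketch."""
        def __init__(self, block_count, block_size):
            self._blocks = [b"\x00" * block_size for _ in range(block_count)]
        def read_block(self, i):
            return self._blocks[i]
        def write_block(self, i, block):
            self._blocks[i] = block
        def close(self):
            pass

    # register_device requires a BlockStorageInterface subclass
    BlockStorageTypeFactory.register_device("ram", _RAMBlockStorage)
    device_class = BlockStorageTypeFactory("ram")
    device = device_class(block_count=4, block_size=8)
    device.write_block(0, b"01234567")
    print(device.read_block(0))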
| mit | -4,740,640,862,702,392,000 | 38.674699 | 73 | 0.580626 | false |
henriquegemignani/randovania | randovania/cli/__init__.py | 1 | 1781 | import argparse
import logging
import os
import sys
from pathlib import Path
import randovania
def create_subparsers(root_parser):
from randovania.cli import echoes, server, gui, prime_database
echoes.create_subparsers(root_parser)
prime_database.create_subparsers(root_parser)
server.create_subparsers(root_parser)
gui.create_subparsers(root_parser)
def _print_version(args):
print("Randovania {} from {}".format(
randovania.VERSION,
os.path.dirname(randovania.__file__)))
def _create_parser():
parser = argparse.ArgumentParser()
create_subparsers(parser.add_subparsers(dest="game"))
parser.add_argument("--version", action="store_const",
const=_print_version, dest="func")
parser.add_argument("--configuration", type=Path,
help="Use the given configuration path instead of the included one.")
return parser
def _run_args(parser, args):
if args.configuration is not None:
randovania.CONFIGURATION_FILE_PATH = args.configuration.absolute()
if args.func is None:
parser.print_help()
raise SystemExit(1)
logging.info("Executing from args...")
args.func(args)
def run_pytest(argv):
import pytest
import pytest_asyncio.plugin
import pytest_mock.plugin
sys.exit(pytest.main(argv[2:], plugins=[pytest_asyncio.plugin, pytest_mock.plugin]))
def run_cli(argv):
if len(argv) > 1 and argv[1] == "--pytest":
run_pytest(argv)
else:
args = argv[1:]
from randovania.cli import gui
if gui.has_gui and not args:
args = ["gui", "main"]
logging.info("Creating parsers...")
parser = _create_parser()
_run_args(parser, parser.parse_args(args))
| gpl-3.0 | -388,946,729,775,054,140 | 25.984848 | 93 | 0.653565 | false |
FABtotum/FAB-UI | recovery/python/boot.py | 1 | 1621 | import os, sys
import time
import serial
import json
import ConfigParser
from subprocess import call
config = ConfigParser.ConfigParser()
config.read('/var/www/lib/config.ini')
#startup script (see crontab)
#print "Boot script"
#time.sleep(60) #wait 60 seconds so connections can be made.
#print "Start"
#tell the board that the raspi has been connected.
#setting up serial communication
serial_port = config.get('serial', 'port')
serial_baud = config.get('serial', 'baud')
ser = serial.Serial(serial_port, serial_baud, timeout=1)
ser.flushInput()
ser.flushOutput()
ser.write('M728\r\n') #machine alive
time.sleep(0.5)
#LOAD USER CONFIG
#read configs
json_f = open(config.get('printer', 'settings_file'))
config = json.load(json_f)
##UNITS
#load custom units
#ser.write("M92 X"+str(config[x])+"\r\n")
#ser.write("M92 Y"+str(config[y])+"\r\n")
#ser.write("M92 Z"+str(config[z])+"\r\n")
#ser.write("M92 E"+str(config[e])+"\r\n")
##COLORS
ser.write("M701 S"+str(config['color']['r'])+"\r\n")
ser.write("M702 S"+str(config['color']['g'])+"\r\n")
ser.write("M703 S"+str(config['color']['b'])+"\r\n")
print "Ambient color setted"
#SAFETY
try:
safety_door = config['safety']['door']
except KeyError:
safety_door = 0
ser.write("M732 S"+str(safety_door)+"\r\n")
#print "Safety door setted"
try:
switch = config['switch']
except KeyError:
switch = 0
ser.write("M714 S"+str(switch)+"\r\n")
#print "Homing direction setted"
#clean the buffer and leave
ser.flush()
ser.close()
print "Boot completed"
#quit
sys.exit() | gpl-2.0 | -3,442,506,213,277,008,000 | 19.077922 | 60 | 0.6533 | false |
liweitianux/atoolbox | astro/radec2deg.py | 1 | 1957 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Aaron LI
# Created: 2015-04-17
# Updated: 2016-06-30
#
"""
Convert the coordinates data in format (??h??m??s, ??d??m??s)
to format (degree, degree).
"""
import os
import sys
import re
import getopt
import math
USAGE = """Usage:
%(prog)s [ -h ] -i coords_file
Required arguments:
-i, --infile
infile containing the coordinates
Optional arguments:
-h, --help
""" % {'prog': os.path.basename(sys.argv[0])}
def usage():
print(USAGE)
def ra2deg(h, m, s):
return h * 15.0 + m * 15.0/60.0 + s * 15.0/3600.0
def dec2deg(d, m, s):
if (d >= 0):
sign = 1.0
else:
sign = -1.0
return sign * (math.fabs(d) + m/60.0 + s/3600.0)
def s_ra2deg(hms):
h, m, s = map(float, re.sub('[hms]', ' ', hms).split())
return h * 15.0 + m * 15.0/60.0 + s * 15.0/3600.0
def s_dec2deg(dms):
d, m, s = map(float, re.sub('[dms]', ' ', dms).split())
if (d >= 0):
sign = 1.0
else:
sign = -1.0
return sign * (math.fabs(d) + m/60.0 + s/3600.0)
def calc_offset(coord1, coord2):
ra1, dec1 = coord1
ra2, dec2 = coord2
return math.sqrt((ra1-ra2)**2 + (dec1-dec2)**2)
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hi:",
["help", "infile="])
except getopt.GetoptError as err:
print(err)
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit(1)
elif opt in ("-i", "--infile"):
infile = arg
else:
assert False, "unhandled option"
for line in open(infile):
if re.match(r"^\s*#", line) or re.match(r"^\s*$", line):
continue
ra, dec = line.split()
ra_deg = s_ra2deg(ra)
dec_deg = s_dec2deg(dec)
print("%.8f %.8f" % (ra_deg, dec_deg))
if __name__ == "__main__":
main()
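# Editor's note: a small illustrative example of the conversion helpers above
# (input values chosen only for illustration):
#
#   >>> s_ra2deg("12h30m49.4s")    # ~ 187.705833 deg
#   >>> s_dec2deg("-12d23m28s")    # ~ -12.391111 deg
#   >>> ra2deg(12, 30, 49.4)       # same result as the string form above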
| mit | 6,794,721,827,466,608,000 | 19.385417 | 64 | 0.502299 | false |
jfantom/incubator-airflow | airflow/contrib/auth/backends/proxied_auth.py | 1 | 3179 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from sys import version_info
import flask_login
from flask_login import login_required, current_user, logout_user
from airflow import settings
from airflow import models
from airflow.utils.log.logging_mixin import LoggingMixin
import os
log = LoggingMixin().log
class AuthenticationError(Exception):
pass
class ProxiedUser(models.User):
def __init__(self, user):
self.user = user
def is_active(self):
'''Required by flask_login'''
return True
def is_authenticated(self):
'''Required by flask_login'''
return True
def is_anonymous(self):
'''Required by flask_login'''
return False
def get_id(self):
'''Returns the current user id as required by flask_login'''
return self.user.get_id()
def data_profiling(self):
'''Provides access to data profiling tools'''
return True
def is_superuser(self):
'''Access all the things'''
return True
class ProxiedAuth(object):
def __init__(self):
self.login_manager = flask_login.LoginManager()
def init_app(self,flask_app):
self.flask_app = flask_app
self.login_manager.init_app(self.flask_app)
#checks headers instead of cookies
self.login_manager.request_loader(self.load_request)
# this is needed to disable the anti forgery check
flask_app.config['WTF_CSRF_CHECK_DEFAULT'] = False
def load_request(self, request):
'''
Reads the header field that has already been verified on the
nginx side by google auth. Header field is specified by setting
the environment variable AIRFLOW_PROXIED_AUTH_HEADER or else
it's defaulted to X-Email.
'''
session = settings.Session()
header_field = os.getenv('AIRFLOW_PROXIED_AUTH_HEADER', 'X-Email')
user_email = request.headers.get(header_field)
# this shouldn't happen since nginx should take care of it!
if user_email is None:
raise AuthenticationError(
'Airflow failed to get fields from request header')
# insert user into database if doesn't exist
user = session.query(models.User).filter(
models.User.username == user_email).first()
if not user:
user = models.User(
username=user_email,
is_superuser=True)
session.merge(user)
session.commit()
session.close()
return ProxiedUser(user)
login_manager = ProxiedAuth()
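# Editor's note: hedged wiring sketch (an assumption -- exact option names depend on
# the Airflow version): a backend like this is typically enabled in airflow.cfg with
# something along the lines of
#
#   [webserver]
#   authenticate = True
#   auth_backend = airflow.contrib.auth.backends.proxied_auth
#
# with nginx (or another proxy) performing the actual authentication and injecting the
# X-Email header (or the header named by AIRFLOW_PROXIED_AUTH_HEADER).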
| apache-2.0 | 3,035,449,064,927,744,000 | 28.165138 | 74 | 0.650834 | false |
dvklopfenstein/PrincetonAlgorithms | py/AlgsSedgewickWayne/Bag.py | 1 | 2806 | """Bag class is a container for generic items."""
class Bag(object): # <Item> implements Iterable<Item>:
"""The Bag class represents a bag (or multiset) of generic items."""
class _Node(object): # private static class <Item>:
"""helper linked list class"""
def __init__(self, Item, Next):
self._item = Item
self._next = Next
def __init__(self):
self._first = None # beginning of bag
self._N = 0 # number of elements in bag
def isEmpty(self):
"""return true if this bag is empty; false otherwise."""
return self._first is None
def size(self):
"""Returns the number of items in this bag."""
return self._N
def add(self, item):
"""Adds the arg item to this bag."""
self._first = self._Node(item, self._first)
self._N += 1
# Returns an iterator that iterates over the items in the bag in arbitrary order.
def __iter__(self):
return self._ListIterator(self._first)
class _ListIterator(object): # <Item> implements Iterator<Item>:
"""an iterator, doesn't implement remove() since it's optional."""
def __init__(self, first):
self._current = first
def hasNext(self):
"""If we are not at the end of the Bag."""
return self._current is not None
def next(self):
"""Go to the next element."""
if not self.hasNext():
raise StopIteration
item = self._current._item
self._current = self._current._next
return item
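  # Editor's note: a minimal usage sketch of the Bag class above (illustrative only;
  # the iterator follows the Python 2 protocol, i.e. next() rather than __next__()):
  #
  #   bag = Bag()
  #   for word in "to be or not to - be".split():
  #       bag.add(word)
  #   bag.size()        # -> 7
  #   bag.isEmpty()     # -> False
  #   for item in bag:  # items come back in arbitrary (reverse insertion) order
  #       print(item)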
#************************************************************************
# Compilation: javac Bag.java
# Execution: java Bag < input.txt
#
# A generic bag or multiset, implemented using a singly-linked list.
#
# % more tobe.txt
# to be or not to - be - - that - - - is
#
# % java Bag < tobe.txt
# size of bag = 14
# is
# -
# -
# -
# that
# -
# -
# be
# -
# to
# not
# or
# be
# to
#
#************************************************************************/
# The Bag class represents a bag (or multiset) of generic items.
# It supports insertion and iterating over the
# items in arbitrary order.
#
# This implementation uses a singly-linked list with a static nested class Node.
# See {@link LinkedBag} for the version from the
# textbook that uses a non-static nested class.
# The <em>add</em>, <em>isEmpty</em>, and <em>size</em> operations
# take constant time. Iteration takes time proportional to the number of items.
#
# For additional documentation, see
# <a href="http://algs4.cs.princeton.edu/13stacks">Section 1.3</a> of
# <i>Algorithms, 4th Edition</i> by Robert Sedgewick and Kevin Wayne.
#
# @author Robert Sedgewick
# @author Kevin Wayne
# @converted to Python by DV Klopfenstein
# Copyright (C) 2002-2010, Robert Sedgewick and Kevin Wayne.
# Java last updated: Tue Mar 25 04:52:35 EDT 2014.
| gpl-2.0 | 8,381,885,796,278,084,000 | 27.343434 | 83 | 0.612616 | false |
Parcks/core | test/service/test_post_installation_facade.py | 1 | 3532 | """
Scriptable Packages Installer - Parcks
Copyright (C) 2017 JValck - Setarit
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
Setarit - parcks[at]setarit.com
"""
from __future__ import absolute_import
import json
from src.domain.log.logger import Logger
from src.domain.model.post_install.shell import Shell
from src.domain.model.post_install.shell_command import ShellCommand
from src.service.post_installation_facade import PostInstallationFacade
import unittest
try:
from unittest.mock import patch
except ImportError:
from mock import patch
class TestPostInstallationFacade(unittest.TestCase):
def setUp(self):
self.create_facade()
self.create_json_object()
self.create_json_array()
Logger.disable_all()
def tearDown(self):
Logger.enable()
def create_facade(self):
self.facade_under_test = PostInstallationFacade([
Shell("Dummy", [ShellCommand(["ls"])]),
Shell("Dummy", [ShellCommand(["ls"])])
])
def create_json_object(self):
Json = """\
{
"type":"shell",
"name":"Dummy",
"cmds":[
{
"do":["ls"],
"root":false
}
]
}
"""
self.json_object = json.loads(Json)
def create_json_array(self):
Json = """\
[
{
"type":"shell",
"name":"Dummy",
"cmds":[
{
"do":["ls"],
"root":false
}
]
},
{
"type":"shell",
"name":"Dummy",
"cmds":[
{
"do":["ls"],
"root":false
}
]
}
]
"""
self.json_list = json.loads(Json)
@patch.object(Shell, 'run')
def test_handle_post_installation_calls_run_on_PostInstallRunnable(self, mock):
self.facade_under_test.handle_post_installation()
self.assertEqual(2, mock.call_count)
def test_from_json_creates_facade_with_list_if_json_list(self):
facade = PostInstallationFacade.from_json(self.json_list)
self.assertTrue(isinstance(facade.post_install_runnables, list))
def test_from_json_creates_facade_with_list_if_json_object(self):
facade = PostInstallationFacade.from_json(self.json_object)
self.assertTrue(isinstance(facade.post_install_runnables, list))
@patch.object(Shell, 'run')
def test_handle_post_installalation_calls_run_on_PostInstallRunnable_if_one_object(self, mock):
facade = PostInstallationFacade.from_json(self.json_object)
facade.handle_post_installation()
self.assertEqual(1, mock.call_count) | gpl-2.0 | 3,556,626,939,037,611,000 | 30.544643 | 99 | 0.592582 | false |
M32Media/redash | redash/tasks/queries.py | 1 | 24668 | import json
import time
import logging
import signal
import redis
import re
from celery.result import AsyncResult
from celery.utils.log import get_task_logger
from redash import redis_connection, models, statsd_client, settings, utils
from redash.utils import gen_query_hash
from redash.worker import celery
from redash.query_runner import InterruptException
from .alerts import check_alerts_for_query
from redash.authentication.account import send_api_token
logger = get_task_logger(__name__)
@celery.task(name="redash.tasks.refresh_selected_queries")
def refresh_selected_queries(
months, publishers, global_queries=False, non_monthly_publisher_queries=False,
no_query_execution=False):
outdated_queries_count = 0
query_ids = []
all_dashboards = models.Dashboard.query.all()
dashboard_ids_names = [
(db.id, db.name) for db in all_dashboards
if (publishers == ['ALL'] or any(publisher == db.name.split(':')[0] for publisher in publishers))]
if global_queries:
dashboard_ids_names += [(db.id, db.name) for db in all_dashboards if db.name.split(':')[0] == 'Global']
jobs = []
# An example of Dashboard is Cogeco:unsold:stats or Cogeco:segment:profile_referrer
for db_id, db_name in dashboard_ids_names:
dashboard = models.Dashboard.get_by_id(db_id)
layout_list = [widget_id for row in json.loads(dashboard.layout) for widget_id in row]
widgets = [models.Widget.get_by_id(widget_id) for widget_id in layout_list if not widget_id < 0]
# Some widgets are None objects, and this makes the script fail
widgets = [widget for widget in widgets if widget]
for widget in widgets:
condition = widget.visualization != None and any(month in widget.visualization.name for month in months)
if non_monthly_publisher_queries:
# If the flag is True, add the queries where the pattern DDDDDD, with D being a digit, is not present in the query
# This adds everything that is not month dependent to the query list
# e.g. Cogeco:segment:profile_referrer:view_cogeco, Global:Intell:AdManager:view_last_6m
condition = condition or (not re.findall(r'_(\d{6})', widget.visualization.name))
if global_queries:
condition = condition or db_name.split(':')[0] == 'Global'
if condition:
query_id = widget.visualization.query_rel.id
query = models.Query.get_by_id(query_id)
# If no_query_execution flag is enabled, the query is not run and we only return the query text
if no_query_execution:
jobs.append({
'query_text': query.query_text,
'view_name': '{}.{}'.format(db_name, widget.visualization.name)
})
else:
jobs.append({
'task': enqueue_query(
query.query_text, query.data_source, query.user_id,
scheduled_query=query,
metadata={'Query ID': query.id, 'Username': 'Scheduled'}).to_dict(),
'query_text': query.query_text,
'view_name': '{}.{}'.format(db_name, widget.visualization.name)
})
query_ids.append(query.id)
outdated_queries_count += 1
logger.info(jobs)
status = redis_connection.hgetall('redash:status')
now = time.time()
redis_connection.hmset(
'redash:status', {
'outdated_queries_count': outdated_queries_count,
'last_refresh_at': now,
'query_ids': json.dumps(query_ids)})
statsd_client.gauge('manager.seconds_since_refresh', now - float(status.get('last_refresh_at', now)))
return jobs
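# Editor's note: illustrative call only (the publisher name and months are example
# values): this would refresh every widget query on Cogeco dashboards whose
# visualization name mentions 201805 or 201806, plus the Global dashboards.
#
#   refresh_selected_queries(['201805', '201806'], ['Cogeco'], global_queries=True)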
"""
Gets task associated with ids
"""
def get_tasks(ids):
tasks = {}
    # The same tracker lists are scanned regardless of whether `ids` is a single
    # string or a list of ids.
    lists = QueryTaskTracker.ALL_LISTS
#Each different list of query trackers
for _list in lists:
#Each different tracker inside a list
for tracker in QueryTaskTracker.all(_list):
for _id in ids:
#Check if id given is matching a tracker
if(_id == tracker.task_id):
qt = QueryTask(tracker.task_id)
data = qt.to_dict()
tasks[tracker.task_id] = {
"status" : data.get('status', None),
"query_id" : tracker.query_id,
"query_result_id" : data.get('query_result_id', None),
"error": data.get('error', None)
}
return tasks;
def _job_lock_id(query_hash, data_source_id):
return "query_hash_job:%s:%s" % (data_source_id, query_hash)
def _unlock(query_hash, data_source_id):
redis_connection.delete(_job_lock_id(query_hash, data_source_id))
# TODO:
# There is some duplication between this class and QueryTask, but I wanted to implement the monitoring features without
# many changes to the existing code, so I ended up creating another object. In the future we can merge them.
class QueryTaskTracker(object):
DONE_LIST = 'query_task_trackers:done'
WAITING_LIST = 'query_task_trackers:waiting'
IN_PROGRESS_LIST = 'query_task_trackers:in_progress'
ALL_LISTS = (DONE_LIST, WAITING_LIST, IN_PROGRESS_LIST)
def __init__(self, data):
self.data = data
@classmethod
def create(cls, task_id, state, query_hash, data_source_id, scheduled, metadata):
data = dict(task_id=task_id, state=state,
query_hash=query_hash, data_source_id=data_source_id,
scheduled=scheduled,
username=metadata.get('Username', 'unknown'),
query_id=metadata.get('Query ID', 'unknown'),
retries=0,
scheduled_retries=0,
created_at=time.time(),
started_at=None,
run_time=None)
return cls(data)
def save(self, connection=None):
if connection is None:
connection = redis_connection
self.data['updated_at'] = time.time()
key_name = self._key_name(self.data['task_id'])
connection.set(key_name, utils.json_dumps(self.data))
connection.zadd(self._get_list(), time.time(), key_name)
for l in self.ALL_LISTS:
if l != self._get_list():
connection.zrem(l, key_name)
    # TODO: this is not thread/concurrency safe. In the current code this is not an issue, but better to fix this.
def update(self, **kwargs):
self.data.update(kwargs)
self.save()
@staticmethod
def _key_name(task_id):
return 'query_task_tracker:{}'.format(task_id)
def _get_list(self):
if self.state in ('finished', 'failed', 'cancelled'):
return self.DONE_LIST
if self.state in ('created'):
return self.WAITING_LIST
return self.IN_PROGRESS_LIST
@classmethod
def get_by_task_id(cls, task_id, connection=None):
if connection is None:
connection = redis_connection
key_name = cls._key_name(task_id)
data = connection.get(key_name)
return cls.create_from_data(data)
@classmethod
def create_from_data(cls, data):
if data:
data = json.loads(data)
return cls(data)
return None
@classmethod
def all(cls, list_name, offset=0, limit=-1):
if limit != -1:
limit -= 1
if offset != 0:
offset -= 1
ids = redis_connection.zrevrange(list_name, offset, limit)
pipe = redis_connection.pipeline()
for id in ids:
pipe.get(id)
tasks = [cls.create_from_data(data) for data in pipe.execute()]
return tasks
@classmethod
def prune(cls, list_name, keep_count):
count = redis_connection.zcard(list_name)
if count <= keep_count:
return 0
remove_count = count - keep_count
keys = redis_connection.zrange(list_name, 0, remove_count - 1)
redis_connection.delete(*keys)
redis_connection.zremrangebyrank(list_name, 0, remove_count - 1)
return remove_count
def __getattr__(self, item):
return self.data[item]
def __contains__(self, item):
return item in self.data
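# Editor's note: a minimal, hypothetical sketch of the tracker lifecycle implemented
# above -- create a tracker when a task is enqueued, move it between the Redis lists
# by updating its state, and read it back later by task id:
#
#   tracker = QueryTaskTracker.create(task_id, 'created', query_hash,
#                                     data_source_id, scheduled=False, metadata={})
#   tracker.save()                      # lands on WAITING_LIST
#   tracker.update(state='started')     # moves to IN_PROGRESS_LIST
#   tracker.update(state='finished')    # moves to DONE_LIST
#   QueryTaskTracker.get_by_task_id(task_id)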
class QueryTask(object):
# TODO: this is mapping to the old Job class statuses. Need to update the client side and remove this
STATUSES = {
'PENDING': 1,
'STARTED': 2,
'SUCCESS': 3,
'FAILURE': 4,
'REVOKED': 4
}
def __init__(self, job_id=None, async_result=None):
if async_result:
self._async_result = async_result
else:
self._async_result = AsyncResult(job_id, app=celery)
@property
def id(self):
return self._async_result.id
def to_dict(self):
task_info = self._async_result._get_task_meta()
result, task_status = task_info['result'], task_info['status']
if task_status == 'STARTED':
updated_at = result.get('start_time', 0)
else:
updated_at = 0
status = self.STATUSES[task_status]
if isinstance(result, Exception):
error = result.message
status = 4
elif task_status == 'REVOKED':
error = 'Query execution cancelled.'
else:
error = ''
if task_status == 'SUCCESS' and not error:
query_result_id = result
else:
query_result_id = None
return {
'id': self._async_result.id,
'updated_at': updated_at,
'status': status,
'error': error,
'query_result_id': query_result_id,
}
@property
def is_cancelled(self):
return self._async_result.status == 'REVOKED'
@property
def celery_status(self):
return self._async_result.status
def ready(self):
return self._async_result.ready()
def cancel(self):
return self._async_result.revoke(terminate=True, signal='SIGINT')
def enqueue_query(query, data_source, user_id, scheduled_query=None, metadata={}):
query_hash = gen_query_hash(query)
logging.info("Inserting job for %s with metadata=%s", query_hash, metadata)
try_count = 0
job = None
while try_count < 5:
try_count += 1
pipe = redis_connection.pipeline()
try:
pipe.watch(_job_lock_id(query_hash, data_source.id))
job_id = pipe.get(_job_lock_id(query_hash, data_source.id))
if job_id:
logging.info("[%s] Found existing job: %s", query_hash, job_id)
job = QueryTask(job_id=job_id)
if job.ready():
logging.info("[%s] job found is ready (%s), removing lock", query_hash, job.celery_status)
redis_connection.delete(_job_lock_id(query_hash, data_source.id))
job = None
if not job:
pipe.multi()
if scheduled_query:
queue_name = data_source.scheduled_queue_name
scheduled_query_id = scheduled_query.id
else:
queue_name = data_source.queue_name
scheduled_query_id = None
result = execute_query.apply_async(args=(
query, data_source.id, metadata, user_id,
scheduled_query_id),
queue=queue_name)
job = QueryTask(async_result=result)
tracker = QueryTaskTracker.create(
result.id, 'created', query_hash, data_source.id,
scheduled_query is not None, metadata)
tracker.save(connection=pipe)
logging.info("[%s] Created new job: %s", query_hash, job.id)
pipe.set(_job_lock_id(query_hash, data_source.id), job.id, settings.JOB_EXPIRY_TIME)
pipe.execute()
break
except redis.WatchError:
continue
if not job:
logging.error("[Manager][%s] Failed adding job for query.", query_hash)
return job
@celery.task(name="redash.tasks.refresh_queries_http")
def refresh_queries_http():
logger.info("Refreshing queries...")
jobs = []
for query in models.Query.every_queries():
logger.info("Updating Query {} ...".format(query.id))
if query.data_source.paused:
logger.info("Skipping refresh of Query 1 {} because datasource {} is paused because {}"
.format(query.id,
query.data_source.name,
query.data_source.pause_reason
))
else:
jobs.append(enqueue_query(query.query_text, query.data_source, query.user_id,
scheduled_query=query,
metadata={'Query ID': query.id, 'Username': 'Scheduled'}))
""" LINK BETWEEN TRACKER AND ACTUAL TASK
for job in jobs:
print("JOBS : {}".format(job.to_dict().get('id', None)))
lists = QueryTaskTracker.ALL_LISTS
for _list in lists:
for tracker in QueryTaskTracker.all(_list):
print("TRACKER : {}".format(tracker.data.get('task_id', None)))
"""
return jobs
@celery.task(name="redash.tasks.refresh_queries")
def refresh_queries():
outdated_queries_count = 0
query_ids = []
with statsd_client.timer('manager.outdated_queries_lookup'):
for query in models.Query.outdated_queries():
if settings.FEATURE_DISABLE_REFRESH_QUERIES:
logging.info("Disabled refresh queries.")
elif query.data_source.paused:
logging.info("Skipping refresh of %s because datasource - %s is paused (%s).", query.id, query.data_source.name, query.data_source.pause_reason)
else:
enqueue_query(query.query_text, query.data_source, query.user_id,
scheduled_query=query,
metadata={'Query ID': query.id, 'Username': 'Scheduled'})
query_ids.append(query.id)
outdated_queries_count += 1
statsd_client.gauge('manager.outdated_queries', outdated_queries_count)
logger.info("Done refreshing queries. Found %d outdated queries: %s" % (outdated_queries_count, query_ids))
status = redis_connection.hgetall('redash:status')
now = time.time()
redis_connection.hmset('redash:status', {
'outdated_queries_count': outdated_queries_count,
'last_refresh_at': now,
'query_ids': json.dumps(query_ids)
})
statsd_client.gauge('manager.seconds_since_refresh', now - float(status.get('last_refresh_at', now)))
@celery.task(name="redash.tasks.cleanup_tasks")
def cleanup_tasks():
in_progress = QueryTaskTracker.all(QueryTaskTracker.IN_PROGRESS_LIST)
for tracker in in_progress:
result = AsyncResult(tracker.task_id)
# If the AsyncResult status is PENDING it means there is no celery task object for this tracker, and we can
# mark it as "dead":
if result.status == 'PENDING':
logging.info("In progress tracker for %s is no longer enqueued, cancelling (task: %s).",
tracker.query_hash, tracker.task_id)
_unlock(tracker.query_hash, tracker.data_source_id)
tracker.update(state='cancelled')
if result.ready():
logging.info("in progress tracker %s finished", tracker.query_hash)
_unlock(tracker.query_hash, tracker.data_source_id)
tracker.update(state='finished')
waiting = QueryTaskTracker.all(QueryTaskTracker.WAITING_LIST)
for tracker in waiting:
result = AsyncResult(tracker.task_id)
if result.ready():
logging.info("waiting tracker %s finished", tracker.query_hash)
_unlock(tracker.query_hash, tracker.data_source_id)
tracker.update(state='finished')
# Maintain constant size of the finished tasks list:
QueryTaskTracker.prune(QueryTaskTracker.DONE_LIST, 1000)
@celery.task(name="redash.tasks.cleanup_query_results")
def cleanup_query_results():
"""
Job to cleanup unused query results -- such that no query links to them anymore, and older than
settings.QUERY_RESULTS_MAX_AGE (a week by default, so it's less likely to be open in someone's browser and be used).
Each time the job deletes only settings.QUERY_RESULTS_CLEANUP_COUNT (100 by default) query results so it won't choke
the database in case of many such results.
"""
logging.info("Running query results clean up (removing maximum of %d unused results, that are %d days old or more)",
settings.QUERY_RESULTS_CLEANUP_COUNT, settings.QUERY_RESULTS_CLEANUP_MAX_AGE)
unused_query_results = models.QueryResult.unused(settings.QUERY_RESULTS_CLEANUP_MAX_AGE).limit(settings.QUERY_RESULTS_CLEANUP_COUNT)
deleted_count = models.QueryResult.query.filter(
models.QueryResult.id.in_(unused_query_results.subquery())
).delete(synchronize_session=False)
models.db.session.commit()
logger.info("Deleted %d unused query results.", deleted_count)
@celery.task(name="redash.tasks.refresh_schemas")
def refresh_schemas():
"""
Refreshes the data sources schemas.
"""
blacklist = [int(ds_id) for ds_id in redis_connection.smembers('data_sources:schema:blacklist') if ds_id]
global_start_time = time.time()
logger.info(u"task=refresh_schemas state=start")
for ds in models.DataSource.query:
if ds.paused:
logger.info(u"task=refresh_schema state=skip ds_id=%s reason=paused(%s)", ds.id, ds.pause_reason)
elif ds.id in blacklist:
logger.info(u"task=refresh_schema state=skip ds_id=%s reason=blacklist", ds.id)
else:
logger.info(u"task=refresh_schema state=start ds_id=%s", ds.id)
start_time = time.time()
try:
ds.get_schema(refresh=True)
logger.info(u"task=refresh_schema state=finished ds_id=%s runtime=%.2f", ds.id, time.time() - start_time)
except Exception:
logger.exception(u"Failed refreshing schema for the data source: %s", ds.name)
logger.info(u"task=refresh_schema state=failed ds_id=%s runtime=%.2f", ds.id, time.time() - start_time)
logger.info(u"task=refresh_schemas state=finish total_runtime=%.2f", time.time() - global_start_time)
def signal_handler(*args):
raise InterruptException
class QueryExecutionError(Exception):
pass
# We could have created this as a celery.Task derived class, and act as the task itself. But this might result in weird
# issues, as the task class is created once per process, so we decided to have a plain object instead.
class QueryExecutor(object):
def __init__(self, task, query, data_source_id, user_id, metadata,
scheduled_query):
self.task = task
self.query = query
self.data_source_id = data_source_id
self.metadata = metadata
self.data_source = self._load_data_source()
if user_id is not None:
self.user = models.User.query.get(user_id)
else:
self.user = None
self.query_hash = gen_query_hash(self.query)
self.scheduled_query = scheduled_query
# Load existing tracker or create a new one if the job was created before code update:
self.tracker = QueryTaskTracker.get_by_task_id(task.request.id) or QueryTaskTracker.create(task.request.id,
'created',
self.query_hash,
self.data_source_id,
False, metadata)
def run(self):
signal.signal(signal.SIGINT, signal_handler)
self.tracker.update(started_at=time.time(), state='started')
logger.debug("Executing query:\n%s", self.query)
self._log_progress('executing_query')
query_runner = self.data_source.query_runner
annotated_query = self._annotate_query(query_runner)
try:
data, error = query_runner.run_query(annotated_query, self.user)
except Exception as e:
error = unicode(e)
data = None
logging.warning('Unexpected error while running query:', exc_info=1)
run_time = time.time() - self.tracker.started_at
self.tracker.update(error=error, run_time=run_time, state='saving_results')
logger.info(u"task=execute_query query_hash=%s data_length=%s error=[%s]", self.query_hash, data and len(data), error)
_unlock(self.query_hash, self.data_source.id)
if error:
self.tracker.update(state='failed')
result = QueryExecutionError(error)
if self.scheduled_query:
self.scheduled_query.schedule_failures += 1
models.db.session.add(self.scheduled_query)
else:
if (self.scheduled_query and
self.scheduled_query.schedule_failures > 0):
self.scheduled_query.schedule_failures = 0
models.db.session.add(self.scheduled_query)
query_result, updated_query_ids = models.QueryResult.store_result(
self.data_source.org, self.data_source,
self.query_hash, self.query, data,
run_time, utils.utcnow())
self._log_progress('checking_alerts')
for query_id in updated_query_ids:
check_alerts_for_query.delay(query_id)
self._log_progress('finished')
result = query_result.id
models.db.session.commit()
return result
def _annotate_query(self, query_runner):
if query_runner.annotate_query():
self.metadata['Task ID'] = self.task.request.id
self.metadata['Query Hash'] = self.query_hash
self.metadata['Queue'] = self.task.request.delivery_info['routing_key']
annotation = u", ".join([u"{}: {}".format(k, v) for k, v in self.metadata.iteritems()])
annotated_query = u"/* {} */ {}".format(annotation, self.query)
else:
annotated_query = self.query
return annotated_query
def _log_progress(self, state):
logger.info(
u"task=execute_query state=%s query_hash=%s type=%s ds_id=%d "
"task_id=%s queue=%s query_id=%s username=%s",
state, self.query_hash, self.data_source.type, self.data_source.id,
self.task.request.id,
self.task.request.delivery_info['routing_key'],
self.metadata.get('Query ID', 'unknown'),
self.metadata.get('Username', 'unknown'))
self.tracker.update(state=state)
def _load_data_source(self):
logger.info("task=execute_query state=load_ds ds_id=%d", self.data_source_id)
return models.DataSource.query.get(self.data_source_id)
# user_id is added last as a keyword argument for backward compatibility -- to support executing previously submitted
# jobs before the upgrade to this version.
@celery.task(name="redash.tasks.execute_query", bind=True, track_started=True)
def execute_query(self, query, data_source_id, metadata, user_id=None,
scheduled_query_id=None):
if scheduled_query_id is not None:
scheduled_query = models.Query.query.get(scheduled_query_id)
else:
scheduled_query = None
return QueryExecutor(self, query, data_source_id, user_id, metadata,
scheduled_query).run()
#Update tokens for API data access
@celery.task(name="redash.tasks.refresh_query_tokens")
def refresh_query_tokens():
logger.warning("Refreshing User Tokens")
#Refresh Tokens
models.User.refresh_tokens()
#Send Emails
users = models.User.get_all()
for u in users:
user = u.to_dict()
send_api_token(user)
| bsd-2-clause | -672,060,604,044,315,900 | 36.603659 | 160 | 0.59186 | false |
dios-game/dios-cocos | src/oslibs/cocos/cocos-src/tools/cocos2d-console/plugins/plugin_jscompile/__init__.py | 1 | 12716 | #!/usr/bin/python
# ----------------------------------------------------------------------------
# cocos "jscompile" plugin
#
# Copyright 2013 (C) Intel
#
# License: MIT
# ----------------------------------------------------------------------------
'''
"jscompile" plugin for cocos command line tool
'''
__docformat__ = 'restructuredtext'
import sys
import subprocess
import os
import json
import inspect
import platform
import cocos
from MultiLanguage import MultiLanguage
class CCPluginJSCompile(cocos.CCPlugin):
"""
compiles (encodes) and minifies JS files
"""
@staticmethod
def plugin_name():
return "jscompile"
@staticmethod
def brief_description():
# returns a short description of this module
return MultiLanguage.get_string('JSCOMPILE_BRIEF')
    # This is not the constructor, just an initializer
def init(self, options, workingdir):
"""
Arguments:
- `options`:
"""
self._current_src_dir = None
self._src_dir_arr = self.normalize_path_in_list(options.src_dir_arr)
self._dst_dir = options.dst_dir
self._use_closure_compiler = options.use_closure_compiler
self._verbose = options.verbose
self._config = None
self._workingdir = workingdir
self._closure_params = ''
if options.compiler_config != None:
f = open(options.compiler_config)
self._config = json.load(f)
f.close()
self._pre_order = self._config["pre_order"]
self.normalize_path_in_list(self._pre_order)
self._post_order = self._config["post_order"]
self.normalize_path_in_list(self._post_order)
self._skip = self._config["skip"]
self.normalize_path_in_list(self._skip)
self._closure_params = self._config["closure_params"]
if options.closure_params is not None:
self._closure_params = options.closure_params
self._js_files = {}
self._compressed_js_path = os.path.join(self._dst_dir, options.compressed_filename)
self._compressed_jsc_path = os.path.join(self._dst_dir, options.compressed_filename+"c")
def normalize_path_in_list(self, list):
for i in list:
tmp = os.path.normpath(i)
list[list.index(i)] = tmp
return list
def get_relative_path(self, jsfile):
try:
# print "current src dir: "+self._current_src_dir)
pos = jsfile.index(self._current_src_dir)
if pos != 0:
raise cocos.CCPluginError(MultiLanguage.get_string('LUACOMPILE_ERROR_SRCDIR_NAME_NOT_FOUND'),
cocos.CCPluginError.ERROR_WRONG_ARGS)
# print "origin js path: "+ jsfile
# print "relative path: "+jsfile[len(self._current_src_dir)+1:]
return jsfile[len(self._current_src_dir)+1:]
except ValueError:
raise cocos.CCPluginError(MultiLanguage.get_string('LUACOMPILE_ERROR_SRCDIR_NAME_NOT_FOUND'),
cocos.CCPluginError.ERROR_WRONG_ARGS)
def get_output_file_path(self, jsfile):
"""
Gets output file path by source js file
"""
# create folder for generated file
jsc_filepath = ""
relative_path = self.get_relative_path(jsfile)+"c"
jsc_filepath = os.path.join(self._dst_dir, relative_path)
dst_rootpath = os.path.split(jsc_filepath)[0]
try:
# print "creating dir (%s)" % (dst_rootpath)
os.makedirs(dst_rootpath)
except OSError:
if os.path.exists(dst_rootpath) == False:
# There was an error on creation, so make sure we know about it
raise cocos.CCPluginError(MultiLanguage.get_string('LUACOMPILE_ERROR_MKDIR_FAILED_FMT', dst_rootpath),
cocos.CCPluginError.ERROR_PATH_NOT_FOUND)
# print "return jsc path: "+jsc_filepath
return jsc_filepath
def compile_js(self, jsfile, output_file):
"""
Compiles js file
"""
cocos.Logging.debug(MultiLanguage.get_string('JSCOMPILE_DEBUG_COMPILE_FILE_FMT', jsfile))
jsbcc_exe_path = ""
if(cocos.os_is_linux()):
if(platform.architecture()[0] == "32bit"):
jsbcc_exe_path = os.path.join(self._workingdir, "bin", "linux", "jsbcc_x86")
else:
jsbcc_exe_path = os.path.join(self._workingdir, "bin", "linux", "jsbcc_x64")
else:
jsbcc_exe_path = os.path.join(self._workingdir, "bin", "jsbcc")
cmd_str = "\"%s\" \"%s\" \"%s\"" % (jsbcc_exe_path, jsfile, output_file)
self._run_cmd(cmd_str)
def compress_js(self):
"""
Compress all js files into one big file.
"""
jsfiles = ""
for src_dir in self._src_dir_arr:
# print "\n----------src:"+src_dir
jsfiles = jsfiles + " --js ".join(self._js_files[src_dir]) + " "
compiler_jar_path = os.path.join(self._workingdir, "bin", "compiler.jar")
command = "java -jar \"%s\" %s --js %s --js_output_file \"%s\"" % (compiler_jar_path, self._closure_params, jsfiles, self._compressed_js_path)
self._run_cmd(command)
def deep_iterate_dir(self, rootDir):
for lists in os.listdir(rootDir):
path = os.path.join(rootDir, lists)
if os.path.isdir(path):
self.deep_iterate_dir(path)
elif os.path.isfile(path):
if os.path.splitext(path)[1] == ".js":
self._js_files[self._current_src_dir].append(path)
def index_in_list(self, jsfile, l):
"""
Arguments:
- `self`:
- `jsfile`:
- `l`:
"""
index = -1
for el in l:
if jsfile.rfind(el) != -1:
# print "index:"+str(index+1)+", el:"+el
return index+1
index = index + 1
return -1
def js_filename_pre_order_compare(self, a, b):
return self._js_filename_compare(a, b, self._pre_order, 1)
def js_filename_post_order_compare(self, a, b):
return self._js_filename_compare(a, b, self._post_order, -1)
def _js_filename_compare(self, a, b, files, delta):
index_a = self.index_in_list(a, files)
index_b = self.index_in_list(b, files)
is_a_in_list = index_a != -1
is_b_in_list = index_b != -1
if is_a_in_list and not is_b_in_list:
return -1 * delta
elif not is_a_in_list and is_b_in_list:
return 1 * delta
elif is_a_in_list and is_b_in_list:
if index_a > index_b:
return 1
elif index_a < index_b:
return -1
else:
return 0
else:
return 0
def reorder_js_files(self):
if self._config == None:
return
# print "before:"+str(self._js_files)
for src_dir in self._js_files:
# Remove file in exclude list
need_remove_arr = []
for jsfile in self._js_files[src_dir]:
for exclude_file in self._skip:
if jsfile.rfind(exclude_file) != -1:
# print "remove:" + jsfile
need_remove_arr.append(jsfile)
for need_remove in need_remove_arr:
self._js_files[src_dir].remove(need_remove)
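            # files named in _pre_order float to the front, files named in
            # _post_order sink to the back; the sorts are stable, so everything
            # else keeps its original relative order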
self._js_files[src_dir].sort(cmp=self.js_filename_pre_order_compare)
self._js_files[src_dir].sort(cmp=self.js_filename_post_order_compare)
# print '-------------------'
# print "after:" + str(self._js_files)
def handle_all_js_files(self):
"""
Arguments:
- `self`:
"""
        if self._use_closure_compiler:
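            # bundle mode: compress everything with the Closure Compiler first,
            # then byte-compile the single bundle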
cocos.Logging.info(MultiLanguage.get_string('JSCOMPILE_INFO_COMPRESS_TIP'))
self.compress_js()
self.compile_js(self._compressed_js_path, self._compressed_jsc_path)
# remove tmp compressed file
os.remove(self._compressed_js_path)
else:
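            # per-file mode: byte-compile each js file into a matching .jsc
            # file under the destination directory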
cocos.Logging.info(MultiLanguage.get_string('JSCOMPILE_INFO_COMPILE_TO_BYTECODE'))
for src_dir in self._src_dir_arr:
for jsfile in self._js_files[src_dir]:
self._current_src_dir = src_dir
self.compile_js(jsfile, self.get_output_file_path(jsfile))
# will be called from the cocos.py script
def run(self, argv, dependencies):
"""
"""
self.parse_args(argv)
# create output directory
try:
os.makedirs(self._dst_dir)
except OSError:
            if not os.path.exists(self._dst_dir):
raise cocos.CCPluginError(MultiLanguage.get_string('LUACOMPILE_ERROR_MKDIR_FAILED_FMT', self._dst_dir),
cocos.CCPluginError.ERROR_PATH_NOT_FOUND)
# download the bin folder
jsbcc_exe_path = os.path.join(self._workingdir, "bin", "jsbcc")
if not os.path.exists(jsbcc_exe_path):
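            # the prebuilt jsbcc binary is fetched on demand via download-bin.py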
download_cmd_path = os.path.join(self._workingdir, os.pardir, os.pardir)
subprocess.call("python %s -f -r no" % (os.path.join(download_cmd_path, "download-bin.py")), shell=True, cwd=download_cmd_path)
# deep iterate the src directory
for src_dir in self._src_dir_arr:
self._current_src_dir = src_dir
self._js_files[self._current_src_dir] = []
self.deep_iterate_dir(src_dir)
self.reorder_js_files()
self.handle_all_js_files()
cocos.Logging.info(MultiLanguage.get_string('LUACOMPILE_INFO_FINISHED'))
def parse_args(self, argv):
"""
"""
from argparse import ArgumentParser
parser = ArgumentParser(prog="cocos %s" % self.__class__.plugin_name(),
description=self.__class__.brief_description())
parser.add_argument("-v", "--verbose",
action="store_true",
dest="verbose",
help=MultiLanguage.get_string('LUACOMPILE_ARG_VERBOSE'))
parser.add_argument("-s", "--src",
action="append", dest="src_dir_arr",
help=MultiLanguage.get_string('JSCOMPILE_ARG_SRC'))
parser.add_argument("-d", "--dst",
action="store", dest="dst_dir",
help=MultiLanguage.get_string('JSCOMPILE_ARG_DST'))
parser.add_argument("-c", "--use_closure_compiler",
action="store_true", dest="use_closure_compiler", default=False,
help=MultiLanguage.get_string('JSCOMPILE_ARG_CLOSURE'))
parser.add_argument("-o", "--output_compressed_filename",
action="store", dest="compressed_filename", default="game.min.js",
help=MultiLanguage.get_string('JSCOMPILE_ARG_OUT_FILE_NAME'))
parser.add_argument("-j", "--compiler_config",
action="store", dest="compiler_config",
help=MultiLanguage.get_string('JSCOMPILE_ARG_JSON_FILE'))
parser.add_argument("-m", "--closure_params",
action="store", dest="closure_params",
help=MultiLanguage.get_string('JSCOMPILE_ARG_EXTRA_PARAM'))
options = parser.parse_args(argv)
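        # basic validation of the required arguments before init()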
        if options.src_dir_arr is None:
raise cocos.CCPluginError(MultiLanguage.get_string('JSCOMPILE_ERROR_SRC_NOT_SPECIFIED'),
cocos.CCPluginError.ERROR_WRONG_ARGS)
        elif options.dst_dir is None:
raise cocos.CCPluginError(MultiLanguage.get_string('LUACOMPILE_ERROR_DST_NOT_SPECIFIED'),
cocos.CCPluginError.ERROR_WRONG_ARGS)
else:
for src_dir in options.src_dir_arr:
                if not os.path.exists(src_dir):
raise cocos.CCPluginError(MultiLanguage.get_string('LUACOMPILE_ERROR_DIR_NOT_EXISTED_FMT',
(src_dir)),
cocos.CCPluginError.ERROR_PATH_NOT_FOUND)
# script directory
if getattr(sys, 'frozen', None):
workingdir = os.path.realpath(os.path.dirname(sys.executable))
else:
workingdir = os.path.realpath(os.path.dirname(__file__))
self.init(options, workingdir)
| mit | 2,858,667,076,001,205,000 | 37.88685 | 150 | 0.542781 | false |