code | repo_name | path | language | license | size
stringlengths 3–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses (1 value) | stringclasses (15 values) | int64 3–1.05M
---|---|---|---|---|---|
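Each record below pairs a file's raw source (`code`) with its provenance: `repo_name`, `path`, `language`, `license`, and `size` in bytes. A minimal sketch of consuming such a table with the Hugging Face `datasets` library follows; the dataset id and split name are placeholders, not taken from this dump.

```python
# Minimal sketch, assuming the table is published as a Hugging Face dataset.
# "user/python-code-dump" is a placeholder id, not the real dataset name.
from datasets import load_dataset

rows = load_dataset("user/python-code-dump", split="train")
for row in rows:
    # Every row pairs one source file with its repository metadata.
    print(row["repo_name"], row["path"], row["license"], row["size"])
    source = row["code"]  # full file contents as a single string
    break  # only show the first record
```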
from awsses import AWSSES
from sendgrid import SendGrid
from mailgun import MailGun
from postageapp import PostageApp
from postmarkapp import PostmarkApp
from critsend import CritSend
| adamgilman/ems-costing | vendors/__init__.py | Python | mit | 184 |
from zeit.cms.i18n import MessageFactory as _
from zeit.content.image.interfaces import INFOGRAPHIC_DISPLAY_TYPE
from zope.browserpage import ViewPageTemplateFile
from zope.cachedescriptors.property import Lazy as cachedproperty
import PIL.Image
import pkg_resources
import z3c.conditionalviews
import zeit.cms.browser.interfaces
import zeit.cms.browser.listing
import zeit.cms.browser.objectdetails
import zeit.cms.browser.view
import zeit.cms.content.interfaces
import zeit.cms.repository.interfaces
import zeit.cms.settings.interfaces
import zeit.connector.interfaces
import zeit.content.image.imagereference
import zeit.content.image.interfaces
import zope.component
import zope.file.download
import zope.interface
import zope.publisher.interfaces
def get_img_tag(image, request, view=None):
"""Render <img.../>-tag."""
url = zope.component.getMultiAdapter(
(image, request), name='absolute_url')
width, height = image.getImageSize()
if view:
view = '/' + view
else:
view = ''
return (
'<img src="%s%s" alt="" height="%s" width="%s" border="0" />' % (
url, view, height, width))
class Image(zope.file.download.Display):
def __call__(self):
self.request.response.setHeader('Content-Type', self.context.mimeType)
return self.stream_image()
@z3c.conditionalviews.ConditionalView
def stream_image(self):
return super(Image, self).__call__()
class ImageView(zeit.cms.browser.view.Base):
title = _('View image')
@cachedproperty
def metadata(self):
return zeit.content.image.interfaces.IImageMetadata(self.context)
def tag(self):
return get_img_tag(self.context, self.request, view='@@raw')
@property
def width(self):
return self.context.getImageSize()[0]
@property
def height(self):
return self.context.getImageSize()[1]
@property
def copyright(self):
if not self.metadata.copyright:
return
copyright, company, company_text, url, nofollow = (
self.metadata.copyright)
return dict(
copyright=copyright,
company=company,
company_text=company_text,
url=url,
nofollow=nofollow)
class ReferenceDetailsHeading(zeit.cms.browser.objectdetails.Details):
template = ViewPageTemplateFile(pkg_resources.resource_filename(
'zeit.cms.browser', 'object-details-heading.pt'))
def __init__(self, context, request):
super(ReferenceDetailsHeading, self).__init__(context.target, request)
def __call__(self):
return self.template()
class ReferenceDetailsBody(ImageView):
@cachedproperty
def metadata(self):
return zeit.content.image.interfaces.IImageMetadata(
self.context.target)
@cachedproperty
def is_infographic(self):
if zeit.content.image.interfaces.IImageGroup.providedBy(
self.context.target):
return self.context.target.display_type == INFOGRAPHIC_DISPLAY_TYPE
return False
def tag(self):
return get_img_tag(self.context.target, self.request, view='@@raw')
class Scaled(object):
filter = PIL.Image.ANTIALIAS
def __call__(self):
return self.scaled()
def tag(self):
return get_img_tag(self.scaled.context, self.request)
@cachedproperty
def scaled(self):
try:
image = zeit.content.image.interfaces.ITransform(self.context)
except TypeError:
image = self.context
else:
image = image.thumbnail(self.width, self.height, self.filter)
image.__name__ = self.__name__
image_view = zope.component.getMultiAdapter(
(image, self.request), name='raw')
return image_view
class Preview(Scaled):
width = 500
height = 500
class MetadataPreview(Scaled):
width = 500
height = 90
class Thumbnail(Scaled):
width = height = 100
class ImageListRepresentation(
zeit.cms.browser.listing.BaseListRepresentation):
"""Adapter for listing article content resources"""
zope.interface.implements(zeit.cms.browser.interfaces.IListRepresentation)
zope.component.adapts(zeit.content.image.interfaces.IImage,
zope.publisher.interfaces.IPublicationRequest)
author = ressort = page = u''
@property
def title(self):
try:
title = zeit.content.image.interfaces.IImageMetadata(
self.context).title
except Exception:
title = None
if not title:
title = self.context.__name__
return title
@property
def volume(self):
return None
@property
def year(self):
return None
@property
def searchableText(self):
# XXX
return ''
@zope.component.adapter(
zeit.cms.repository.interfaces.IFolder,
zeit.content.image.interfaces.IImageSource)
@zope.interface.implementer(
zeit.cms.browser.interfaces.IDefaultBrowsingLocation)
def imagefolder_browse_location(context, source):
"""The image browse location is deduced from the current folder, i.e.
for /online/2007/32 it is /bilder/2007/32
"""
unique_id = context.uniqueId
repository = zope.component.getUtility(
zeit.cms.repository.interfaces.IRepository)
base = image_folder = None
try:
obj_in_repository = repository.getContent(unique_id)
except KeyError:
pass
else:
# Try to get a base folder
while base is None:
properties = zeit.connector.interfaces.IWebDAVProperties(
obj_in_repository, None)
if properties is None:
break
base = properties.get(('base-folder',
'http://namespaces.zeit.de/CMS/Image'))
obj_in_repository = obj_in_repository.__parent__
if base is not None:
try:
base_obj = repository.getContent(base)
except KeyError:
pass
else:
# Get from the base folder to the year/volume folder
settings = zeit.cms.settings.interfaces.IGlobalSettings(context)
try:
image_folder = base_obj[
'%04d' % settings.default_year][
'%02d' % settings.default_volume]
except KeyError:
pass
if image_folder is None:
all_content_source = zope.component.getUtility(
zeit.cms.content.interfaces.ICMSContentSource, name='all-types')
image_folder = zope.component.queryMultiAdapter(
(context, all_content_source),
zeit.cms.browser.interfaces.IDefaultBrowsingLocation)
return image_folder
@zope.component.adapter(
zeit.content.image.imagereference.ImagesAdapter,
zeit.content.image.interfaces.IImageSource)
@zope.interface.implementer(
zeit.cms.browser.interfaces.IDefaultBrowsingLocation)
def imageadapter_browse_location(context, source):
return zope.component.queryMultiAdapter(
(context.__parent__, source),
zeit.cms.browser.interfaces.IDefaultBrowsingLocation)
class MetadataPreviewHTML(object):
@cachedproperty
def metadata(self):
return zeit.content.image.interfaces.IImageMetadata(self.context)
| ZeitOnline/zeit.content.image | src/zeit/content/image/browser/image.py | Python | bsd-3-clause | 7,284 |
from setuptools import setup, find_packages
import os
import codecs
here = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
# intentionally *not* adding an encoding option to open
return codecs.open(os.path.join(here, *parts), 'r').read()
install_requires = [
"click==6.6",
"jinja2==2.8"
]
setup(
name='logrotated',
version="0.0.3",
url='https://github.com/nir0s/logrotated',
author='nir0s',
author_email='[email protected]',
license='LICENSE',
platforms='All',
description='A logrotate human friendly interface.',
long_description=read('README.rst'),
packages=find_packages(exclude=[]),
package_data={'logrotated': ['resources/logrotate']},
entry_points={
'console_scripts': [
'rotatethis = logrotated.logrotated:main',
]
},
install_requires=install_requires
)
| nir0s/logrotated | setup.py | Python | apache-2.0 | 879 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyDask(PythonPackage):
"""Minimal task scheduling abstraction"""
homepage = "https://github.com/dask/dask/"
url = "https://pypi.io/packages/source/d/dask/dask-0.8.1.tar.gz"
version('0.8.1', '5dd8e3a3823b3bc62c9a6d192e2cb5b4')
depends_on('py-setuptools', type='build')
| wscullin/spack | var/spack/repos/builtin/packages/py-dask/package.py | Python | lgpl-2.1 | 1,557 |
__author__ = 'yuens'
| ysh329/my-first-cnn | test/__init__.py | Python | mit | 21 |
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module contains constants for Chrome OS bots."""
from __future__ import print_function
import os
BUILDBOT_DIR = '/b'
BUILDBOT_USER = 'chrome-bot'
CHROMITE_URL = 'https://chromium.googlesource.com/chromiumos/chromite'
DEPOT_TOOLS_URL = ('https://chromium.googlesource.com/chromium/tools/'
'depot_tools.git')
BUILDBOT_GIT_REPO = ('https://chrome-internal.googlesource.com/chrome/tools/'
'build/internal.DEPS')
CHROMIUM_BUILD_URL = 'https://chromium.googlesource.com/chromium/src/build'
GCOMPUTE_TOOLS_URL = 'https://gerrit.googlesource.com/gcompute-tools'
# The BOT_CREDS_DIR is required to set up a GCE bot. The directory
# should contain:
# - SVN_PASSWORD_FILE: password for svn.
# - TREE_STATUS_PASSWORD_FILE: password for updating tree status.
# - CIDB_CREDS_DIR: A directory containing cidb credentials.
# - BUILDBOT_PASSWORD_FILE: password for buildbot.
# - HOST_ENTRIES: entries to append to /etc/hosts
# - GMAIL_CREDENTIALS_FILE: credentials to access Gmail API.
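# Example layout of the directory pointed to by BOT_CREDENTIALS_DIR
# (illustrative only; the file names are the constants defined below):
#
#   bot-credentials/
#       svn_password
#       .status_password_chromiumos
#       .cidb_creds/            (directory holding cidb credentials)
#       .bot_password
#       host_entries
#       .gmail_credentials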
BOT_CREDS_DIR_ENV_VAR = 'BOT_CREDENTIALS_DIR'
SVN_PASSWORD_FILE = 'svn_password'
TREE_STATUS_PASSWORD_FILE = '.status_password_chromiumos'
CIDB_CREDS_DIR = '.cidb_creds'
BUILDBOT_PASSWORD_FILE = '.bot_password'
HOST_ENTRIES = 'host_entries'
GMAIL_CREDENTIALS_FILE = '.gmail_credentials'
# This path is used to store credentials on the GCE machine during botifying.
BOT_CREDS_TMP_PATH = os.path.join(os.path.sep, 'tmp', 'bot-credentials')
BUILDBOT_SVN_USER = '%[email protected]' % BUILDBOT_USER
CHROMIUM_SVN_HOSTS = ('svn.chromium.org',)
CHROMIUM_SVN_REPOS = ('chrome', 'chrome-internal', 'leapfrog-internal')
GIT_USER_NAME = 'chrome-bot'
GIT_USER_EMAIL = '%[email protected]' % GIT_USER_NAME
| guorendong/iridium-browser-ubuntu | third_party/chromite/compute/bot_constants.py | Python | bsd-3-clause | 1,890 |
import ROOT
import sys
import os
file_lines = open("training.csv").readlines()
branch_def = ':'.join(file_lines[0].strip().split(','))
out_file = ROOT.TFile("test_out.root", "recreate")
mvaTree = ROOT.TTree("mvaTree", "variables tree")
# replace the 's' / 'b' (signal / background) labels with 1 / 0
csv_string = ''.join(file_lines[1:]).replace(',s',',1').replace(',b',',0')
csv_stream = ROOT.istringstream(csv_string)
mvaTree.ReadStream(csv_stream, branch_def, ',')
out_file.cd()
mvaTree.Write()
| yuanchao/pyHiggsML | pyTMVA/myRead.py | Python | gpl-2.0 | 488 |
import sys
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
try:
import pymysql
pymysql.install_as_MySQLdb()
except ImportError:
pass
import dotenv
env_file = os.path.join(BASE_DIR, '.env')
#
dotenv.read_dotenv(env_file)
DEBUG = bool(int(os.environ.get('DEBUG', '0')))
TEMPLATE_DEBUG = DEBUG
IN_DEV = bool(int(os.environ.get('IN_DEV', '0')))
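# Illustrative .env contents for the variables read above and in DATABASES
# below (all values are hypothetical, not taken from the project):
#
#   DEBUG=1
#   IN_DEV=1
#   DBASE_NAME=dental
#   DBASE_USER=dental_user
#   DBASE_PASSWORD=change-me
#   DBASE_HOST=127.0.0.1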
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^n%f8rti)9fz_68r)xs13n4($r$6&)q@-74xcj5*th$c6c(pnp'
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'fern',
'marketing',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'dental.urls'
WSGI_APPLICATION = 'dental.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# },
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': os.environ.get('DBASE_NAME'),
'USER': os.environ.get('DBASE_USER'),
'PASSWORD': os.environ.get('DBASE_PASSWORD'),
'HOST': os.environ.get('DBASE_HOST') or ''
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = '/var/www/html/wmedia/dental/static/'
TEMPLATE_DIRS = (
(os.path.join(BASE_DIR, 'fern/templates')),
(os.path.join(BASE_DIR, 'marketing/templates')),
)
IN_TEST = 'test' in sys.argv
if IN_TEST:
DATABASES['default']['ENGINE'] = 'django.db.backends.sqlite3'
DATABASES['default']['NAME'] = ':memory:'
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissions',
],
'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',)
}
| edilio/dental | dental/settings.py | Python | mit | 2,930 |
# Copyright (c) 2015 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
from UM.Math.Polygon import Polygon
from UM.Math.Float import Float
import numpy
import math
import pytest
class TestPolygon():
def setup_method(self, method):
# Called before the first testfunction is executed
pass
def teardown_method(self, method):
# Called after the last testfunction was executed
pass
## The individual test cases for mirroring polygons.
test_mirror_data = [
({ "points": [[0.0, 0.0], [2.0, 0.0], [1.0, 2.0]], "axis_point": [0, 0], "axis_direction": [0, 1], "answer": [[-1.0, 2.0], [-2.0, 0.0], [0.0, 0.0]], "label": "Mirror Horizontal", "description": "Test mirroring a polygon horizontally." }),
({ "points": [[0.0, 0.0], [2.0, 0.0], [1.0, 2.0]], "axis_point": [0, 0], "axis_direction": [1, 0], "answer": [[1.0, -2.0], [2.0, 0.0], [0.0, 0.0]], "label": "Mirror Vertical", "description": "Test mirroring a polygon vertically." }),
({ "points": [[0.0, 0.0], [2.0, 0.0], [1.0, 2.0]], "axis_point": [10, 0], "axis_direction": [0, 1], "answer": [[19.0, 2.0], [18.0, 0.0], [20.0, 0.0]], "label": "Mirror Horizontal Far", "description": "Test mirrorring a polygon horizontally on an axis that is not through the origin." }),
({ "points": [[0.0, 0.0], [2.0, 0.0], [1.0, 2.0]], "axis_point": [0, 4], "axis_direction": [1, 1], "answer": [[-2.0, 5.0], [-4.0, 6.0], [-4.0, 4.0]], "label": "Mirror Diagonal", "description": "Test mirroring a polygon diagonally." }),
({ "points": [[10.0, 0.0]], "axis_point": [0, 0], "axis_direction": [0, 1], "answer": [[-10.0, 0.0]], "label": "Mirror Single Vertex", "description": "Test mirroring a polygon with only one vertex." }),
({ "points": [], "axis_point": [0, 0], "axis_direction": [1, 0], "answer": [], "label": "Mirror Empty", "description": "Test mirroring an empty polygon." })
]
## Tests the mirror function.
#
# \param data The data of the test. Must include a list of points of the
# polygon to mirror, a point on the axis, a direction of the axis and an
# answer that is the result of the mirroring.
@pytest.mark.parametrize("data", test_mirror_data)
def test_mirror(self, data):
polygon = Polygon(numpy.array(data["points"], numpy.float32)) #Create a polygon with the specified points.
polygon.mirror(data["axis_point"], data["axis_direction"]) #Mirror over the specified axis.
points = polygon.getPoints()
assert len(points) == len(data["points"]) #Must have the same amount of vertices.
for point_index in range(len(points)):
assert len(points[point_index]) == len(data["answer"][point_index]) #Same dimensionality (2).
for dimension in range(len(points[point_index])):
assert Float.fuzzyCompare(points[point_index][dimension], data["answer"][point_index][dimension]) #All points must be equal.
## The individual test cases for the projection tests.
test_project_data = [
({ "normal": [0.0, 1.0], "answer": [1.0, 2.0], "label": "Project Vertical", "description": "Project the polygon onto a vertical line." }),
({ "normal": [1.0, 0.0], "answer": [0.0, 1.0], "label": "Project Horizontal", "description": "Project the polygon onto a horizontal line." }),
({ "normal": [math.sqrt(0.5), math.sqrt(0.5)], "answer": [math.sqrt(0.5), math.sqrt(4.5)], "label": "Project Diagonal", "description": "Project the polygon onto a diagonal line." })
]
## Tests the project function.
#
# \param data The data of the test. Must include a normal vector to
# project on and a pair of coordinates that is the answer.
@pytest.mark.parametrize("data", test_project_data)
def test_project(self, data):
p = Polygon(numpy.array([
[0.0, 1.0],
[1.0, 1.0],
[1.0, 2.0],
[0.0, 2.0]
], numpy.float32))
result = p.project(data["normal"]) #Project the polygon onto the specified normal vector.
assert len(result) == len(data["answer"]) #Same dimensionality (2).
for dimension in range(len(result)):
assert Float.fuzzyCompare(result[dimension], data["answer"][dimension])
## The individual test cases for the intersection tests.
test_intersect_data = [
({ "polygon": [[ 5.0, 0.0], [15.0, 0.0], [15.0, 10.0], [ 5.0, 10.0]], "answer": [-5.0, 0.0], "label": "Intersect Simple", "description": "Intersect with a polygon that fully intersects." }),
({ "polygon": [[-5.0, 0.0], [ 5.0, 0.0], [ 5.0, 10.0], [-5.0, 10.0]], "answer": [ 5.0, 0.0], "label": "Intersect Left", "description": "Intersect with a polygon on the negative x-axis side that fully intersects." }),
({ "polygon": [[ 0.0, 5.0], [10.0, 5.0], [10.0, 15.0], [ 0.0, 15.0]], "answer": [ 0.0, -5.0], "label": "Intersect Straight Above", "description": "Intersect with a polygon that is exactly above the base polygon (edge case)." }),
({ "polygon": [[ 0.0, -5.0], [10.0, -5.0], [10.0, 5.0], [ 0.0, 5.0]], "answer": [ 0.0, 5.0], "label": "Intersect Straight Left", "description": "Intersect with a polygon that is exactly left of the base polygon (edge case)." }),
({ "polygon": [[ 5.0, 5.0], [15.0, -5.0], [30.0, 5.0], [15.0, 15.0]], "answer": [-5.0, 0.0], "label": "Intersect Rotated", "description": "Intersect with a rotated square." }),
({ "polygon": [[15.0, 0.0], [25.0, 0.0], [25.0, 10.0], [15.0, 10.0]], "answer": None, "label": "Intersect Miss", "description": "Intersect with a polygon that doesn't intersect at all." })
]
## Tests the polygon intersect function.
#
# Every test case intersects a parametrised polygon with a base square of
# 10 by 10 units at the origin.
#
# \param data The data of the test. Must include a polygon to intersect
# with and a required answer.
@pytest.mark.parametrize("data", test_intersect_data)
def test_intersectsPolygon(self, data):
p1 = Polygon(numpy.array([ #The base polygon to intersect with.
[ 0, 0],
[10, 0],
[10, 10],
[ 0, 10]
], numpy.float32))
p2 = Polygon(numpy.array(data["polygon"])) #The parametrised polygon to intersect with.
#Shift the order of vertices in both polygons around. The outcome should be independent of what the first vertex is.
for n in range(0, len(p1.getPoints())):
for m in range(0, len(data["polygon"])):
result = p1.intersectsPolygon(p2)
if not data["answer"]: #Result should be None.
assert result == None
else:
assert result != None
for i in range(0, len(data["answer"])):
assert Float.fuzzyCompare(result[i], data["answer"][i])
p2.setPoints(numpy.roll(p2.getPoints(), 1, axis = 0)) #Shift p2.
p1.setPoints(numpy.roll(p1.getPoints(), 1, axis = 0)) #Shift p1.
## The individual test cases for convex hull intersection tests.
test_intersectConvex_data = [
({ "p1": [[-42, -32], [-42, 12], [62, 12], [62, -32]], "p2": [[-62, -12], [-62, 32], [42, 32], [42, -12]], "answer": [[-42, -12], [-42, 12], [42, 12], [42, -12]], "label": "UM2 Fans", "description": "A simple intersection without edge cases of UM2 fans collision area." })
]
## Tests the convex hull intersect function.
#
# \param data The data of the test case. Must include two polygons and a
# required result polygon.
@pytest.mark.parametrize("data", test_intersectConvex_data)
def test_intersectConvexHull(self, data):
p1 = Polygon(numpy.array(data["p1"]))
p2 = Polygon(numpy.array(data["p2"]))
result = p1.intersectionConvexHulls(p2)
assert len(result.getPoints()) == len(data["answer"]) #Same amount of vertices.
isCorrect = False
for rotation in range(0, len(result.getPoints())): #The order of vertices doesn't matter, so rotate the result around and if any check succeeds, the answer is correct.
thisCorrect = True #Is this rotation correct?
for vertex in range(0, len(result.getPoints())):
for dimension in range(0, len(result.getPoints()[vertex])):
if not Float.fuzzyCompare(result.getPoints()[vertex][dimension], data["answer"][vertex][dimension]):
thisCorrect = False
break #Break out of two loops.
if not thisCorrect:
break
if thisCorrect: #All vertices checked and it's still correct.
isCorrect = True
break
result.setPoints(numpy.roll(result.getPoints(), 1, axis = 0)) #Perform the rotation for the next check.
        assert isCorrect
| onitake/Uranium | tests/Math/TestPolygon.py | Python | agpl-3.0 | 8,965 |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 28 11:09:51 2015
REMEMBER TO START TIME SERVER!
@author: marcus
"""
from __future__ import division
import time
import socket
import CastThread
import os
from psychopy import visual, core, event
import constants_wally
def create_broadcast_socket(ip = '0.0.0.0',port=9090):
# Start broadcasting
sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
sock.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
sock.bind((ip,port))
return sock
###############################################################################
# MAIN SCRIPT STARTS HERE
###############################################################################
# Setup psychopoy screen
win = visual.Window([800,600],screen=1)
text = visual.TextStim(win,text='',wrapWidth = 0.5,height = 0.1)
my_ip = UDP_IP_LOCAL = socket.gethostbyname(socket.gethostname())
eye_tracking = constants_wally.EYE_TRACKING
server_control = constants_wally.SERVER_CONTROL
sock = create_broadcast_socket()
sock.sendto('taskkill /im python.exe /f /t', (constants_wally.UDP_IP_SEND, constants_wally.UDP_PORT))
time.sleep(1)
# start eye tracker
if eye_tracking:
sock.sendto("C:\Program Files (x86)\SMI\RED-m Configuration Tool\iViewREDm-Client.exe",
(constants_wally.UDP_IP_SEND, constants_wally.UDP_PORT))
time.sleep(5)
# Start script by broadcasting message
call = ' '.join(['client_wally.py',str(int(eye_tracking)),str(int(server_control))])
filename = os.path.join(constants_wally.CLIENT_PATH,call)
text.setText('Press any key to start scripts on clients: '+str(constants_wally.CLIENTS))
text.draw()
win.flip()
event.waitKeys()
for i in constants_wally.CLIENTS:
print('python '+filename)
sock.sendto('python '+filename, (constants_wally.IP_prefix+str(i), constants_wally.UDP_PORT))
# Close broadcast socket and start multicast thread
sock.close()
#time.sleep(2)
text.setText('Clients started: '+str(constants_wally.CLIENTS))
text.draw()
win.flip()
time.sleep(2)
if server_control:
xyCasting = CastThread.MultiCast()
xyCasting.start()
# Tell the clients to start the calibration
time.sleep(1)
if eye_tracking:
text.setText('Press any key to start a calibration')
text.draw()
win.flip()
event.waitKeys()
xyCasting.send('calibrate')
# Don't proceed until all the clients have reported that they're done!
ip_list_temp = constants_wally.CLIENTS[:]
text.setText('Calibration in progress\n\n Remaining clients: '+str(ip_list_temp))
text.draw()
win.flip()
time.sleep(1)
while ip_list_temp:
allData = xyCasting.consumeAll()
for data, addr, time_arrive in allData:
if 'done_calibrating' in data:
print(addr[0] + ' done calibrating')
ip_list_temp.remove(int(addr[0].split('.')[-1]))
text.setText('Calibration in progress\n\n Remaining clients: '+str(ip_list_temp))
text.draw()
win.flip()
# proceed also if 'q' is pressed
k = event.getKeys(['q'])
if k:
break
# Start experiment when all clients are done calibrating
#xyCasting.stop()
text.setText('Press any key to start the experiment')
text.draw()
win.flip()
event.waitKeys()
xyCasting.send('start')
# Wait for the clients to finish and store reaction times
ip_list_temp = constants_wally.CLIENTS[:]
search_time = []
text.setText('Waiting for clients to finish\n\n Remaining clients: '+str(ip_list_temp))
text.draw()
win.flip()
time.sleep(1)
t0 = core.getTime()
while ip_list_temp:
allData = xyCasting.consumeAll()
#print(allData)
for data, addr, time_arrive in allData:
if 'exp_done' in data:
ip = int(addr[0].split('.')[-1])
rt = float(data.split(' ')[1])
ip_list_temp.remove(ip)
search_time.append([ip,rt])
text.setText('Waiting for clients to finish\n\n Remaining clients: '+str(ip_list_temp))
text.draw()
win.flip()
# Stop all clients if the maximum search time has been reached
if (core.getTime() - t0) >= constants_wally.MAX_SEARCH_TIME:
xyCasting.send('stop')
break
time.sleep(0.001)
# proceed also if 'q' is pressed
k = event.getKeys(['q'])
if k:
xyCasting.send('stop')
break
# Close the multicast socket
xyCasting.stop()
xyCasting.clean_up()
text.setText('Done!')
text.draw()
win.flip()
time.sleep(10)
win.close()
# Print reaction times and winners
if search_time:
print(sorted(search_time, key=lambda st: st[1]) )
core.quit()
#sock.shutdown(socket.SHUT_RDWR)
#sock.close()
| marcus-nystrom/share-gaze | demo_shared_gaze/server_wally.py | Python | mit | 5,419 |
# -*- coding: utf-8 -*-
import json
from collections import defaultdict
from django.db.models import Count, Q
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.urls import reverse
from django.utils.translation import ugettext as _
from ...server.models import Project, Deployment
def enabled_deployments(user):
total = Deployment.objects.scope(user).filter(enabled=True).count()
link = '{}?enabled__exact=1&_REPLACE_'.format(
reverse('admin:server_deployment_changelist')
)
values_null = defaultdict(list)
for item in Deployment.objects.scope(user).filter(
enabled=True, schedule=None
).values(
'project__id',
).annotate(
count=Count('id')
).order_by('project__id', '-count'):
percent = float(item.get('count')) / total * 100
values_null[item.get('project__id')].append(
{
'name': _('Without schedule'),
'value': item.get('count'),
'y': float('{:.2f}'.format(percent)),
'url': link.replace(
'_REPLACE_',
'project__id__exact={}&schedule__isnull=True'.format(
item.get('project__id')
)
),
}
)
values_not_null = defaultdict(list)
for item in Deployment.objects.scope(user).filter(
enabled=True,
).filter(
~Q(schedule=None)
).values(
'project__id',
).annotate(
count=Count('id')
).order_by('project__id', '-count'):
percent = float(item.get('count')) / total * 100
values_not_null[item.get('project__id')].append(
{
'name': _('With schedule'),
'value': item.get('count'),
'y': float('{:.2f}'.format(percent)),
'url': link.replace(
'_REPLACE_',
'project__id__exact={}&schedule__isnull=False'.format(
item.get('project__id')
)
),
}
)
data = []
for project in Project.objects.scope(user).all():
count = 0
data_project = []
if project.id in values_null:
count += values_null[project.id][0]['value']
data_project.append(values_null[project.id][0])
if project.id in values_not_null:
count += values_not_null[project.id][0]['value']
data_project.append(values_not_null[project.id][0])
if count:
percent = float(count) / total * 100
data.append(
{
'name': project.name,
'value': count,
'y': float('{:.2f}'.format(percent)),
'url': link.replace(
'_REPLACE_',
'project__id__exact={}'.format(project.id)
),
'data': data_project
}
)
return {
'title': _('Enabled Deployments'),
'total': total,
'data': json.dumps(data),
'url': link.replace('&_REPLACE_', ''),
}
def deployment_by_enabled(user):
total = Deployment.objects.scope(user).count()
link = '{}?_REPLACE_'.format(
reverse('admin:server_deployment_changelist')
)
values_null = defaultdict(list)
for item in Deployment.objects.scope(user).filter(
enabled=True
).values(
'project__id',
).annotate(
count=Count('id')
).order_by('project__id', '-count'):
percent = float(item.get('count')) / total * 100
values_null[item.get('project__id')].append(
{
'name': _('Enabled'),
'value': item.get('count'),
'y': float('{:.2f}'.format(percent)),
'url': link.replace(
'_REPLACE_',
'project__id__exact={}&enabled__exact=1'.format(
item.get('project__id')
)
),
}
)
values_not_null = defaultdict(list)
for item in Deployment.objects.scope(user).filter(
enabled=False,
).values(
'project__id',
).annotate(
count=Count('id')
).order_by('project__id', '-count'):
percent = float(item.get('count')) / total * 100
values_not_null[item.get('project__id')].append(
{
'name': _('Disabled'),
'value': item.get('count'),
'y': float('{:.2f}'.format(percent)),
'url': link.replace(
'_REPLACE_',
'project__id__exact={}&enabled__exact=0'.format(
item.get('project__id')
)
),
}
)
data = []
for project in Project.objects.scope(user).all():
count = 0
data_project = []
if project.id in values_null:
count += values_null[project.id][0]['value']
data_project.append(values_null[project.id][0])
if project.id in values_not_null:
count += values_not_null[project.id][0]['value']
data_project.append(values_not_null[project.id][0])
if count:
percent = float(count) / total * 100
data.append(
{
'name': project.name,
'value': count,
'y': float('{:.2f}'.format(percent)),
'url': link.replace(
'_REPLACE_',
'project__id__exact={}'.format(project.id)
),
'data': data_project
}
)
return {
'title': _('Deployments / Enabled'),
'total': total,
'data': json.dumps(data),
'url': link.replace('?_REPLACE_', ''),
}
def deployment_by_schedule(user):
total = Deployment.objects.scope(user).count()
link = '{}?_REPLACE_'.format(
reverse('admin:server_deployment_changelist')
)
values_null = defaultdict(list)
for item in Deployment.objects.scope(user).filter(
schedule=None
).values(
'project__id',
).annotate(
count=Count('id')
).order_by('project__id', '-count'):
percent = float(item.get('count')) / total * 100
values_null[item.get('project__id')].append(
{
'name': _('Without schedule'),
'value': item.get('count'),
'y': float('{:.2f}'.format(percent)),
'url': link.replace(
'_REPLACE_',
'project__id__exact={}&schedule__isnull=True'.format(
item.get('project__id')
)
),
}
)
values_not_null = defaultdict(list)
for item in Deployment.objects.scope(user).filter(
~Q(schedule=None)
).values(
'project__id',
).annotate(
count=Count('id')
).order_by('project__id', '-count'):
percent = float(item.get('count')) / total * 100
values_not_null[item.get('project__id')].append(
{
'name': _('With schedule'),
'value': item.get('count'),
'y': float('{:.2f}'.format(percent)),
'url': link.replace(
'_REPLACE_',
'project__id__exact={}&schedule__isnull=False'.format(
item.get('project__id')
)
),
}
)
data = []
for project in Project.objects.scope(user).all():
count = 0
data_project = []
if project.id in values_null:
count += values_null[project.id][0]['value']
data_project.append(values_null[project.id][0])
if project.id in values_not_null:
count += values_not_null[project.id][0]['value']
data_project.append(values_not_null[project.id][0])
if count:
percent = float(count) / total * 100
data.append(
{
'name': project.name,
'value': count,
'y': float('{:.2f}'.format(percent)),
'url': link.replace(
'_REPLACE_',
'project__id__exact={}'.format(project.id)
),
'data': data_project
}
)
return {
'title': _('Deployments / Schedule'),
'total': total,
'data': json.dumps(data),
'url': link.replace('?_REPLACE_', ''),
}
@login_required
def deployments_summary(request):
user = request.user.userprofile
return render(
request,
'deployments_summary.html',
{
'title': _('Deployments'),
'chart_options': {
'no_data': _('There are no data to show'),
'reset_zoom': _('Reset Zoom'),
},
'enabled_deployments': enabled_deployments(user),
'deployment_by_enabled': deployment_by_enabled(user),
'deployment_by_schedule': deployment_by_schedule(user),
'opts': Deployment._meta,
}
)
| migasfree/migasfree | migasfree/stats/views/deployments.py | Python | gpl-3.0 | 9,415 |
# Copyright (c) 2016, PerformLine, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the company nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL PERFORMLINE, INC. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import unicode_literals
import unittest
from ....testing import client
from ..models import Chat
class TestChats(unittest.TestCase):
def setUp(self):
self.client = client()
def test_get_all_chats(self):
chats = list(self.client.chats())
self.assertIsInstance(chats, list)
self.assertEqual(len(chats), 3)
chat = chats[0]
self.assertIsInstance(chat, Chat)
self.assertEqual(chat.Id, 6)
self.assertEqual(chat.Type, 'chat')
self.assertEqual(chat.Score, 80)
self.assertEqual(chat.TrafficSourceId, 1)
self.assertEqual(chat.CampaignId, 9)
self.assertEqual(chat.BrandId, 15)
self.assertEqual(chat.CompanyId, 10)
self.assertEqual(chat.CreatedAt, '2018-07-24T00:00:00-04:00')
self.assertEqual(chat.LastScoredAt, '2018-07-24T00:00:00-04:00')
def test_chat_endpoint_access(self):
# Company token does not have access to page_id = 9
chat = list(self.client.chats(9))
self.assertIsInstance(chat, list)
self.assertTrue(len(chat) == 0)
def test_get_chat_by_id(self):
        #Will test that all attributes are returned for page id = 7
chat = self.client.chats(7)
self.assertIsInstance(chat, Chat)
self.assertEqual(chat.Id, 7)
self.assertEqual(chat.Type, 'chat')
self.assertEqual(chat.Score, 70)
self.assertEqual(chat.TrafficSourceId, 1)
self.assertEqual(chat.CampaignId, 9)
self.assertEqual(chat.BrandId, 15)
self.assertEqual(chat.CompanyId, 10)
self.assertEqual(chat.CreatedAt, '2018-07-24T00:00:00-04:00')
self.assertEqual(chat.LastScoredAt, '2018-08-24T00:00:00-04:00')
def test_get_chat_in(self):
#Will test parameters
chats = list(self.client.chats()) # this has 3 chats
chats_in_limit = list(self.client.chats(limit=2))
self.assertEqual(len(chats_in_limit), 2)
self.assertEqual(chats_in_limit[0].id, 6)
self.assertEqual(chats_in_limit[1].id, 7)
chats_in_offset = list(self.client.chats(offset=1))
self.assertEqual(len(chats_in_offset), 2)
self.assertEqual(chats_in_offset[0].id, 7)
self.assertEqual(chats_in_offset[1].id, 8)
chats_in_campaign = list(self.client.chats(campaign=9))
self.assertEqual(len(chats_in_campaign), 2)
self.assertEqual(chats_in_campaign[0].id, 6)
self.assertEqual(chats_in_campaign[1].id, 7)
chats_in_brand = list(self.client.chats(brand=15))
self.assertEqual(len(chats_in_brand), 3)
self.assertEqual(chats_in_brand[0].id, 6)
self.assertEqual(chats_in_brand[1].id, 7)
| PerformLine/python-performline-client | performline/products/chatscout/tests/chat_test.py | Python | bsd-3-clause | 4,279 |
"""The switch tests for the nexia platform."""
from homeassistant.const import STATE_ON
from .util import async_init_integration
async def test_hold_switch(hass):
"""Test creation of the hold switch."""
await async_init_integration(hass)
assert hass.states.get("switch.nick_office_hold").state == STATE_ON
| rohitranjan1991/home-assistant | tests/components/nexia/test_switch.py | Python | mit | 322 |
# vim: fileencoding=utf-8 spell spelllang=en ft=python
"""
:mod:`base8x` -- Encode binary data into ASCII and vice versa
=============================================================
.. module:: base8x
:platform: Python 2.x
:synopsis: Base-(85 ~ 95) binary/ASCII conversion
.. moduleauthor:: Cong Ma <[email protected]>
Example Usage
=============
>>> print z85codec.encode('\x86\x4f\xd2\x6f\xb5\x59\xf7\x5b') # ZeroMQ spec
HelloWorld
Additional doctest
==================
>>> import os
>>> txt = os.urandom(1024)
>>> new = z85codec.decode(z85codec.encode(txt))
>>> print txt == new
True
>>> adobe85codec = Base8xCodec(chr(x) for x in xrange(33, 33 + 85))
>>> print adobe85codec.encode('\xff\xff\xff\xff')
s8W-!
>>> adobe85codec.decode('s8W-$')
Traceback (most recent call last):
...
ValueError: Tried decoding illegal sequence: (s8W-$, 0x100000002)
COPYING
=======
3-clause BSD license, see the file COPYING.
"""
import struct
_MAXVAL = 2 ** 32 - 1
def _chunkby(seq, stride, padding=None):
"""Given input iterator *seq*, yields a list for every *stride* items, and
possibly fills the last chunk with the padding character *padding*.
This generator yields ``(chunk, npadding)``, where ``npadding`` is the
number of padding items at the tail. If *padding* is :const:`None`,
``npadding`` gives the hypothetical number provided that padding should not
be :const:`None`.
>>> [(''.join(p[0]), p[1]) for p in _chunkby('abcd', 3, 'X')]
[('abc', 0), ('dXX', 2)]
>>> [(''.join(p[0]), p[1]) for p in _chunkby('abcde', 3)]
[('abc', 0), ('de', 1)]
"""
stride = int(stride)
if stride <= 0:
raise ValueError("Stride parameter too small")
count = 0
tmp = []
for item in seq:
tmp.append(item)
count += 1
if count == stride:
yield tmp, 0
tmp = []
count = 0
if count != 0:
npadding = stride - count
if padding is not None:
tmp.extend([padding] * npadding)
yield tmp, npadding
def _validate_alphabet(seq):
"""Validates the encoding alphabet *seq*.
Returns :const:`None` if input is invalid, otherwise returns a string copy
of the valid alphabet.
>>> a = [chr(x) for x in xrange(35, 35 + 85)]
>>> print _validate_alphabet(a) # doctest: +ELLIPSIS
#$%&...tuvw
>>> print _validate_alphabet(a[:-1])
None
>>> print _validate_alphabet(a + ['\x8a'])
None
>>> print _validate_alphabet(['a'] + a)
None
"""
# Early-exit checker for the uniqueness of members.
seen = set()
accept = [] # Set membership is O(1), but maybe unnecessary anyway...
# Filter out duplicate or unprintable characters.
# Works even if seq never terminates, due to the pigeonhole principle.
for item in seq:
if item in seen or not 32 <= ord(item) <= 126:
return None
seen.add(item)
accept.append(item)
# Check size. Don't use len(seq), for it doesn't have to have a length.
if not 85 <= len(accept) <= 95:
return None
return "".join(accept)
def _codec_generator_factory(inchunksize, outchunksize, padchar,
ingroup2num, num2outgroup):
"""Factory that returns a conversion generator for codec purpose.
By binding different parameters to the enclosed generator, the
creations of encoding and decoding generators are unified.
Parameters to be bound to the enclosed generator
------------------------------------------------
- *inchunksize*: chunk size (in characters/bytes) of input stream
- *outchunksize*: chunk size of output stream
- *padchar*: padding character for incomplete chunk
- *ingroup2num*: function taking a string built from an input chunk that
converts it to the internal integer value
- *num2outgroup*: function taking an integer value that converts it to an
output chunk.
The returned generator is initialized by a single argument, the input
iterable that is to be encoded/decoded.
"""
def codec_gen(inputseq): # pylint:disable=C0111
chunked = _chunkby(inputseq, inchunksize, padchar)
for chunk, npadding in chunked:
val = ingroup2num(b"".join(chunk))
yield num2outgroup(val)[:(outchunksize - npadding)]
return codec_gen
class Base8xCodec(object):
"""Base8x encoding/decoding utility class.
An instance of the class is initialized by an ordered alphabet containing
    between 85 and 95 unique printable ASCII characters. For example:
>>> Base8xCodec(chr(x) for x in xrange(40, 40 + 85)) #doctest: +ELLIPSIS
<....Base8xCodec object at 0x...>
The class provides two methods, :method:`encode` and :method:`decode`.
No contraction measures (like Adobe's ``z`` encoding for 4 bytes of zeroes)
are supported.
"""
def __init__(self, alphabet):
self._alphabet = _validate_alphabet(alphabet)
if self._alphabet is None:
raise ValueError("Invalid input alphabet")
self._radix = len(self._alphabet)
self._ordmap = dict(((_chr, _idx) for _idx, _chr in
enumerate(self._alphabet)))
self._powersofradix = [self._radix ** _p for _p in xrange(4, -1, -1)]
self._powersenum = [_p for _p in enumerate(self._powersofradix)]
@staticmethod
def _get_num_by_seq_enc(seq):
"""Get integer value by 4-byte sequence."""
return struct.unpack(">1I", seq)[0]
def _encode_quartet(self, num):
"""Encode a single quartet by its integer value."""
enc = []
for offset in self._powersofradix:
enc.append(self._alphabet[(num // offset) % self._radix])
return b"".join(enc)
def _get_num_by_seq_dec(self, seq):
"""Get integer value by 5-byte sequence."""
val = 0
for i, offset in self._powersenum:
char = seq[i]
try:
pos = self._ordmap[char]
except KeyError:
raise ValueError("Tried decoding sequence '%s' "
"containing illegal character '%s' (0x%02x) "
"at position %d" % (seq, char, ord(char), i))
val += pos * offset
if val > _MAXVAL:
raise ValueError("Tried decoding illegal sequence: (%s, 0x%0x)" %
(seq, val))
return val
@staticmethod
def _decode_quintet(num):
"""Decode a single quintet by its integer value."""
return struct.pack(">1I", num)
def _make_encode_gen(self):
"""Create a new encoding generator."""
return _codec_generator_factory(4, 5, "\x00",
self._get_num_by_seq_enc,
self._encode_quartet)
def _make_decode_gen(self):
"""Create a new decoding generator."""
return _codec_generator_factory(5, 4, self._alphabet[-1],
self._get_num_by_seq_dec,
self._decode_quintet)
def encode(self, text):
"""Encode text.
*text* must be an iterable of characters or bytes.
Returns a string.
"""
encoder = self._make_encode_gen()
return "".join(encoder(text))
def decode(self, text):
"""Decode text.
*text* must be an iterable of characters in the alphabet with valid
5-character sequences.
Returns a string or bytes.
Raises :exc:`ValueError` if invalid input is encountered.
"""
decoder = self._make_decode_gen()
return b"".join(decoder(text))
# pylint: disable=C0103
z85codec = Base8xCodec("0123456789abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
".-:+=^!/*?&<>()[]{}@%$#")
# pylint: enable=C0103
__all__ = ["Base8xCodec", "z85codec"]
if __name__ == "__main__":
import doctest
doctest.testmod()
| congma/base8x | base8x.py | Python | bsd-3-clause | 8,088 |
# -*- coding: utf-8 -*-
import os
from itertools import combinations
list(combinations(range(5), 3))
os.system("pause")
| NicovincX2/Python-3.5 | Algorithmique/Mathématiques discrètes/Combinatoire/Combinaison/comb.py | Python | gpl-3.0 | 123 |
"""Forest covertype dataset.
A classic dataset for classification benchmarks, featuring categorical and
real-valued features.
The dataset page is available from UCI Machine Learning Repository
http://archive.ics.uci.edu/ml/datasets/Covertype
Courtesy of Jock A. Blackard and Colorado State University.
"""
# Author: Lars Buitinck
# Peter Prettenhofer <[email protected]>
# License: BSD 3 clause
from gzip import GzipFile
import logging
from os.path import exists, join
from os import remove
import numpy as np
from .base import get_data_home
from .base import _fetch_remote
from .base import RemoteFileMetadata
from ..utils import Bunch
from .base import _pkl_filepath
from ..utils.fixes import makedirs
from ..externals import joblib
from ..utils import check_random_state
# The original data can be found in:
# http://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.data.gz
ARCHIVE = RemoteFileMetadata(
filename='covtype.data.gz',
url='https://ndownloader.figshare.com/files/5976039',
checksum=('614360d0257557dd1792834a85a1cdeb'
'fadc3c4f30b011d56afee7ffb5b15771'))
logger = logging.getLogger(__name__)
def fetch_covtype(data_home=None, download_if_missing=True,
random_state=None, shuffle=False):
"""Load the covertype dataset, downloading it if necessary.
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : string, optional
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : boolean, default=True
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
random_state : int, RandomState instance or None, optional (default=None)
Random state for shuffling the dataset.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
shuffle : bool, default=False
Whether to shuffle dataset.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (581012, 54)
Each row corresponds to the 54 features in the dataset.
dataset.target : numpy array of shape (581012,)
Each value corresponds to one of the 7 forest covertypes with values
ranging between 1 to 7.
dataset.DESCR : string
Description of the forest covertype dataset.
"""
data_home = get_data_home(data_home=data_home)
covtype_dir = join(data_home, "covertype")
samples_path = _pkl_filepath(covtype_dir, "samples")
targets_path = _pkl_filepath(covtype_dir, "targets")
available = exists(samples_path)
if download_if_missing and not available:
if not exists(covtype_dir):
makedirs(covtype_dir)
logger.info("Downloading %s" % ARCHIVE.url)
archive_path = _fetch_remote(ARCHIVE, dirname=covtype_dir)
Xy = np.genfromtxt(GzipFile(filename=archive_path), delimiter=',')
# delete archive
remove(archive_path)
X = Xy[:, :-1]
y = Xy[:, -1].astype(np.int32)
joblib.dump(X, samples_path, compress=9)
joblib.dump(y, targets_path, compress=9)
elif not available and not download_if_missing:
raise IOError("Data not found and `download_if_missing` is False")
try:
X, y
except NameError:
X = joblib.load(samples_path)
y = joblib.load(targets_path)
if shuffle:
ind = np.arange(X.shape[0])
rng = check_random_state(random_state)
rng.shuffle(ind)
X = X[ind]
y = y[ind]
return Bunch(data=X, target=y, DESCR=__doc__)
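# Hedged usage sketch (not part of the original scikit-learn module): fetch the
# covertype data, optionally shuffled, and inspect the shapes documented above.
#
#   >>> from sklearn.datasets import fetch_covtype
#   >>> bunch = fetch_covtype(shuffle=True, random_state=0)
#   >>> bunch.data.shape, bunch.target.shape
#   ((581012, 54), (581012,))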
| mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/sklearn/datasets/covtype.py | Python | mit | 3,977 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('metrics', '0004_metric_metadata'),
]
operations = [
migrations.AddField(
model_name='metric',
name='display_caption',
field=models.TextField(default=b'', null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='metric',
name='display_group',
field=models.TextField(default=b'', null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='metric',
name='display_order',
field=models.IntegerField(null=True),
preserve_default=True,
),
migrations.AddField(
model_name='metric',
name='display_subgroup',
field=models.TextField(default=b'', null=True, blank=True),
preserve_default=True,
),
]
| nanchenchen/lsst-comaf | comaf/apps/metrics/migrations/0005_auto_20150601_0202.py | Python | mit | 1,085 |
# This file is part of fedmsg.
# Copyright (C) 2012-2015 Red Hat, Inc.
#
# fedmsg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# fedmsg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with fedmsg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Ralph Bean <[email protected]>
#
from fedmsg_meta_fedora_infrastructure import BaseProcessor
class ZanataProcessor(BaseProcessor):
__name__ = "Zanata"
__description__ = "translation events"
__link__ = "https://fedora.zanata.org"
__docs__ = "http://zanata.org/help/workflow-overview/"
__obj__ = "Translation Events"
__icon__ = "https://pbs.twimg.com/profile_images/" + \
"378800000417679469/47eb45c6205aa9f2cdb8705e6d46745c_normal.png"
def subtitle(self, msg, **config):
tmpl = self._(
"{docId} from the {project} project "
"is now {milestone} in the '{locale}' locale"
)
return tmpl.format(**msg['msg']).lower()
def secondary_icon(self, msg, **config):
return self.__icon__
def _object(self, msg):
return "/".join([
msg['msg']['project'],
msg['msg']['version'],
'languages',
msg['msg']['locale'],
])
def objects(self, msg, **config):
return set([self._object(msg)])
def link(self, msg, **config):
return "https://fedora.zanata.org/iteration/view/" + self._object(msg)
| Ghost-script/fedmsg_meta_fedora_infrastructure | fedmsg_meta_fedora_infrastructure/zanata.py | Python | lgpl-2.1 | 1,957 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script converts old-style <a> links to API docs to the new $ref links.
# See reference_resolver.py for more info on the format of $ref links.
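# Illustrative conversion (input made up for documentation purposes):
#   <a href="tabs.html#method-executeScript">executeScript</a>
# becomes
#   $(ref:tabs.executeScript)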
import optparse
import os
import re
from docs_server_utils import SanitizeAPIName
def _ReadFile(filename):
with open(filename) as f:
return f.read()
def _WriteFile(filename, contents):
with open(filename, 'w') as f:
f.write(contents)
def _Replace(matches, filename):
title = matches.group(3)
if matches.group(2).count('#') != 1:
return '<a%shref=%s>%s</a>' % (matches.group(1),
matches.group(2),
title)
clean = (matches.group(2).replace('\\', '')
.replace("'", '')
.replace('"', '')
.replace('/', ''))
page, link = clean.split('#')
if not page:
page = '%s.html' % SanitizeAPIName(filename.rsplit(os.sep, 1)[-1])
if (not link.startswith('property-') and
not link.startswith('type-') and
not link.startswith('method-') and
not link.startswith('event-')):
return '<a%shref=%s>%s</a>' % (matches.group(1),
matches.group(2),
title)
link = re.sub('^(property|type|method|event)-', '', link).replace('-', '.')
page = page.replace('.html', '.').replace('_', '.')
if matches.group(1) == ' ':
padding = ''
else:
padding = matches.group(1)
if link in title:
return '%s$(ref:%s%s)' % (padding, page, link)
else:
return '%s$(ref:%s%s %s)' % (padding, page, link, title)
def _ConvertFile(filename, use_stdout):
regex = re.compile(r'<a(.*?)href=(.*?)>(.*?)</a>', flags=re.DOTALL)
contents = _ReadFile(filename)
contents = re.sub(regex,
lambda m: _Replace(m, filename),
contents)
contents = contents.replace('$(ref:extension.lastError)',
'$(ref:runtime.lastError)')
if use_stdout:
print contents
else:
_WriteFile(filename, contents)
if __name__ == '__main__':
parser = optparse.OptionParser(
description='Converts <a> links to $ref links.',
usage='usage: %prog [option] <directory>')
parser.add_option('-f', '--file', default='',
help='Convert links in single file.')
parser.add_option('-o', '--out', action='store_true', default=False,
help='Write to stdout.')
regex = re.compile(r'<a(.*?)href=(.*?)>(.*?)</a>', flags=re.DOTALL)
opts, argv = parser.parse_args()
if opts.file:
_ConvertFile(opts.file, opts.out)
else:
if len(argv) != 1:
parser.print_usage()
exit(0)
for root, dirs, files in os.walk(argv[0]):
for name in files:
_ConvertFile(os.path.join(root, name), opts.out)
| s20121035/rk3288_android5.1_repo | external/chromium_org/chrome/common/extensions/docs/server2/link_converter.py | Python | gpl-3.0 | 2,979 |
# AsteriskLint -- an Asterisk PBX config syntax checker
# Copyright (C) 2015-2016 Walter Doekes, OSSO B.V.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from ..base import AppBase
class Page(AppBase):
pass
def register(app_loader):
for app in (
Page,
):
app_loader.register(app())
| ossobv/asterisklint | asterisklint/app/vall/app_page.py | Python | gpl-3.0 | 916 |
from changes.config import db
from changes.models.project import ProjectOption
from changes.models.snapshot import Snapshot, SnapshotStatus
from changes.testutils import APITestCase
class SnapshotDetailsTest(APITestCase):
def test_simple(self):
project = self.create_project()
snapshot = self.create_snapshot(project)
path = '/api/0/snapshots/{0}/'.format(snapshot.id)
resp = self.client.get(path)
assert resp.status_code == 200
data = self.unserialize(resp)
assert data['id'] == snapshot.id.hex
assert data['project']['id'] == project.id.hex
assert data['build'] is None
class UpdateSnapshotTest(APITestCase):
def setUp(self):
super(UpdateSnapshotTest, self).setUp()
self.project = self.create_project()
self.snapshot = self.create_snapshot(self.project)
self.path = '/api/0/snapshots/{0}/'.format(self.snapshot.id.hex)
def test_simple(self):
for status in ('active', 'failed', 'invalidated'):
resp = self.client.post(self.path, data={
'status': status,
})
assert resp.status_code == 200
data = self.unserialize(resp)
assert data['id'] == self.snapshot.id.hex
assert data['status']['id'] == status
db.session.expire(self.snapshot)
snapshot = Snapshot.query.get(self.snapshot.id)
assert snapshot.status == SnapshotStatus[status]
def test_invalid_status(self):
resp = self.client.post(self.path, data={
'status': 'invalid_status',
})
assert resp.status_code == 400
def test_set_current(self):
for status in ('failed', 'invalidated', 'active'):
resp = self.client.post(self.path, data={
'status': status,
'set_current': True,
})
options = dict(db.session.query(
ProjectOption.name, ProjectOption.value
).filter(
ProjectOption.project == self.project,
))
if status == 'active':
assert resp.status_code == 200
assert options.get('snapshot.current') == self.snapshot.id.hex
else:
assert resp.status_code == 400
assert options.get('snapshot.current') != self.snapshot.id.hex
| dropbox/changes | tests/changes/api/test_snapshot_details.py | Python | apache-2.0 | 2,394 |
import logging
from urllib import quote
import jsonpickle
from MisuseCase import MisuseCase
from tests.CairisTests import CairisTests
__author__ = 'Robin Quetin'
class MisuseCaseTests(CairisTests):
# region Class fields
logger = logging.getLogger(__name__)
existing_risk_name = 'Unauthorised Certificate Access'
misuse_case_class = MisuseCase.__module__+'.'+MisuseCase.__name__
# endregion
def test_get_all(self):
method = 'test_get_all'
rv = self.app.get('/api/misuse-cases?session_id=test')
misuse_cases = jsonpickle.decode(rv.data)
self.assertIsNotNone(misuse_cases, 'No results after deserialization')
self.assertIsInstance(misuse_cases, dict, 'The result is not a dictionary as expected')
self.assertGreater(len(misuse_cases), 0, 'No misuse_cases in the dictionary')
self.logger.info('[%s] MisuseCases found: %d', method, len(misuse_cases))
misuse_case = misuse_cases.values()[0]
self.logger.info('[%s] First misuse_case: %s [%d]\n', method, misuse_case['theName'], misuse_case['theId'])
def test_get_by_name(self):
        method = 'test_get_by_name'
url = '/api/misuse-cases/risk/%s?session_id=test' % quote(self.existing_risk_name)
rv = self.app.get(url)
self.assertIsNotNone(rv.data, 'No response')
self.logger.debug('[%s] Response data: %s', method, rv.data)
misuse_case = jsonpickle.decode(rv.data)
self.assertIsNotNone(misuse_case, 'No results after deserialization')
self.logger.info('[%s] MisuseCase: %s [%d]\n', method, misuse_case['theName'], misuse_case['theId']) | RobinQuetin/CAIRIS-web | cairis/cairis/tests/MisuseCaseTests.py | Python | apache-2.0 | 1,642 |
from setuptools import setup, find_packages
setup(
name='StatHat',
version='1.1',
author='Valentin Bora',
author_email = '[email protected]',
description = 'StatHat Python client',
url = "http://",
license = "MIT",
packages = find_packages(exclude=['tests']),
zip_safe = False,
include_package_data = True,
package_data = {
'': ['*.png'],
},
entry_points = {
}
)
| valentinbora/stathat | setup.py | Python | mit | 408 |
class Node:
def __init__(self, val):
self.value = val
self.left = None
self.right = None
# Needed for deletion, to heal our poor tree's severed stumps
self.parent = None
# For AVL tree-balancing, a node must
# keep track of its own balance offset.
# Note that the root node always starts
# with a balance_offset of zero.
self.balance_offset = 0
def __str__(self):
# inspired by:
# https://github.com/caseymacphee/Data-structures/blob/master/bst.py
return str(self.value)
class BalancingBinarySearchTree:
# With much assistance from:
# https://github.com/caseymacphee/Data-structures/blob/master/bst.py
# and
# http://stackoverflow.com/questions/5444394/
# implementing-binary-search-tree-in-python
def __init__(self):
self.root_node = None
self.size_counter = 0
# This is not related to the AVL tree's balance measures, but
# it can serve as a mildly interesting way to keep track of the
# performance balance of the tree balancing function by comparison.
self.size_balance_counter = 0
def correct_balance(self, this_node):
# Reference:
# http://interactivepython.org/
# courselib/static/pythonds/Trees/balanced.html
if ((this_node.balance_offset > 1)
or (this_node.balance_offset < -1)):
self.avl_balance(this_node)
            # It shouldn't need to continue checking after this point, so:
return
if this_node.parent is not None:
# Check if this_node is the left branch of the parent node:
if this_node.parent.left == this_node:
# Balance offsets on each node match the direction of the
# tree's sign -- left is positive offset, right is negative.
this_node.parent.balance_offset += 1
# Now check if this_node is the right branch of the parent node:
elif this_node.parent.right == this_node:
this_node.parent.balance_offset -= 1
# If the parent node's balance is not zero, check them too.
# ((Note that this whole procedure only works properly if
# correct_balance() is called after every change to the tree;
# when this cannot be guaranteed, there may be situations
# where the tree doesn't realize the balance is askew.))
if this_node.parent.balance_offset != 0:
# Note: This is a tail recursion point. It is NOT intended
# to be the same as calling self.avl_balance().
self.correct_balance(this_node.parent)
def avl_balance(self, this_node):
# Reference:
# http://interactivepython.org/
# courselib/static/pythonds/Trees/balanced.html
# If this_node's balance_offset is zero, no rebalancing is needed.
if this_node.balance_offset == 0:
return
# If the node's balance offset is negative,
# the branches below it must be right-heavy.
if this_node.balance_offset < 0:
# Being right-heavy doesn't mean that the right branch is
# correctly balanced, itself. The right branch could still
# be locally imbalanced to the left. In this case a separate
# pre-balancing step on the right branch should be performed
# before self.rotate_left(this_node) is called.
if this_node.right.balance_offset > 0:
self.rotate_right(this_node.right)
# Once the subtree's rotation is corrected, this_node may
# safely be rotated to the left:
self.rotate_left(this_node)
# If there is no imbalance in the subtree opposite to the
# imbalance of this_node, this_node may safely be rotated left:
else:
self.rotate_left(this_node)
# Handling cases where the tree is left-heavy:
elif this_node.balance_offset > 0:
# As above, but with inverted balance checking.
# If this_node's left branch has a rightwards imbalance
# (even though this_node is itself imbalanced leftwards)...
if this_node.left.balance_offset < 0:
# ... then pre-balance the left branch leftwards to correct
# that rightwards imbalance...
self.rotate_left(this_node.left)
# ... and then rotate this_node to resolve its own imbalance:
self.rotate_right(this_node)
# If this_node's left branch has no rightwards imbalance of
# its own, this_node may safely be rotated rightwards
# (to correct its own leftwards offset).
else:
self.rotate_right(this_node)
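        # Editorial worked example (values assumed, not in the original
        # source): inserting 3, 1, 2 in that order leaves node 3 with
        # balance_offset +2 while its left child, node 1, has offset -1.
        # The branch above therefore calls rotate_left() on node 1 and
        # then rotate_right() on node 3, i.e. the classic left-right
        # double rotation, leaving node 2 as the new subtree root with
        # nodes 1 and 3 as balanced children.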
def rotate_left(self, old_root_node):
# old_root_node refers to the node that is being rotated;
# its position (relative to the rest of the tree) is where one
# of its branch nodes will be after we're done.
# The term "root" will refer to that relative position.
# Before swapping any nodes' branch or parent references,
# assign the right branch of the old_root_node to a holding
# variable so we can keep track of it.
new_root_node = old_root_node.right
# The first node pointer swap.
# (This effectively replaces the old_root_node's right pointer
# with the previous occupant's own left pointer, leaving new_root_node
# with a reference to the old_root_node as a parent, but without
# a corresponding branch reference on old_root_node.)
old_root_node.right = new_root_node.left
# Before moving on, we must tie up a loose end we created --
# the new_root_node.left pointer is changed, but its parent reference
# is now incorrect. So, if there is actually a node there (see None),
# make it aware of the change in parentage:
if new_root_node.left is not None:
new_root_node.left.parent = old_root_node
# This informs the new_root_node of its parent node, by way of
# the parent reference on the old_root_node. This is where
        # I started to think of this node as genuinely being the "old"
        # root, given how many of its pointers have now been swapped away.
new_root_node.parent = old_root_node.parent
# If the old_root_node was the root of
# the tree, two special changes must be made.
# If a node doesn't have a parent node, it had
# better be the root of the tree. You can also
# check if old_root_node == self.root_node
# if you'd rather the relation be more explicit.
if old_root_node.parent is None:
# First, swap the tree's root reference:
self.root_node = new_root_node
# If the old_root_node was not the root of the tree, we can
# now inform the parents of their horizontal transdoption:
else:
# Check if the old_root_node was the left child of its parent.
if old_root_node.parent.left == old_root_node:
# If so, correct the old parent's left branch pointer to
# the new_root_node, cementing its position in the tree:
old_root_node.parent.left = new_root_node
# If the old_root_node wasn't the tree root and it wasn't
# the left branch of its parent node, it must have been the
# right branch of its parent node.
else:
# Informing the root position's parent of the new root node:
old_root_node.parent.right = new_root_node
# Now that the nodes have been swapped in each others' pointers
# and the parent node has been informed, we can move the
# old_root_node in to the open slot left by reassigning
# new_root.left.parent to old_root_node.left (or rather,
# assigning the nodes in *that position* to *that position*.)
new_root_node.left = old_root_node
# Swap the old_root_node's parent
# pointer to the new_root_node.
old_root_node.parent = new_root_node
# Next we must properly modify all involved nodes'
# balance_offsets to reflect the recent changes.
# Note that this alteration is NOT
# handled by correct_balance().
# First, increment the old_root_node's balance offset by one to
# reflect its leftward shift:
old_root_node.balance_offset += 1
# Then, if the new_root_node's balance_offset is negative (rightwards),
# apply it to the old_root_node's balance_offset as an increment.
# This is like taking the absolute value of the new_root_node's
# balance_offset and adding it to the old_root_node's balance_offset,
# but only if it is a value below zero before absolute-valuification.
# Figuring this step out had me boggled for a while and would
# probably require some extra research to really memorize.
# A full algebraic derivation of this logic can be found at:
# http://interactivepython.org/
# courselib/static/pythonds/Trees/balanced.html
old_root_node.balance_offset -= min(new_root_node.balance_offset, 0)
# Next is the corresponding procedure in the opposite direction
# for the new_root_node's balance_offset.
new_root_node.balance_offset += 1
# Remember we're rotating left here, so everything should only go up.
new_root_node.balance_offset += max(old_root_node.balance_offset, 0)
# And we're done with left-rotating! Hooray!
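        # Editorial worked example (values assumed, not in the original
        # source): inserting 1, 2, 3 in order makes node 1 the
        # old_root_node with offset -2 and node 2 the new_root_node with
        # offset -1. The arithmetic above gives old: -2 + 1 - min(-1, 0) = 0
        # and new: -1 + 1 + max(0, 0) = 0, so node 2 ends up as a perfectly
        # balanced root with children 1 and 3.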
def rotate_right(self, old_root_node):
# Once again, thanks to
# http://interactivepython.org/
# courselib/static/pythonds/Trees/balanced.html
# for the excellent reference.
# Rotating right is just like rotating left, except in
# the opposite direction;
# i.e., it's the symmetrical counterpart to rotate_left().
# For this reason, I'll omit most of the comments I made in
# rotate_left() and instead point out the relevant differences.
# "Root" herein refers to the relative position of the root node
# of the subtree that is being rotated. It does NOT refer to the
# entire tree's root_node.
# Since we're rotating right, we'll need to use the old_root_node's
# left branch as the new_root_node.
new_root_node = old_root_node.left
old_root_node.left = new_root_node.right
if new_root_node.right is not None:
new_root_node.right.parent = old_root_node
# Note that the symmetric changes only apply to chiral distinctions.
new_root_node.parent = old_root_node.parent
if old_root_node.parent is None:
self.root_node = new_root_node
else:
if old_root_node.parent.right == old_root_node:
old_root_node.parent.right = new_root_node
else:
old_root_node.parent.left = new_root_node
new_root_node.right = old_root_node
old_root_node.parent = new_root_node
# This next part is critically distinct from
# its counterpart in left_rotation().
# Where left_rotation was always incrementing offsets, right_rotation
# will only ever be decrementing them.
# This means we must swap the 'crement operator's sign to negative:
old_root_node.balance_offset -= 1
# In rotate_left() it was necessary to decrement by the least of
# either a potentially negative number or zero -- an operation
# which only ever yielded a HIGHER value or kept the number the same.
# This is because subtracting by a negative number causes the resulting
# value to increase, where adding a negative number to something
# causes it to decrease. Third grade stuff, maybe, but all
# too easily mistaken if not laid out explicitly.
old_root_node.balance_offset -= max(new_root_node.balance_offset, 0)
new_root_node.balance_offset -= 1
# Here, instead of adding the greatest one of either a potentially
# positive number or zero (as in leftward rotation), we will be
# "adding" the least one of a potentially negative number or zero,
# (making the number either decrease in value or stay the same).
new_root_node.balance_offset += min(old_root_node.balance_offset, 0)
# Once again, a full derivation of left_rotate()'s version
# of the above balance_offset modification may be found at
# the same site I referenced while doing this assignment:
# http://interactivepython.org/
# courselib/static/pythonds/Trees/balanced.html
def insert(self, val):
''' Insert val into the binary search tree as a node, maintaining
order. If val is already present, it will be ignored. '''
if val is None:
raise TypeError("NoneType is not sortable")
if not isinstance(val, int):
raise TypeError("May only insert integers")
# If there's no root node, make this node the root node.
# It's a bit like a reset button, and it's more reliable to
# verify these things here in the event of an imaginary
# horizontal attack on tree integrity.
if self.root_node is None:
self.root_node = Node(val)
self.size_counter = 1
# The center node has balance zero, by definition:
self.size_balance_counter = 0
else:
current_node = self.root_node
passes_so_far = 0
which_side_are_we_placing_it_on = "Unknown"
# Break after node creation or upon duplication discovery.
while True:
# This will only be incremented when the tree moves
# down a rank, and since it resets to zero every insertion,
# this reflects the depth of the tree.
passes_so_far += 1
# No-duplication rule:
if current_node.value == val:
break
# If it's smaller than the current node,
# it must go somewhere on the left.
if current_node.value > val:
# Only updates on the first branchpoint.
if which_side_are_we_placing_it_on == "Unknown":
which_side_are_we_placing_it_on = "Left"
if current_node.left is None:
# Must be incremented upon completion
# to reflect the newly added branch.
# Above the new Node construction
# because it is used by it.
passes_so_far += 1
current_node.left = Node(val)
current_node.left.parent = current_node
self.size_counter += 1
# This information is related to the first branchpoint;
# it cannot be determined from the mere fact we are
# placing a node which is on the left of *something*.
if which_side_are_we_placing_it_on == "Left":
self.size_balance_counter += 1
# If a node was added here, check for
# and correct potential tree imbalances:
self.correct_balance(current_node.left)
else:
# passes_so_far updates at the top of the loop.
# It doesn't need to be touched here.
current_node = current_node.left
# Then it's closer to the median value of the tree.
# This is not like the binary heap; the middlemost
# value is at the root.
elif current_node.value < val:
# Only updates on the first branchpoint.
if which_side_are_we_placing_it_on == "Unknown":
which_side_are_we_placing_it_on = "Right"
if current_node.right is None:
# Must be incremented upon completion
# to reflect the newly added branch.
# Above the new Node construction
# because it is used by it.
passes_so_far += 1
current_node.right = Node(val)
current_node.right.parent = current_node
self.size_counter += 1
# This information is related to the first branchpoint;
# it cannot be determined from the mere fact we are
# placing a node which is on the right of *something*.
if which_side_are_we_placing_it_on == "Right":
self.size_balance_counter -= 1
# If a node was added here, check for
# and correct potential tree imbalances:
self.correct_balance(current_node.right)
else:
# passes_so_far updates at the top of the loop.
# It doesn't need to be touched here.
current_node = current_node.right
# If the node is precisely equal, it violates the
# no-duplication rule. It should never get here, but
# just in case I'm not as smart as I think I am...
else:
print("Double violation of no-duplication rule discovered")
break
def contains(self, val, return_the_node=False):
        ''' Return True if val is in the binary search tree;
otherwise, return False. '''
if val is None:
raise ValueError("NoneType is not sortable")
# If there's no root node, make this node the root node.
if self.root_node is None:
return False
else:
current_node = self.root_node
# Break after node creation or upon duplication discovery.
while True:
# No-duplication rule:
if current_node.value == val:
if return_the_node is True:
# NOTE: if there is no such node,
# the return is still False.
return current_node
else:
return True
# If it's smaller than the current node,
# it must go somewhere on the left.
if current_node.value > val:
if current_node.left is not None:
current_node = current_node.left
# Since we start at the root, False is safe here.
# In fact, it's required to prevent infinilooping.
else:
return False
# Then it must be somewhere on the right.
elif current_node.value < val:
if current_node.right is not None:
current_node = current_node.right
else:
return False
elif (not current_node.left) and (not current_node.right):
return False
# Double violation of no-duplication rule
else:
print("Double violation of no-duplication rule discovered")
break
def size(self):
return self.size_counter
def depth(self):
return self._return_tree_max_depth()
def balance(self):
# Deprecated in favor of a recursive solution:
# return self.size_balance_counter
# Deprecated because size balance is unimportant for BSTs:
# return self._return_tree_size_balance(self.root_node)
# This returns the HEIGHT balance of the BST:
return self.root_node.balance_offset
def _return_tree_size_balance(self, current_node, is_first_pass=True):
# This returns SIZE imbalance for a node's subtrees.
# Size imbalances are totally unimportant for lookup times in
# a binary search tree, because comparisons only happen at
# branching points.
# So, this code is largely unimportant and is only a novelty now.
# Note that this function is recursive in three locations, but
# only ever calls recursion at most twice from each previous call.
# It makes zero recursive calls on nodes with no branches.
# The root_node has zero effect on balance, but every other node
# will change the balance counter by one each.
return_value = 0
# This is the result of my discovering AVL trees only balance
# subtrees by HEIGHT, not SIZE. The exception is only raised
# if AVL is not working properly -- the balance_offset tracks
# HEIGHT BALANCE of subtrees, not SIZE BALANCE, which is tracked
# by self._return_tree_size_balance(), which is totally unimportant
# for the purposes of speeding up information retrieval.
if current_node.balance_offset > 1:
raise Exception(" HEIGHT IMBALANCE! {}".format(
current_node.balance_offset))
if is_first_pass is False:
return_value += 1
if current_node.left:
return_value += self._return_tree_size_balance(
current_node.left, is_first_pass=False)
# Leaving in the following few lines for future reference:
# if is_first_pass == True:
# leftside = self._return_tree_size_balance(
# current_node.left, is_first_pass=False)
# print("Leftside: {}".format(leftside))
# print("Leftside in-order print:")
# self.in_order_print(current_node.left)
# Only the top of the recursion tree should flip the sign of the
# size of the right portion of the tree (to negative).
if is_first_pass is True:
if current_node.right:
return_value -= self._return_tree_size_balance(
current_node.right, is_first_pass=False)
# Leaving in the following few lines for future reference:
# rightside = self._return_tree_size_balance(
# current_node.right, is_first_pass=False)
# print("Rightside: -{}".format(rightside))
# print("Rightside in-order print:")
# self.in_order_print(current_node.right)
elif is_first_pass is False:
if current_node.right:
return_value += self._return_tree_size_balance(
current_node.right, is_first_pass=False)
return return_value
def delete(self, val, current_node=None):
# This function handles both delete-by-object and delete-by-value.
# Note that a Node containing another Node will fail this check,
# but then, that shouldn't ever happen. Caveat... usor?
        if isinstance(current_node, Node):
            the_node_to_delete = current_node
        elif isinstance(val, Node):
            # Covers the internal delete-by-object calls below, which pass
            # the node itself as the first positional argument.
            the_node_to_delete = val
        else:
            the_node_to_delete = self.contains(val, return_the_node=True)
# contains(val, return_the_node=True) will return False if there
# is no node with that value in the tree.
if the_node_to_delete is False:
# Then the node is not in the tree.
# "Fail" gracefully:
return None
elif isinstance(the_node_to_delete, Node):
# If it gets past that check, we know it's in the tree because
# self.contains() actually returned a node from the tree.
# So, do balance ahead of time:
if the_node_to_delete.value == self.root_node.value:
# Then it's the root node.
# Still needs to be fully considered for deletion,
# so we can't end the function when we know this.
self.size_balance_counter += 0 # Syntactic consistency
elif the_node_to_delete.value > self.root_node.value:
# Righter trees are more negative,
# lefter trees are more positive.
self.size_balance_counter += 1
elif the_node_to_delete.value < self.root_node.value:
self.size_balance_counter -= 1
# If the node is a "leaf" (ie, it has no descendants),
# delete it outright.
if ((the_node_to_delete.left is None)
and (the_node_to_delete.right is None)):
# The root_node case:
if the_node_to_delete.parent is not None:
if the_node_to_delete.parent.right == the_node_to_delete:
the_node_to_delete.parent.right = None
if the_node_to_delete.parent.left == the_node_to_delete:
the_node_to_delete.parent.left = None
else:
# Inform the tree it has no root_node anymore.
self.root_node = None
# Do we even need to do this if we remove the references?
# Yes, since extra-arboreal objects might still contain
# references to this node.
                # This implementation does not rebalance after removing a
                # leaf (a simplification; a strict AVL delete would also
                # update the offsets of the leaf's ancestors).
del the_node_to_delete
return None
# If the node is a branch with one descendant,
# mend the tree by connecting that descendant to
# the node's parent.
elif ((the_node_to_delete.right is not None)
and (the_node_to_delete.left is None)):
if the_node_to_delete.parent is not None:
the_node_to_delete.parent.right = the_node_to_delete.right
the_node_to_delete.right.parent = the_node_to_delete.parent
# AVL-balanced trees must rebalance at every node deletion:
self.correct_balance(the_node_to_delete.parent)
else:
# Inform the tree the root_node has changed:
self.root_node = the_node_to_delete.right
self.correct_balance(self.root_node)
del the_node_to_delete
elif ((the_node_to_delete.right is None)
and (the_node_to_delete.left is not None)):
if the_node_to_delete.parent is not None:
the_node_to_delete.parent.left = the_node_to_delete.left
the_node_to_delete.left.parent = the_node_to_delete.parent
# AVL-balanced trees must rebalance at every node deletion:
self.correct_balance(the_node_to_delete.parent)
else:
# Inform the tree the root_node has changed:
self.root_node = the_node_to_delete.left
self.correct_balance(self.root_node)
del the_node_to_delete
# If the node is a branch with two descendants,
# mend the tree in a way that brings it closer
# to a well-balanced state (self.balance == 0)
elif ((the_node_to_delete.right is not None)
and (the_node_to_delete.left is not None)):
# This function returns the length of a given node's subtree.
# It is to be called on the_node_to_delete.left AND
# the_node_to_delete.right, and whichever returns greater will
# be the new replacement node.
# If tied, which_way_to_balance_the_whole_tree will decide it.
def _find_furthest_subtree_size_and_node(
each_node, which_way_at_top, current_depth=1):
current_depth += 1
# Which way at top is opposite the way
# we're looking down the subtree.
if which_way_at_top == "Left":
if each_node.right is not None:
return _find_furthest_subtree_size_and_node(
each_node.right, "Left", current_depth)
else:
return current_depth, each_node
else:
if each_node.left is not None:
return _find_furthest_subtree_size_and_node(
each_node.left, "Right", current_depth)
else:
return current_depth, each_node
left_subtree_size, rightest_left_subtree_node \
= _find_furthest_subtree_size_and_node(
the_node_to_delete.left, "Left")
right_subtree_size, leftest_right_subtree_node \
= _find_furthest_subtree_size_and_node(
the_node_to_delete.right, "Right")
# # Hackishly force one to be bigger if they're equal.
# # Makes it balance by height, since it's an AVL tree.
# if left_subtree_size == right_subtree_size:
# # Add it to the right subtree
# # because negative balance is righter.
# right_subtree_size += (self.root_node.balance_offset /
# abs(self.root_node.balance_offset))
if left_subtree_size >= right_subtree_size:
# Then rebalance the tree using the left
# subtree as the new replacement node.
the_node_to_delete.value = rightest_left_subtree_node.value
# We must run delete() on the rightest left subtree
# node because it could still have a left branch on it.
self.delete(rightest_left_subtree_node)
elif left_subtree_size < right_subtree_size:
# Then rebalance the tree using the right
# subtree as the new replacement node.
the_node_to_delete.value = leftest_right_subtree_node.value
                    # We must run delete() on the leftest right subtree
                    # node because it could still have a right branch on it.
self.delete(leftest_right_subtree_node)
# I realized it's not possible to quickly tell if there's
# another node with the same depth on some other branch
# when trying to find depth via state held in a Node attribute.
# So I made a recursive solution that adds the depth
# of every node to a list held by the tree and finds
# the max value in that list, and removed Nodes' "depth" attribute.
return None
else:
raise TypeError("%s returned by contains but is not Node type"
% (the_node_to_delete))
def _return_tree_max_depth(self):
# Reset the old list.
# This list is kept in the tree object because
# we need lots of little threads to add to it
# while they're branching out from each other.
# I think having them all return things would
# cause return problems or something -- I could
# append the result of each thing to a list and
# send the list up, but this is easier to debug...
self._depth_finder_list = []
# Init it to zero, since we always start "above" the root node.
def _recursive_depth_list_builder(root_of_current_comparison,
depth_at_this_step=0):
if root_of_current_comparison is None:
# This is a tree-level list so that
# many recursive branches can all add to it.
# I'm virtually certain this is not ideal,
# but I also think it'll work!
self._depth_finder_list.append(depth_at_this_step)
return
# Increment this AFTER we determine if there was a node here
# or not, since we append this value to the list BEFORE this.
depth_at_this_step += 1
_recursive_depth_list_builder(root_of_current_comparison.left,
depth_at_this_step=depth_at_this_step)
_recursive_depth_list_builder(root_of_current_comparison.right,
depth_at_this_step=depth_at_this_step)
_recursive_depth_list_builder(self.root_node)
# If it didn't return any list contents, it
# should return a depth of zero, since that's
# how it got that problem in the first place.
if len(self._depth_finder_list) == 0:
return 0
else:
return max(self._depth_finder_list)
# Reference:
# http://stackoverflow.com/questions/5444394/
# implementing-binary-search-tree-in-python
# Is a public function, so no underscore.
    def in_order_print(self, root_of_current_comparison='self.root_node', returning=False):
''' Print the entire tree in ascending order of value.
This function is always called with a Node from the tree
it's called on. To print the whole tree, call:
self.in_order_print(self.root_node)
To return values in a list instead of printing them
individually, use the kwarg returning=True '''
if root_of_current_comparison == 'self.root_node':
root_of_current_comparison = self.root_node
if not root_of_current_comparison:
return []
return_list = []
return_list += self.in_order_print(root_of_current_comparison.left, returning=True)
return_list += [root_of_current_comparison.value]
return_list += self.in_order_print(root_of_current_comparison.right, returning=True)
if returning is True:
return return_list
if returning is False:
print return_list
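# Editorial addition: a minimal usage sketch (the function name and sample
# values are assumptions, not part of the original module). Kept as a plain
# function so importing the module stays free of side effects.
def _example_usage():
    tree = BalancingBinarySearchTree()
    # Insertions keep the tree height-balanced via correct_balance().
    for value in (50, 30, 70, 20, 40, 60, 80):
        tree.insert(value)
    # Lookups walk at most depth() comparisons, i.e. O(log n).
    assert tree.contains(40) is True
    assert tree.contains(99) is False
    # Deleting an interior node promotes a value from one of its subtrees.
    tree.delete(30)
    # Returns the remaining values in ascending order.
    return tree.in_order_print(tree.root_node, returning=True)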
if __name__ == "__main__":
balancing_tree = BalancingBinarySearchTree()
print("The worst case scenario to find 9 in an unbalanced tree\n"
"would be as long as a linked list, or O(n).\n\nHowever, for "
"an AVL tree, it is merely O(log n), because\nthe tree balances "
"by height as necessary at every insertion.\nThe depth of this "
"worst-case tree is therefore only four when\nit could have "
"been nine, and its height balance offset is,\ncorrectly, "
"fewer than two points from zero:")
for each in range(1, 10):
balancing_tree.insert(each)
balancing_tree.in_order_print(balancing_tree.root_node)
print("Size: %r\nDepth: %r\nBalance:%r\n" % (balancing_tree.size(),
balancing_tree.depth(),
balancing_tree.balance()))
balancing_tree = BalancingBinarySearchTree()
print("Best case scenario to find 9 is constant time:")
balancing_tree.insert(9)
balancing_tree.in_order_print(balancing_tree.root_node)
print("Size: %r\nDepth: %r\nBalance:%r\n" % (balancing_tree.size(),
balancing_tree.depth(),
balancing_tree.balance()))
print("Due to unremitting balancing at every insertion, the average\n"
"case lookup operation on *all* AVL-sorted BSTs is O(log n).\n")
proceed = raw_input("The algorithm will next print the contents and\n"
"metrics of one hundred randomly generated trees.\n"
"If this is not desired, press control-c.\n> ")
import random
print("\n================= Begin random tree examples =================\n")
for each_pass in range(0, 100):
balancing_tree = BalancingBinarySearchTree()
for each in range(5, random.randint(10, 100)):
balancing_tree.insert(random.randint(0, 100))
balancing_tree.in_order_print(balancing_tree.root_node)
print("Size: %r\nDepth: %r\nBalance:%r\n" % (balancing_tree.size(),
balancing_tree.depth(),
balancing_tree.balance()))
| BFriedland/data-structures | balancing_tree/balancing_tree.py | Python | mit | 36,928 |
# pylint: disable=W0611
# flake8: noqa
from pandas.core.arrays.sparse import SparseArray, SparseDtype
from pandas.core.sparse.series import SparseSeries
from pandas.core.sparse.frame import SparseDataFrame
| harisbal/pandas | pandas/core/sparse/api.py | Python | bsd-3-clause | 206 |
# -*- coding: utf-8 -*-
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from datetime import timedelta
from gluon import current, Field
from gluon.html import *
from gluon.storage import Storage
from gluon.validators import IS_EMPTY_OR, IS_NOT_EMPTY
from s3.s3fields import S3Represent
from s3.s3query import FS
from s3.s3utils import S3DateTime, s3_auth_user_represent_name, s3_avatar_represent, s3_unicode
from s3.s3validators import IS_ONE_OF
T = current.T
s3 = current.response.s3
settings = current.deployment_settings
datetime_represent = lambda dt: S3DateTime.datetime_represent(dt, utc=True)
"""
Template settings for DRM Portal
"""
# -----------------------------------------------------------------------------
# Pre-Populate
settings.base.prepopulate = ("DRMP", "default/users")
settings.base.system_name = T("Timor-Leste Disaster Risk Management Information System")
settings.base.system_name_short = T("DRMIS")
# =============================================================================
# System Settings
# -----------------------------------------------------------------------------
# Authorization Settings
settings.auth.registration_requires_approval = True
settings.auth.registration_requires_verification = False
settings.auth.registration_requests_organisation = True
#settings.auth.registration_organisation_required = True
settings.auth.registration_requests_site = False
# Approval emails get sent to all admins
settings.mail.approver = "ADMIN"
settings.auth.registration_link_user_to = {"staff": T("Staff")}
settings.auth.registration_link_user_to_default = ["staff"]
settings.auth.registration_roles = {"organisation_id": ["USER"],
}
# Terms of Service to be able to Register on the system
# uses <template>/views/tos.html
settings.auth.terms_of_service = True
settings.auth.show_utc_offset = False
settings.auth.show_link = False
settings.auth.record_approval = True
settings.auth.record_approval_required_for = ("org_organisation",)
# -----------------------------------------------------------------------------
# Security Policy
settings.security.policy = 6 # Realms
settings.security.map = True
# Owner Entity
settings.auth.person_realm_human_resource_site_then_org = False
def drmp_realm_entity(table, row):
"""
Assign a Realm Entity to records
"""
tablename = table._tablename
if tablename == "cms_post":
# Give the Post the Realm of the author's Organisation
db = current.db
utable = db.auth_user
otable = current.s3db.org_organisation
if "created_by" in row:
query = (utable.id == row.created_by) & \
(otable.id == utable.organisation_id)
else:
query = (table.id == row.id) & \
(utable.id == table.created_by) & \
(otable.id == utable.organisation_id)
org = db(query).select(otable.pe_id,
limitby=(0, 1)).first()
if org:
return org.pe_id
# Follow normal rules
return 0
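# Editorial note: in effect, a cms_post authored by a user whose account
# belongs to organisation X is assigned X's pe_id as its realm entity, so
# the realm-based security policy configured above scopes that post to X.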
settings.auth.realm_entity = drmp_realm_entity
# -----------------------------------------------------------------------------
# Theme (folder to use for views/layout.html)
settings.base.theme = "DRMP"
settings.ui.formstyle_row = "bootstrap"
settings.ui.formstyle = "bootstrap"
settings.ui.filter_formstyle = "table_inline"
#settings.gis.map_height = 600
#settings.gis.map_width = 854
# -----------------------------------------------------------------------------
# L10n (Localization) settings
settings.L10n.languages = OrderedDict([
("en", "English"),
("tet", "Tetum"),
])
# Default Language
settings.L10n.default_language = "tet"
# Default timezone for users
settings.L10n.utc_offset = "UTC +0900"
# Unsortable 'pretty' date format
settings.L10n.date_format = "%d %b %Y"
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","
# Uncomment this to Translate CMS Series Names
settings.L10n.translate_cms_series = True
# Restrict the Location Selector to just certain countries
settings.gis.countries = ["TL"]
# Until we add support to S3LocationSelector to set dropdowns from LatLons
#settings.gis.check_within_parent_boundaries = False
# Uncomment to hide Layer Properties tool
settings.gis.layer_properties = False
# Uncomment to display the Map Legend as a floating DIV
settings.gis.legend = "float"
# Resources which can be directly added to the main map
settings.gis.poi_create_resources = None
# GeoNames username
settings.gis.geonames_username = "tldrmp"
# -----------------------------------------------------------------------------
# Finance settings
settings.fin.currencies = {
"AUD" : T("Australian Dollars"),
"EUR" : T("Euros"),
"GBP" : T("Great British Pounds"),
"USD" : T("United States Dollars"),
}
# -----------------------------------------------------------------------------
# Enable this for a UN-style deployment
#settings.ui.cluster = True
# Enable this to use the label 'Camp' instead of 'Shelter'
settings.ui.camp = True
# -----------------------------------------------------------------------------
# Uncomment to restrict the export formats available
settings.ui.export_formats = ["xls"]
settings.ui.update_label = "Edit"
# Custom icon classes
settings.ui.custom_icons = {
"alert": "icon-alert",
"activity": "icon-activity",
"assessment": "icon-assessment",
"contact": "icon-contact",
"incident": "icon-incident",
"project": "icon-project",
"report": "icon-report",
"resource": "icon-resource",
}
# Uncomment to disable responsive behavior of datatables
# - Disabled until tested
settings.ui.datatables_responsive = False
# Disabled until ready for prime-time
settings.search.filter_manager = False
# =============================================================================
# Module Settings
# -----------------------------------------------------------------------------
# Human Resource Management
settings.hrm.staff_label = "Contacts"
# Uncomment to allow Staff & Volunteers to be registered without an email address
settings.hrm.email_required = False
# Uncomment to show the Organisation name in HR represents
settings.hrm.show_organisation = True
# Uncomment to disable Staff experience
settings.hrm.staff_experience = False
# Uncomment to disable the use of HR Credentials
settings.hrm.use_credentials = False
# Uncomment to disable the use of HR Skills
settings.hrm.use_skills = False
# Uncomment to disable the use of HR Teams
settings.hrm.teams = False
# Uncomment to hide fields in S3AddPersonWidget[2]
settings.pr.request_dob = False
settings.pr.request_gender = False
# -----------------------------------------------------------------------------
# Org
settings.org.site_label = "Office"
# -----------------------------------------------------------------------------
# Project
# Uncomment this to use multiple Organisations per project
settings.project.multiple_organisations = True
# -----------------------------------------------------------------------------
# Notifications
# Template for the subject line in update notifications
settings.msg.notify_subject = "$S %s" % T("Notification")
# -----------------------------------------------------------------------------
def currency_represent(v):
"""
Custom Representation of Currencies
"""
if v == "USD":
return "$"
elif v == "AUD":
return "A$"
elif v == "EUR":
return "€"
elif v == "GBP":
return "£"
else:
return current.messages["NONE"]
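# Editorial usage note: currency_represent("GBP") returns the symbol "£",
# while an unrecognised code falls back to current.messages["NONE"].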
# -----------------------------------------------------------------------------
def render_contacts(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Contacts on the Profile pages
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["hrm_human_resource.id"]
item_class = "thumbnail"
raw = record._row
#author = record["hrm_human_resource.modified_by"]
date = record["hrm_human_resource.modified_on"]
fullname = record["hrm_human_resource.person_id"]
job_title = raw["hrm_human_resource.job_title_id"] or ""
if job_title:
job_title = "- %s" % record["hrm_human_resource.job_title_id"]
#organisation = record["hrm_human_resource.organisation_id"]
organisation_id = raw["hrm_human_resource.organisation_id"]
#org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
person_id = raw["hrm_human_resource.person_id"]
location = record["org_site.location_id"]
location_id = raw["org_site.location_id"]
location_url = URL(c="gis", f="location",
args=[location_id, "profile"])
address = raw["gis_location.addr_street"] or T("no office assigned")
email = raw["pr_email_contact.value"] or T("no email address")
if isinstance(email, list):
email = email[0]
phone = raw["pr_phone_contact.value"] or T("no phone number")
if isinstance(phone, list):
phone = phone[0]
if person_id:
# Use Personal Avatar
# @ToDo: Optimise by not doing DB lookups within render, but doing these in the bulk query
avatar = s3_avatar_represent(person_id,
tablename="pr_person",
_class="media-object")
else:
avatar = IMG(_src=URL(c="static", f="img", args="blank-user.gif"),
_class="media-object")
# Edit Bar
permit = current.auth.s3_has_permission
table = current.s3db.pr_person
if permit("update", table, record_id=person_id):
vars = {"refresh": list_id,
"record": record_id,
}
f = current.request.function
if f == "organisation" and organisation_id:
vars["(organisation)"] = organisation_id
edit_url = URL(c="hrm", f="person",
args=[person_id, "update.popup"],
vars=vars)
title_update = current.response.s3.crud_strings.hrm_human_resource.title_update
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=edit_url,
_class="s3_modal",
_title=title_update,
)
else:
edit_btn = ""
edit_url = "#"
title_update = ""
# Deletions failing due to Integrity Errors
#if permit("delete", table, record_id=person_id):
# delete_btn = A(I(" ", _class="icon icon-remove-sign"),
# _class="dl-item-delete",
# )
#else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
avatar = A(avatar,
_href=edit_url,
_class="pull-left s3_modal",
_title=title_update,
)
# Render the item
body = TAG[""](P(fullname,
" ",
SPAN(job_title),
_class="person_pos",
),
P(I(_class="icon-phone"),
" ",
SPAN(phone),
" ",
I(_class="icon-envelope-alt"),
" ",
SPAN(email),
_class="main_contact_ph",
),
P(I(_class="icon-home"),
" ",
address,
_class="main_office-add",
))
item = DIV(DIV(SPAN(" ", _class="card-title"),
SPAN(A(location,
_href=location_url,
),
_class="location-title",
),
SPAN(date,
_class="date-title",
),
edit_bar,
_class="card-header",
),
DIV(avatar,
DIV(DIV(body,
# Organisation only needed if displaying elsewhere than org profile
# Author confusing with main contact record
#DIV(#author,
# #" - ",
# A(organisation,
# _href=org_url,
# _class="card-organisation",
# ),
# _class="card-person",
# ),
_class="media",
),
_class="media-body",
),
_class="media",
),
#docs,
_class=item_class,
_id=item_id,
)
return item
# -----------------------------------------------------------------------------
def render_events(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Events on the Disaster Selection Page
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["event_event.id"]
item_class = "thumbnail"
raw = record._row
name = record["event_event.name"]
date = record["event_event.start_date"]
closed = raw["event_event.closed"]
event_type = record["event_event_type.name"]
event_url = URL(c="event", f="event",
args=[record_id, "profile"])
comments = raw["event_event.comments"] or ""
if closed:
edit_bar = DIV()
else:
item_class = "%s disaster" % item_class
permit = current.auth.s3_has_permission
table = resource.table
if permit("update", table, record_id=record_id):
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=URL(c="event", f="event",
args=[record_id, "update.popup"],
vars={"refresh": list_id,
"record": record_id}),
_class="s3_modal",
_title=current.response.s3.crud_strings.event_event.title_update,
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-remove-sign"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Tallies
tally_alerts = 0
tally_incidents = 0
tally_assessments = 0
tally_activities = 0
tally_reports = 0
db = current.db
s3db = current.s3db
ltable = s3db.event_post
table = db.cms_post
stable = db.cms_series
types = ["Alert", "Incident", "Assessment", "Activity", "Report"]
query = (table.deleted == False) & \
(ltable.event_id == record_id) & \
(ltable.post_id == table.id) & \
(stable.id == table.series_id) & \
(stable.name.belongs(types))
rows = db(query).select(stable.name)
for row in rows:
series = row.name
if series == "Alert":
tally_alerts += 1
elif series == "Incident":
tally_incidents += 1
elif series == "Assessment":
tally_assessments += 1
elif series == "Activity":
tally_activities += 1
elif series == "Report":
tally_reports += 1
# Render the item
item = DIV(DIV(A(IMG(_class="media-object",
_src=URL(c="static",
f="img",
args=["event", "%s.png" % event_type]),
),
_class="pull-left",
_href=event_url,
),
DIV(SPAN(A(name,
_href=event_url,
_class="media-heading"
),
),
SPAN(date,
_class="date-title",
),
edit_bar,
_class="card-header-select",
),
DIV(P(comments),
P(T("Alerts"),
SPAN(tally_alerts,
_class="badge badge-warning",
),
T("Incidents"),
SPAN(tally_incidents,
_class="badge",
),
T("Assessments"),
SPAN(tally_assessments,
_class="badge",
),
T("Activities"),
SPAN(tally_activities,
_class="badge",
),
T("Reports"),
SPAN(tally_reports,
_class="badge",
),
_class="tally",
),
_class="media-body",
),
_class="media",
),
_class=item_class,
_id=item_id,
)
return item
# -----------------------------------------------------------------------------
def quote_unicode(s):
"""
Quote unicode strings for URLs for Rocket
"""
chars = []
for char in s:
o = ord(char)
if o < 128:
chars.append(char)
else:
chars.append(hex(o).replace("0x", "%").upper())
return "".join(chars)
# -----------------------------------------------------------------------------
def render_locations(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Locations on the Selection Page
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["gis_location.id"]
item_class = "thumbnail"
raw = record._row
name = raw["gis_location.name"]
level = raw["gis_location.level"]
L1 = raw["gis_location.L1"]
L2 = raw["gis_location.L2"]
L3 = raw["gis_location.L3"]
location_url = URL(c="gis", f="location",
args=[record_id, "profile"])
if level == "L1":
represent = name
if level == "L2":
represent = "%s (%s)" % (name, L1)
elif level == "L3":
represent = "%s (%s, %s)" % (name, L2, L1)
else:
# L0 or specific
represent = name
# Users don't edit locations
# permit = current.auth.s3_has_permission
# table = current.db.gis_location
# if permit("update", table, record_id=record_id):
# edit_btn = A(I(" ", _class="icon icon-edit"),
# _href=URL(c="gis", f="location",
# args=[record_id, "update.popup"],
# vars={"refresh": list_id,
# "record": record_id}),
# _class="s3_modal",
# _title=current.response.s3.crud_strings.gis_location.title_update,
# )
# else:
# edit_btn = ""
# if permit("delete", table, record_id=record_id):
# delete_btn = A(I(" ", _class="icon icon-remove-sign"),
# _class="dl-item-delete",
# )
# else:
# delete_btn = ""
# edit_bar = DIV(edit_btn,
# delete_btn,
# _class="edit-bar fright",
# )
# Tallies
# NB We assume that all records are readable here
# Search all sub-locations
locations = current.gis.get_children(record_id)
locations = [l.id for l in locations]
locations.append(record_id)
db = current.db
s3db = current.s3db
ltable = s3db.project_location
table = db.project_project
query = (table.deleted == False) & \
(ltable.deleted == False) & \
(ltable.project_id == table.id) & \
(ltable.location_id.belongs(locations))
rows = db(query).select(table.id, distinct=True)
tally_projects = len(rows)
tally_incidents = 0
tally_activities = 0
tally_reports = 0
table = s3db.cms_post
stable = db.cms_series
types = ["Incident", "Activity", "Report"]
query = (table.deleted == False) & \
(table.location_id.belongs(locations)) & \
(stable.id == table.series_id) & \
(stable.name.belongs(types))
rows = db(query).select(stable.name)
for row in rows:
series = row.name
if series == "Incident":
tally_incidents += 1
elif series == "Activity":
tally_activities += 1
elif series == "Report":
tally_reports += 1
# https://code.google.com/p/web2py/issues/detail?id=1533
public_url = current.deployment_settings.get_base_public_url()
if public_url.startswith("http://127.0.0.1"):
# Assume Rocket
image = quote_unicode(s3_unicode(name))
else:
# Assume Apache or Cherokee
image = s3_unicode(name)
# Render the item
item = DIV(DIV(A(IMG(_class="media-object",
_src="%s/%s.png" % (URL(c="static",
f="themes",
args=["DRMP", "img"]),
image),
),
_class="pull-left",
_href=location_url,
),
DIV(SPAN(A(represent,
_href=location_url,
_class="media-heading"
),
),
#edit_bar,
_class="card-header-select",
),
DIV(P(T("Incidents"),
SPAN(tally_incidents,
_class="badge",
),
T("Reports"),
SPAN(tally_reports,
_class="badge",
),
T("Projects"),
SPAN(tally_projects,
_class="badge",
),
T("Activities"),
SPAN(tally_activities,
_class="badge",
),
_class="tally",
),
_class="media-body",
),
_class="media",
),
_class=item_class,
_id=item_id,
)
return item
# -----------------------------------------------------------------------------
def render_locations_profile(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Locations on the Profile Page
- UNUSED
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["gis_location.id"]
item_class = "thumbnail"
raw = record._row
name = record["gis_location.name"]
location_url = URL(c="gis", f="location",
args=[record_id, "profile"])
# Placeholder to maintain style
#logo = DIV(IMG(_class="media-object"),
# _class="pull-left")
# We don't Edit Locations
# Edit Bar
# permit = current.auth.s3_has_permission
# table = current.db.gis_location
# if permit("update", table, record_id=record_id):
# vars = {"refresh": list_id,
# "record": record_id,
# }
# f = current.request.function
# if f == "organisation" and organisation_id:
# vars["(organisation)"] = organisation_id
# edit_btn = A(I(" ", _class="icon icon-edit"),
# _href=URL(c="gis", f="location",
# args=[record_id, "update.popup"],
# vars=vars),
# _class="s3_modal",
# _title=current.response.s3.crud_strings.gis_location.title_update,
# )
# else:
# edit_btn = ""
# if permit("delete", table, record_id=record_id):
# delete_btn = A(I(" ", _class="icon icon-remove-sign"),
# _class="dl-item-delete",
# )
# else:
# delete_btn = ""
# edit_bar = DIV(edit_btn,
# delete_btn,
# _class="edit-bar fright",
# )
# Render the item
item = DIV(DIV(DIV(#SPAN(A(name,
# _href=location_url,
# ),
# _class="location-title"),
#" ",
#edit_bar,
P(A(name,
_href=location_url,
),
_class="card_comments"),
_class="span5"), # card-details
_class="row",
),
)
return item
# -----------------------------------------------------------------------------
def render_offices(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Offices on the Profile pages
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["org_office.id"]
item_class = "thumbnail"
raw = record._row
name = record["org_office.name"]
author = record["org_office.modified_by"]
date = record["org_office.modified_on"]
organisation = record["org_office.organisation_id"]
organisation_id = raw["org_office.organisation_id"]
location = record["org_office.location_id"]
location_id = raw["org_office.location_id"]
location_url = URL(c="gis", f="location",
args=[location_id, "profile"])
address = raw["gis_location.addr_street"]
office_type = record["org_office.office_type_id"]
logo = raw["org_organisation.logo"]
org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
if logo:
logo = A(IMG(_src=URL(c="default", f="download", args=[logo]),
_class="media-object",
),
_href=org_url,
_class="pull-left",
)
else:
logo = DIV(IMG(_class="media-object"),
_class="pull-left")
# Edit Bar
permit = current.auth.s3_has_permission
table = current.db.org_office
if permit("update", table, record_id=record_id):
vars = {"refresh": list_id,
"record": record_id,
}
f = current.request.function
if f == "organisation" and organisation_id:
vars["(organisation)"] = organisation_id
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=URL(c="org", f="office",
args=[record_id, "update.popup"],
vars=vars),
_class="s3_modal",
_title=current.response.s3.crud_strings.org_office.title_update,
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-remove-sign"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Render the item
avatar = logo
body = TAG[""](P(name),
P(I(_class="icon-flag"),
" ",
SPAN(office_type),
" ",
_class="main_contact_ph",
),
P(I(_class="icon-home"),
" ",
address,
_class="main_office-add",
))
item = DIV(DIV(SPAN(" ", _class="card-title"),
SPAN(A(location,
_href=location_url,
),
_class="location-title",
),
SPAN(date,
_class="date-title",
),
edit_bar,
_class="card-header",
),
DIV(avatar,
DIV(DIV(body,
DIV(author,
" - ",
A(organisation,
_href=org_url,
_class="card-organisation",
),
_class="card-person",
),
_class="media",
),
_class="media-body",
),
_class="media",
),
#docs,
_class=item_class,
_id=item_id,
)
return item
# -----------------------------------------------------------------------------
# @ToDo: Use s3db.org_organisation_list_layout ?
def render_organisations(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Organisations on the Stakeholder Selection Page
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["org_organisation.id"]
item_class = "thumbnail span6"
raw = record._row
name = record["org_organisation.name"]
logo = raw["org_organisation.logo"]
# @ToDo: Just take National offices
addresses = raw["gis_location.addr_street"]
if addresses:
if isinstance(addresses, list):
address = addresses[0]
else:
address = addresses
else:
address = ""
phone = raw["org_organisation.phone"] or ""
org_url = URL(c="org", f="organisation", args=[record_id, "profile"])
if logo:
logo = A(IMG(_src=URL(c="default", f="download", args=[logo]),
_class="media-object",
),
_href=org_url,
_class="pull-left",
)
else:
logo = DIV(IMG(_class="media-object"),
_class="pull-left")
permit = current.auth.s3_has_permission
table = current.db.org_organisation
if permit("update", table, record_id=record_id):
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=URL(c="org", f="organisation",
args=[record_id, "update.popup"],
vars={"refresh": list_id,
"record": record_id}),
_class="s3_modal",
_title=current.response.s3.crud_strings.org_organisation.title_update,
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-remove-sign"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Tallies
# NB We assume that all records are readable here
db = current.db
s3db = current.s3db
table = s3db.project_project
query = (table.deleted == False) & \
(table.organisation_id == record_id)
tally_projects = db(query).count()
tally_assessments = 0
tally_activities = 0
tally_reports = 0
table = s3db.cms_post
atable = db.auth_user
stable = db.cms_series
types = ["Assessment", "Activity", "Report"]
query = (table.deleted == False) & \
(table.created_by == atable.id) & \
(atable.organisation_id == record_id) & \
(stable.id == table.series_id) & \
(stable.name.belongs(types))
rows = db(query).select(stable.name)
for row in rows:
series = row.name
if series == "Assessment":
tally_assessments += 1
elif series == "Activity":
tally_activities += 1
elif series == "Report":
tally_reports += 1
# Render the item
item = DIV(DIV(logo,
DIV(SPAN(A(name,
_href=org_url,
_class="media-heading"
),
),
edit_bar,
_class="card-header-select",
),
DIV(P(I(_class="icon icon-phone"),
" ",
phone,
_class="main_contact_ph",
),
P(I(_class="icon icon-home"),
" ",
address,
_class="main_office-add",
),
P(T("Projects"),
SPAN(tally_projects,
_class="badge",
),
T("Activities"),
SPAN(tally_activities,
_class="badge",
),
T("Reports"),
SPAN(tally_reports,
_class="badge",
),
T("Assessments"),
SPAN(tally_assessments,
_class="badge",
),
_class="tally",
),
_class="media-body",
),
_class="media",
),
_class=item_class,
_id=item_id,
)
return item
# -----------------------------------------------------------------------------
def render_posts(list_id, item_id, resource, rfields, record, type=None):
"""
Custom dataList item renderer for CMS Posts on the Home & News Feed pages
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
@param type: ? (@todo)
"""
record_id = record["cms_post.id"]
item_class = "thumbnail"
raw = record._row
series = record["cms_post.series_id"]
date = record["cms_post.date"]
body = record["cms_post.body"]
location = record["cms_post.location_id"]
location_id = raw["cms_post.location_id"]
location_url = URL(c="gis", f="location", args=[location_id, "profile"])
author = record["cms_post.created_by"]
author_id = raw["cms_post.created_by"]
organisation = record["auth_user.organisation_id"]
organisation_id = raw["auth_user.organisation_id"]
org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
db = current.db
s3db = current.s3db
ltable = s3db.pr_person_user
ptable = db.pr_person
query = (ltable.user_id == author_id) & \
(ltable.pe_id == ptable.pe_id)
row = db(query).select(ptable.id,
limitby=(0, 1)
).first()
if row:
person_url = URL(c="hrm", f="person", args=[row.id])
else:
person_url = "#"
author = A(author,
_href=person_url,
)
# Use Personal Avatar
# @ToDo: Optimise by not doing DB lookups (especially duplicate) within render, but doing these in the bulk query
#avatar = s3_avatar_represent(author_id,
# _class="media-object")
#avatar = A(avatar,
# _href=person_url,
# _class="pull-left",
# )
# Use Organisation Logo
otable = db.org_organisation
row = db(otable.id == organisation_id).select(otable.logo,
limitby=(0, 1)
).first()
if row and row.logo:
logo = URL(c="default", f="download", args=[row.logo])
else:
logo = ""
avatar = IMG(_src=logo,
_height=50,
_width=50,
_style="padding-right:5px;",
_class="media-object")
avatar = A(avatar,
_href=org_url,
_class="pull-left",
)
# Edit Bar
permit = current.auth.s3_has_permission
table = db.cms_post
if permit("update", table, record_id=record_id):
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=URL(c="cms", f="post",
args=[record_id, "update.popup"],
vars={"refresh": list_id,
"record": record_id}),
_class="s3_modal",
_title=T("Edit %(type)s") % dict(type=T(series)),
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-remove-sign"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Dropdown of available documents
documents = raw["doc_document.file"]
if documents:
if not isinstance(documents, list):
documents = (documents,)
doc_list = UL(_class="dropdown-menu",
_role="menu",
)
retrieve = db.doc_document.file.retrieve
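# retrieve() returns (original filename, file handle); only the name is needed for display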
for doc in documents:
try:
doc_name = retrieve(doc)[0]
except (IOError, TypeError):
doc_name = current.messages["NONE"]
doc_url = URL(c="default", f="download",
args=[doc])
doc_item = LI(A(I(_class="icon-file"),
" ",
doc_name,
_href=doc_url,
),
_role="menuitem",
)
doc_list.append(doc_item)
docs = DIV(A(I(_class="icon-paper-clip"),
SPAN(_class="caret"),
_class="btn dropdown-toggle",
_href="#",
**{"_data-toggle": "dropdown"}
),
doc_list,
_class="btn-group attachments dropdown pull-right",
)
else:
docs = ""
if current.request.controller == "default":
# Mixed resource lists (Home, News Feed)
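# Derive the icon CSS class from the series name, e.g. "Training Material" -> "icon-training_material"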
icon = series.lower().replace(" ", "_")
card_label = TAG[""](I(_class="icon icon-%s" % icon),
SPAN(" %s" % T(series),
_class="card-title"))
# Type cards
if series == "Alert":
# Apply additional highlighting for Alerts
item_class = "%s disaster" % item_class
else:
card_label = SPAN(" ", _class="card-title")
# Render the item
if "newsfeed" not in current.request.args and series == "Event":
item = DIV(DIV(SPAN(date,
_class="date-title event",
),
SPAN(A(location,
_href=location_url,
),
_class="location-title",
),
edit_bar,
_class="card-header",
),
DIV(avatar,
DIV(DIV(body,
DIV(author,
" - ",
A(organisation,
_href=org_url,
_class="card-organisation",
),
_class="card-person",
),
_class="media",
),
_class="media-body",
),
_class="media",
),
docs,
_class=item_class,
_id=item_id,
)
else:
item = DIV(DIV(card_label,
SPAN(A(location,
_href=location_url,
),
_class="location-title",
),
SPAN(date,
_class="date-title",
),
edit_bar,
_class="card-header",
),
DIV(avatar,
DIV(DIV(body,
DIV(author,
" - ",
A(organisation,
_href=org_url,
_class="card-organisation",
),
_class="card-person",
),
_class="media",
),
_class="media-body",
),
_class="media",
),
docs,
_class=item_class,
_id=item_id,
)
return item
# For access from custom controllers
s3.render_posts = render_posts
# -----------------------------------------------------------------------------
def render_profile_posts(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for CMS Posts on the Profile pages
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["cms_post.id"]
item_class = "thumbnail"
raw = record._row
series = record["cms_post.series_id"]
date = record["cms_post.date"]
body = record["cms_post.body"]
event_id = raw["event_post.event_id"]
location = record["cms_post.location_id"]
location_id = raw["cms_post.location_id"]
location_url = URL(c="gis", f="location", args=[location_id, "profile"])
author = record["cms_post.created_by"]
author_id = raw["cms_post.created_by"]
organisation = record["auth_user.organisation_id"]
organisation_id = raw["auth_user.organisation_id"]
org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
db = current.db
s3db = current.s3db
ltable = s3db.pr_person_user
ptable = db.pr_person
query = (ltable.user_id == author_id) & \
(ltable.pe_id == ptable.pe_id)
row = db(query).select(ptable.id,
limitby=(0, 1)
).first()
if row:
person_url = URL(c="hrm", f="person", args=[row.id])
else:
person_url = "#"
author = A(author,
_href=person_url,
)
# Use Personal Avatar
# @ToDo: Optimise by not doing DB lookups (especially duplicate) within render, but doing these in the bulk query
#avatar = s3_avatar_represent(author_id,
# _class="media-object")
#avatar = A(avatar,
# _href=person_url,
# _class="pull-left",
# )
# Use Organisation Logo
otable = db.org_organisation
row = db(otable.id == organisation_id).select(otable.logo,
limitby=(0, 1)
).first()
if row and row.logo:
logo = URL(c="default", f="download", args=[row.logo])
else:
logo = ""
avatar = IMG(_src=logo,
_height=50,
_width=50,
#_style="padding-right:5px;",
_class="media-object")
avatar = A(avatar,
_href=org_url,
_class="pull-left",
)
# Edit Bar
permit = current.auth.s3_has_permission
table = db.cms_post
if permit("update", table, record_id=record_id):
T = current.T
vars = {"refresh": list_id,
"record": record_id,
"~.series_id$name": series,
}
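# refresh/record identify the dataList item to update after the popup closes;
# the series var is passed so the popup keeps the correct post type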
f = current.request.function
if f == "event" and event_id:
vars["(event)"] = event_id
if f == "location" and location_id:
vars["(location)"] = location_id
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=URL(c="cms", f="post",
args=[record_id, "update.popup"],
vars=vars),
_class="s3_modal",
_title=T("Edit %(type)s") % dict(type=T(series)),
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-remove-sign"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Dropdown of available documents
documents = raw["doc_document.file"]
if documents:
if not isinstance(documents, list):
documents = (documents,)
doc_list = UL(_class="dropdown-menu",
_role="menu",
)
retrieve = db.doc_document.file.retrieve
for doc in documents:
try:
doc_name = retrieve(doc)[0]
except IOError:
doc_name = current.messages["NONE"]
doc_url = URL(c="default", f="download",
args=[doc])
doc_item = LI(A(I(_class="icon-file"),
" ",
doc_name,
_href=doc_url,
),
_role="menuitem",
)
doc_list.append(doc_item)
docs = DIV(A(I(_class="icon-paper-clip"),
SPAN(_class="caret"),
_class="btn dropdown-toggle",
_href="#",
**{"_data-toggle": "dropdown"}
),
doc_list,
_class="btn-group attachments dropdown pull-right",
)
else:
docs = ""
# Render the item
class SMALL(DIV):
tag = "small"
item = DIV(DIV(DIV(avatar,
_class="span1"),
DIV(SPAN(A(location,
_href=location_url,
),
_class="location-title"),
" ",
SPAN(date,
_class="date-title"),
edit_bar,
P(body,
_class="card_comments"),
P(SMALL(" ", author, " ",
A(organisation,
_href=org_url,
_class="card-organisation",
),
),
_class="citation"),
docs,
_class="span5 card-details"),
_class="row",
),
_class=item_class,
_id=item_id,
)
return item
# -----------------------------------------------------------------------------
def render_projects(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Projects on the Profile pages
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["project_project.id"]
item_class = "thumbnail"
raw = record._row
name = record["project_project.name"]
author = record["project_project.modified_by"]
author_id = raw["project_project.modified_by"]
contact = record["project_project.human_resource_id"]
date = record["project_project.modified_on"]
organisation = record["project_project.organisation_id"]
organisation_id = raw["project_project.organisation_id"]
location = record["project_location.location_id"]
location_ids = raw["project_location.location_id"]
if isinstance(location_ids, list):
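# The represented value is a comma-joined string; split it so each location gets its own profile link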
locations = location.split(",")
locations_list = []
length = len(location_ids)
i = 0
for location_id in location_ids:
location_url = URL(c="gis", f="location",
args=[location_id, "profile"])
locations_list.append(A(locations[i], _href=location_url))
i += 1
if i != length:
locations_list.append(",")
else:
location_url = URL(c="gis", f="location",
args=[location_ids, "profile"])
locations_list = [A(location, _href=location_url)]
logo = raw["org_organisation.logo"]
org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
if logo:
avatar = A(IMG(_src=URL(c="default", f="download", args=[logo]),
_class="media-object",
),
_href=org_url,
_class="pull-left",
)
else:
avatar = DIV(IMG(_class="media-object"),
_class="pull-left")
db = current.db
s3db = current.s3db
ltable = s3db.pr_person_user
ptable = db.pr_person
query = (ltable.user_id == author_id) & \
(ltable.pe_id == ptable.pe_id)
row = db(query).select(ptable.id,
limitby=(0, 1)
).first()
if row:
person_url = URL(c="hrm", f="person", args=[row.id])
else:
person_url = "#"
author = A(author,
_href=person_url,
)
start_date = raw["project_project.start_date"] or ""
if start_date:
start_date = record["project_project.start_date"]
end_date = raw["project_project.end_date"] or ""
if end_date:
end_date = record["project_project.end_date"]
budget = record["project_project.budget"]
if budget:
budget = "USD %s" % budget
partner = record["project_partner_organisation.organisation_id"]
partner_ids = raw["project_partner_organisation.organisation_id"]
if isinstance(partner_ids, list):
partners = partner.split(",")
partners_list = []
length = len(partner_ids)
i = 0
for partner_id in partner_ids:
partner_url = URL(c="org", f="organisation",
args=[partner_id, "profile"])
partners_list.append(A(partners[i], _href=partner_url))
i += 1
if i != length:
partners_list.append(",")
elif partner_ids:
partner_url = URL(c="org", f="organisation",
args=[partner_ids, "profile"])
partners_list = [A(partner, _href=partner_url)]
else:
partners_list = [current.messages["NONE"]]
donor = record["project_donor_organisation.organisation_id"]
donor_ids = raw["project_donor_organisation.organisation_id"]
if isinstance(donor_ids, list):
donors = donor.split(",")
amounts = raw["project_donor_organisation.amount"]
if not isinstance(amounts, list):
amounts = [amounts for donor_id in donor_ids]
currencies = raw["project_donor_organisation.currency"]
if not isinstance(currencies, list):
currencies = [currencies for donor_id in donor_ids]
from s3.s3validators import IS_INT_AMOUNT
amount_represent = IS_INT_AMOUNT.represent
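# Render each donor as "Name - <currency><amount>" where an amount has been recorded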
donors_list = []
length = len(donor_ids)
i = 0
for donor_id in donor_ids:
if donor_id:
donor_url = URL(c="org", f="organisation",
args=[donor_id, "profile"])
donor = A(donors[i], _href=donor_url)
amount = amounts[i]
if amount:
donor = TAG[""](donor,
" - ",
currency_represent(currencies[i]),
amount_represent(amount))
else:
donor = current.messages["NONE"]
donors_list.append(donor)
i += 1
if i != length:
donors_list.append(",")
elif donor_ids:
donor_url = URL(c="org", f="organisation",
args=[donor_ids, "profile"])
donors_list = [A(donor, _href=donor_url)]
else:
donors_list = [current.messages["NONE"]]
# Edit Bar
permit = current.auth.s3_has_permission
table = current.db.project_project
if permit("update", table, record_id=record_id):
vars = {"refresh": list_id,
"record": record_id,
}
f = current.request.function
if f == "organisation" and organisation_id:
vars["(organisation)"] = organisation_id
# "record not found" since multiples here
#elif f == "location" and location_ids:
# vars["(location)"] = location_ids
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=URL(c="project", f="project",
args=[record_id, "update.popup"],
vars=vars),
_class="s3_modal",
_title=current.response.s3.crud_strings.project_project.title_update,
)
else:
# Read in Popup
edit_btn = A(I(" ", _class="icon icon-search"),
_href=URL(c="project", f="project",
args=[record_id, "read.popup"]),
_class="s3_modal",
_title=current.response.s3.crud_strings.project_project.title_display,
)
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-remove-sign"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Dropdown of available documents
documents = raw["doc_document.file"]
if documents:
if not isinstance(documents, list):
documents = (documents,)
doc_list = UL(_class="dropdown-menu",
_role="menu",
)
retrieve = db.doc_document.file.retrieve
for doc in documents:
try:
doc_name = retrieve(doc)[0]
except IOError:
doc_name = current.messages["NONE"]
doc_url = URL(c="default", f="download",
args=[doc])
doc_item = LI(A(I(_class="icon-file"),
" ",
doc_name,
_href=doc_url,
),
_role="menuitem",
)
doc_list.append(doc_item)
docs = DIV(A(I(_class="icon-paper-clip"),
SPAN(_class="caret"),
_class="btn dropdown-toggle",
_href="#",
**{"_data-toggle": "dropdown"}
),
doc_list,
_class="btn-group attachments dropdown pull-right",
)
else:
docs = ""
# Render the item
body = TAG[""](P(I(_class="icon-user"),
" ",
STRONG("%s:" % T("Focal Point")),
" ",
contact,
_class="main_contact_ph"),
P(I(_class="icon-calendar"),
" ",
STRONG("%s:" % T("Start & End Date")),
" ",
T("%(start_date)s to %(end_date)s") % \
dict(start_date=start_date,
end_date = end_date),
_class="main_contact_ph"),
P(I(_class="icon-link"),
" ",
STRONG("%s:" % T("Partner")),
" ",
*partners_list,
_class="main_contact_ph"),
P(I(_class="icon-money"),
" ",
STRONG("%s:" % T("Donor")),
" ",
*donors_list,
_class="main_office-add")
)
item = DIV(DIV(SPAN(" ", _class="card-title"),
SPAN(*locations_list,
_class="location-title"
),
SPAN(date,
_class="date-title",
),
edit_bar,
_class="card-header",
),
DIV(avatar,
DIV(DIV(body,
DIV(author,
" - ",
A(organisation,
_href=org_url,
_class="card-organisation",
),
_class="card-person",
),
_class="media",
),
_class="media-body",
),
_class="media",
),
docs,
_class=item_class,
_id=item_id,
)
return item
# -----------------------------------------------------------------------------
def render_resources(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Resources on the Profile pages
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["org_resource.id"]
item_class = "thumbnail"
raw = record._row
author = record["org_resource.modified_by"]
date = record["org_resource.modified_on"]
quantity = record["org_resource.value"]
resource_type = record["org_resource.parameter_id"]
body = "%s %s" % (quantity, T(resource_type))
comments = raw["org_resource.comments"]
organisation = record["org_resource.organisation_id"]
organisation_id = raw["org_resource.organisation_id"]
location = record["org_resource.location_id"]
location_id = raw["org_resource.location_id"]
location_url = URL(c="gis", f="location",
args=[location_id, "profile"])
logo = raw["org_organisation.logo"]
org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
if logo:
logo = A(IMG(_src=URL(c="default", f="download", args=[logo]),
_class="media-object",
),
_href=org_url,
_class="pull-left",
)
else:
logo = DIV(IMG(_class="media-object"),
_class="pull-left")
# Edit Bar
permit = current.auth.s3_has_permission
table = current.db.org_resource
if permit("update", table, record_id=record_id):
vars = {"refresh": list_id,
"record": record_id,
}
f = current.request.function
if f == "organisation" and organisation_id:
vars["(organisation)"] = organisation_id
elif f == "location" and location_id:
vars["(location)"] = location_id
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=URL(c="org", f="resource",
args=[record_id, "update.popup"],
vars=vars),
_class="s3_modal",
_title=current.response.s3.crud_strings.org_resource.title_update,
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-remove-sign"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Render the item
avatar = logo
body = TAG[""](body, BR(), comments)
item = DIV(DIV(SPAN(" ", _class="card-title"),
SPAN(A(location,
_href=location_url,
),
_class="location-title",
),
SPAN(date,
_class="date-title",
),
edit_bar,
_class="card-header",
),
DIV(avatar,
DIV(DIV(body,
DIV(author,
" - ",
A(organisation,
_href=org_url,
_class="card-organisation",
),
_class="card-person",
),
_class="media",
),
_class="media-body",
),
_class="media",
),
#docs,
_class=item_class,
_id=item_id,
)
return item
# -----------------------------------------------------------------------------
def customise_cms_post_fields():
"""
Customise cms_post fields for its own controller & for Profile pages
"""
s3db = current.s3db
from s3 import IS_LOCATION, S3LocationSelector
table = s3db.cms_post
field = table.location_id
field.label = ""
field.represent = s3db.gis_LocationRepresent(sep=" | ")
#field.requires = IS_EMPTY_OR(IS_LOCATION()) # that's the default!
field.widget = S3LocationSelector(levels=("L1", "L2", "L3"))
table.created_by.represent = s3_auth_user_represent_name
current.auth.settings.table_user.organisation_id.represent = \
s3db.org_organisation_represent
list_fields = ["series_id",
"location_id",
"date",
"body",
"created_by",
"created_by$organisation_id",
"document.file",
"event_post.event_id",
]
s3db.configure("cms_post",
list_fields = list_fields,
)
return table
# -----------------------------------------------------------------------------
def cms_post_popup(r):
"""
Customised Map popup for cms_post resource
- styled like the cards
- currently unused
"""
record = r.record
pkey = "cms_post.id"
# Construct the item ID
map_id = "default_map" # @ToDo: provide the map_id as a var in order to be able to support multiple maps
record_id = record[pkey]
item_id = "%s-%s" % (map_id, record_id)
item_class = "thumbnail"
db = current.db
table = db.cms_post
series = table.series_id.represent(record.series_id)
date = table.date.represent(record.date)
body = record.body
location_id = record.location_id
location = table.location_id.represent(location_id)
location_url = URL(c="gis", f="location", args=[location_id])
author_id = record.created_by
author = table.created_by.represent(author_id)
s3db = current.s3db
ltable = s3db.pr_person_user
ptable = db.pr_person
query = (ltable.user_id == author_id) & \
(ltable.pe_id == ptable.pe_id)
row = db(query).select(ptable.id,
limitby=(0, 1)
).first()
if row:
person_url = URL(c="hrm", f="person", args=[row.id])
else:
person_url = "#"
author = A(author,
_href=person_url,
)
utable = db.auth_user
otable = db.org_organisation
query = (utable.id == author_id) & \
(otable.id == utable.organisation_id)
row = db(query).select(otable.id,
otable.name,
otable.logo,
limitby=(0, 1)
).first()
if row:
organisation_id = row.id
organisation = row.name
org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
logo = URL(c="default", f="download", args=[row.logo])
else:
organisation_id = 0
organisation = ""
org_url = ""
logo = ""
avatar = IMG(_src=logo,
_height=50,
_width=50,
_style="padding-right:5px;",
_class="media-object")
avatar = A(avatar,
_href=org_url,
_class="pull-left",
)
# Edit Bar
permit = current.auth.s3_has_permission
if permit("update", table, record_id=record_id):
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=URL(c="cms", f="post",
args=[record_id, "update.popup"],
#vars={"refresh": list_id,
# "record": record_id}
),
_class="s3_modal",
_title=T("Edit %(type)s") % dict(type=T(series)),
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-remove-sign"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Dropdown of available documents
dtable = db.doc_document
query = (table.doc_id == dtable.doc_id) & \
(dtable.deleted == False)
documents = db(query).select(dtable.file)
if documents:
doc_list = UL(_class="dropdown-menu",
_role="menu",
)
retrieve = db.doc_document.file.retrieve
for doc in documents:
filename = doc.file
try:
doc_name = retrieve(filename)[0]
except IOError:
doc_name = current.messages["NONE"]
doc_url = URL(c="default", f="download",
args=[filename])
doc_item = LI(A(I(_class="icon-file"),
" ",
doc_name,
_href=doc_url,
),
_role="menuitem",
)
doc_list.append(doc_item)
docs = DIV(A(I(_class="icon-paper-clip"),
SPAN(_class="caret"),
_class="btn dropdown-toggle",
_href="#",
**{"_data-toggle": "dropdown"}
),
doc_list,
_class="btn-group attachments dropdown pull-right",
)
else:
docs = ""
icon = series.lower().replace(" ", "_")
card_label = TAG[""](I(_class="icon icon-%s" % icon),
SPAN(" %s" % T(series),
_class="card-title"))
# Type cards
if series == "Alert":
# Apply additional highlighting for Alerts
item_class = "%s disaster" % item_class
# Render the item
item = DIV(DIV(card_label,
SPAN(A(location,
_href=location_url,
),
_class="location-title",
),
SPAN(date,
_class="date-title",
),
#edit_bar,
_class="card-header",
),
DIV(avatar,
DIV(DIV(body,
DIV(author,
" - ",
A(organisation,
_href=org_url,
_class="card-organisation",
),
_class="card-person",
),
_class="media",
),
_class="media-body",
),
_class="media",
),
docs,
_class=item_class,
_id=item_id,
)
return item
# -----------------------------------------------------------------------------
def cms_post_marker_fn(record):
"""
Function to decide which Marker to use for Posts
Alerts & Incidents vary colour by age
@ToDo: A Bulk function
Currently unused: marker styling is handled via Style rules instead
"""
db = current.db
s3db = current.s3db
table = s3db.cms_post
stable = db.cms_series
series = db(stable.id == record.series_id).select(stable.name,
limitby=(0, 1),
cache=s3db.cache
).first().name
if series == "Alert":
marker = "alert"
elif series == "Activity":
marker = "activity"
elif series == "Assessment":
marker = "assessment"
#elif series == "Event":
# marker = "event"
elif series == "Incident":
marker = "incident"
#elif series == "Plan":
# marker = "plan"
elif series == "Report":
marker = "report"
elif series == "Training Material":
marker = "training"
if series in ("Alert", "Incident"):
# Colour code by open/priority requests
date = record.date
now = current.request.utcnow
age = now - date
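# Colour thresholds: under 2 days = red, under a week = yellow, older = green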
if age < timedelta(days=2):
marker = "%s_red" % marker
elif age < timedelta(days=7):
marker = "%s_yellow" % marker
else:
marker = "%s_green" % marker
mtable = db.gis_marker
try:
marker = db(mtable.name == marker).select(mtable.image,
mtable.height,
mtable.width,
cache=s3db.cache,
limitby=(0, 1)
).first()
except:
marker = db(mtable.name == "marker_red").select(mtable.image,
mtable.height,
mtable.width,
cache=s3db.cache,
limitby=(0, 1)
).first()
return marker
# =============================================================================
def cms_post_age(row):
"""
The age of the post
- used for colour-coding markers of Alerts & Incidents
"""
if hasattr(row, "cms_post"):
row = row.cms_post
try:
date = row.date
except:
# not available
return current.messages["NONE"]
now = current.request.utcnow
age = now - date
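# Age classes: 1 = under 2 days, 2 = under a week, 3 = older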
if age < timedelta(days=2):
return 1
elif age < timedelta(days=7):
return 2
else:
return 3
# -----------------------------------------------------------------------------
def customise_cms_post_controller(**attr):
s3db = current.s3db
s3 = current.response.s3
#s3db.configure("cms_post",
# marker_fn=cms_post_marker_fn,
# )
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
# Called first so that we can unhide the Type field
result = standard_prep(r)
if not result:
return False
if r.interactive:
table = customise_cms_post_fields()
get_vars = current.request.get_vars
field = table.series_id
field.label = T("Type")
if r.method == "read":
# Restore the label for the Location
table.location_id.label = T("Location")
elif r.method == "create":
ADMIN = current.session.s3.system_roles.ADMIN
if not current.auth.s3_has_role(ADMIN):
represent = S3Represent(lookup="cms_series",
translate=settings.get_L10n_translate_cms_series())
field.requires = IS_ONE_OF(current.db,
"cms_series.id",
represent,
not_filterby="name",
not_filter_opts = ("Alert",),
)
refresh = get_vars.get("refresh", None)
if refresh == "datalist":
# We must be coming from the News Feed page, so we can change the type on-the-fly
field.readable = field.writable = True
#field.requires = field.requires.other
#field = table.name
#field.readable = field.writable = False
#field = table.title
#field.readable = field.writable = False
field = table.avatar
field.default = True
#field.readable = field.writable = False
field = table.replies
field.default = False
#field.readable = field.writable = False
field = table.body
field.label = T("Description")
# Plain text not Rich
from s3.s3widgets import s3_comments_widget
field.widget = s3_comments_widget
#table.comments.readable = table.comments.writable = False
if current.request.controller == "default":
# Don't override card layout for News Feed/Homepage
return True
from s3.s3forms import S3SQLCustomForm, S3SQLInlineComponent
# Filter from a Profile page?
# If so, then default the fields we know
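# Profile page widgets pass their context as "~.(location)" / "~.(event)" GET vars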
location_id = get_vars.get("~.(location)", None)
if location_id:
table.location_id.default = location_id
event_id = get_vars.get("~.(event)", None)
if event_id:
crud_form = S3SQLCustomForm(
"date",
"series_id",
"body",
"location_id",
S3SQLInlineComponent(
"document",
name = "file",
label = T("Files"),
fields = [("", "file"),
#"comments",
],
),
)
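# onaccept: link the new post to the filtering Disaster via the event_post link table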
def create_onaccept(form):
current.s3db.event_post.insert(event_id=event_id,
post_id=form.vars.id)
s3db.configure("cms_post",
create_onaccept = create_onaccept,
)
else:
crud_form = S3SQLCustomForm(
"date",
"series_id",
"body",
"location_id",
S3SQLInlineComponent(
"event_post",
#label = T("Disaster(s)"),
label = T("Disaster"),
multiple = False,
fields = [("", "event_id")],
orderby = "event_id$name",
),
S3SQLInlineComponent(
"document",
name = "file",
label = T("Files"),
fields = [("", "file"),
#"comments",
],
),
)
# Return to List view after create/update/delete
# We now do all this in Popups
#url_next = URL(c="default", f="index", args="newsfeed")
s3db.configure("cms_post",
#create_next = url_next,
#delete_next = url_next,
#update_next = url_next,
crud_form = crud_form,
# Don't include a Create form in 'More' popups
listadd = False,
list_layout = render_posts,
)
# This is awful in Popups & it breaks the styling of the main Save button
#s3.cancel = URL(c="cms", f="post")
elif r.representation == "xls":
table = r.table
table.created_by.represent = s3_auth_user_represent_name
#table.created_on.represent = datetime_represent
utable = current.auth.settings.table_user
utable.organisation_id.represent = s3db.org_organisation_represent
list_fields = [(T("Date"), "date"),
(T("Disaster"), "event_post.event_id"),
(T("Type"), "series_id"),
(T("Details"), "body"),
(T("District"), "location_id$L1"),
(T("Sub-District"), "location_id$L2"),
(T("Suco"), "location_id$L3"),
(T("Author"), "created_by"),
(T("Organization"), "created_by$organisation_id"),
]
s3db.configure("cms_post",
list_fields = list_fields,
)
elif r.representation == "plain":
# Map Popups
table = r.table
table.location_id.represent = s3db.gis_LocationRepresent(sep=" | ")
table.created_by.represent = s3_auth_user_represent_name
# Used by default popups
series = table.series_id.represent(r.record.series_id)
s3.crud_strings["cms_post"].title_display = "%(series)s Details" % dict(series=series)
s3db.configure("cms_post",
popup_url="",
)
table.avatar.readable = False
table.body.label = ""
table.expired.readable = False
table.replies.readable = False
table.created_by.readable = True
table.created_by.label = T("Author")
# Used by cms_post_popup
#table.created_on.represent = datetime_represent
elif r.representation == "geojson":
r.table.age = Field.Method("age", cms_post_age)
return True
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
if r.interactive and isinstance(output, dict):
if "form" in output:
output["form"].add_class("cms_post")
elif "item" in output and hasattr(output["item"], "add_class"):
output["item"].add_class("cms_post")
elif r.representation == "plain":
# Map Popups
#output = cms_post_popup(r)
pass
return output
s3.postp = custom_postp
return attr
settings.customise_cms_post_controller = customise_cms_post_controller
# -----------------------------------------------------------------------------
def customise_event_event_controller(**attr):
"""
Customise event_event controller
- Profile Page
"""
s3 = current.response.s3
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
if r.interactive:
s3.crud_strings["event_event"] = Storage(
label_create = T("New Disaster"),
title_display = T("Disaster Details"),
title_list = T("Disasters"),
title_update = T("Edit Disaster"),
label_list_button = T("List Disasters"),
label_delete_button = T("Delete Disaster"),
msg_record_created = T("Disaster added"),
msg_record_modified = T("Disaster updated"),
msg_record_deleted = T("Disaster deleted"),
msg_list_empty = T("No Disasters currently registered"))
db = current.db
s3db = current.s3db
# Load normal Model
table = s3db.event_event
table.exercise.label = T("Is this an Exercise?")
table.start_date.label = T("Start Time")
if r.method == "datalist":
# Disaster selection page
# 2-column datalist, 6 rows per page
s3.dl_pagelength = 12
s3.dl_rowsize = 2
elif r.method == "profile":
# Customise the cms_post table as that is used for the widgets
customise_cms_post_fields()
gtable = db.gis_location
ltable = db.event_event_location
query = (ltable.event_id == r.id) & \
(ltable.location_id == gtable.id)
location = db(query).select(gtable.id,
gtable.lat_max,
gtable.lon_max,
gtable.lat_min,
gtable.lon_min,
limitby=(0, 1)).first()
if location:
bbox = {"lat_max" : location.lat_max,
"lon_max" : location.lon_max,
"lat_min" : location.lat_min,
"lon_min" : location.lon_min
}
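# Pass the Disaster's District as the default context for the profile dataList widgets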
default = "~.(location)=%s" % location.id
else:
# Default bounds
bbox = {}
# No default Location
default = None
map_widget = dict(label = "Map",
type = "map",
context = "event",
icon = "icon-map",
height = 383,
width = 568,
bbox = bbox,
)
alerts_widget = dict(label = "Alerts",
label_create = "Create Alert",
type = "datalist",
tablename = "cms_post",
context = "event",
default = default,
filter = FS("series_id$name") == "Alert",
icon = "alert",
layer = "Alerts",
# provided by Catalogue Layer
#marker = "alert",
list_layout = render_profile_posts,
)
incidents_widget = dict(label = "Incidents",
label_create = "Create Incident",
type = "datalist",
tablename = "cms_post",
context = "event",
default = default,
filter = FS("series_id$name") == "Incident",
icon = "incident",
layer = "Incidents",
# provided by Catalogue Layer
#marker = "incident",
list_layout = render_profile_posts,
)
assessments_widget = dict(label = "Assessments",
label_create = "Create Assessment",
type = "datalist",
tablename = "cms_post",
context = "event",
default = default,
filter = FS("series_id$name") == "Assessment",
icon = "assessment",
layer = "Assessments",
# provided by Catalogue Layer
#marker = "assessment",
list_layout = render_profile_posts,
)
activities_widget = dict(label = "Activities",
label_create = "Create Activity",
type = "datalist",
tablename = "cms_post",
context = "event",
default = default,
filter = FS("series_id$name") == "Activity",
icon = "activity",
layer = "Activities",
# provided by Catalogue Layer
#marker = "activity",
list_layout = render_profile_posts,
)
reports_widget = dict(label = "Reports",
label_create = "Create Report",
type = "datalist",
tablename = "cms_post",
context = "event",
default = default,
filter = FS("series_id$name") == "Report",
icon = "report",
layer = "Reports",
# provided by Catalogue Layer
#marker = "report",
list_layout = render_profile_posts,
)
#comments_widget = dict(label = "Comments",
# type = "comments",
# icon = "comments-alt",
# colspan = 2,
# )
record = r.record
ttable = db.event_event_type
event_type = db(ttable.id == record.event_type_id).select(ttable.name,
limitby=(0, 1),
).first().name
s3db.configure("event_event",
profile_title = "%s : %s" % (s3.crud_strings["event_event"].title_list,
record.name),
profile_header = DIV(A(IMG(_class="media-object",
_src=URL(c="static",
f="img",
args=["event",
"%s.png" % event_type]),
),
_class="pull-left",
#_href=event_url,
),
H2(record.name),
#P(record.comments),
_class="profile-header",
),
profile_widgets = [alerts_widget,
map_widget,
incidents_widget,
assessments_widget,
activities_widget,
reports_widget,
#comments_widget,
])
# Include a Location inline
location_field = s3db.event_event_location.location_id
# Don't label a single field InlineComponent
location_field.label = ""
represent = S3Represent(lookup="gis_location")
location_field.represent = represent
# L1s only
location_field.requires = IS_EMPTY_OR(
IS_ONE_OF(db, "gis_location.id",
represent,
sort = True,
filterby = "level",
filter_opts = ("L1",)
)
)
# Don't add new Locations here
location_field.comment = None
# Simple dropdown
location_field.widget = None
from s3.s3forms import S3SQLCustomForm, S3SQLInlineComponent
crud_form = S3SQLCustomForm(
"name",
"event_type_id",
"exercise",
"start_date",
"closed",
S3SQLInlineComponent(
"event_location",
label = T("District"),
multiple = False,
fields = ["location_id"],
),
"comments",
)
s3db.configure("event_event",
create_next = URL(c="event", f="event",
args=["[id]", "profile"]),
crud_form = crud_form,
# We want the Create form to be in a modal, not inline, for consistency
listadd = False,
list_layout = render_events,
)
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
return True
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
if r.interactive and \
current.auth.s3_has_permission("create", r.table):
# Insert a Button to Create New in Modal
output["showadd_btn"] = A(I(_class="icon icon-plus-sign big-add"),
_href=URL(c="event", f="event",
args=["create.popup"],
vars={"refresh": "datalist"}),
_class="btn btn-primary s3_modal",
_role="button",
_title=T("Add New Disaster"),
)
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
return output
s3.postp = custom_postp
return attr
settings.customise_event_event_controller = customise_event_event_controller
# -----------------------------------------------------------------------------
def customise_gis_location_controller(**attr):
"""
Customise gis_location controller
- Profile Page
"""
s3 = current.response.s3
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
if r.interactive:
s3db = current.s3db
table = s3db.gis_location
s3.crud_strings["gis_location"].title_list = T("Districts")
if r.method == "datalist":
# District selection page
# 2-column datalist, 6 rows per page
s3.dl_pagelength = 12
s3.dl_rowsize = 2
# Just show L1s (Districts)
s3.filter = (table.level == "L1")
# The default page length of 5 would trigger an AJAX call; load everything up-front instead
s3.dl_pagelength = 13
list_fields = ["name",
"level",
"L1",
"L2",
"L3",
]
s3db.configure("gis_location",
list_fields = list_fields,
list_layout = render_locations,
)
elif r.method == "profile":
# Customise tables used by widgets
customise_cms_post_fields()
s3db.org_customise_org_resource_fields("profile")
customise_project_project_fields()
# gis_location table (Sub-Locations)
table.parent.represent = s3db.gis_LocationRepresent(sep=" | ")
list_fields = ["name",
"id",
]
location = r.record
default = "~.(location)=%s" % location.id
map_widget = dict(label = "Map",
type = "map",
context = "location",
icon = "icon-map",
height = 383,
width = 568,
bbox = {"lat_max" : location.lat_max,
"lon_max" : location.lon_max,
"lat_min" : location.lat_min,
"lon_min" : location.lon_min
},
)
#locations_widget = dict(label = "Locations",
# insert = False,
# #label_create = "Create Location",
# type = "datalist",
# tablename = "gis_location",
# context = "location",
# icon = "globe",
# # @ToDo: Show as Polygons?
# show_on_map = False,
# list_layout = render_locations_profile,
# )
resources_widget = dict(label = "Resources",
label_create = "Create Resource",
type = "datalist",
tablename = "org_resource",
context = "location",
default = default,
icon = "resource",
show_on_map = False, # No Marker yet & only show at L1-level anyway
list_layout = render_resources,
)
incidents_widget = dict(label = "Incidents",
label_create = "Create Incident",
type = "datalist",
tablename = "cms_post",
context = "location",
default = default,
filter = (FS("series_id$name") == "Incident") & (FS("expired") == False),
icon = "incident",
layer = "Incidents",
# provided by Catalogue Layer
#marker = "incident",
list_layout = render_profile_posts,
)
reports_widget = dict(label = "Reports",
label_create = "Create Report",
type = "datalist",
tablename = "cms_post",
context = "location",
default = default,
filter = FS("series_id$name") == "Report",
icon = "report",
layer = "Reports",
# provided by Catalogue Layer
#marker = "report",
list_layout = render_profile_posts,
)
projects_widget = dict(label = "Projects",
label_create = "Create Project",
type = "datalist",
tablename = "project_project",
context = "location",
default = default,
icon = "project",
show_on_map = False, # No Marker yet & only show at L1-level anyway
list_layout = render_projects,
)
activities_widget = dict(label = "Activities",
label_create = "Create Activity",
type = "datalist",
tablename = "cms_post",
context = "location",
default = default,
filter = FS("series_id$name") == "Activity",
icon = "activity",
layer = "Activities",
# provided by Catalogue Layer
#marker = "activity",
list_layout = render_profile_posts,
)
name = location.name
# https://code.google.com/p/web2py/issues/detail?id=1533
public_url = current.deployment_settings.get_base_public_url()
if public_url.startswith("http://127.0.0.1"):
# Assume Rocket
image = quote_unicode(s3_unicode(name))
else:
# Assume Apache or Cherokee
image = s3_unicode(name)
s3db.configure("gis_location",
list_fields = list_fields,
profile_title = "%s : %s" % (s3.crud_strings["gis_location"].title_list,
name),
profile_header = DIV(A(IMG(_class="media-object",
_src="%s/%s.png" % (URL(c="static",
f="themes",
args=["DRMP", "img"]),
image),
),
_class="pull-left",
#_href=location_url,
),
H2(name),
_class="profile-header",
),
profile_widgets = [#locations_widget,
resources_widget,
map_widget,
incidents_widget,
reports_widget,
projects_widget,
activities_widget,
],
)
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
return True
s3.prep = custom_prep
return attr
settings.customise_gis_location_controller = customise_gis_location_controller
# -----------------------------------------------------------------------------
def customise_hrm_human_resource_fields():
"""
Customise hrm_human_resource for Profile widgets and 'more' popups
"""
s3db = current.s3db
table = s3db.hrm_human_resource
table.site_id.represent = S3Represent(lookup="org_site")
s3db.org_site.location_id.represent = s3db.gis_LocationRepresent(sep=" | ")
#table.modified_by.represent = s3_auth_user_represent_name
table.modified_on.represent = datetime_represent
list_fields = ["person_id",
"organisation_id",
"site_id$location_id",
"site_id$location_id$addr_street",
"job_title_id",
"email.value",
"phone.value",
#"modified_by",
"modified_on",
]
s3db.configure("hrm_human_resource",
list_fields = list_fields,
)
# -----------------------------------------------------------------------------
def customise_hrm_human_resource_controller(**attr):
"""
Customise hrm_human_resource controller
- used for 'more' popups
"""
s3 = current.response.s3
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
if r.method == "datalist":
customise_hrm_human_resource_fields()
current.s3db.configure("hrm_human_resource",
# Don't include a Create form in 'More' popups
listadd = False,
list_layout = render_contacts,
)
return True
s3.prep = custom_prep
return attr
settings.customise_hrm_human_resource_controller = customise_hrm_human_resource_controller
# -----------------------------------------------------------------------------
def customise_hrm_job_title_controller(**attr):
s3 = current.response.s3
table = current.s3db.hrm_job_title
# Configure fields
field = table.organisation_id
field.readable = field.writable = False
field.default = None
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
if r.interactive:
actions = [dict(label=str(T("Open")),
_class="action-btn",
url=URL(c="hrm", f="job_title",
args=["[id]", "read"]))
]
db = current.db
auth = current.auth
has_permission = auth.s3_has_permission
ownership_required = auth.permission.ownership_required
s3_accessible_query = auth.s3_accessible_query
if has_permission("update", table):
action = dict(label=str(T("Edit")),
_class="action-btn",
url=URL(c="hrm", f="job_title",
args=["[id]", "update"]),
)
if ownership_required("update", table):
# Check which records can be updated
query = s3_accessible_query("update", table)
rows = db(query).select(table._id)
restrict = []
rappend = restrict.append
for row in rows:
row_id = row.get("id", None)
if row_id:
rappend(str(row_id))
action["restrict"] = restrict
actions.append(action)
if has_permission("delete", table):
action = dict(label=str(T("Delete")),
_class="action-btn",
url=URL(c="hrm", f="job_title",
args=["[id]", "delete"]),
)
if ownership_required("delete", table):
# Check which records can be deleted
query = s3_accessible_query("delete", table)
rows = db(query).select(table._id)
restrict = []
rappend = restrict.append
for row in rows:
row_id = row.get("id", None)
if row_id:
rappend(str(row_id))
action["restrict"] = restrict
actions.append(action)
s3.actions = actions
if isinstance(output, dict):
if "form" in output:
output["form"].add_class("hrm_job_title")
elif "item" in output and hasattr(output["item"], "add_class"):
output["item"].add_class("hrm_job_title")
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
return output
s3.postp = custom_postp
return attr
settings.customise_hrm_job_title_controller = customise_hrm_job_title_controller
# -----------------------------------------------------------------------------
def customise_org_office_fields():
"""
Customise org_office for Profile widgets and 'more' popups
"""
s3db = current.s3db
table = s3db.org_office
table.location_id.represent = s3db.gis_LocationRepresent(sep=" | ")
table.modified_by.represent = s3_auth_user_represent_name
table.modified_on.represent = datetime_represent
list_fields = ["name",
"organisation_id",
"office_type_id",
"location_id",
"location_id$addr_street",
"modified_by",
"modified_on",
"organisation_id$logo",
]
s3db.configure("org_office",
list_fields = list_fields,
)
# -----------------------------------------------------------------------------
def customise_org_office_controller(**attr):
s3 = current.response.s3
s3db = current.s3db
table = s3db.org_office
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
if r.method == "datalist":
customise_org_office_fields()
s3db.configure("org_office",
# Don't include a Create form in 'More' popups
listadd = False,
list_layout = render_offices,
)
elif r.interactive or r.representation == "aadata":
# Configure fields
table.code.readable = table.code.writable = False
#table.office_type_id.readable = table.office_type_id.writable = False
table.phone1.readable = table.phone1.writable = False
table.phone2.readable = table.phone2.writable = False
table.email.readable = table.email.writable = False
table.fax.readable = table.fax.writable = False
location_field = table.location_id
# Filter from a Profile page?
# If so, then default the fields we know
get_vars = current.request.get_vars
location_id = get_vars.get("~.(location)", None)
organisation_id = get_vars.get("~.(organisation)", None)
if organisation_id:
org_field = table.organisation_id
org_field.default = organisation_id
org_field.readable = org_field.writable = False
if location_id:
location_field.default = location_id
location_field.readable = location_field.writable = False
else:
# Don't add new Locations here
location_field.comment = None
# L1 & L2 only
from s3 import IS_LOCATION, S3LocationSelector
location_field.requires = IS_LOCATION()
location_field.widget = S3LocationSelector(levels=("L1", "L2"),
show_address=True,
show_map=False,
)
# This is awful in Popups & inconsistent in dataTable view (People/Documents don't have this & it breaks the styling of the main Save button)
#s3.cancel = URL(c="org", f="office")
return True
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
if r.interactive:
actions = [dict(label=str(T("Open")),
_class="action-btn",
url=URL(c="org", f="office",
args=["[id]", "read"]))
]
db = current.db
auth = current.auth
has_permission = auth.s3_has_permission
ownership_required = auth.permission.ownership_required
s3_accessible_query = auth.s3_accessible_query
if has_permission("update", table):
action = dict(label=str(T("Edit")),
_class="action-btn",
url=URL(c="org", f="office",
args=["[id]", "update"]),
)
if ownership_required("update", table):
# Check which records can be updated
query = s3_accessible_query("update", table)
rows = db(query).select(table._id)
restrict = []
rappend = restrict.append
for row in rows:
row_id = row.get("id", None)
if row_id:
rappend(str(row_id))
action["restrict"] = restrict
actions.append(action)
if has_permission("delete", table):
action = dict(label=str(T("Delete")),
_class="action-btn",
url=URL(c="org", f="office",
args=["[id]", "delete"]),
)
if ownership_required("delete", table):
# Check which records can be deleted
query = s3_accessible_query("delete", table)
rows = db(query).select(table._id)
restrict = []
rappend = restrict.append
for row in rows:
row_id = row.get("id", None)
if row_id:
rappend(str(row_id))
action["restrict"] = restrict
actions.append(action)
s3.actions = actions
if isinstance(output, dict):
if "form" in output:
output["form"].add_class("org_office")
elif "item" in output and hasattr(output["item"], "add_class"):
output["item"].add_class("org_office")
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
return output
s3.postp = custom_postp
return attr
settings.customise_org_office_controller = customise_org_office_controller
# -----------------------------------------------------------------------------
def customise_org_organisation_controller(**attr):
"""
Customise org_organisation controller
- Profile Page
"""
s3 = current.response.s3
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
if r.interactive:
ADD_ORGANISATION = T("New Stakeholder")
s3.crud_strings["org_organisation"] = Storage(
label_create = ADD_ORGANISATION,
title_display = T("Stakeholder Details"),
title_list = T("Stakeholders"),
title_update = T("Edit Stakeholder"),
label_list_button = T("List Stakeholders"),
label_delete_button = T("Delete Stakeholder"),
msg_record_created = T("Stakeholder added"),
msg_record_modified = T("Stakeholder updated"),
msg_record_deleted = T("Stakeholder deleted"),
msg_list_empty = T("No Stakeholders currently registered"))
list_fields = ["id",
"name",
"logo",
"phone",
]
s3db = current.s3db
if r.method == "profile":
# Customise tables used by widgets
customise_cms_post_fields()
customise_hrm_human_resource_fields()
customise_org_office_fields()
s3db.org_customise_org_resource_fields("profile")
customise_project_project_fields()
contacts_widget = dict(label = "Contacts",
label_create = "Create Contact",
type = "datalist",
tablename = "hrm_human_resource",
context = "organisation",
create_controller = "pr",
create_function = "person",
icon = "contact",
show_on_map = False, # Since they will show within Offices
list_layout = render_contacts,
)
map_widget = dict(label = "Map",
type = "map",
context = "organisation",
icon = "icon-map",
height = 383,
width = 568,
)
offices_widget = dict(label = "Offices",
label_create = "Create Office",
type = "datalist",
tablename = "org_office",
context = "organisation",
icon = "home",
layer = "Offices",
# provided by Catalogue Layer
#marker = "office",
list_layout = render_offices,
)
resources_widget = dict(label = "Resources",
label_create = "Create Resource",
type = "datalist",
tablename = "org_resource",
context = "organisation",
icon = "resource",
show_on_map = False, # No Marker yet & only show at L1-level anyway
list_layout = render_resources,
)
projects_widget = dict(label = "Projects",
label_create = "Create Project",
type = "datalist",
tablename = "project_project",
context = "organisation",
icon = "project",
show_on_map = False, # No Marker yet & only show at L1-level anyway
list_layout = render_projects,
)
activities_widget = dict(label = "Activities",
label_create = "Create Activity",
type = "datalist",
tablename = "cms_post",
context = "organisation",
filter = FS("series_id$name") == "Activity",
icon = "activity",
layer = "Activities",
# provided by Catalogue Layer
#marker = "activity",
list_layout = render_profile_posts,
)
reports_widget = dict(label = "Reports",
label_create = "Create Report",
type = "datalist",
tablename = "cms_post",
context = "organisation",
filter = FS("series_id$name") == "Report",
icon = "report",
layer = "Reports",
# provided by Catalogue Layer
#marker = "report",
list_layout = render_profile_posts,
)
assessments_widget = dict(label = "Assessments",
label_create = "Create Assessment",
type = "datalist",
tablename = "cms_post",
context = "organisation",
filter = FS("series_id$name") == "Assessment",
icon = "assessment",
layer = "Assessments",
# provided by Catalogue Layer
#marker = "assessment",
list_layout = render_profile_posts,
)
record = r.record
if record.logo:
logo = URL(c="default", f="download", args=[record.logo])
else:
logo = ""
s3db.configure("org_organisation",
profile_title = "%s : %s" % (s3.crud_strings["org_organisation"].title_list,
record.name),
profile_header = DIV(A(IMG(_class="media-object",
_src=logo,
),
_class="pull-left",
#_href=org_url,
),
H2(record.name),
_class="profile-header",
),
profile_widgets = [contacts_widget,
map_widget,
offices_widget,
resources_widget,
projects_widget,
activities_widget,
reports_widget,
assessments_widget,
]
)
elif r.method == "datalist":
# Stakeholder selection page
# 2-column datalist, 6 rows per page
s3.dl_pagelength = 12
s3.dl_rowsize = 2
# Add a component of just National offices for the Org address
ottable = s3db.org_office_type
query = (ottable.name == "National")
national = current.db(query).select(ottable.id,
limitby=(0, 1)
).first()
if national:
national = national.id
s3db.add_components("org_organisation",
org_office = {"name": "nat_office",
"joinby": "organisation_id",
"filterby": "office_type_id",
"filterfor": (national,),
},
)
list_fields.append("nat_office.location_id$addr_street")
# Represent used in rendering
current.auth.settings.table_user.organisation_id.represent = s3db.org_organisation_represent
# Load normal Model
table = s3db.org_organisation
# Hide fields
field = s3db.org_organisation_organisation_type.organisation_type_id
field.readable = field.writable = False
table.region_id.readable = table.region_id.writable = False
table.country.readable = table.country.writable = False
table.year.readable = table.year.writable = False
# Return to List view after create/update/delete (unless done via Modal)
url_next = URL(c="org", f="organisation", args="datalist")
s3db.configure("org_organisation",
create_next = url_next,
delete_next = url_next,
update_next = url_next,
# We want the Create form to be in a modal, not inline, for consistency
listadd = False,
list_fields = list_fields,
list_layout = render_organisations,
)
return True
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
if r.interactive and \
isinstance(output, dict) and \
current.auth.s3_has_permission("create", r.table):
# Insert a Button to Create New in Modal
output["showadd_btn"] = A(I(_class="icon icon-plus-sign big-add"),
_href=URL(c="org", f="organisation",
args=["create.popup"],
vars={"refresh": "datalist"}),
_class="btn btn-primary s3_modal",
_role="button",
_title=T("Create Organization"),
)
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
return output
s3.postp = custom_postp
return attr
settings.customise_org_organisation_controller = customise_org_organisation_controller
# -----------------------------------------------------------------------------
def customise_org_resource_controller(**attr):
s3 = current.response.s3
s3db = current.s3db
table = s3db.org_resource
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
if r.interactive or r.representation == "aadata":
s3db.org_customise_org_resource_fields(r.method)
# Configure fields
#table.site_id.readable = table.site_id.writable = False
location_field = table.location_id
location_field.label = T("District")
# Filter from a Profile page?
# If so, then default the fields we know
get_vars = current.request.get_vars
location_id = get_vars.get("~.(location)", None)
organisation_id = get_vars.get("~.(organisation)", None)
if organisation_id:
org_field = table.organisation_id
org_field.default = organisation_id
org_field.readable = org_field.writable = False
if location_id:
location_field.default = location_id
location_field.readable = location_field.writable = False
else:
# L1s only
location_field.requires = IS_ONE_OF(current.db, "gis_location.id",
S3Represent(lookup="gis_location"),
sort = True,
filterby = "level",
filter_opts = ("L1",)
)
# Don't add new Locations here
location_field.comment = None
# Simple dropdown
location_field.widget = None
# Return to List view after create/update/delete (unless done via Modal)
url_next = URL(c="org", f="resource")
s3db.configure("org_resource",
create_next = url_next,
delete_next = url_next,
update_next = url_next,
# Don't include a Create form in 'More' popups
listadd = False if r.method == "datalist" else True,
list_layout = render_resources,
)
# This is awful in Popups & inconsistent in dataTable view (People/Documents don't have this & it breaks the styling of the main Save button)
#s3.cancel = URL(c="org", f="resource")
return True
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
if r.interactive:
actions = [dict(label=str(T("Open")),
_class="action-btn",
url=URL(c="org", f="resource",
args=["[id]", "read"]))
]
# All users just get "Open"
#db = current.db
#auth = current.auth
#has_permission = auth.s3_has_permission
#ownership_required = auth.permission.ownership_required
#s3_accessible_query = auth.s3_accessible_query
#if has_permission("update", table):
# action = dict(label=str(T("Edit")),
# _class="action-btn",
# url=URL(c="org", f="resource",
# args=["[id]", "update"]),
# )
# if ownership_required("update", table):
# # Check which records can be updated
# query = s3_accessible_query("update", table)
# rows = db(query).select(table._id)
# restrict = []
# rappend = restrict.append
# for row in rows:
# row_id = row.get("id", None)
# if row_id:
# rappend(str(row_id))
# action["restrict"] = restrict
# actions.append(action)
#if has_permission("delete", table):
# action = dict(label=str(T("Delete")),
# _class="action-btn",
# url=URL(c="org", f="resource",
# args=["[id]", "delete"]),
# )
# if ownership_required("delete", table):
# # Check which records can be deleted
# query = s3_accessible_query("delete", table)
# rows = db(query).select(table._id)
# restrict = []
# rappend = restrict.append
# for row in rows:
# row_id = row.get("id", None)
# if row_id:
# rappend(str(row_id))
# action["restrict"] = restrict
# actions.append(action)
s3.actions = actions
if isinstance(output, dict):
if "form" in output:
output["form"].add_class("org_resource")
elif "item" in output and hasattr(output["item"], "add_class"):
output["item"].add_class("org_resource")
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
return output
s3.postp = custom_postp
return attr
settings.customise_org_resource_controller = customise_org_resource_controller
# -----------------------------------------------------------------------------
#def customise_org_resource_type_controller(**attr):
# table = current.s3db.org_resource_type
# table.name.represent = lambda v: T(v) if v else ""
# table.comments.label = T("Units")
# table.comments.represent = lambda v: T(v) if v else ""
# return attr
#settings.customise_org_resource_type_controller = customise_org_resource_type_controller
# -----------------------------------------------------------------------------
def customise_pr_person_controller(**attr):
s3db = current.s3db
request = current.request
s3 = current.response.s3
tablename = "pr_person"
table = s3db.pr_person
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
if r.method == "validate":
# Can't validate image without the file
image_field = s3db.pr_image.image
image_field.requires = None
if r.interactive or r.representation == "aadata":
if request.controller != "default":
# CRUD Strings
ADD_CONTACT = T("Create Contact")
s3.crud_strings[tablename] = Storage(
label_create = T("Create Contact"),
title_display = T("Contact Details"),
title_list = T("Contact Directory"),
title_update = T("Edit Contact Details"),
label_list_button = T("List Contacts"),
label_delete_button = T("Delete Contact"),
msg_record_created = T("Contact added"),
msg_record_modified = T("Contact details updated"),
msg_record_deleted = T("Contact deleted"),
msg_list_empty = T("No Contacts currently registered"))
MOBILE = settings.get_ui_label_mobile_phone()
EMAIL = T("Email")
htable = s3db.hrm_human_resource
htable.organisation_id.widget = None
site_field = htable.site_id
represent = S3Represent(lookup="org_site")
site_field.represent = represent
site_field.requires = IS_ONE_OF(current.db, "org_site.site_id",
represent,
orderby = "org_site.name")
from s3layouts import S3AddResourceLink
site_field.comment = S3AddResourceLink(c="org", f="office",
vars={"child": "site_id"},
label=T("Create Office"),
title=T("Office"),
tooltip=T("If you don't see the Office in the list, you can add a new one by clicking link 'Create Office'."))
# ImageCrop widget doesn't currently work within an Inline Form
image_field = s3db.pr_image.image
from gluon.validators import IS_IMAGE
image_field.requires = IS_IMAGE()
image_field.widget = None
hr_fields = ["organisation_id",
"job_title_id",
"site_id",
]
if r.method in ("create", "update"):
# Context from a Profile page?
organisation_id = request.get_vars.get("(organisation)", None)
if organisation_id:
field = s3db.hrm_human_resource.organisation_id
field.default = organisation_id
field.readable = field.writable = False
hr_fields.remove("organisation_id")
from s3.s3forms import S3SQLCustomForm, S3SQLInlineComponent
s3_sql_custom_fields = ["first_name",
#"middle_name",
"last_name",
S3SQLInlineComponent(
"human_resource",
name = "human_resource",
label = "",
multiple = False,
fields = hr_fields,
),
S3SQLInlineComponent(
"image",
name = "image",
label = T("Photo"),
multiple = False,
fields = [("", "image")],
filterby = dict(field = "profile",
options = [True]
)
),
]
list_fields = [(current.messages.ORGANISATION, "human_resource.organisation_id"),
"first_name",
#"middle_name",
"last_name",
(T("Job Title"), "human_resource.job_title_id"),
(T("Office"), "human_resource.site_id"),
]
# Don't include Email/Phone for unauthenticated users
if current.auth.is_logged_in():
list_fields += [(MOBILE, "phone.value"),
(EMAIL, "email.value"),
]
s3_sql_custom_fields.insert(3,
S3SQLInlineComponent(
"contact",
name = "phone",
label = MOBILE,
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "SMS")),
)
s3_sql_custom_fields.insert(3,
S3SQLInlineComponent(
"contact",
name = "email",
label = EMAIL,
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "EMAIL")),
)
crud_form = S3SQLCustomForm(*s3_sql_custom_fields)
# Return to List view after create/update/delete (unless done via Modal)
url_next = URL(c="pr", f="person")
s3db.configure(tablename,
create_next = url_next,
delete_next = url_next,
update_next = url_next,
crud_form = crud_form,
list_fields = list_fields,
# Don't include a Create form in 'More' popups
listadd = False if r.method=="datalist" else True,
list_layout = render_contacts,
)
# Move fields to their desired Locations
# Disabled as breaks submission of inline_component
#i18n = []
#iappend = i18n.append
#iappend('''i18n.office="%s"''' % T("Office"))
#iappend('''i18n.organisation="%s"''' % T("Organization"))
#iappend('''i18n.job_title="%s"''' % T("Job Title"))
#i18n = '''\n'''.join(i18n)
#s3.js_global.append(i18n)
#s3.scripts.append('/%s/static/themes/DRMP/js/contacts.js' % request.application)
return True
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
if r.interactive and isinstance(output, dict):
output["rheader"] = ""
actions = [dict(label=str(T("Open")),
_class="action-btn",
url=URL(c="pr", f="person",
args=["[id]", "read"]))
]
# All users just get "Open"
#db = current.db
#auth = current.auth
#has_permission = auth.s3_has_permission
#ownership_required = auth.permission.ownership_required
#s3_accessible_query = auth.s3_accessible_query
#if has_permission("update", table):
# action = dict(label=str(T("Edit")),
# _class="action-btn",
# url=URL(c="pr", f="person",
# args=["[id]", "update"]),
# )
# if ownership_required("update", table):
# # Check which records can be updated
# query = s3_accessible_query("update", table)
# rows = db(query).select(table._id)
# restrict = []
# rappend = restrict.append
# for row in rows:
# row_id = row.get("id", None)
# if row_id:
# rappend(str(row_id))
# action["restrict"] = restrict
# actions.append(action)
#if has_permission("delete", table):
# action = dict(label=str(T("Delete")),
# _class="action-btn",
# url=URL(c="pr", f="person",
# args=["[id]", "delete"]),
# )
# if ownership_required("delete", table):
# # Check which records can be deleted
# query = s3_accessible_query("delete", table)
# rows = db(query).select(table._id)
# restrict = []
# rappend = restrict.append
# for row in rows:
# row_id = row.get("id", None)
# if row_id:
# rappend(str(row_id))
# action["restrict"] = restrict
# actions.append(action)
s3.actions = actions
if "form" in output:
output["form"].add_class("pr_person")
elif "item" in output and hasattr(output["item"], "add_class"):
output["item"].add_class("pr_person")
return output
s3.postp = custom_postp
return attr
settings.customise_pr_person_controller = customise_pr_person_controller
# -----------------------------------------------------------------------------
def customise_project_project_fields():
"""
Customise project_project fields for Profile widgets and 'more' popups
"""
format = "%d/%m/%y"
date_represent = lambda d: S3DateTime.date_represent(d, format=format)
s3db = current.s3db
s3db.project_location.location_id.represent = s3db.gis_LocationRepresent(sep=" | ")
table = s3db.project_project
table.objectives.readable = table.objectives.writable = True
table.start_date.represent = date_represent
table.end_date.represent = date_represent
table.modified_by.represent = s3_auth_user_represent_name
table.modified_on.represent = datetime_represent
list_fields = ["name",
"organisation_id",
"location.location_id",
"organisation_id$logo",
"start_date",
"end_date",
"human_resource_id",
"budget",
"partner.organisation_id",
"donor.organisation_id",
"donor.amount",
"donor.currency",
"modified_by",
"modified_on",
"document.file",
]
s3db.configure("project_project",
list_fields = list_fields,
)
# -----------------------------------------------------------------------------
def customise_project_project_controller(**attr):
s3 = current.response.s3
# Remove rheader
attr["rheader"] = None
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
s3db = current.s3db
table = s3db.project_project
if r.method == "datalist":
customise_project_project_fields()
s3db.configure("project_project",
# Don't include a Create form in 'More' popups
listadd = False,
list_layout = render_projects,
)
elif r.interactive or r.representation == "aadata":
# Filter from a Profile page?
# If so, then default the fields we know
get_vars = current.request.get_vars
organisation_id = get_vars.get("~.(organisation)", None)
if not organisation_id:
user = current.auth.user
if user:
organisation_id = user.organisation_id
# Configure fields
table.objectives.readable = table.objectives.writable = True
table.human_resource_id.label = T("Focal Person")
s3db.hrm_human_resource.organisation_id.default = organisation_id
table.budget.label = "%s (USD)" % T("Budget")
# Better in column label & otherwise this construction loses thousands separators
#table.budget.represent = lambda value: "%d USD" % value
s3db.doc_document.file.label = ""
from s3.s3forms import S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineComponentMultiSelectWidget
crud_form_fields = [
"name",
S3SQLInlineComponentMultiSelectWidget(
"theme",
label = T("Themes"),
field = "theme_id",
option_help = "comments",
cols = 3,
),
S3SQLInlineComponent(
"location",
label = T("Districts"),
fields = ["location_id"],
orderby = "location_id$name",
render_list = True
),
"description",
"human_resource_id",
"start_date",
"end_date",
# Partner Orgs
S3SQLInlineComponent(
"organisation",
name = "partner",
label = T("Partner Organizations"),
fields = ["organisation_id",
],
filterby = dict(field = "role",
options = "2"
)
),
# Donors
S3SQLInlineComponent(
"organisation",
name = "donor",
label = T("Donor(s)"),
fields = ["organisation_id", "amount", "currency"],
filterby = dict(field = "role",
options = "3"
)
),
"budget",
"objectives",
# Files
S3SQLInlineComponent(
"document",
name = "file",
label = T("Files"),
fields = ["file",
#"comments"
],
),
"comments",
]
if organisation_id:
org_field = table.organisation_id
org_field.default = organisation_id
org_field.readable = org_field.writable = False
else:
crud_form_fields.insert(1, "organisation_id")
location_field = s3db.project_location.location_id
location_id = get_vars.get("~.(location)", None)
if location_id:
# Default to this Location, but allow selection of others
location_field.default = location_id
location_field.label = ""
represent = S3Represent(lookup="gis_location")
location_field.represent = represent
# Project Locations must be districts
location_field.requires = IS_ONE_OF(current.db, "gis_location.id",
represent,
sort = True,
filterby = "level",
filter_opts = ("L1",)
)
# Don't add new Locations here
location_field.comment = None
# Simple dropdown
location_field.widget = None
crud_form = S3SQLCustomForm(*crud_form_fields)
list_fields = ["name",
"organisation_id",
"human_resource_id",
(T("Districts"), "location.location_id"),
"start_date",
"end_date",
"budget",
]
# Return to List view after create/update/delete (unless done via Modal)
url_next = URL(c="project", f="project")
from s3.s3filter import S3TextFilter, S3OptionsFilter
filter_widgets = [
S3TextFilter(["name",
"description",
"location.location_id",
"theme.name",
"objectives",
"comments"
],
label = T("Search Projects"),
),
S3OptionsFilter("organisation_id",
label = T("Lead Organization"),
),
S3OptionsFilter("location.location_id$L1",
),
S3OptionsFilter("partner.organisation_id",
label = T("Partners"),
),
S3OptionsFilter("donor.organisation_id",
label = T("Donors"),
)
]
s3db.configure("project_project",
create_next = url_next,
crud_form = crud_form,
delete_next = url_next,
filter_widgets = filter_widgets,
list_fields = list_fields,
update_next = url_next,
)
# This is awful in Popups & inconsistent in dataTable view (People/Documents don't have this & it breaks the styling of the main Save button)
#s3.cancel = URL(c="project", f="project")
return True
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
if r.interactive:
actions = [dict(label=str(T("Open")),
_class="action-btn",
url=URL(c="project", f="project",
args=["[id]", "read"]))
]
# All users just get "Open"
#db = current.db
#auth = current.auth
#has_permission = auth.s3_has_permission
#ownership_required = auth.permission.ownership_required
#s3_accessible_query = auth.s3_accessible_query
#if has_permission("update", table):
# action = dict(label=str(T("Edit")),
# _class="action-btn",
# url=URL(c="project", f="project",
# args=["[id]", "update"]),
# )
# if ownership_required("update", table):
# # Check which records can be updated
# query = s3_accessible_query("update", table)
# rows = db(query).select(table._id)
# restrict = []
# rappend = restrict.append
# for row in rows:
# row_id = row.get("id", None)
# if row_id:
# rappend(str(row_id))
# action["restrict"] = restrict
# actions.append(action)
#if has_permission("delete", table):
# action = dict(label=str(T("Delete")),
# _class="action-btn",
# url=URL(c="project", f="project",
# args=["[id]", "delete"]),
# )
# if ownership_required("delete", table):
# # Check which records can be deleted
# query = s3_accessible_query("delete", table)
# rows = db(query).select(table._id)
# restrict = []
# rappend = restrict.append
# for row in rows:
# row_id = row.get("id", None)
# if row_id:
# rappend(str(row_id))
# action["restrict"] = restrict
# actions.append(action)
s3.actions = actions
if isinstance(output, dict):
if "form" in output:
output["form"].add_class("project_project")
elif "item" in output and hasattr(output["item"], "add_class"):
output["item"].add_class("project_project")
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
return output
s3.postp = custom_postp
return attr
settings.customise_project_project_controller = customise_project_project_controller
# -----------------------------------------------------------------------------
def customise_doc_document_controller(**attr):
s3 = current.response.s3
s3db = current.s3db
tablename = "doc_document"
table = s3db.doc_document
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
# Filter Out Docs from Newsfeed & Projects
current.response.s3.filter = (table.name != None)
if r.interactive:
s3.crud_strings[tablename] = Storage(
label_create = T("Add Document"),
title_display = T("Document"),
title_list = T("Documents"),
title_update = T("Edit Document"),
label_list_button = T("List New Documents"),
label_delete_button = T("Remove Documents"),
msg_record_created = T("Documents added"),
msg_record_modified = T("Documents updated"),
msg_record_deleted = T("Documents removed"),
msg_list_empty = T("No Documents currently recorded"))
# Force added docs to have a name
table.name.requires = IS_NOT_EMPTY()
table.organisation_id.readable = True
table.organisation_id.writable = True
list_fields = ["name",
"file",
"url",
"organisation_id",
"comments",
]
from s3.s3forms import S3SQLCustomForm
crud_form = S3SQLCustomForm(*list_fields)
s3db.configure(tablename,
list_fields = list_fields,
crud_form = crud_form,
)
return True
s3.prep = custom_prep
return attr
settings.customise_doc_document_controller = customise_doc_document_controller
# =============================================================================
# Template Modules
# Comment/uncomment modules here to disable/enable them
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = "Home",
restricted = False, # Use ACLs to control access to this module
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = "Administration",
#description = "Site Administration",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = "Administration",
#description = "Site Administration",
restricted = True,
module_type = None # No Menu
)),
("errors", Storage(
name_nice = "Ticket Viewer",
#description = "Needed for Breadcrumbs",
restricted = False,
module_type = None # No Menu
)),
("sync", Storage(
name_nice = "Synchronization",
#description = "Synchronization",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("translate", Storage(
name_nice = "Translation Functionality",
#description = "Selective translation of strings based on module.",
module_type = None,
)),
("gis", Storage(
name_nice = "Map",
#description = "Situation Awareness & Geospatial Analysis",
restricted = True,
module_type = 1, # 1st item in the menu
)),
("pr", Storage(
name_nice = "Persons",
#description = "Central point to record details on People",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu (access to the controller is still possible for all)
module_type = None
)),
("org", Storage(
name_nice = "Organizations",
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
restricted = True,
module_type = None
)),
# All modules below here should be possible to disable safely
("hrm", Storage(
name_nice = "Contacts",
#description = "Human Resources Management",
restricted = True,
module_type = None,
)),
("cms", Storage(
name_nice = "Content Management",
restricted = True,
module_type = None,
)),
("doc", Storage(
name_nice = "Documents",
#description = "A library of digital resources, such as photos, documents and reports",
restricted = True,
module_type = None,
)),
("msg", Storage(
name_nice = "Messaging",
#description = "Sends & Receives Alerts via Email & SMS",
restricted = True,
# The user-visible functionality of this module isn't normally required. Rather, its main purpose is to be accessed from other modules.
module_type = None,
)),
("event", Storage(
name_nice = "Disasters",
#description = "Events",
restricted = True,
module_type = None
)),
("project", Storage(
name_nice = "Projects",
restricted = True,
module_type = None
)),
("stats", Storage(
name_nice = "Statistics",
restricted = True,
module_type = None
)),
("vulnerability", Storage(
name_nice = "Vulnerability",
restricted = True,
module_type = None
)),
#("transport", Storage(
# name_nice = "Transport",
# restricted = True,
# module_type = None
#)),
#("hms", Storage(
# name_nice = "Hospitals",
# restricted = True,
# module_type = None
#)),
])
| collective/eden | private/templates/DRMP/config.py | Python | mit | 166,118 |
"""
Parser and evaluator for FormulaResponse and NumericalResponse
Uses pyparsing to parse. Main function as of now is evaluator().
"""
import math
import numbers
import operator
import numpy
import scipy.constants
from pyparsing import (
CaselessLiteral,
Combine,
Forward,
Group,
Literal,
MatchFirst,
Optional,
ParseResults,
Suppress,
Word,
ZeroOrMore,
alphanums,
alphas,
nums,
stringEnd
)
import functions
# Functions available by default
# We use scimath variants which give complex results when needed. For example:
# np.sqrt(-4+0j) = 2j
# np.sqrt(-4) = nan, but
# np.lib.scimath.sqrt(-4) = 2j
DEFAULT_FUNCTIONS = {
'sin': numpy.sin,
'cos': numpy.cos,
'tan': numpy.tan,
'sec': functions.sec,
'csc': functions.csc,
'cot': functions.cot,
'sqrt': numpy.lib.scimath.sqrt,
'log10': numpy.lib.scimath.log10,
'log2': numpy.lib.scimath.log2,
'ln': numpy.lib.scimath.log,
'exp': numpy.exp,
'arccos': numpy.lib.scimath.arccos,
'arcsin': numpy.lib.scimath.arcsin,
'arctan': numpy.arctan,
'arcsec': functions.arcsec,
'arccsc': functions.arccsc,
'arccot': functions.arccot,
'abs': numpy.abs,
'fact': math.factorial,
'factorial': math.factorial,
'sinh': numpy.sinh,
'cosh': numpy.cosh,
'tanh': numpy.tanh,
'sech': functions.sech,
'csch': functions.csch,
'coth': functions.coth,
'arcsinh': numpy.arcsinh,
'arccosh': numpy.arccosh,
'arctanh': numpy.lib.scimath.arctanh,
'arcsech': functions.arcsech,
'arccsch': functions.arccsch,
'arccoth': functions.arccoth
}
DEFAULT_VARIABLES = {
'i': numpy.complex(0, 1),
'j': numpy.complex(0, 1),
'e': numpy.e,
'pi': numpy.pi,
'k': scipy.constants.k, # Boltzmann: 1.3806488e-23 (Joules/Kelvin)
'c': scipy.constants.c, # Light Speed: 2.998e8 (m/s)
'T': 298.15, # Typical room temperature: 298.15 (Kelvin), same as 25C/77F
'q': scipy.constants.e # Fund. Charge: 1.602176565e-19 (Coulombs)
}
# We eliminated the following extreme suffixes:
# P (1e15), E (1e18), Z (1e21), Y (1e24),
# f (1e-15), a (1e-18), z (1e-21), y (1e-24)
# since they're rarely used, and potentially confusing.
# They may also conflict with variables if we ever allow e.g.
# 5R instead of 5*R
SUFFIXES = {
'%': 0.01, 'k': 1e3, 'M': 1e6, 'G': 1e9, 'T': 1e12,
'c': 1e-2, 'm': 1e-3, 'u': 1e-6, 'n': 1e-9, 'p': 1e-12
}
class UndefinedVariable(Exception):
"""
Indicate when a student inputs a variable which was not expected.
"""
pass
def lower_dict(input_dict):
"""
Convert all keys in a dictionary to lowercase; keep their original values.
Keep in mind that it is possible (but not useful?) to define different
variables that have the same lowercase representation. It would be hard to
tell which is used in the final dict and which isn't.
"""
return {k.lower(): v for k, v in input_dict.iteritems()}
# The following few functions define evaluation actions, which are run on lists
# of results from each parse component. They convert the strings and (previously
# calculated) numbers into the number that component represents.
def super_float(text):
"""
Like float, but with SI extensions. 1k goes to 1000.
"""
if text[-1] in SUFFIXES:
return float(text[:-1]) * SUFFIXES[text[-1]]
else:
return float(text)
def eval_number(parse_result):
"""
Create a float out of its string parts.
e.g. [ '7.13', 'e', '3' ] -> 7130
Calls super_float above.
"""
return super_float("".join(parse_result))
def eval_atom(parse_result):
"""
Return the value wrapped by the atom.
In the case of parenthesis, ignore them.
"""
# Find first number in the list
result = next(k for k in parse_result if isinstance(k, numbers.Number))
return result
def eval_power(parse_result):
"""
Take a list of numbers and exponentiate them, right to left.
e.g. [ 2, 3, 2 ] -> 2^3^2 = 2^(3^2) -> 512
(not to be interpreted (2^3)^2 = 64)
"""
# `reduce` will go from left to right; reverse the list.
parse_result = reversed(
[k for k in parse_result
if isinstance(k, numbers.Number)] # Ignore the '^' marks.
)
# Having reversed it, raise `b` to the power of `a`.
power = reduce(lambda a, b: b ** a, parse_result)
return power
def eval_parallel(parse_result):
"""
Compute numbers according to the parallel resistors operator.
BTW it is commutative. Its formula is given by
out = 1 / (1/in1 + 1/in2 + ...)
e.g. [ 1, 2 ] -> 2/3
Return NaN if there is a zero among the inputs.
"""
if len(parse_result) == 1:
return parse_result[0]
if 0 in parse_result:
return float('nan')
reciprocals = [1. / e for e in parse_result
if isinstance(e, numbers.Number)]
return 1. / sum(reciprocals)
def eval_sum(parse_result):
"""
Add the inputs, keeping in mind their sign.
[ 1, '+', 2, '-', 3 ] -> 0
Allow a leading + or -.
"""
total = 0.0
current_op = operator.add
for token in parse_result:
if token == '+':
current_op = operator.add
elif token == '-':
current_op = operator.sub
else:
total = current_op(total, token)
return total
def eval_product(parse_result):
"""
Multiply the inputs.
[ 1, '*', 2, '/', 3 ] -> 0.66
"""
prod = 1.0
current_op = operator.mul
for token in parse_result:
if token == '*':
current_op = operator.mul
elif token == '/':
current_op = operator.truediv
else:
prod = current_op(prod, token)
return prod
def add_defaults(variables, functions, case_sensitive):
"""
Create dictionaries with both the default and user-defined variables.
"""
all_variables = dict(DEFAULT_VARIABLES)
all_functions = dict(DEFAULT_FUNCTIONS)
all_variables.update(variables)
all_functions.update(functions)
if not case_sensitive:
all_variables = lower_dict(all_variables)
all_functions = lower_dict(all_functions)
return (all_variables, all_functions)
def evaluator(variables, functions, math_expr, case_sensitive=False):
"""
Evaluate an expression; that is, take a string of math and return a float.
-Variables are passed as a dictionary from string to value. They must be
python numbers.
-Unary functions are passed as a dictionary from string to function.
"""
# No need to go further.
if math_expr.strip() == "":
return float('nan')
# Parse the tree.
math_interpreter = ParseAugmenter(math_expr, case_sensitive)
math_interpreter.parse_algebra()
# Get our variables together.
all_variables, all_functions = add_defaults(variables, functions, case_sensitive)
# ...and check them
math_interpreter.check_variables(all_variables, all_functions)
# Create a recursion to evaluate the tree.
if case_sensitive:
casify = lambda x: x
else:
casify = lambda x: x.lower() # Lowercase for case insens.
evaluate_actions = {
'number': eval_number,
'variable': lambda x: all_variables[casify(x[0])],
'function': lambda x: all_functions[casify(x[0])](x[1]),
'atom': eval_atom,
'power': eval_power,
'parallel': eval_parallel,
'product': eval_product,
'sum': eval_sum
}
return math_interpreter.reduce_tree(evaluate_actions)
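# Editor's usage sketch (not part of the original module's public surface):
# a few representative calls to evaluator(); the variable/function names and
# expressions below are illustrative assumptions.
def _example_evaluator_usage():
    """Demonstrate evaluator() with variables, SI suffixes and custom functions."""
    # User-defined variable: 2*x + 1 with x = 3 -> 7.0
    with_variable = evaluator({'x': 3}, {}, "2*x + 1")
    # SI suffix plus a default constant: 1k * pi -> ~3141.59
    with_suffix = evaluator({}, {}, "1k * pi")
    # User-defined unary function: double(10) -> 20.0
    with_function = evaluator({}, {'double': lambda v: 2 * v}, "double(10)")
    return with_variable, with_suffix, with_function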
class ParseAugmenter(object):
"""
Holds the data for a particular parse.
Retains the `math_expr` and `case_sensitive` so they needn't be passed
around method to method.
Eventually holds the parse tree and sets of variables as well.
"""
def __init__(self, math_expr, case_sensitive=False):
"""
Create the ParseAugmenter for a given math expression string.
Do the parsing later, when called like `OBJ.parse_algebra()`.
"""
self.case_sensitive = case_sensitive
self.math_expr = math_expr
self.tree = None
self.variables_used = set()
self.functions_used = set()
def vpa(tokens):
"""
When a variable is recognized, store it in `variables_used`.
"""
varname = tokens[0][0]
self.variables_used.add(varname)
def fpa(tokens):
"""
When a function is recognized, store it in `functions_used`.
"""
varname = tokens[0][0]
self.functions_used.add(varname)
self.variable_parse_action = vpa
self.function_parse_action = fpa
def parse_algebra(self):
"""
Parse an algebraic expression into a tree.
Store a `pyparsing.ParseResult` in `self.tree` with proper groupings to
reflect parenthesis and order of operations. Leave all operators in the
tree and do not parse any strings of numbers into their float versions.
Adding the groups and result names makes the `repr()` of the result
really gross. For debugging, use something like
print OBJ.tree.asXML()
"""
# 0.33 or 7 or .34 or 16.
number_part = Word(nums)
inner_number = (number_part + Optional("." + Optional(number_part))) | ("." + number_part)
# pyparsing allows spaces between tokens--`Combine` prevents that.
inner_number = Combine(inner_number)
# SI suffixes and percent.
number_suffix = MatchFirst(Literal(k) for k in SUFFIXES.keys())
# 0.33k or 17
plus_minus = Literal('+') | Literal('-')
number = Group(
Optional(plus_minus) +
inner_number +
Optional(CaselessLiteral("E") + Optional(plus_minus) + number_part) +
Optional(number_suffix)
)
number = number("number")
# Predefine recursive variables.
expr = Forward()
# Handle variables passed in. They must start with letters/underscores
# and may contain numbers afterward.
inner_varname = Word(alphas + "_", alphanums + "_")
varname = Group(inner_varname)("variable")
varname.setParseAction(self.variable_parse_action)
# Same thing for functions.
function = Group(inner_varname + Suppress("(") + expr + Suppress(")"))("function")
function.setParseAction(self.function_parse_action)
atom = number | function | varname | "(" + expr + ")"
atom = Group(atom)("atom")
# Do the following in the correct order to preserve order of operation.
pow_term = atom + ZeroOrMore("^" + atom)
pow_term = Group(pow_term)("power")
par_term = pow_term + ZeroOrMore('||' + pow_term) # 5k || 4k
par_term = Group(par_term)("parallel")
prod_term = par_term + ZeroOrMore((Literal('*') | Literal('/')) + par_term) # 7 * 5 / 4
prod_term = Group(prod_term)("product")
sum_term = Optional(plus_minus) + prod_term + ZeroOrMore(plus_minus + prod_term) # -5 + 4 - 3
sum_term = Group(sum_term)("sum")
# Finish the recursion.
expr << sum_term # pylint: disable=pointless-statement
self.tree = (expr + stringEnd).parseString(self.math_expr)[0]
def reduce_tree(self, handle_actions, terminal_converter=None):
"""
Call `handle_actions` recursively on `self.tree` and return result.
`handle_actions` is a dictionary of node names (e.g. 'product', 'sum',
etc.) to functions. These functions are of the following form:
-input: a list of processed child nodes. If it includes any terminal
nodes in the list, they will be given as their processed forms also.
-output: whatever to be passed to the level higher, and what to
return for the final node.
`terminal_converter` is a function that takes in a token and returns a
processed form. The default of `None` just leaves them as strings.
"""
def handle_node(node):
"""
Return the result representing the node, using recursion.
Call the appropriate `handle_action` for this node. As its inputs,
feed it the output of `handle_node` for each child node.
"""
if not isinstance(node, ParseResults):
# Then treat it as a terminal node.
if terminal_converter is None:
return node
else:
return terminal_converter(node)
node_name = node.getName()
if node_name not in handle_actions: # pragma: no cover
raise Exception(u"Unknown branch name '{}'".format(node_name))
action = handle_actions[node_name]
handled_kids = [handle_node(k) for k in node]
return action(handled_kids)
# Find the value of the entire tree.
return handle_node(self.tree)
def check_variables(self, valid_variables, valid_functions):
"""
Confirm that all the variables used in the tree are valid/defined.
Otherwise, raise an UndefinedVariable containing all bad variables.
"""
if self.case_sensitive:
casify = lambda x: x
else:
casify = lambda x: x.lower() # Lowercase for case insens.
# Test if casify(X) is valid, but return the actual bad input (i.e. X)
bad_vars = set(var for var in self.variables_used
if casify(var) not in valid_variables)
bad_vars.update(func for func in self.functions_used
if casify(func) not in valid_functions)
if bad_vars:
raise UndefinedVariable(' '.join(sorted(bad_vars)))
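# Editor's sketch: using ParseAugmenter.reduce_tree() directly, independently
# of evaluator(). The expression and the join-style actions are illustrative
# assumptions; they simply rebuild the canonical text of the parsed input.
def _example_reduce_tree():
    interpreter = ParseAugmenter("a + 2*b")
    interpreter.parse_algebra()
    # interpreter.variables_used now holds set(['a', 'b'])
    join = lambda tokens: "".join(str(t) for t in tokens)
    actions = dict((name, join) for name in
                   ('number', 'variable', 'function', 'atom',
                    'power', 'parallel', 'product', 'sum'))
    return interpreter.reduce_tree(actions)  # -> "a+2*b"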
| proversity-org/edx-platform | common/lib/calc/calc/calc.py | Python | agpl-3.0 | 13,906 |
"""
MS SQL Server database backend for Django.
"""
try:
import pyodbc as Database
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading pyodbc module: %s" % e)
import re
m = re.match(r'(\d+)\.(\d+)\.(\d+)(?:-beta(\d+))?', Database.version)
vlist = list(m.groups())
if vlist[3] is None: vlist[3] = '9999'
pyodbc_ver = tuple(map(int, vlist))
if pyodbc_ver < (2, 0, 38, 9999):
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("pyodbc 2.0.38 or newer is required; you have %s" % Database.version)
from django.db.backends import BaseDatabaseWrapper, BaseDatabaseFeatures, BaseDatabaseValidation
from django.db.backends.signals import connection_created
from django.conf import settings
from django import VERSION as DjangoVersion
if DjangoVersion[:2] == (1,2) :
from django import get_version
version_str = get_version()
if 'SVN' in version_str and int(version_str.split('SVN-')[-1]) < 11952: # django trunk revision 11952 Added multiple database support.
_DJANGO_VERSION = 11
else:
_DJANGO_VERSION = 12
elif DjangoVersion[:2] == (1,1):
_DJANGO_VERSION = 11
elif DjangoVersion[:2] == (1,0):
_DJANGO_VERSION = 10
elif DjangoVersion[0] == 1:
_DJANGO_VERSION = 13
else:
_DJANGO_VERSION = 9
from sql_server.pyodbc.operations import DatabaseOperations
from sql_server.pyodbc.client import DatabaseClient
from sql_server.pyodbc.creation import DatabaseCreation
from sql_server.pyodbc.introspection import DatabaseIntrospection
import os
import warnings
warnings.filterwarnings('error', 'The DATABASE_ODBC.+ is deprecated', DeprecationWarning, __name__, 0)
collation = 'Latin1_General_CI_AS'
if hasattr(settings, 'DATABASE_COLLATION'):
warnings.warn(
"The DATABASE_COLLATION setting is going to be deprecated, use DATABASE_OPTIONS['collation'] instead.",
DeprecationWarning
)
collation = settings.DATABASE_COLLATION
elif 'collation' in settings.DATABASE_OPTIONS:
collation = settings.DATABASE_OPTIONS['collation']
deprecated = (
('DATABASE_ODBC_DRIVER', 'driver'),
('DATABASE_ODBC_DSN', 'dsn'),
('DATABASE_ODBC_EXTRA_PARAMS', 'extra_params'),
)
for old, new in deprecated:
if hasattr(settings, old):
warnings.warn(
"The %s setting is deprecated, use DATABASE_OPTIONS['%s'] instead." % (old, new),
DeprecationWarning
)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
class DatabaseFeatures(BaseDatabaseFeatures):
uses_custom_query_class = True
can_use_chunked_reads = False
can_return_id_from_insert = True
#uses_savepoints = True
class DatabaseWrapper(BaseDatabaseWrapper):
drv_name = None
driver_needs_utf8 = None
MARS_Connection = False
unicode_results = False
datefirst = 7
# Collations: http://msdn2.microsoft.com/en-us/library/ms184391.aspx
# http://msdn2.microsoft.com/en-us/library/ms179886.aspx
# T-SQL LIKE: http://msdn2.microsoft.com/en-us/library/ms179859.aspx
# Full-Text search: http://msdn2.microsoft.com/en-us/library/ms142571.aspx
# CONTAINS: http://msdn2.microsoft.com/en-us/library/ms187787.aspx
# FREETEXT: http://msdn2.microsoft.com/en-us/library/ms176078.aspx
operators = {
# Since '=' is used not only for string comparison there is no way
# to make it case (in)sensitive. It will simply fallback to the
# database collation.
'exact': '= %s',
'iexact': "= UPPER(%s)",
'contains': "LIKE %s ESCAPE '\\' COLLATE " + collation,
'icontains': "LIKE UPPER(%s) ESCAPE '\\' COLLATE "+ collation,
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE %s ESCAPE '\\' COLLATE " + collation,
'endswith': "LIKE %s ESCAPE '\\' COLLATE " + collation,
'istartswith': "LIKE UPPER(%s) ESCAPE '\\' COLLATE " + collation,
'iendswith': "LIKE UPPER(%s) ESCAPE '\\' COLLATE " + collation,
# TODO: remove, keep native T-SQL LIKE wildcards support
# or use a "compatibility layer" and replace '*' with '%'
# and '.' with '_'
'regex': 'LIKE %s COLLATE ' + collation,
'iregex': 'LIKE %s COLLATE ' + collation,
# TODO: freetext, full-text contains...
}
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
if 'OPTIONS' in self.settings_dict:
self.MARS_Connection = self.settings_dict['OPTIONS'].get('MARS_Connection', False)
self.datefirst = self.settings_dict['OPTIONS'].get('datefirst', 7)
self.unicode_results = self.settings_dict['OPTIONS'].get('unicode_results', False)
if _DJANGO_VERSION >= 13:
self.features = DatabaseFeatures(self)
else:
self.features = DatabaseFeatures()
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
if _DJANGO_VERSION >= 12:
self.validation = BaseDatabaseValidation(self)
else:
self.validation = BaseDatabaseValidation()
self.connection = None
def _cursor(self):
new_conn = False
settings_dict = self.settings_dict
db_str, user_str, passwd_str, port_str = None, None, "", None
if _DJANGO_VERSION >= 12:
options = settings_dict['OPTIONS']
if settings_dict['NAME']:
db_str = settings_dict['NAME']
if settings_dict['HOST']:
host_str = settings_dict['HOST']
else:
host_str = 'localhost'
if settings_dict['USER']:
user_str = settings_dict['USER']
if settings_dict['PASSWORD']:
passwd_str = settings_dict['PASSWORD']
if settings_dict['PORT']:
port_str = settings_dict['PORT']
else:
options = settings_dict['DATABASE_OPTIONS']
if settings_dict['DATABASE_NAME']:
db_str = settings_dict['DATABASE_NAME']
if settings_dict['DATABASE_HOST']:
host_str = settings_dict['DATABASE_HOST']
else:
host_str = 'localhost'
if settings_dict['DATABASE_USER']:
user_str = settings_dict['DATABASE_USER']
if settings_dict['DATABASE_PASSWORD']:
passwd_str = settings_dict['DATABASE_PASSWORD']
if settings_dict['DATABASE_PORT']:
port_str = settings_dict['DATABASE_PORT']
if self.connection is None:
new_conn = True
if not db_str:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured('You need to specify NAME in your Django settings file.')
cstr_parts = []
if 'driver' in options:
driver = options['driver']
else:
if os.name == 'nt':
driver = 'SQL Server'
else:
driver = 'FreeTDS'
if 'dsn' in options:
cstr_parts.append('DSN=%s' % options['dsn'])
else:
# Only append DRIVER if DATABASE_ODBC_DSN hasn't been set
cstr_parts.append('DRIVER={%s}' % driver)
if os.name == 'nt' or driver == 'FreeTDS' and \
options.get('host_is_server', False):
if port_str:
host_str += ';PORT=%s' % port_str
cstr_parts.append('SERVER=%s' % host_str)
else:
cstr_parts.append('SERVERNAME=%s' % host_str)
if user_str:
cstr_parts.append('UID=%s;PWD=%s' % (user_str, passwd_str))
else:
if driver in ('SQL Server', 'SQL Native Client'):
cstr_parts.append('Trusted_Connection=yes')
else:
cstr_parts.append('Integrated Security=SSPI')
cstr_parts.append('DATABASE=%s' % db_str)
if self.MARS_Connection:
cstr_parts.append('MARS_Connection=yes')
if 'extra_params' in options:
cstr_parts.append(options['extra_params'])
connstr = ';'.join(cstr_parts)
autocommit = options.get('autocommit', False)
if self.unicode_results:
self.connection = Database.connect(connstr, \
autocommit=autocommit, \
unicode_results='True')
else:
self.connection = Database.connect(connstr, \
autocommit=autocommit)
connection_created.send(sender=self.__class__)
cursor = self.connection.cursor()
if new_conn:
# Set date format for the connection. Also, make sure Sunday is
# considered the first day of the week (to be consistent with the
# Django convention for the 'week_day' Django lookup) if the user
# hasn't told us otherwise
cursor.execute("SET DATEFORMAT ymd; SET DATEFIRST %s" % self.datefirst)
if self.ops.sql_server_ver < 2005:
self.creation.data_types['TextField'] = 'ntext'
self.features.can_return_id_from_insert = False
if self.driver_needs_utf8 is None:
self.driver_needs_utf8 = True
self.drv_name = self.connection.getinfo(Database.SQL_DRIVER_NAME).upper()
if self.drv_name in ('SQLSRV32.DLL', 'SQLNCLI.DLL', 'SQLNCLI10.DLL'):
self.driver_needs_utf8 = False
# http://msdn.microsoft.com/en-us/library/ms131686.aspx
if self.ops.sql_server_ver >= 2005 and self.drv_name in ('SQLNCLI.DLL', 'SQLNCLI10.DLL') and self.MARS_Connection:
# How to activate it: add 'MARS_Connection': True
# to the DATABASE_OPTIONS dictionary setting
self.features.can_use_chunked_reads = True
# FreeTDS can't execute some sql queries like CREATE DATABASE etc.
# in multi-statement, so we need to commit the above SQL sentence(s)
# to avoid this
if self.drv_name.startswith('LIBTDSODBC') and not self.connection.autocommit:
self.connection.commit()
return CursorWrapper(cursor, self.driver_needs_utf8)
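# Editor's illustrative sketch (assumption, not shipped configuration): the
# Django 1.2+ style DATABASES entry that _cursor() above expects to read.
# All values are placeholders.
#
# DATABASES = {
#     'default': {
#         'ENGINE': 'sql_server.pyodbc',
#         'NAME': 'mydb',
#         'HOST': 'dbserver',
#         'PORT': '1433',
#         'USER': 'someuser',
#         'PASSWORD': 'secret',
#         'OPTIONS': {
#             'driver': 'FreeTDS',        # 'SQL Server' is the default on Windows
#             'MARS_Connection': True,    # enables chunked reads on SQL Server 2005+
#             'extra_params': 'TDS_Version=8.0',
#         },
#     },
# }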
class CursorWrapper(object):
"""
A wrapper around the pyodbc cursor that takes into account a) some details of
the pyodbc DB-API 2.0 implementation and b) some common ODBC driver particularities.
"""
def __init__(self, cursor, driver_needs_utf8):
self.cursor = cursor
self.driver_needs_utf8 = driver_needs_utf8
self.last_sql = ''
self.last_params = ()
def format_sql(self, sql, n_params=None):
if self.driver_needs_utf8 and isinstance(sql, unicode):
# FreeTDS (and other ODBC drivers?) doesn't support Unicode
# yet, so we need to encode the SQL clause itself in utf-8
sql = sql.encode('utf-8')
# pyodbc uses '?' instead of '%s' as parameter placeholder.
if n_params is not None:
sql = sql % tuple('?' * n_params)
else:
if '%s' in sql:
sql = sql.replace('%s', '?')
return sql
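    # Editor's example (illustrative): with n_params=2,
    #   format_sql("UPDATE t SET a = %s WHERE b = %s", 2)
    # returns "UPDATE t SET a = ? WHERE b = ?", i.e. pyodbc-style placeholders.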
def format_params(self, params):
fp = []
for p in params:
if isinstance(p, unicode):
if self.driver_needs_utf8:
# FreeTDS (and other ODBC drivers?) doesn't support Unicode
# yet, so we need to encode parameters in utf-8
fp.append(p.encode('utf-8'))
else:
fp.append(p)
elif isinstance(p, str):
if self.driver_needs_utf8:
# TODO: use system encoding when calling decode()?
fp.append(p.decode('utf-8').encode('utf-8'))
else:
fp.append(p)
elif isinstance(p, type(True)):
if p:
fp.append(1)
else:
fp.append(0)
else:
fp.append(p)
return tuple(fp)
def execute(self, sql, params=()):
self.last_sql = sql
sql = self.format_sql(sql, len(params))
params = self.format_params(params)
self.last_params = params
return self.cursor.execute(sql, params)
def executemany(self, sql, params_list):
sql = self.format_sql(sql)
# pyodbc's cursor.executemany() doesn't support an empty param_list
if not params_list:
if '?' in sql:
return
else:
raw_pll = params_list
params_list = [self.format_params(p) for p in raw_pll]
return self.cursor.executemany(sql, params_list)
def format_results(self, rows):
"""
Decode data coming from the database if needed and convert rows to tuples
(pyodbc Rows are not sliceable).
"""
if not self.driver_needs_utf8:
return tuple(rows)
# FreeTDS (and other ODBC drivers?) doesn't support Unicode
# yet, so we need to decode utf-8 data coming from the DB
fr = []
for row in rows:
if isinstance(row, str):
fr.append(row.decode('utf-8'))
else:
fr.append(row)
return tuple(fr)
def fetchone(self):
row = self.cursor.fetchone()
if row is not None:
return self.format_results(row)
return []
def fetchmany(self, chunk):
return [self.format_results(row) for row in self.cursor.fetchmany(chunk)]
def fetchall(self):
return [self.format_results(row) for row in self.cursor.fetchall()]
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
| WilliamRen/django-pyodbc | sql_server/pyodbc/base.py | Python | bsd-3-clause | 14,415
# -*- coding: utf-8 -*-
#Between Thread and Process, Process should be preferred: it is more stable, and Processes can be distributed across multiple machines, whereas Threads can at most be spread over the CPUs of a single machine.
#Python's multiprocessing module not only supports multiple processes; its managers submodule can also distribute processes across multiple machines. A service process can act as the scheduler, distributing tasks to several other processes and relying on network communication. Because the managers module is well encapsulated, distributed multi-process programs are easy to write without knowing the details of the networking.
#For example: suppose we already have a multi-process program communicating through a Queue on one machine, and, because the task-processing process is heavily loaded, we now want to put the task-sending process and the task-processing process on two different machines. How can this be done with distributed processes?
#The existing Queue can still be used; by exposing the Queue over the network through the managers module, processes on other machines can access it.
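# Editor's sketch of the master process described above (an illustration under
# stated assumptions, not original code from this file). The port, authkey and
# the registered name 'get_task_queue' are placeholders; registering a lambda
# as the callable requires a fork-based platform (i.e. not Windows).
def _task_master_sketch():
    from multiprocessing.managers import BaseManager
    try:
        from queue import Queue          # Python 3
    except ImportError:
        from Queue import Queue          # Python 2
    task_queue = Queue()

    class QueueManager(BaseManager):
        pass

    # Expose the local queue on the network under a registered name
    QueueManager.register('get_task_queue', callable=lambda: task_queue)
    manager = QueueManager(address=('', 5000), authkey=b'abc')
    manager.start()
    # Workers on other machines connect with the same address/authkey and call
    # manager.get_task_queue() to obtain a proxy to this queue.
    shared = manager.get_task_queue()
    for task in range(10):
        shared.put(task)
    return manager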
| SoPudge/lxfpyanswer | 41processmana.py | Python | apache-2.0 | 995 |
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from . import test_business_requirement_sale_timesheet
| OCA/business-requirement | business_requirement_sale_timesheet/tests/__init__.py | Python | agpl-3.0 | 121 |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.dataproc_v1beta2.proto import jobs_pb2 as google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class JobControllerStub(object):
"""The JobController provides methods to manage jobs.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.SubmitJob = channel.unary_unary(
'/google.cloud.dataproc.v1beta2.JobController/SubmitJob',
request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.SubmitJobRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.Job.FromString,
)
self.GetJob = channel.unary_unary(
'/google.cloud.dataproc.v1beta2.JobController/GetJob',
request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.GetJobRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.Job.FromString,
)
self.ListJobs = channel.unary_unary(
'/google.cloud.dataproc.v1beta2.JobController/ListJobs',
request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.ListJobsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.ListJobsResponse.FromString,
)
self.UpdateJob = channel.unary_unary(
'/google.cloud.dataproc.v1beta2.JobController/UpdateJob',
request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.UpdateJobRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.Job.FromString,
)
self.CancelJob = channel.unary_unary(
'/google.cloud.dataproc.v1beta2.JobController/CancelJob',
request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.CancelJobRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.Job.FromString,
)
self.DeleteJob = channel.unary_unary(
'/google.cloud.dataproc.v1beta2.JobController/DeleteJob',
request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.DeleteJobRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
class JobControllerServicer(object):
"""The JobController provides methods to manage jobs.
"""
def SubmitJob(self, request, context):
"""Submits a job to a cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetJob(self, request, context):
"""Gets the resource representation for a job in a project.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListJobs(self, request, context):
"""Lists regions/{region}/jobs in a project.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateJob(self, request, context):
"""Updates a job in a project.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CancelJob(self, request, context):
"""Starts a job cancellation request. To access the job resource
after cancellation, call
[regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list) or
[regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get).
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteJob(self, request, context):
"""Deletes the job from the project. If the job is active, the delete fails,
and the response returns `FAILED_PRECONDITION`.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_JobControllerServicer_to_server(servicer, server):
rpc_method_handlers = {
'SubmitJob': grpc.unary_unary_rpc_method_handler(
servicer.SubmitJob,
request_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.SubmitJobRequest.FromString,
response_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.Job.SerializeToString,
),
'GetJob': grpc.unary_unary_rpc_method_handler(
servicer.GetJob,
request_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.GetJobRequest.FromString,
response_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.Job.SerializeToString,
),
'ListJobs': grpc.unary_unary_rpc_method_handler(
servicer.ListJobs,
request_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.ListJobsRequest.FromString,
response_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.ListJobsResponse.SerializeToString,
),
'UpdateJob': grpc.unary_unary_rpc_method_handler(
servicer.UpdateJob,
request_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.UpdateJobRequest.FromString,
response_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.Job.SerializeToString,
),
'CancelJob': grpc.unary_unary_rpc_method_handler(
servicer.CancelJob,
request_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.CancelJobRequest.FromString,
response_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.Job.SerializeToString,
),
'DeleteJob': grpc.unary_unary_rpc_method_handler(
servicer.DeleteJob,
request_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.DeleteJobRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.cloud.dataproc.v1beta2.JobController', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
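# Editor's usage sketch (not part of the generated code). The endpoint,
# project id, region and job id below are illustrative assumptions; a real
# Cloud Dataproc endpoint additionally requires an authenticated secure
# channel rather than grpc.insecure_channel.
def _example_get_job():
    channel = grpc.insecure_channel('localhost:50051')
    stub = JobControllerStub(channel)
    request = google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.GetJobRequest(
        project_id='my-project',
        region='global',
        job_id='job-1234',
    )
    # Blocking unary-unary call; returns a Job message on success.
    return stub.GetJob(request)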
| jonparrott/gcloud-python | dataproc/google/cloud/dataproc_v1beta2/proto/jobs_pb2_grpc.py | Python | apache-2.0 | 6,801 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('jirachat', '0003_auto_20150116_0411'),
]
operations = [
migrations.AlterField(
model_name='serverinfo',
name='timestamp',
field=models.CharField(max_length=255),
),
]
| wangzhe/jira-service | jirachat/migrations/0004_auto_20150122_0659.py | Python | gpl-2.0 | 411 |
from __future__ import print_function
import os
import vtk
# Read the data
os.chdir(os.path.dirname(__file__))
pl3d = vtk.vtkMultiBlockPLOT3DReader()
xyx_file = "volume/combxyz.bin"
q_file = "volume/combq.bin"
pl3d.SetXYZFileName(xyx_file)
pl3d.SetQFileName(q_file)
pl3d.SetScalarFunctionNumber(100)
pl3d.SetVectorFunctionNumber(202)
pl3d.Update()
blocks = pl3d.GetOutput()
b0 = blocks.GetBlock(0)
# Setup VTK environment
renderer = vtk.vtkRenderer()
render_window = vtk.vtkRenderWindow()
render_window.AddRenderer(renderer)
interactor = vtk.vtkRenderWindowInteractor()
interactor.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())
render_window.SetInteractor(interactor)
renderer.SetBackground(0.2,0.2,0.2)
interactor.Initialize()
# Draw Outline
outline = vtk.vtkStructuredGridOutlineFilter()
outline.SetInputData(b0)
outline_mapper = vtk.vtkPolyDataMapper()
outline_mapper.SetInputConnection(outline.GetOutputPort())
outline_actor = vtk.vtkActor()
outline_actor.SetMapper(outline_mapper)
outline_actor.GetProperty().SetColor(1,1,1)
renderer.AddActor(outline_actor)
renderer.ResetCamera()
# Threshold points
threshold = vtk.vtkThresholdPoints()
threshold.SetInputData(b0)
threshold.ThresholdByUpper(0.5)
# Draw arrows
arrow = vtk.vtkArrowSource()
glyphs = vtk.vtkGlyph3D()
glyphs.SetInputData(b0)
glyphs.SetSourceConnection(arrow.GetOutputPort())
glyphs.SetInputConnection(threshold.GetOutputPort())
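# Note (editor): SetInputConnection on port 0 replaces the earlier
# SetInputData(b0) call, so the glyphs are generated only at the thresholded points.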
glyphs.SetVectorModeToUseVector()
glyphs.SetScaleModeToScaleByVector()
glyphs.SetScaleFactor(0.005)
glyphs.SetColorModeToColorByVector()
# Mapper
glyph_mapper = vtk.vtkPolyDataMapper()
glyph_mapper.SetInputConnection(glyphs.GetOutputPort())
glyph_actor = vtk.vtkActor()
glyph_actor.SetMapper(glyph_mapper)
glyph_mapper.UseLookupTableScalarRangeOn()
renderer.AddActor(glyph_actor)
# Set color lookup table
glyphs.Update()
s0,sf = glyphs.GetOutput().GetScalarRange()
lut = vtk.vtkColorTransferFunction()
lut.AddRGBPoint(s0, 1,0,0)
lut.AddRGBPoint(sf, 0,1,0)
glyph_mapper.SetLookupTable(lut)
interactor.Start()
| diego0020/tutorial-vtk-pyqt | 00_read_and_draw.py | Python | mit | 2,029
import xbmc
import threading
import sys
from elementum.config import ONLY_CLIENT
from elementum.logger import log
from elementum.rpc import server_thread
from elementum.monitor import ElementumMonitor
from elementum.daemon import elementumd_thread
from elementum.osarch import PLATFORM
def run():
# Make sure the XBMC jsonrpc server is started.
xbmc.startServer(xbmc.SERVER_JSONRPCSERVER, True)
# Make the monitor
monitor = ElementumMonitor()
threads = [
threading.Thread(target=server_thread), # JSONRPC thread
]
if not ONLY_CLIENT and PLATFORM["fork"]:
sys.stderr.write("elementum: ONLY " + str(ONLY_CLIENT))
threads.append(threading.Thread(target=elementumd_thread, args=[monitor])) # Elementumd thread
for t in threads:
t.daemon = True
t.start()
# XBMC loop
while not xbmc.abortRequested:
xbmc.sleep(1000)
log.info("elementum: exiting elementumd")
| kreatorkodi/repository.torrentbr | plugin.video.elementum/resources/site-packages/elementum/service.py | Python | gpl-2.0 | 952 |
# Authors: Nils Wagner, Ed Schofield, Pauli Virtanen, John Travers
"""
Tests for numerical integration.
"""
import numpy
from numpy import arange, zeros, array, dot, sqrt, cos, sin, eye, pi, exp, \
allclose
from numpy.testing import *
from scipy.integrate import odeint, ode, complex_ode
#------------------------------------------------------------------------------
# Test ODE integrators
#------------------------------------------------------------------------------
class TestOdeint(TestCase):
"""
Check integrate.odeint
"""
def _do_problem(self, problem):
t = arange(0.0, problem.stop_t, 0.05)
z, infodict = odeint(problem.f, problem.z0, t, full_output=True)
assert problem.verify(z, t)
def test_odeint(self):
for problem_cls in PROBLEMS:
problem = problem_cls()
if problem.cmplx: continue
self._do_problem(problem)
class TestOde(TestCase):
"""
Check integrate.ode
"""
def _do_problem(self, problem, integrator, method='adams'):
# ode has callback arguments in different order than odeint
f = lambda t, z: problem.f(z, t)
jac = None
if hasattr(problem, 'jac'):
jac = lambda t, z: problem.jac(z, t)
ig = ode(f, jac)
ig.set_integrator(integrator,
atol=problem.atol/10,
rtol=problem.rtol/10,
method=method)
ig.set_initial_value(problem.z0, t=0.0)
z = ig.integrate(problem.stop_t)
assert ig.successful(), (problem, method)
assert problem.verify(array([z]), problem.stop_t), (problem, method)
def test_vode(self):
"""Check the vode solver"""
for problem_cls in PROBLEMS:
problem = problem_cls()
if problem.cmplx: continue
if not problem.stiff:
self._do_problem(problem, 'vode', 'adams')
self._do_problem(problem, 'vode', 'bdf')
def test_zvode(self):
"""Check the zvode solver"""
for problem_cls in PROBLEMS:
problem = problem_cls()
if not problem.stiff:
self._do_problem(problem, 'zvode', 'adams')
self._do_problem(problem, 'zvode', 'bdf')
def test_dopri5(self):
"""Check the dopri5 solver"""
for problem_cls in PROBLEMS:
problem = problem_cls()
if problem.cmplx: continue
if problem.stiff: continue
if hasattr(problem, 'jac'): continue
self._do_problem(problem, 'dopri5')
def test_dop853(self):
"""Check the dop853 solver"""
for problem_cls in PROBLEMS:
problem = problem_cls()
if problem.cmplx: continue
if problem.stiff: continue
if hasattr(problem, 'jac'): continue
self._do_problem(problem, 'dop853')
class TestComplexOde(TestCase):
"""
Check integrate.complex_ode
"""
def _do_problem(self, problem, integrator, method='adams'):
# ode has callback arguments in different order than odeint
f = lambda t, z: problem.f(z, t)
jac = None
if hasattr(problem, 'jac'):
jac = lambda t, z: problem.jac(z, t)
ig = complex_ode(f, jac)
ig.set_integrator(integrator,
atol=problem.atol/10,
rtol=problem.rtol/10,
method=method)
ig.set_initial_value(problem.z0, t=0.0)
z = ig.integrate(problem.stop_t)
assert ig.successful(), (problem, method)
assert problem.verify(array([z]), problem.stop_t), (problem, method)
def test_vode(self):
"""Check the vode solver"""
for problem_cls in PROBLEMS:
problem = problem_cls()
if not problem.stiff:
self._do_problem(problem, 'vode', 'adams')
else:
self._do_problem(problem, 'vode', 'bdf')
def test_dopri5(self):
"""Check the dopri5 solver"""
for problem_cls in PROBLEMS:
problem = problem_cls()
if problem.stiff: continue
if hasattr(problem, 'jac'): continue
self._do_problem(problem, 'dopri5')
def test_dop853(self):
"""Check the dop853 solver"""
for problem_cls in PROBLEMS:
problem = problem_cls()
if problem.stiff: continue
if hasattr(problem, 'jac'): continue
self._do_problem(problem, 'dop853')
#------------------------------------------------------------------------------
# Test problems
#------------------------------------------------------------------------------
class ODE:
"""
ODE problem
"""
stiff = False
cmplx = False
stop_t = 1
z0 = []
atol = 1e-6
rtol = 1e-5
class SimpleOscillator(ODE):
r"""
Free vibration of a simple oscillator::
m \ddot{u} + k u = 0, u(0) = u_0, \dot{u}(0) = \dot{u}_0
Solution::
u(t) = u_0*cos(sqrt(k/m)*t)+\dot{u}_0*sin(sqrt(k/m)*t)/sqrt(k/m)
"""
stop_t = 1 + 0.09
z0 = array([1.0, 0.1], float)
k = 4.0
m = 1.0
def f(self, z, t):
tmp = zeros((2,2), float)
tmp[0,1] = 1.0
tmp[1,0] = -self.k / self.m
return dot(tmp, z)
def verify(self, zs, t):
omega = sqrt(self.k / self.m)
u = self.z0[0]*cos(omega*t)+self.z0[1]*sin(omega*t)/omega
return allclose(u, zs[:,0], atol=self.atol, rtol=self.rtol)
class ComplexExp(ODE):
r"""The equation :lm:`\dot u = i u`"""
stop_t = 1.23*pi
z0 = exp([1j,2j,3j,4j,5j])
cmplx = True
def f(self, z, t):
return 1j*z
def jac(self, z, t):
return 1j*eye(5)
def verify(self, zs, t):
u = self.z0 * exp(1j*t)
return allclose(u, zs, atol=self.atol, rtol=self.rtol)
class Pi(ODE):
r"""Integrate 1/(t + 1j) from t=-10 to t=10"""
stop_t = 20
z0 = [0]
cmplx = True
def f(self, z, t):
return array([1./(t - 10 + 1j)])
def verify(self, zs, t):
u = -2j*numpy.arctan(10)
return allclose(u, zs[-1,:], atol=self.atol, rtol=self.rtol)
PROBLEMS = [SimpleOscillator, ComplexExp, Pi]
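# Illustrative sketch (not part of the original test suite and never invoked by
# it): how one of the problem classes above can be driven directly by
# scipy.integrate.ode, mirroring the _do_problem helper.
def _demo_simple_oscillator():
    import numpy as _np
    from scipy.integrate import ode as _ode
    problem = SimpleOscillator()
    # ode expects f(t, y) while the problem classes define f(y, t), hence the swap.
    solver = _ode(lambda t, z: problem.f(z, t))
    solver.set_integrator('vode', atol=problem.atol, rtol=problem.rtol)
    solver.set_initial_value(problem.z0, t=0.0)
    z = solver.integrate(problem.stop_t)
    return problem.verify(_np.array([z]), problem.stop_t)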
#------------------------------------------------------------------------------
if __name__ == "__main__":
run_module_suite()
| stefanv/scipy3 | scipy/integrate/tests/test_integrate.py | Python | bsd-3-clause | 6,456 |
# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of logilab-astng.
#
# logilab-astng is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# logilab-astng is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
"""This module contains the classes for "scoped" node, i.e. which are opening a
new local scope in the language definition : Module, Class, Function (and
Lambda, GenExpr, DictComp and SetComp to some extent).
"""
__doctype__ = "restructuredtext en"
import sys
from itertools import chain
from logilab.common.compat import builtins
from logilab.common.decorators import cached
from logilab.astng.exceptions import NotFoundError, \
ASTNGBuildingException, InferenceError
from logilab.astng.node_classes import Const, DelName, DelAttr, \
Dict, From, List, Pass, Raise, Return, Tuple, Yield, \
LookupMixIn, const_factory as cf, unpack_infer
from logilab.astng.bases import NodeNG, InferenceContext, Instance,\
YES, Generator, UnboundMethod, BoundMethod, _infer_stmts, copy_context, \
BUILTINS
from logilab.astng.mixins import FilterStmtsMixin
from logilab.astng.bases import Statement
from logilab.astng.manager import ASTNGManager
def remove_nodes(func, cls):
def wrapper(*args, **kwargs):
nodes = [n for n in func(*args, **kwargs) if not isinstance(n, cls)]
if not nodes:
raise NotFoundError()
return nodes
return wrapper
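# Illustrative note (added comment, not in the original source): remove_nodes is
# applied below as `getattr = remove_nodes(getattr, DelName)` (and similarly with
# DelAttr), so statements that delete a name are filtered out of lookup results
# and an empty result raises NotFoundError instead of returning an empty list.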
def function_to_method(n, klass):
if isinstance(n, Function):
if n.type == 'classmethod':
return BoundMethod(n, klass)
if n.type != 'staticmethod':
return UnboundMethod(n)
return n
def std_special_attributes(self, name, add_locals=True):
if add_locals:
locals = self.locals
else:
locals = {}
if name == '__name__':
return [cf(self.name)] + locals.get(name, [])
if name == '__doc__':
return [cf(self.doc)] + locals.get(name, [])
if name == '__dict__':
return [Dict()] + locals.get(name, [])
raise NotFoundError(name)
MANAGER = ASTNGManager()
def builtin_lookup(name):
"""lookup a name into the builtin module
return the list of matching statements and the astng for the builtin
module
"""
builtin_astng = MANAGER.astng_from_module(builtins)
if name == '__dict__':
return builtin_astng, ()
try:
stmts = builtin_astng.locals[name]
except KeyError:
stmts = ()
return builtin_astng, stmts
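# Illustrative usage sketch (not part of the original module and never called
# here): builtin_lookup resolves a name against the parsed builtin module.
def _demo_builtin_lookup():
    builtin_astng, stmts = builtin_lookup('len')
    # stmts holds the nodes defining 'len' in the builtins astng, or () when the
    # name is unknown.
    return builtin_astng, stmts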
# TODO move this Mixin to mixins.py; problem: 'Function' in _scope_lookup
class LocalsDictNodeNG(LookupMixIn, NodeNG):
""" this class provides locals handling common to Module, Function
and Class nodes, including a dict like interface for direct access
to locals information
"""
# attributes below are set by the builder module or by raw factories
# dictionary of locals with name as key and node defining the local as
# value
def qname(self):
"""return the 'qualified' name of the node, eg module.name,
module.class.name ...
"""
if self.parent is None:
return self.name
return '%s.%s' % (self.parent.frame().qname(), self.name)
def frame(self):
"""return the first parent frame node (i.e. Module, Function or Class)
"""
return self
def scope(self):
"""return the first node defining a new scope (i.e. Module,
Function, Class, Lambda but also GenExpr, DictComp and SetComp)
"""
return self
def _scope_lookup(self, node, name, offset=0):
"""XXX method for interfacing the scope lookup"""
try:
stmts = node._filter_stmts(self.locals[name], self, offset)
except KeyError:
stmts = ()
if stmts:
return self, stmts
if self.parent: # i.e. not Module
# nested scope: if parent scope is a function, that's fine
# else jump to the module
pscope = self.parent.scope()
if not pscope.is_function:
pscope = pscope.root()
return pscope.scope_lookup(node, name)
return builtin_lookup(name) # Module
def set_local(self, name, stmt):
"""define <name> in locals (<stmt> is the node defining the name)
if the node is a Module node (i.e. has globals), add the name to
globals
if the name is already defined, ignore it
"""
#assert not stmt in self.locals.get(name, ()), (self, stmt)
self.locals.setdefault(name, []).append(stmt)
__setitem__ = set_local
def _append_node(self, child):
"""append a child, linking it in the tree"""
self.body.append(child)
child.parent = self
def add_local_node(self, child_node, name=None):
"""append a child which should alter locals to the given node"""
if name != '__class__':
            # adding the __class__ node as a child would cause infinite recursion later!
self._append_node(child_node)
self.set_local(name or child_node.name, child_node)
def __getitem__(self, item):
"""method from the `dict` interface returning the first node
associated with the given name in the locals dictionary
:type item: str
:param item: the name of the locally defined object
:raises KeyError: if the name is not defined
"""
return self.locals[item][0]
def __iter__(self):
"""method from the `dict` interface returning an iterator on
`self.keys()`
"""
return iter(list(self.keys()))
def keys(self):
"""method from the `dict` interface returning a tuple containing
locally defined names
"""
return list(self.locals.keys())
def values(self):
"""method from the `dict` interface returning a tuple containing
locally defined nodes which are instance of `Function` or `Class`
"""
return [self[key] for key in list(self.keys())]
def items(self):
"""method from the `dict` interface returning a list of tuple
containing each locally defined name with its associated node,
which is an instance of `Function` or `Class`
"""
return list(zip(list(self.keys()), list(self.values())))
def __contains__(self, name):
return name in self.locals
has_key = __contains__
# Module #####################################################################
class Module(LocalsDictNodeNG):
_astng_fields = ('body',)
fromlineno = 0
lineno = 0
# attributes below are set by the builder module or by raw factories
    # the file from which the astng representation has been extracted. It may
    # be None if the representation has been built from a built-in module
file = None
# encoding of python source file, so we can get unicode out of it (python2
# only)
file_encoding = None
# the module name
name = None
# boolean for astng built from source (i.e. ast)
pure_python = None
# boolean for package module
package = None
# dictionary of globals with name as key and node defining the global
# as value
globals = None
# names of python special attributes (handled by getattr impl.)
special_attributes = set(('__name__', '__doc__', '__file__', '__path__',
'__dict__'))
# names of module attributes available through the global scope
scope_attrs = set(('__name__', '__doc__', '__file__', '__path__'))
def __init__(self, name, doc, pure_python=True):
self.name = name
self.doc = doc
self.pure_python = pure_python
self.locals = self.globals = {}
self.body = []
@property
def file_stream(self):
if self.file is not None:
return open(self.file)
return None
def block_range(self, lineno):
"""return block line numbers.
start from the beginning whatever the given lineno
"""
return self.fromlineno, self.tolineno
def scope_lookup(self, node, name, offset=0):
if name in self.scope_attrs and not name in self.locals:
try:
return self, self.getattr(name)
except NotFoundError:
return self, ()
return self._scope_lookup(node, name, offset)
def pytype(self):
return '%s.module' % BUILTINS
def display_type(self):
return 'Module'
def getattr(self, name, context=None, ignore_locals=False):
if name in self.special_attributes:
if name == '__file__':
return [cf(self.file)] + self.locals.get(name, [])
if name == '__path__' and self.package:
return [List()] + self.locals.get(name, [])
return std_special_attributes(self, name)
if not ignore_locals and name in self.locals:
return self.locals[name]
if self.package:
try:
return [self.import_module(name, relative_only=True)]
except ASTNGBuildingException:
raise NotFoundError(name)
except Exception:# XXX pylint tests never pass here; do we need it?
import traceback
traceback.print_exc()
raise NotFoundError(name)
getattr = remove_nodes(getattr, DelName)
def igetattr(self, name, context=None):
"""inferred getattr"""
# set lookup name since this is necessary to infer on import nodes for
# instance
context = copy_context(context)
context.lookupname = name
try:
return _infer_stmts(self.getattr(name, context), context, frame=self)
except NotFoundError:
raise InferenceError(name)
def fully_defined(self):
"""return True if this module has been built from a .py file
and so contains a complete representation including the code
"""
return self.file is not None and self.file.endswith('.py')
def statement(self):
"""return the first parent node marked as statement node
consider a module as a statement...
"""
return self
def previous_sibling(self):
"""module has no sibling"""
return
def next_sibling(self):
"""module has no sibling"""
return
if sys.version_info < (2, 8):
def absolute_import_activated(self):
for stmt in self.locals.get('absolute_import', ()):
if isinstance(stmt, From) and stmt.modname == '__future__':
return True
return False
else:
absolute_import_activated = lambda self: True
def import_module(self, modname, relative_only=False, level=None):
"""import the given module considering self as context"""
if relative_only and level is None:
level = 0
absmodname = self.relative_to_absolute_name(modname, level)
try:
return MANAGER.astng_from_module_name(absmodname)
except ASTNGBuildingException:
# we only want to import a sub module or package of this module,
# skip here
if relative_only:
raise
return MANAGER.astng_from_module_name(modname)
def relative_to_absolute_name(self, modname, level):
"""return the absolute module name for a relative import.
The relative import can be implicit or explicit.
"""
        # XXX this returns nonsense when called on an absolute import
# like 'pylint.checkers.logilab.astng.utils'
# XXX doesn't return absolute name if self.name isn't absolute name
if self.absolute_import_activated() and level is None:
return modname
if level:
if self.package:
level = level - 1
package_name = self.name.rsplit('.', level)[0]
elif self.package:
package_name = self.name
else:
package_name = self.name.rsplit('.', 1)[0]
if package_name:
if not modname:
return package_name
return '%s.%s' % (package_name, modname)
return modname
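    # Illustrative example (added comment, not in the original source): for a
    # hypothetical non-package module named 'pkg.sub', an explicit relative
    # import `from . import helper` reaches relative_to_absolute_name with
    # modname='helper' and level=1; package_name becomes 'pkg' via
    # rsplit('.', 1)[0] and the returned absolute name is 'pkg.helper'.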
def wildcard_import_names(self):
"""return the list of imported names when this module is 'wildcard
imported'
It doesn't include the '__builtins__' name which is added by the
current CPython implementation of wildcard imports.
"""
# take advantage of a living module if it exists
try:
living = sys.modules[self.name]
except KeyError:
pass
else:
try:
return living.__all__
except AttributeError:
return [name for name in list(living.__dict__.keys())
if not name.startswith('_')]
# else lookup the astng
#
# We separate the different steps of lookup in try/excepts
# to avoid catching too many Exceptions
# However, we can not analyse dynamically constructed __all__
try:
all = self['__all__']
except KeyError:
return [name for name in list(self.keys()) if not name.startswith('_')]
try:
explicit = next(all.assigned_stmts())
except InferenceError:
return [name for name in list(self.keys()) if not name.startswith('_')]
except AttributeError:
# not an assignment node
# XXX infer?
return [name for name in list(self.keys()) if not name.startswith('_')]
try:
            # should be a Tuple/List of constant strings (a single bare string is not allowed)
return [const.value for const in explicit.elts]
except AttributeError:
return [name for name in list(self.keys()) if not name.startswith('_')]
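# Illustrative note (added comment, not in the original source): the method above
# mirrors the runtime semantics of `from mod import *` -- it prefers a living
# module's __all__, then a literal __all__ assignment found in the parsed source,
# and finally falls back to every public (non-underscore) local name.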
class ComprehensionScope(LocalsDictNodeNG):
def frame(self):
return self.parent.frame()
scope_lookup = LocalsDictNodeNG._scope_lookup
class GenExpr(ComprehensionScope):
_astng_fields = ('elt', 'generators')
def __init__(self):
self.locals = {}
self.elt = None
self.generators = []
class DictComp(ComprehensionScope):
_astng_fields = ('key', 'value', 'generators')
def __init__(self):
self.locals = {}
self.key = None
self.value = None
self.generators = []
class SetComp(ComprehensionScope):
_astng_fields = ('elt', 'generators')
def __init__(self):
self.locals = {}
self.elt = None
self.generators = []
class _ListComp(NodeNG):
"""class representing a ListComp node"""
_astng_fields = ('elt', 'generators')
elt = None
generators = None
if sys.version_info >= (3, 0):
class ListComp(_ListComp, ComprehensionScope):
"""class representing a ListComp node"""
def __init__(self):
self.locals = {}
else:
class ListComp(_ListComp):
"""class representing a ListComp node"""
# Function ###################################################################
class Lambda(LocalsDictNodeNG, FilterStmtsMixin):
_astng_fields = ('args', 'body',)
name = '<lambda>'
# function's type, 'function' | 'method' | 'staticmethod' | 'classmethod'
type = 'function'
def __init__(self):
self.locals = {}
self.args = []
self.body = []
def pytype(self):
if 'method' in self.type:
return '%s.instancemethod' % BUILTINS
return '%s.function' % BUILTINS
def display_type(self):
if 'method' in self.type:
return 'Method'
return 'Function'
def callable(self):
return True
def argnames(self):
"""return a list of argument names"""
if self.args.args: # maybe None with builtin functions
names = _rec_get_names(self.args.args)
else:
names = []
if self.args.vararg:
names.append(self.args.vararg)
if self.args.kwarg:
names.append(self.args.kwarg)
return names
def infer_call_result(self, caller, context=None):
"""infer what a function is returning when called"""
return self.body.infer(context)
def scope_lookup(self, node, name, offset=0):
if node in self.args.defaults:
frame = self.parent.frame()
            # line offset to avoid that def func(f=func) resolves the default
            # value to the function being defined
offset = -1
else:
# check this is not used in function decorators
frame = self
return frame._scope_lookup(node, name, offset)
class Function(Statement, Lambda):
_astng_fields = ('decorators', 'args', 'body')
special_attributes = set(('__name__', '__doc__', '__dict__'))
is_function = True
# attributes below are set by the builder module or by raw factories
blockstart_tolineno = None
decorators = None
def __init__(self, name, doc):
self.locals = {}
self.args = []
self.body = []
self.decorators = None
self.name = name
self.doc = doc
self.extra_decorators = []
self.instance_attrs = {}
def set_line_info(self, lastchild):
self.fromlineno = self.lineno
# lineno is the line number of the first decorator, we want the def statement lineno
if self.decorators is not None:
self.fromlineno += sum(node.tolineno - node.lineno + 1
for node in self.decorators.nodes)
self.tolineno = lastchild.tolineno
self.blockstart_tolineno = self.args.tolineno
def block_range(self, lineno):
"""return block line numbers.
start from the "def" position whatever the given lineno
"""
return self.fromlineno, self.tolineno
def getattr(self, name, context=None):
"""this method doesn't look in the instance_attrs dictionary since it's
done by an Instance proxy at inference time.
"""
if name == '__module__':
return [cf(self.root().qname())]
if name in self.instance_attrs:
return self.instance_attrs[name]
return std_special_attributes(self, name, False)
def is_method(self):
"""return true if the function node should be considered as a method"""
# check we are defined in a Class, because this is usually expected
        # (e.g. pylint...) when is_method() returns True
return self.type != 'function' and isinstance(self.parent.frame(), Class)
def decoratornames(self):
"""return a list of decorator qualified names"""
result = set()
decoratornodes = []
if self.decorators is not None:
decoratornodes += self.decorators.nodes
decoratornodes += self.extra_decorators
for decnode in decoratornodes:
for infnode in decnode.infer():
result.add(infnode.qname())
return result
decoratornames = cached(decoratornames)
def is_bound(self):
"""return true if the function is bound to an Instance or a class"""
return self.type == 'classmethod'
def is_abstract(self, pass_is_abstract=True):
"""return true if the method is abstract
It's considered as abstract if the only statement is a raise of
        NotImplementedError, or, if pass_is_abstract, a pass statement
"""
for child_node in self.body:
if isinstance(child_node, Raise):
if child_node.raises_not_implemented():
return True
if pass_is_abstract and isinstance(child_node, Pass):
return True
return False
# empty function is the same as function with a single "pass" statement
if pass_is_abstract:
return True
def is_generator(self):
"""return true if this is a generator function"""
# XXX should be flagged, not computed
try:
return next(self.nodes_of_class(Yield, skip_klass=(Function, Lambda)))
except StopIteration:
return False
def infer_call_result(self, caller, context=None):
"""infer what a function is returning when called"""
if self.is_generator():
yield Generator()
return
returns = self.nodes_of_class(Return, skip_klass=Function)
for returnnode in returns:
if returnnode.value is None:
yield Const(None)
else:
try:
for infered in returnnode.value.infer(context):
yield infered
except InferenceError:
yield YES
def _rec_get_names(args, names=None):
"""return a list of all argument names"""
if names is None:
names = []
for arg in args:
if isinstance(arg, Tuple):
_rec_get_names(arg.elts, names)
else:
names.append(arg.name)
return names
# Class ######################################################################
def _class_type(klass, ancestors=None):
"""return a Class node type to differ metaclass, interface and exception
from 'regular' classes
"""
    # XXX we have to store ancestors in case we have an ancestor loop
if klass._type is not None:
return klass._type
if klass.name == 'type':
klass._type = 'metaclass'
elif klass.name.endswith('Interface'):
klass._type = 'interface'
elif klass.name.endswith('Exception'):
klass._type = 'exception'
else:
if ancestors is None:
ancestors = set()
if klass in ancestors:
            # XXX we are in an ancestor loop and have found no type
klass._type = 'class'
return 'class'
ancestors.add(klass)
# print >> sys.stderr, '_class_type', repr(klass)
for base in klass.ancestors(recurs=False):
if _class_type(base, ancestors) != 'class':
klass._type = base.type
break
if klass._type is None:
klass._type = 'class'
return klass._type
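# Illustrative example (added comment, not in the original source): a hypothetical
# `class FooException(Exception)` is classified as 'exception' by the name-suffix
# check above, while an ordinary class with no special ancestors falls back to
# 'class' after the ancestor walk.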
def _iface_hdlr(iface_node):
"""a handler function used by interfaces to handle suspicious
interface nodes
"""
return True
class Class(Statement, LocalsDictNodeNG, FilterStmtsMixin):
# some of the attributes below are set by the builder module or
# by a raw factories
# a dictionary of class instances attributes
_astng_fields = ('decorators', 'bases', 'body') # name
decorators = None
special_attributes = set(('__name__', '__doc__', '__dict__', '__module__',
'__bases__', '__mro__', '__subclasses__'))
blockstart_tolineno = None
_type = None
type = property(_class_type,
doc="class'type, possible values are 'class' | "
"'metaclass' | 'interface' | 'exception'")
def __init__(self, name, doc):
self.instance_attrs = {}
self.locals = {}
self.bases = []
self.body = []
self.name = name
self.doc = doc
def _newstyle_impl(self, context=None):
if context is None:
context = InferenceContext()
if self._newstyle is not None:
return self._newstyle
for base in self.ancestors(recurs=False, context=context):
if base._newstyle_impl(context):
self._newstyle = True
break
if self._newstyle is None:
self._newstyle = False
return self._newstyle
_newstyle = None
newstyle = property(_newstyle_impl,
doc="boolean indicating if it's a new style class"
"or not")
def set_line_info(self, lastchild):
self.fromlineno = self.lineno
self.blockstart_tolineno = self.bases and self.bases[-1].tolineno or self.fromlineno
if lastchild is not None:
self.tolineno = lastchild.tolineno
# else this is a class with only a docstring, then tolineno is (should be) already ok
def block_range(self, lineno):
"""return block line numbers.
start from the "class" position whatever the given lineno
"""
return self.fromlineno, self.tolineno
def pytype(self):
if self.newstyle:
return '%s.type' % BUILTINS
return '%s.classobj' % BUILTINS
def display_type(self):
return 'Class'
def callable(self):
return True
def infer_call_result(self, caller, context=None):
"""infer what a class is returning when called"""
yield Instance(self)
def scope_lookup(self, node, name, offset=0):
if node in self.bases:
frame = self.parent.frame()
            # line offset to avoid that class A(A) resolves the ancestor to
            # the class being defined
offset = -1
else:
frame = self
return frame._scope_lookup(node, name, offset)
    # list of parent classes as a list of strings (i.e. names as they appear
# in the class definition) XXX bw compat
def basenames(self):
return [bnode.as_string() for bnode in self.bases]
basenames = property(basenames)
def ancestors(self, recurs=True, context=None):
"""return an iterator on the node base classes in a prefixed
depth first order
:param recurs:
boolean indicating if it should recurse or return direct
ancestors only
"""
# FIXME: should be possible to choose the resolution order
# XXX inference make infinite loops possible here (see BaseTransformer
# manipulation in the builder module for instance)
yielded = set([self])
if context is None:
context = InferenceContext()
for stmt in self.bases:
with context.restore_path():
try:
for baseobj in stmt.infer(context):
if not isinstance(baseobj, Class):
# duh ?
continue
if baseobj in yielded:
continue # cf xxx above
yielded.add(baseobj)
yield baseobj
if recurs:
for grandpa in baseobj.ancestors(True, context):
if grandpa in yielded:
continue # cf xxx above
yielded.add(grandpa)
yield grandpa
except InferenceError:
# XXX log error ?
continue
def local_attr_ancestors(self, name, context=None):
"""return an iterator on astng representation of parent classes
which have <name> defined in their locals
"""
for astng in self.ancestors(context=context):
if name in astng:
yield astng
def instance_attr_ancestors(self, name, context=None):
"""return an iterator on astng representation of parent classes
which have <name> defined in their instance attribute dictionary
"""
for astng in self.ancestors(context=context):
if name in astng.instance_attrs:
yield astng
def has_base(self, node):
return node in self.bases
def local_attr(self, name, context=None):
"""return the list of assign node associated to name in this class
locals or in its parents
:raises `NotFoundError`:
            if no attribute with this name has been found in this class or
its parent classes
"""
try:
return self.locals[name]
except KeyError:
            # get it from the first parent implementing it, if any
for class_node in self.local_attr_ancestors(name, context):
return class_node.locals[name]
raise NotFoundError(name)
local_attr = remove_nodes(local_attr, DelAttr)
def instance_attr(self, name, context=None):
"""return the astng nodes associated to name in this class instance
attributes dictionary and in its parents
:raises `NotFoundError`:
            if no attribute with this name has been found in this class or
its parent classes
"""
values = self.instance_attrs.get(name, [])
# get all values from parents
for class_node in self.instance_attr_ancestors(name, context):
values += class_node.instance_attrs[name]
if not values:
raise NotFoundError(name)
return values
instance_attr = remove_nodes(instance_attr, DelAttr)
def instanciate_class(self):
"""return Instance of Class node, else return self"""
return Instance(self)
def getattr(self, name, context=None):
"""this method doesn't look in the instance_attrs dictionary since it's
done by an Instance proxy at inference time.
It may return a YES object if the attribute has not been actually
found but a __getattr__ or __getattribute__ method is defined
"""
values = self.locals.get(name, [])
if name in self.special_attributes:
if name == '__module__':
return [cf(self.root().qname())] + values
# FIXME: do we really need the actual list of ancestors?
# returning [Tuple()] + values don't break any test
# this is ticket http://www.logilab.org/ticket/52785
# XXX need proper meta class handling + MRO implementation
if name == '__bases__' or (name == '__mro__' and self.newstyle):
node = Tuple()
node.items = self.ancestors(recurs=True, context=context)
return [node] + values
return std_special_attributes(self, name)
# don't modify the list in self.locals!
values = list(values)
for classnode in self.ancestors(recurs=True, context=context):
values += classnode.locals.get(name, [])
if not values:
raise NotFoundError(name)
return values
def igetattr(self, name, context=None):
"""inferred getattr, need special treatment in class to handle
descriptors
"""
# set lookup name since this is necessary to infer on import nodes for
# instance
context = copy_context(context)
context.lookupname = name
try:
for infered in _infer_stmts(self.getattr(name, context), context,
frame=self):
# yield YES object instead of descriptors when necessary
if not isinstance(infered, Const) and isinstance(infered, Instance):
try:
infered._proxied.getattr('__get__', context)
except NotFoundError:
yield infered
else:
yield YES
else:
yield function_to_method(infered, self)
except NotFoundError:
if not name.startswith('__') and self.has_dynamic_getattr(context):
# class handle some dynamic attributes, return a YES object
yield YES
else:
raise InferenceError(name)
def has_dynamic_getattr(self, context=None):
"""return True if the class has a custom __getattr__ or
__getattribute__ method
"""
# need to explicitly handle optparse.Values (setattr is not detected)
if self.name == 'Values' and self.root().name == 'optparse':
return True
try:
self.getattr('__getattr__', context)
return True
except NotFoundError:
#if self.newstyle: XXX cause an infinite recursion error
try:
getattribute = self.getattr('__getattribute__', context)[0]
if getattribute.root().name != BUILTINS:
# class has a custom __getattribute__ defined
return True
except NotFoundError:
pass
return False
def methods(self):
"""return an iterator on all methods defined in the class and
its ancestors
"""
done = {}
for astng in chain(iter((self,)), self.ancestors()):
for meth in astng.mymethods():
if meth.name in done:
continue
done[meth.name] = None
yield meth
def mymethods(self):
"""return an iterator on all methods defined in the class"""
for member in list(self.values()):
if isinstance(member, Function):
yield member
def interfaces(self, herited=True, handler_func=_iface_hdlr):
"""return an iterator on interfaces implemented by the given
class node
"""
# FIXME: what if __implements__ = (MyIFace, MyParent.__implements__)...
try:
implements = Instance(self).getattr('__implements__')[0]
except NotFoundError:
return
if not herited and not implements.frame() is self:
return
found = set()
missing = False
for iface in unpack_infer(implements):
if iface is YES:
missing = True
continue
if not iface in found and handler_func(iface):
found.add(iface)
yield iface
if missing:
raise InferenceError()
| tlksio/tlksio | env/lib/python3.4/site-packages/logilab/astng/scoped_nodes.py | Python | mit | 34,433 |
from lib.model import BaseModel, Field
import unittest
import time
class TestModel (BaseModel):
field_a = Field()
field_b = Field()
class TestDefaultModel (BaseModel):
field_a = Field(default='default')
field_b = Field(default='hello')
class TestRequiredModel (BaseModel):
field_a = Field(required=True)
field_b = Field(default='test', required=True)
class TestCallableDefaultModel (BaseModel):
field_a = Field(default=time.time, required=True)
class TestBaseModel (unittest.TestCase):
def setUp (self):
pass
def test_metaclass (self):
t = TestDefaultModel()
self.assertTrue('field_a' in t)
self.assertTrue('field_b' in t)
def test_constructor_field (self):
t = TestModel(**dict(
field_a = '1',
field_b = '2',
            c = 'd' # should not be set as an attribute since it's not defined
))
self.assertTrue('c' not in t.__dict__)
def test_constructor_default (self):
t = TestModel()
self.assertTrue(t.field_a == None)
def test_descriptor_instances (self):
t = TestModel()
b = TestModel()
self.assertFalse(t == b)
t.field_a = 1
t.field_b = 2
self.assertFalse(t.field_a == b.field_a)
b.field_b = t.field_b
self.assertTrue(t.field_b == b.field_b)
def test_set_descriptor_value (self):
t = TestModel(field_a='testing', field_b='woohoo')
self.assertTrue(t.field_a == 'testing')
t.field_a = 'testing2'
self.assertTrue(t.field_a == 'testing2')
self.assertFalse(t.field_a == t.field_b)
def test_delete_descriptor_value (self):
t = TestModel(field_a='test')
self.assertTrue(t.field_a == 'test')
t.field_b = t.field_a
self.assertTrue(t.field_a == t.field_b)
del t.field_a
self.assertTrue(t.field_a == None)
self.assertTrue(t.field_b == 'test')
self.assertFalse(t.field_a == t.field_b)
def test_default_descriptor_value (self):
t = TestDefaultModel()
self.assertTrue(t.field_a == 'default')
self.assertTrue(t.field_b == 'hello')
t.field_a = 'hi'
self.assertTrue(t.field_a == 'hi')
self.assertTrue(t.field_b == 'hello')
del t.field_a
self.assertTrue(t.field_a == 'default')
self.assertTrue(t.field_b == 'hello')
def test_required_fields_set (self):
t = TestRequiredModel()
errors = t.validate()
# In this case only one error should occur because we assigned a
# default value for field_b
self.assertTrue(len(errors) == 1)
self.assertTrue(t.field_b == 'test')
t.field_a = 'woo'
errors = t.validate()
self.assertTrue(len(errors) == 0)
def test_callable_default_values (self):
t = TestCallableDefaultModel()
self.assertTrue(type(t.field_a) == float)
errors = t.validate()
self.assertTrue(len(errors) == 0)
def suite ():
suite = unittest.TestSuite()
    # "testModel" is not a method of TestBaseModel; load all of its tests instead.
    suite.addTests(unittest.defaultTestLoader.loadTestsFromTestCase(TestBaseModel))
return suite
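# Illustrative entry point (not present in the original file): lets the module be
# executed directly, e.g. `python model_test.py`.
if __name__ == '__main__':
    unittest.main()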
| ianlivingstone/Bologna | bologna/tests/model_test.py | Python | mit | 3,292 |
import pstats
p = pstats.Stats('nohats.profile')
p.sort_stats('time').print_stats(30)
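# Optional variants (illustrative additions, not in the original script): other
# common views over the same profile data.
# p.strip_dirs().sort_stats('cumulative').print_stats(30)
# p.print_callers(10)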
| victorvde/dota2_nohats | stats.py | Python | mit | 86 |
# -*- coding: UTF-8 -*-
"""
This file is part of GEOVAL.
(c) 2016- Alexander Loew
For COPYING and LICENSE details, please refer to the LICENSE file
"""
# http://stackoverflow.com/questions/37742912/how-to-compile-pyx-using-cythonize-inside-setup-py
# For instance `python setup.py build_ext --inplace`, or `pip install -e .` in the
# package directory. A good introduction into packaging can be found in
# https://python-packaging-user-guide.readthedocs.org/en/latest/index.html
# from setuptools import setup
from distutils.core import setup  # use distutils as this allows building extensions in place
import os
# import glob
# the setuptools are supposed to be used as a standard. That's why we omit
# usage of distutils here
# example of setup.py can be found here:
# https://github.com/pypa/sampleproject/blob/master/setup.py
# a small example how to build dependencies is given here:
# http://stackoverflow.com/questions/11010151/distributing-a-shared-library-and-some-c-code-with-a-cython-extension-module
# import os
import numpy as np
import json
# from setuptools import setup #, Extension
from setuptools import find_packages # Always prefer setuptools over distutils
# from Cython.Distutils import build_ext
from Cython.Build import cythonize
# requires scipy:
# http://stackoverflow.com/questions/11128070/cannot-import-minimize-in-scipy
# install_requires = ["numpy>0.1", "cdo>1.2", "netCDF4", "pytz",
# "matplotlib", 'shapely', 'cartopy', 'cython', 'scipy']
#~ ext_polygon_utils = Extension('polygon_utils',
#~ sources=['.' + os.sep + 'geoval' + os.sep + 'polygon' +
#~ os.sep + 'polygon_utils.pyx'],
#~ # this is needed to get proper information on
#~ # numpy headers
#~ include_dirs=[np.get_include()]
#~ )
def get_current_version():
ppath = os.path.dirname(os.path.realpath(__file__))
return json.load(open(ppath + os.sep + 'geoval' + os.sep + 'version.json'))
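# Illustrative note (assumption, not taken from the repository): version.json is
# expected to contain a plain JSON string such as "0.1.0", since the loaded value
# is passed directly to setup(version=...) below.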
def get_packages():
#find_packages(exclude=['contrib', 'docs', 'tests*']),
return find_packages()
setup(name='geoval',
version=get_current_version(),
description='geoval - python based geodata evaluation package',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
# packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
packages=get_packages(),
#~ package_dir={'pycmbs': 'pycmbs'},
#~ package_data={'pycmbs': ['benchmarking/configuration/*',
#~ 'benchmarking/logo/*', 'version.json']},
author="Alexander Loew",
author_email='[email protected]',
maintainer='Alexander Loew',
maintainer_email='[email protected]',
license='APACHE 2',
url='https://github.com/pygeo/geoval',
long_description='Geoval is a basic package for geospatial data analysis in python',
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
# install_requires=install_requires,
keywords=["data", "science", "climate", "meteorology",
"model evaluation", "benchmarking", "metrics"],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target
# platform.
#~ entry_points={
#~ 'console_scripts': [
#~ 'pycmbs_benchmarking = pycmbs_benchmarking:main'
#~ ]},
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
# 'Development Status :: 4 - beta',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Atmospheric Science',
'Topic :: Scientific/Engineering :: GIS',
'Topic :: Scientific/Engineering :: Visualization',
# Pick your license as you wish (should match "license" above)
# :w'License :: OSI Approved :: Apache 2.0',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7'
],
#~ ext_modules=[ext_polygon_utils],
#~ cmdclass={'build_ext': build_ext}
ext_modules=cythonize(
["./geoval/polygon/polygon_utils.pyx"]),
# this is needed to get proper information on numpy headers
include_dirs=[np.get_include()]
)
########################################################################
# Some useful information on shipping packages
########################################################################
# PIP
# 1) on a new computer you need to create a .pypirc file as described in the
# pypi documentation
# 2) install twine using pip install twine
# 3) generate package using: python setup.py sdist
# 4) just upload using twine upload dist/*
| pygeo/geoval | setup.py | Python | apache-2.0 | 5,529 |
import random
from typing import Dict, List
quiz_set = [
{"question": "What sound does a dog makes?", "answer": "Woof"},
{"question": "What sound does a cat makes?", "answer": "Meow"},
{"question": "What is 2 + 2?", "answer": "4"},
{"question": "What is sport is Chihayafuru about?", "answer": "karuta"},
]
class Question:
def __init__(self, question: str, answer: str) -> None:
self.question = question
self.answer = answer
def check_answer(self, input_answer: str) -> bool:
return input_answer.lower() == self.answer.lower()
class Quiz:
def __init__(self, question_set: List[Dict[str, str]]) -> None:
self.questions = self._create_questions(question_set)
def _create_questions(self, question_set: List[Dict[str, str]]) -> List[Question]:
"""Return a list of Question objects."""
questions = []
for question in question_set:
questions.append(Question(question["question"], question["answer"]))
random.shuffle(questions)
return questions
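# Illustrative usage sketch (not part of the original game loop and never called):
# questions can be checked programmatically without the interactive prompt.
def _demo_check_answer() -> bool:
    q = Question("What sound does a dog make?", "Woof")
    # check_answer lowercases both sides, so "woof" and "WOOF" both match.
    return q.check_answer("WOOF")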
def main(debug=False) -> None:
"""Main game loop."""
quiz = Quiz(quiz_set)
print(quiz.questions)
for question in quiz.questions:
while True:
print(question.question)
answer = str(input("What is your answer?"))
if question.check_answer(answer):
print("Correct!")
break
print("Wrong answer. Please try again.")
if debug:
break
if __name__ == "__main__":
main()
| roghu/py3_projects | src/Text/quiz.py | Python | mit | 1,560 |
import _plotly_utils.basevalidators
class UidValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="uid", parent_name="volume", **kwargs):
super(UidValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "info"),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/volume/_uid.py | Python | mit | 428 |
import os
import unittest
import tornpsql
from decimal import Decimal
from psycopg2._json import Json
from psycopg2.extras import HstoreAdapter
class ConnectionTestCase(unittest.TestCase):
@classmethod
def setUpClass(self):
try:
self.db = tornpsql.Connection(database="tornpsql")
except:
pass
def test_file_include(self):
"can reference files relatively"
self.db.file(os.path.join(os.path.dirname(__file__), "example.sql"))
self.assertTrue(self.db.get("SELECT true as t from public.users where name='Mr. Johnson' limit 1;").t)
def test_connection_args(self):
"test connect with args"
db = tornpsql.Connection("127.0.0.1", "tornpsql", os.getenv("postgres", None))
self.assertTrue(db.get("select true as connected").connected)
def test_connection_via_url(self):
"can test connect with args"
db = tornpsql.Connection(os.getenv("ALTERNATE_DATABASE_URL"))
self.assertTrue(db.get("select true as connected").connected)
    def test_invalid_connection_args(self):
        "rejects malformed connection urls"
self.assertRaises(ValueError, tornpsql.Connection, "postgres://user:pass@server:port/database")
self.assertRaises(ValueError, tornpsql.Connection, "postgres://server:port/")
self.assertRaises(ValueError, tornpsql.Connection, "postgres://user:password@server:invalid/database")
self.assertRaises(ValueError, tornpsql.Connection, "postgres://user:password@server/database")
self.assertRaises(ValueError, tornpsql.Connection, "postgres://user:password@:5432")
def test_registering_type(self):
"can register custom types"
self.db.register_type((790, ), "MONEY",
lambda s, cur: Decimal(s.replace(',', '').replace('$', '')) if s is not None else None)
# PS: never, ever, ever use money, use numeric type.
self.assertEqual(self.db.get("select '5.99'::money as a;").a, Decimal('5.99'))
# re-connect to see if the registration sticks
self.db.close()
self.assertEqual(self.db.get("select '5.99'::money as a;").a, Decimal('5.99'))
def test_assert_one_row(self):
"only one row can be returned with get"
self.assertRaisesRegexp(ValueError, "Multiple rows returned", self.db.get, "select * from users")
def test_no_results(self):
"get w/ no results"
self.assertEqual(self.db.get("select true from users where name = 'Sir Albert Einstein';"), None)
def test_executemany(self):
"can execute many"
self.db.execute("truncate other.users;")
self.db.executemany("insert into other.users (name) values (%s);", ["Mr. Smith"], ["Mr. Cramer"])
self.assertEqual(self.db.get("select count(*) as t from other.users where name in ('Mr. Smith', 'Mr. Cramer');").t, 2)
def test_mogrify(self):
"can mogrify w/ inline args"
self.assertEqual(self.db.mogrify("select true from user where email=%s;", "[email protected]"),
b"select true from user where email='[email protected]';")
def test_mogrify_dict(self):
"can mogrify w/ dict args"
self.assertEqual(self.db.mogrify("select true from user where email=%(email)s;", email="[email protected]"),
b"select true from user where email='[email protected]';")
def test_connection_from_url(self):
"can connect from the os.getenv('DATABASE_URL')"
db = tornpsql.Connection()
self.assertTrue(db.get("select true as connected").connected)
def test_adapting(self):
"can adapt data types outside query"
self.assertEqual(self.db.adapt("this").getquoted(), b"'this'")
self.assertIsInstance(self.db.adapt(dict(value=10)), Json)
self.assertIsInstance(self.db.hstore(dict(value=10)), HstoreAdapter)
def test_raises_execptions(self):
"can raise all psycopg2 exceptions"
self.assertRaises(tornpsql.ProgrammingError, self.db.query, "st nothing from th;")
def test_json(self):
"can providing dict as an argument will adapt to json datatype"
self.assertDictEqual(self.db.get("select %s::json as data;", dict(data="something")).data, {'data': 'something'})
def test_hstore(self):
"can parse hstore datatype as dict (kinda)"
self.assertDictEqual(self.db.get("SELECT flags from users where flags is not null limit 1;").flags, dict(extra_feature='true'))
def test_numeric_returns_decimal(self):
"floats return Decimals"
self.assertEqual(self.db.get("select balance from users where id = 1 limit 1;").balance, Decimal("7.10"))
def test_execute_with_kwargs(self):
"can query from keyword arguments"
self.assertDictEqual(self.db.get("SELECT x from generate_series(1,10) x where x=%(id)s;", id=1), dict(x=1))
self.assertListEqual(self.db.query("SELECT x from generate_series(1,10) x where x > %(g)s and x < %(l)s;", g=1, l=5), [{'x': 2}, {'x': 3}, {'x': 4}])
def test_notices(self):
"can retreive notices"
# clear other notices
self.db.notices
self.db.query("set client_min_messages to NOTICE;")
self.db.query("insert into other.users (name) values ('New Customer');")
self.assertListEqual(self.db.notices, ["New user inserted"])
class TransactionalConnectionTestCase(unittest.TestCase):
@classmethod
def setUpClass(self):
try:
self.db = tornpsql.TransactionalConnection(database="tornpsql")
except:
pass
def test_commit(self):
"can commit a transaction"
id = self.db.get("insert into other.users (name) values ('New Transactional Customer 1') returning id;").id
self.assertEqual(self.db.get('select name from other.users where id=%s', id).name, 'New Transactional Customer 1')
self.db.commit()
self.assertEqual(self.db.get('select name from other.users where id=%s', id).name, 'New Transactional Customer 1')
def test_rollback(self):
"can rollback a transaction"
id = self.db.get("insert into other.users (name) values ('New Transactional Customer 2') returning id;").id
self.assertEqual(self.db.get('select name from other.users where id=%s', id).name, 'New Transactional Customer 2')
self.db.rollback()
self.assertEqual(self.db.get('select name from other.users where id=%s', id), None)
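# Illustrative entry point (not in the original test module): allows running these
# tests directly with `python tests.py`, assuming a local "tornpsql" database is
# reachable as the tests above expect.
if __name__ == '__main__':
    unittest.main()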
| stevepeak/tornpsql | tests/tests.py | Python | apache-2.0 | 6,460 |
# coding=utf-8
# Copyright 2022 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TicTacToeEnvProblem wraps the TicTacToeEnv in an EnvProblem."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.envs import gym_env_problem
from tensor2tensor.layers import modalities
from tensor2tensor.utils import registry
@registry.register_env_problem
class TicTacToeEnvProblem(gym_env_problem.GymEnvProblem):
"""Plays `batch_size` games of tic-tac-toe."""
def __init__(self):
super(TicTacToeEnvProblem, self).__init__(
base_env_name="T2TEnv-TicTacToeEnv-v0",
reward_range=(-1, 1))
@property
def input_modality(self):
return modalities.ModalityType.IDENTITY_SYMBOL
@property
def input_vocab_size(self):
# Since a box can be either x or o or empty.
return 3
@property
def target_modality(self):
return modalities.ModalityType.IDENTITY_SYMBOL
@property
def target_vocab_size(self):
# Since reward is either -1 or 0 or +1.
return 3
@property
def action_modality(self):
return modalities.ModalityType.SYMBOL_WEIGHTS_ALL
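# Illustrative usage sketch (assumption, not part of the original module and never
# executed here): the class can be instantiated directly; the vocab sizes simply
# reflect the three board-cell states and the three possible rewards noted above.
def _demo_vocab_sizes():
  env = TicTacToeEnvProblem()
  return env.input_vocab_size, env.target_vocab_size  # (3, 3)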
| tensorflow/tensor2tensor | tensor2tensor/envs/tic_tac_toe_env_problem.py | Python | apache-2.0 | 1,691 |
"""
The "travel from home to the park" example from my lectures.
Author: Dana Nau <[email protected]>, May 31, 2013
This file should work correctly in both Python 2.7 and Python 3.2.
"""
import pyhop
import random
import sys
from utils_plan import *
from util_plot import *
# state variables
state1 = pyhop.State('state')
state1.status = 3
state1.concepts = ["Concept A", "Concept B", "Concept C"]
state1.relations = ["Relation A", "Relation B", "Relation C"]
state1.concepts_heard_count = [0,0,0]
state1.relations_heard_count = [0,0,0]
state1.variables = pyhop.State('variables')
state1.variables.affect = pyhop.State('affect')
state1.variables.affect.skill = 0
state1.variables.affect.challenge = 0
state1.variables.affect.boredom = 2
state1.variables.affect.frustration = -2
state1.variables.affect.confidence = DictEx(2)
# end - state variables
# Operators
# TEST: what if we do not take state as an argument?
# Heard(C, C_HearCount), Frustration--, Boredom++, Skill++
def print_a_concept_1(state, c):
state.status -= 1
state.concepts_heard_count[state.concepts.index(c)] += 1
state.variables.affect.frustration -= 1
state.variables.affect.boredom += 1
state.variables.affect.skill += 1
# print "Style 1", c
return state # TEST: what if we do not return anything?
def print_a_concept_2(state, c):
state.concepts_heard_count[state.concepts.index(c)] += 1
state.status -= 1
state.variables.affect.frustration -= 1
state.variables.affect.boredom += 1
state.variables.affect.skill += 1
# print "Style 2", c
return state # TEST: what if we do not return anything?
def print_a_relation_1(state, r):
state.relations_heard_count[state.relations.index(r)] += 1
state.status -= 1
state.variables.affect.frustration -= 1
state.variables.affect.boredom += 1
state.variables.affect.skill += 1
# print r
return state
def print_a_relation_2(state, r):
state.relations_heard_count[state.relations.index(r)] += 1
state.status -= 1
state.variables.affect.frustration -= 1
state.variables.affect.boredom += 1
state.variables.affect.skill += 1
# print r
return state
# Learned(R*) or Not(Learned(R*)), ((Confidence(C)++) or (Frustration++, Confidence(C)--)), Challenge++, Boredom--
def ask_true_false_on_concept(state, c):
state.status += 1
state.variables.affect.frustration += 1 # how to model user's success rate on a particular question
state.variables.affect.challenge += 1
state.variables.affect.boredom -= 1
# print "Is it true?\n", c
return state
def ask_true_false_on_relation(state, r):
state.status += 1
state.variables.affect.frustration += 1 # how to model user's success rate on a particular question
state.variables.affect.challenge += 1
state.variables.affect.boredom -= 1
# print "Is it true?\n", r
return state
def show_congrats(state, a = 0):
return state
pyhop.declare_operators(print_a_concept_1, print_a_concept_2, print_a_relation_1, print_a_relation_2, ask_true_false_on_concept, ask_true_false_on_relation, show_congrats)
# pyhop.print_operators()
# End - Operators
# Methods
def present_a_concept(state, c):
if state.variables.affect.boredom < 3:
if random.randint(0,100) < 50:
return [('print_a_concept_1', c)]
else:
return [('print_a_concept_2', c)]
return []
def present_a_relation(state, r):
if state.concepts_heard_count[state.relations.index(r)] > 0:
if state.variables.affect.boredom < 3:
if random.randint(0,100) < 50:
return [('print_a_relation_1', r)]
else:
return [('print_a_relation_2', r)]
return []
def quest_on_concept(state, c):
if state.concepts_heard_count[state.concepts.index(c)] > 0:
if state.variables.affect.frustration < 3:
return [('ask_true_false_on_concept', c)]
return []
def quest_on_relation(state, r):
if state.relations_heard_count[state.relations.index(r)] > 0:
if state.variables.affect.frustration < 3:
return [('ask_true_false_on_relation', r)]
return []
def get_random_entry(collection):
return collection[random.randint(0, len(collection)-1)]
def next_step(state, dummy):
r = random.randint(0,100)
if r < 25:
return [("present_a_concept", get_random_entry(state.concepts))]
elif r < 50:
return [("present_a_relation", get_random_entry(state.relations))]
elif r < 75:
return [("quest_on_concept", get_random_entry(state.concepts))]
else:
return [("quest_on_relation", get_random_entry(state.relations))]
def done(state, dummy):
return [("show_congrats", dummy)]
def teach_knowledge(state, target_heard_count):
for hc in state.concepts_heard_count:
if hc < target_heard_count:
return [('next_step', target_heard_count), ('teach', target_heard_count)]
for hc in state.relations_heard_count:
if hc < target_heard_count:
return [('next_step', target_heard_count), ('teach', target_heard_count)]
return [('done', target_heard_count)]
# have to specify the data structure of map and then how that is to be disseminated using the existing methods
pyhop.declare_methods('present_a_concept',present_a_concept)
pyhop.declare_methods('present_a_relation',present_a_relation)
pyhop.declare_methods('quest_on_concept',quest_on_concept)
pyhop.declare_methods('quest_on_relation',quest_on_relation)
pyhop.declare_methods('next_step',next_step)
pyhop.declare_methods('done',done)
pyhop.declare_methods('teach',teach_knowledge)
# print('')
# pyhop.print_methods()
# End - Methods
#result format: ('print_a_concept_1', 'Concept B'), ('print_a_concept_2', 'Concept A'),
# query
versbose_level = 1
target_heard_count = 5
if len(sys.argv) > 1:
if sys.argv[1] == "--help":
print "args: target_heard_count versbose_level"
exit(0)
target_heard_count = int(sys.argv[1])
if len(sys.argv) > 2:
versbose_level = int(sys.argv[2])
print "planning for target_heard_count:", target_heard_count, " with versbose_level:", versbose_level
result = pyhop.pyhop(state1,[('teach', target_heard_count)], verbose=versbose_level)
# plot_plan(result)
state_data = simulate_plan_execute(result, state1)
plot_plan(result, state_data, ["/affect/challenge", "/affect/boredom", "/affect/skill", '/affect/frustration'])
#end - query
| mdasifhasan/Experiments_HTN_Planner | PyHop/exp_1.py | Python | gpl-3.0 | 6,445 |
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""Address objects for network connections."""
import warnings, os
from zope.interface import implements
from twisted.internet.interfaces import IAddress
class IPv4Address(object):
"""
Object representing an IPv4 socket endpoint.
@ivar type: A string describing the type of transport, either 'TCP' or 'UDP'.
@ivar host: A string containing the dotted-quad IP address.
@ivar port: An integer representing the port number.
"""
# _bwHack is given to old users who think we are a tuple. They expected
# addr[0] to define the socket type rather than the address family, so
# the value comes from a different namespace than the new .type value:
# type = map[_bwHack]
# map = { 'SSL': 'TCP', 'INET': 'TCP', 'INET_UDP': 'UDP' }
implements(IAddress)
def __init__(self, type, host, port, _bwHack = None):
assert type in ('TCP', 'UDP')
self.type = type
self.host = host
self.port = port
self._bwHack = _bwHack
def __getitem__(self, index):
warnings.warn("IPv4Address.__getitem__ is deprecated. Use attributes instead.",
category=DeprecationWarning, stacklevel=2)
return (self._bwHack or self.type, self.host, self.port).__getitem__(index)
def __getslice__(self, start, stop):
warnings.warn("IPv4Address.__getitem__ is deprecated. Use attributes instead.",
category=DeprecationWarning, stacklevel=2)
return (self._bwHack or self.type, self.host, self.port)[start:stop]
def __eq__(self, other):
if isinstance(other, tuple):
return tuple(self) == other
elif isinstance(other, IPv4Address):
a = (self.type, self.host, self.port)
b = (other.type, other.host, other.port)
return a == b
return False
def __repr__(self):
return 'IPv4Address(%s, %r, %d)' % (self.type, self.host, self.port)
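# Illustrative sketch (not part of the original module and never called here):
# equality treats a plain (type, host, port) tuple as interchangeable with an
# IPv4Address, which is what the deprecated tuple interface above preserves.
def _demo_ipv4address_eq():
    addr = IPv4Address('TCP', '127.0.0.1', 8080)
    same = addr == IPv4Address('TCP', '127.0.0.1', 8080)   # True
    # Comparing against the tuple form also works, but goes through the
    # deprecated __getitem__ path and emits a DeprecationWarning.
    legacy = addr == ('TCP', '127.0.0.1', 8080)             # True
    return same, legacy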
class UNIXAddress(object):
"""
Object representing a UNIX socket endpoint.
@ivar name: The filename associated with this socket.
@type name: C{str}
"""
implements(IAddress)
def __init__(self, name, _bwHack='UNIX'):
self.name = name
self._bwHack = _bwHack
def __getitem__(self, index):
warnings.warn("UNIXAddress.__getitem__ is deprecated. Use attributes instead.",
category=DeprecationWarning, stacklevel=2)
return (self._bwHack, self.name).__getitem__(index)
def __getslice__(self, start, stop):
warnings.warn("UNIXAddress.__getitem__ is deprecated. Use attributes instead.",
category=DeprecationWarning, stacklevel=2)
return (self._bwHack, self.name)[start:stop]
def __eq__(self, other):
if isinstance(other, tuple):
return tuple(self) == other
elif isinstance(other, UNIXAddress):
# First do the simple thing and check to see if the names are the
# same. If not, and the paths exist, check to see if they point to
# the same file.
if self.name == other.name:
return True
else:
try:
return os.path.samefile(self.name, other.name)
except OSError:
pass
return False
def __repr__(self):
return 'UNIXAddress(%r)' % (self.name,)
# These are for buildFactory backwards compatibility due to
# stupidity-induced inconsistency.
class _ServerFactoryIPv4Address(IPv4Address):
"""Backwards compatability hack. Just like IPv4Address in practice."""
def __eq__(self, other):
if isinstance(other, tuple):
warnings.warn("IPv4Address.__getitem__ is deprecated. Use attributes instead.",
category=DeprecationWarning, stacklevel=2)
return (self.host, self.port) == other
elif isinstance(other, IPv4Address):
a = (self.type, self.host, self.port)
b = (other.type, other.host, other.port)
return a == b
return False
| eunchong/build | third_party/twisted_10_2/twisted/internet/address.py | Python | bsd-3-clause | 4,196 |
import unittest
import numpy
import pytest
import chainer
from chainer import functions
from chainer import testing
from chainer.testing import backend
@testing.parameterize(*testing.product({
'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
'x_shape': [{'n_batch_axes': 1, 'data_shape': (3,)},
{'n_batch_axes': 3, 'data_shape': (3, 5)},
],
'contiguous': ['C', None],
'nobias': [True, False],
}))
@backend.inject_backend_tests(
None,
# CPU tests
testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ [{
'use_cuda': True,
}]
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
]
)
class TestNonparameterizedLinear(testing.FunctionTestCase):
def setUp(self):
self.check_backward_options = {'atol': 1e-2, 'rtol': 1e-2}
self.check_double_backward_options = {'atol': 1e-2, 'rtol': 1e-2}
if self.x_dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-2}
self.n_batch_axes = self.x_shape['n_batch_axes']
def before_test(self, test_name):
# TODO(crcrpar): Remove this relaxation when
# a known issue in the reduction of ChainerX is resolved.
if test_name == 'test_forward':
if (self.x_dtype == numpy.float16 and
self.W_dtype == numpy.float16 and
self.n_batch_axes == 3 and
self.backend_config.use_chainerx and
self.backend_config.chainerx_device == 'native:0'):
self.check_forward_options['atol'] = 5e-3
def generate_inputs(self):
data_shape = self.x_shape['data_shape']
batch_shape = (4,) + (2,) * (self.n_batch_axes - 1)
x = numpy.random.uniform(
-1, 1, batch_shape + data_shape).astype(self.x_dtype)
input_size = numpy.prod(data_shape)
W = numpy.random.uniform(-1, 1, (2, input_size)).astype(self.W_dtype)
if self.nobias:
return x, W
else:
b = numpy.random.uniform(-1, 1, 2).astype(self.x_dtype)
return x, W, b
def forward_expected(self, inputs):
x, W = inputs[:2]
if self.n_batch_axes > 1:
batch_shape = x.shape[:self.n_batch_axes]
batch_size = numpy.prod(batch_shape)
x = x.reshape(batch_size, -1)
y = x.dot(W.T)
if not self.nobias:
y += inputs[-1]
if self.n_batch_axes > 1:
y = y.reshape(batch_shape + (-1,))
return y.astype(self.x_dtype),
def forward(self, inputs, device):
if self.nobias:
x, W = inputs
b = None
else:
x, W, b = inputs
y = functions.linear(x, W, b, n_batch_axes=self.n_batch_axes)
return y,
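# Illustrative sketch (not part of the original tests and never executed by them):
# with n_batch_axes=2 the leading two axes are kept as batch dimensions and the
# remaining axes are flattened before the matrix product, so a (4, 2, 3, 5) input
# and a (7, 15) weight yield a (4, 2, 7) output.
def _demo_n_batch_axes():
    x = numpy.ones((4, 2, 3, 5), numpy.float32)
    w = numpy.ones((7, 15), numpy.float32)
    y = functions.linear(x, w, n_batch_axes=2)
    return y.shape  # (4, 2, 7)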
class TestLinearBackwardNoncontiguousGradOutputs(unittest.TestCase):
# NumPy raises an error when the inputs of dot operation are not
# contiguous. This test ensures this issue is correctly handled.
# (https://github.com/chainer/chainer/issues/2744)
    # This test depends on the fact that backward() of F.sum generates
# a non-contiguous array.
def test_1(self):
with chainer.using_config('use_ideep', 'never'):
n_batches = 1 # important
in_dims = (2, 2)
out_dim = 3
x_shape = (n_batches,) + in_dims
w_shape = (out_dim, numpy.prod(in_dims),)
x = numpy.ones(x_shape, numpy.float32)
w = numpy.ones(w_shape, numpy.float32)
y = functions.linear(chainer.Variable(x), w)
z = functions.sum(y)
z.backward()
class TestLinearNBatchAxesBoundaryCondition(unittest.TestCase):
def setUp(self):
self.W = numpy.random.uniform(
-1, 1, (2, 15)).astype(numpy.float32)
self.x = numpy.random.uniform(
-1, 1, (3, 3, 5)).astype(numpy.float32)
def test_negative(self):
n_batch_axes = -1
with pytest.raises(ValueError):
functions.linear(self.x, self.W, n_batch_axes=n_batch_axes)
def test_zero(self):
n_batch_axes = 0
with pytest.raises(ValueError):
functions.linear(self.x, self.W, n_batch_axes=n_batch_axes)
testing.run_module(__name__, __file__)
| okuta/chainer | tests/chainer_tests/functions_tests/connection_tests/test_linear.py | Python | mit | 4,516 |
# coding: utf-8
from setuptools import find_packages, setup
install_requires = ["requests>=2.20.0"]
with open("README.rst") as file:
long_description = file.read()
setup(
name="postmarker",
url="https://github.com/Stranger6667/postmarker",
version="0.18.2",
license="MIT",
author="Dmitry Dygalo",
author_email="[email protected]",
maintainer="Dmitry Dygalo",
maintainer_email="[email protected]",
keywords=["postmark", "api", "client", "email"],
description="Python client library for Postmark API",
long_description=long_description,
long_description_content_type="text/x-rst",
packages=find_packages(where="src"),
package_dir={"": "src"},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Communications :: Email",
],
include_package_data=True,
install_requires=install_requires,
entry_points={"pytest11": ["postmark = postmarker.pytest"]},
)
| Stranger6667/postmarker | setup.py | Python | mit | 1,616 |
# Copyright (c) 2014 Tobias Marquardt
#
# Distributed under terms of the (2-clause) BSD license.
"""
Low level functions for creation of irc client messages, as well as classes
with constants for string commands, numeric commands and error responses.
Specification: RFC 2812 'IRC: Client Protocol'.
"""
__all__ = ['Err']
class Cmd:
""" Commands """
# Message Registration
PASS = 'PASS'
NICK = 'NICK'
USER = 'USER'
QUIT = 'QUIT'
MODE = 'MODE'
# Channel Operations
JOIN = 'JOIN'
PART = 'PART'
KICK = 'KICK'
TOPIC = 'TOPIC'
# Sending Messages
PRIVMSG = 'PRIVMSG'
# Miscellaneous
PING = 'PING'
PONG = 'PONG'
class UserMode:
""" User Modes
User modes apply server-wide. So user mode 'o' should not be confused
with channel mode 'o'.
"""
AWAY = 'a'
INVISIBLE = 'i'
WALLOPS_RECEIVER = 'w'
RESTRICTED = 'r'
OPERATOR = 'o'
LOCAL_OPERATOR = 'O'
NOTICES_RECEIVER = 's'
class ChannelMode:
""" Channel Modes """
# Channel modes affecting a single user
OPERATOR = 'o'
VOICE = 'v'
# Channel modes affecting the channel itself
# Not yet implemented...
class Rpl:
""" Command Replies """
WELCOME = 1
NAMREPLY = 353
ENDOFNAMES = 366
TOPIC = 332
class Err:
""" Error Replies
Contains numerical constants for errors as defined by the irc client
protocol as well as the keywords of error specific parameters.
Error numbers and parameters are passed to
:py:meth:`handle_error(self, error, **params)<fredirc.IRCHandler.handle_error()>`.
You can take a look at :py:attr:`.ERROR_PARAMETERS` to find out the
parameter keywords for a particular error.
See the :ref:`beginner's guide<guide_handle-errors>` for an example on how
to use this class to handle errors.
"""
NOSUCHNICK = 401
NOSUCHSERVER = 402
NOSUCHCHANNEL = 403
CANNOTSENDTOCHAN = 404
TOOMANYCHANNELS = 405
WASNOSUCHNICK = 406
TOOMANYTARGETS = 407
NOSUCHSERVICE = 408
NOORIGIN = 409
NORECIPIENT = 411
NOTEXTTOSEND = 412
NOTOPLEVEL = 413
WILDTOPLEVEL = 414
BADMASK = 415
UNKNOWNCOMMAND = 421
NOMOTD = 422
NOADMININFO = 423
FILEERROR = 424
NONICKNAMEGIVEN = 431
ERRONEUSNICKNAME = 432
NICKNAMEINUSE = 433
NICKCOLLISION = 436
UNAVAILRESOURCE = 437
USERNOTINCHANNEL = 441
NOTONCHANNEL = 442
USERONCHANNEL = 443
NOLOGIN = 444
SUMMONDISABLED = 445
USERSDISABLED = 446
NOTREGISTERED = 451
NEEDMOREPARAMS = 461
ALREADYREGISTRED = 462
NOPERMFORHOST = 463
PASSWDMISMATCH = 464
YOUREBANNEDCREEP = 465
KEYSET = 467
YOUWILLBEBANNED = 466
CHANNELISFULL = 471
UNKNOWNMODE = 472
INVITEONLYCHAN = 473
BANNEDFROMCHAN = 474
BADCHANNELKEY = 475
BADCHANMASK = 476
NOCHANMODES = 477
BANLISTFULL = 478
NOPRIVILEGES = 481
CHANOPRIVSNEEDED = 482
CANTKILLSERVER = 483
RESTRICTED = 484
UNIQOPPRIVSNEEDED = 485
NOOPERHOST = 491
UMODEUNKNOWNFLAG = 501
USERSDONTMATCH = 502
ERROR_PARAMETERS = {
NOSUCHNICK: ['nick', 'message'],
NOSUCHSERVER: ['server_name', 'message'],
NOSUCHCHANNEL: ['channel_name', 'message'],
CANNOTSENDTOCHAN: ['channel_name', 'message'],
TOOMANYCHANNELS: ['channel_name', 'message'],
WASNOSUCHNICK: ['nick', 'message'],
TOOMANYTARGETS: ['target', 'message'],
NOSUCHSERVICE: ['service_name', 'message'],
NOORIGIN: ['message'],
NORECIPIENT: ['message'],
NOTEXTTOSEND: ['message'],
NOTOPLEVEL: ['mask', 'message'],
WILDTOPLEVEL: ['mask', 'message'],
BADMASK: ['mask', 'message'],
UNKNOWNCOMMAND: ['command', 'message'],
NOMOTD: ['message'],
NOADMININFO: ['server', 'message'],
FILEERROR: ['message'],
NONICKNAMEGIVEN: ['message'],
ERRONEUSNICKNAME: ['nick', 'message'],
NICKNAMEINUSE: ['nick', 'message'],
NICKCOLLISION: ['nick', 'message'],
UNAVAILRESOURCE: ['nick', 'message'],
USERNOTINCHANNEL: ['nick', 'channel', 'message'],
NOTONCHANNEL: ['channel', 'message'],
USERONCHANNEL: ['user', 'channel', 'message'],
NOLOGIN: ['user', 'channel', 'message'],
SUMMONDISABLED: ['message'],
USERSDISABLED: ['message'],
NOTREGISTERED: ['message'],
NEEDMOREPARAMS: ['command', 'message'],
ALREADYREGISTRED: ['message'],
NOPERMFORHOST: ['message'],
PASSWDMISMATCH: ['message'],
YOUREBANNEDCREEP: ['message'],
YOUWILLBEBANNED: [],
KEYSET: ['channel', 'message'],
CHANNELISFULL: ['channel', 'message'],
UNKNOWNMODE: ['mode', 'message'],
INVITEONLYCHAN: ['channel', 'message'],
BANNEDFROMCHAN: ['channel', 'message'],
BADCHANNELKEY: ['channel', 'message'],
BADCHANMASK: ['channel', 'message'],
NOCHANMODES: ['channel', 'message'],
BANLISTFULL: ['channel', 'message'],
NOPRIVILEGES: ['message'],
CHANOPRIVSNEEDED: ['channel', 'message'],
CANTKILLSERVER: ['message'],
RESTRICTED: ['message'],
UNIQOPPRIVSNEEDED: ['message'],
NOOPERHOST: ['message'],
UMODEUNKNOWNFLAG: ['message'],
USERSDONTMATCH: ['message'],
}
def nick(name):
return '{nick_cmd} {name}'.format(
nick_cmd=Cmd.NICK, name=name)
def password(pwd=None):
if pwd:
return '{pass_cmd} :{password}'.format(
pass_cmd=Cmd.PASS, password=pwd)
else:
return Cmd.PASS
def user(user_name, real_name, invisible=False, receive_wallops=False):
# TODO set mode correctly
mode = 0
return '{user_cmd} {user} {mode} * :{real_name}'.format(
user_cmd=Cmd.USER, user=user_name, mode=mode, real_name=real_name)
def quit(message=None):
if message:
return '{quit_cmd} :{message}'.format(
quit_cmd=Cmd.QUIT, message=message)
else:
return Cmd.QUIT
def join(channels):
return '{join_cmd} {channels}'.format(
join_cmd=Cmd.JOIN, channels=','.join(channels))
def pong(server):
return '{pong_cmd} :{server}'.format(
pong_cmd=Cmd.PONG, server=server)
def privmsg(target, message, sender=None):
if not sender:
sender = ''
return ':{sender} {msg_cmd} {target} :{message}'.format(
sender=sender, msg_cmd=Cmd.PRIVMSG, target=target, message=message)
def part(channels, message):
return '{part_cmd} {channels} :{message}'.format(
part_cmd=Cmd.PART, channels=','.join(channels), message=message)
def kick(channels, users, message=None):
if not message:
message = ''
return '{kick_cmd} {channels} {users} :{message}'.format(
kick_cmd=Cmd.KICK, channels=','.join(channels), users=','.join(users),
message=message)
def channel_mode(channel, mode_change):
"""
Args:
        mode_change (ChannelModeChange)
"""
change = '+' if mode_change.added else '-'
return '{mode_cmd} {channel} {change}{mode} {params}'.format(
mode_cmd=Cmd.MODE, channel=channel, change=change,
mode=mode_change.mode, params=' '.join(mode_change.params)) | worblehat/FredIRC | fredirc/messages.py | Python | bsd-2-clause | 7,306 |
from Products.CMFCore.permissions import setDefaultRoles
ADD_RATING_PERMISSION = 'ATRatings: Add rating'
setDefaultRoles(ADD_RATING_PERMISSION, ( 'Manager', 'Owner', 'Authenticated') )
| erikriver/eduIntelligent-cynin | products/ATRatings/Permissions.py | Python | gpl-3.0 | 192 |
# f90wrap: F90 to Python interface generator with derived type support
#
# Copyright James Kermode 2011-2018
#
# This file is part of f90wrap
# For the latest version see github.com/jameskermode/f90wrap
#
# f90wrap is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# f90wrap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with f90wrap. If not, see <http://www.gnu.org/licenses/>.
#
# If you would like to license the source code under different terms,
# please contact James Kermode, [email protected]
import test_python
import numpy
a = numpy.ones(test_python.test_module.m, dtype=numpy.float32)
b = test_python.test_module.Test_Type2_Xn_Array()
c = test_python.test_module.Test_Type2_Xn_Array()
d = test_python.test_module.Test_Type2_Xm_Array()
e = test_python.test_module.Test_Type2_X5_Array()
f = numpy.ones(1)
test_python.test_module.test_routine4(a, b, c, d, e, f)
print(a)
print(list(b.items[i].y for i in range(len(b.items))))
print(list(c.items[i].y for i in range(len(c.items))))
print(list(d.items[i].y for i in range(len(d.items))))
print(list(e.items[i].y for i in range(len(e.items))))
print(f)
assert(all(a == numpy.array([42, 1, 1, 1, 1], dtype=numpy.float32)))
assert(b.items[1].y[1] == 42)
assert(c.items[2].y[2] == 42)
assert(d.items[3].y[3] == 42)
assert(e.items[4].y[4] == 42)
assert(f == 2)
| jameskermode/f90wrap | examples/fixed_1D_derived_type_array_argument/tests.py | Python | lgpl-3.0 | 1,805 |
import os
os.environ['DJANGO_SETTINGS_MODULE']='settings'
import webapp2 as webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
import numpy as np
import cgi
import cgitb
cgitb.enable()
class genericBatchInputPage(webapp.RequestHandler):
def get(self):
templatepath = os.path.dirname(__file__) + '/../templates/'
html = template.render(templatepath + '01pop_uberheader.html', {'title'})
html = html + template.render(templatepath + '02pop_uberintroblock_wmodellinks.html', {'model':'generic','page':'batchinput'})
html = html + template.render (templatepath + '03pop_ubertext_links_left.html', {})
html = html + template.render(templatepath + '04uberbatchinput.html', {
'model':'generic',
'model_attributes':'generic Batch Input'})
html = html + template.render(templatepath + '04uberbatchinput_jquery.html', {})
html = html + template.render(templatepath + '05pop_ubertext_links_right.html', {})
html = html + template.render(templatepath + '06pop_uberfooter.html', {'links': ''})
self.response.out.write(html)
app = webapp.WSGIApplication([('/.*', genericBatchInputPage)], debug=True)
def main():
run_wsgi_app(app)
if __name__ == '__main__':
main()
| puruckertom/poptox | poptox/generic/generic_batchinput.py | Python | unlicense | 1,372 |
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
import traceback
from functools import wraps
from flask import request
from werkzeug.exceptions import NotFound, Unauthorized
from indico.util.json import create_json_error_answer
class classproperty(property):
def __get__(self, obj, type=None):
return self.fget.__get__(None, type)()
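# Illustrative usage sketch (assumed pattern, not taken from this module):
# ``classproperty`` expects the wrapped attribute to already be a classmethod,
# since ``__get__`` binds it to the class before calling it, e.g.
#
#     class Config(object):
#         @classproperty
#         @classmethod
#         def default(cls):
#             return cls()
#
#     Config.default  # invokes the classmethod with cls=Config, no instance needed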
class strict_classproperty(classproperty):
"""A classproperty that does not work on instances.
This is useful for properties which would be confusing when
accessed through an instance. However, using this property
still won't allow you to set the attribute on the instance
itself, so it's really just to stop people from accessing
the property in an inappropriate way.
"""
def __get__(self, obj, type=None):
if obj is not None:
raise AttributeError('Attribute is not available on instances of {}'.format(type.__name__))
return super(strict_classproperty, self).__get__(obj, type)
class cached_classproperty(property):
def __get__(self, obj, objtype=None):
# The property name is the function's name
name = self.fget.__get__(True).im_func.__name__
# In case of inheritance the attribute might be defined in a superclass
for mrotype in objtype.__mro__:
try:
value = object.__getattribute__(mrotype, name)
except AttributeError:
pass
else:
break
else:
raise AttributeError(name)
        # If we have a cached_classproperty, the value has not been resolved yet
if isinstance(value, cached_classproperty):
value = self.fget.__get__(None, objtype)()
setattr(objtype, name, value)
return value
def cached_writable_property(cache_attr, cache_on_set=True):
class _cached_writable_property(property):
def __get__(self, obj, objtype=None):
if obj is not None and self.fget and hasattr(obj, cache_attr):
return getattr(obj, cache_attr)
value = property.__get__(self, obj, objtype)
setattr(obj, cache_attr, value)
return value
def __set__(self, obj, value):
property.__set__(self, obj, value)
if cache_on_set:
setattr(obj, cache_attr, value)
else:
try:
delattr(obj, cache_attr)
except AttributeError:
pass
def __delete__(self, obj):
property.__delete__(self, obj)
try:
delattr(obj, cache_attr)
except AttributeError:
pass
return _cached_writable_property
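# Illustrative usage sketch (hypothetical class, not from this module): the
# factory argument names the attribute used as the cache, e.g.
#
#     class Report(object):
#         @cached_writable_property('_data_cache')
#         def data(self):
#             return expensive_load()
#
# Reading ``report.data`` computes the value once and stores it on
# ``report._data_cache``; if the property also defines a setter, assignment
# refreshes or clears the cache depending on ``cache_on_set``.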
def jsonify_error(function=None, logger_name='requestHandler', logger_message=None, logging_level='info', status=200):
"""
    Returns the response of error handlers as JSON when the request asks for
    JSON, and logs the exception that ended the request.
"""
from indico.core.errors import IndicoError, NotFoundError
from indico.core.logger import Logger
no_tb_exceptions = (NotFound, NotFoundError, Unauthorized)
def _jsonify_error(f):
@wraps(f)
def wrapper(*args, **kw):
for e in list(args) + kw.values():
if isinstance(e, Exception):
exception = e
break
else:
raise IndicoError('Wrong usage of jsonify_error: No error found in params')
tb = ''
if logging_level != 'exception' and not isinstance(exception, no_tb_exceptions):
tb = traceback.format_exc()
logger_fn = getattr(Logger.get(logger_name), logging_level)
logger_fn(
logger_message if logger_message else
'Request finished: {} ({})\n{}'.format(exception.__class__.__name__, exception, tb).rstrip()
)
# allow e.g. NoReportError to specify a status code without possibly
# breaking old code that expects it with a 200 code.
# new variable name since python2 doesn't have `nonlocal`...
used_status = getattr(exception, 'http_status_code', status)
if request.is_xhr or request.headers.get('Content-Type') == 'application/json':
return create_json_error_answer(exception, status=used_status)
else:
args[0]._responseUtil.status = used_status
return f(*args, **kw)
return wrapper
if function:
return _jsonify_error(function)
return _jsonify_error
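# Illustrative usage sketch (hypothetical handler class and helpers, not from
# this module): the decorated callable receives the exception among its
# arguments, e.g.
#
#     class MyRequestHandler(SomeBaseHandler):
#         @jsonify_error(logger_name='my_module', status=404)
#         def _processNotFoundError(self, e):
#             return render_error_page(e)
#
# For XHR/JSON requests the wrapper answers with create_json_error_answer();
# otherwise it sets the status and falls through to the decorated handler.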
def smart_decorator(f):
"""Decorator to make decorators work both with and without arguments.
This decorator allows you to use a decorator both without arguments::
@fancy_decorator
def function():
pass
And also with arguments::
@fancy_decorator(123, foo='bar')
def function():
pass
The only limitation is that the decorator itself MUST NOT allow a callable object
as the first positional argument, unless there is at least one other mandatory argument.
The decorator decorated with `smart_decorator` obviously needs to have default values for
all arguments but the first one::
@smart_decorator
def requires_location(f, some='args', are='here'):
@wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
return wrapper
"""
@wraps(f)
def wrapper(*args, **kw):
if len(args) == 1 and not kw and callable(args[0]):
return f(args[0])
else:
return lambda original: f(original, *args, **kw)
return wrapper
| DavidAndreev/indico | indico/util/decorators.py | Python | gpl-3.0 | 6,415 |
import sys
import time
from hypertable.thriftclient import *
from hyperthrift.gen.ttypes import *
try:
client = ThriftClient("localhost", 38080)
print "HQL examples"
try:
namespace = client.namespace_open("bad")
except:
print "Caught exception when tyring to open 'bad' namespace"
namespace = client.namespace_open("test")
res = client.hql_query(namespace, "show tables")
print res
res = client.hql_query(namespace, "select * from thrift_test")
print res
print "mutator examples";
mutator = client.mutator_open(namespace, "thrift_test", 0, 0);
client.mutator_set_cell(mutator, Cell(Key("py-k1", "col", None), "py-v1"))
client.mutator_flush(mutator);
client.mutator_close(mutator);
print "shared mutator examples";
mutate_spec = MutateSpec("test_py", 1000, 0);
client.shared_mutator_set_cell(namespace, "thrift_test", mutate_spec, Cell(Key("py-put-k1", "col", None), "py-put-v1"))
client.shared_mutator_refresh(namespace, "thrift_test", mutate_spec)
client.shared_mutator_set_cell(namespace, "thrift_test", mutate_spec, Cell(Key("py-put-k2", "col", None), "py-put-v2"))
time.sleep(2)
print "scanner examples";
scanner = client.scanner_open(namespace, "thrift_test",
ScanSpec(None, None, None, 1));
while True:
cells = client.scanner_get_cells(scanner)
if (len(cells) == 0):
break
print cells
client.scanner_close(scanner)
print "asynchronous api examples\n";
future = client.future_open(0);
mutator_async_1 = client.async_mutator_open(namespace, "thrift_test", future, 0);
mutator_async_2 = client.async_mutator_open(namespace, "thrift_test", future, 0);
client.async_mutator_set_cell(mutator_async_1, Cell(Key("py-k1","col", None), "py-v1-async"));
client.async_mutator_set_cell(mutator_async_2, Cell(Key("py-k1","col", None), "py-v2-async"));
client.async_mutator_flush(mutator_async_1);
client.async_mutator_flush(mutator_async_2);
num_results=0;
while True:
result = client.future_get_result(future, 0);
if(result.is_empty):
break
num_results+=1;
print result;
if (result.is_error or result.is_scan):
print "Unexpected result\n"
exit(1);
if (num_results>2):
print "Expected only 2 results\n"
exit(1)
if (num_results!=2):
print "Expected only 2 results\n"
exit(1)
if (client.future_is_cancelled(future) or client.future_is_full(future) or not (client.future_is_empty(future)) or client.future_has_outstanding(future)):
print "Future object in unexpected state"
exit(1)
client.async_mutator_close(mutator_async_1)
client.async_mutator_close(mutator_async_2)
color_scanner = client.async_scanner_open(namespace, "FruitColor", future, ScanSpec(None, None, None, 1));
location_scanner = client.async_scanner_open(namespace, "FruitLocation", future, ScanSpec(None, None, None, 1));
energy_scanner = client.async_scanner_open(namespace, "FruitEnergy", future, ScanSpec(None, None, None, 1));
expected_cells = 6;
num_cells = 0;
while True:
result = client.future_get_result(future, 0);
print result;
if (result.is_empty or result.is_error or not(result.is_scan) ):
print "Unexpected result\n"
exit(1);
for cell in result.cells:
print cell;
num_cells+=1;
if(num_cells >= 6):
client.future_cancel(future);
break;
if (not client.future_is_cancelled(future)):
print "Expected future ops to be cancelled\n"
exit(1)
print "regexp scanner example";
scanner = client.scanner_open(namespace, "thrift_test",
ScanSpec(None, None, None, 1, 0, None, None, ["col"], False,0, 0, "k", "v[24]"));
while True:
cells = client.scanner_get_cells(scanner)
if (len(cells) == 0):
break
print cells
client.scanner_close(scanner)
client.async_scanner_close(color_scanner);
client.async_scanner_close(location_scanner);
client.async_scanner_close(energy_scanner);
client.future_close(future);
client.namespace_close(namespace)
except:
print sys.exc_info()
raise
| nijx/hypertable | src/py/ThriftClient/client_test.py | Python | gpl-3.0 | 4,079 |
#!/usr/bin/env python3
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
"""
Script to generate system call invocation macros
This script parses the system call metadata JSON file emitted by
parse_syscalls.py to create several files:
- A file containing weak aliases of any potentially unimplemented system calls,
as well as the system call dispatch table, which maps system call type IDs
to their handler functions.
- A header file defining the system call type IDs, as well as function
prototypes for all system call handler functions.
- A directory containing header files. Each header corresponds to a header
that was identified as containing system call declarations. These
generated headers contain the inline invocation functions for each system
call in that header.
"""
import sys
import re
import argparse
import os
import json
# Some kernel headers cannot include automated tracing without causing unintended recursion or
# other serious issues.
# These headers typically already have very specific tracing hooks for all relevant things
# written by hand so are excluded.
notracing = ["kernel.h", "errno_private.h"]
types64 = ["int64_t", "uint64_t"]
# The kernel linkage is complicated. These functions from
# userspace_handlers.c are present in the kernel .a library after
# userspace.c, which contains the weak fallbacks defined here. So the
# linker finds the weak one first and stops searching, and thus won't
# see the real implementation which should override. Yet changing the
# order runs afoul of a comment in CMakeLists.txt that the order is
# critical. These are core syscalls that won't ever be unconfigured,
# just disable the fallback mechanism as a simple workaround.
noweak = ["z_mrsh_k_object_release",
"z_mrsh_k_object_access_grant",
"z_mrsh_k_object_alloc"]
table_template = """/* auto-generated by gen_syscalls.py, don't edit */
/* Weak handler functions that get replaced by the real ones unless a system
* call is not implemented due to kernel configuration.
*/
%s
const _k_syscall_handler_t _k_syscall_table[K_SYSCALL_LIMIT] = {
\t%s
};
"""
list_template = """
/* auto-generated by gen_syscalls.py, don't edit */
#ifndef ZEPHYR_SYSCALL_LIST_H
#define ZEPHYR_SYSCALL_LIST_H
%s
#ifndef _ASMLANGUAGE
#include <stdint.h>
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_SYSCALL_LIST_H */
"""
syscall_template = """
/* auto-generated by gen_syscalls.py, don't edit */
{include_guard}
{tracing_include}
#ifndef _ASMLANGUAGE
#include <syscall_list.h>
#include <syscall.h>
#include <linker/sections.h>
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
#pragma GCC diagnostic push
#endif
#ifdef __GNUC__
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#if !defined(__XCC__)
#pragma GCC diagnostic ignored "-Warray-bounds"
#endif
#endif
#ifdef __cplusplus
extern "C" {{
#endif
{invocations}
#ifdef __cplusplus
}}
#endif
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
#pragma GCC diagnostic pop
#endif
#endif
#endif /* include guard */
"""
handler_template = """
extern uintptr_t z_hdlr_%s(uintptr_t arg1, uintptr_t arg2, uintptr_t arg3,
uintptr_t arg4, uintptr_t arg5, uintptr_t arg6, void *ssf);
"""
weak_template = """
__weak ALIAS_OF(handler_no_syscall)
uintptr_t %s(uintptr_t arg1, uintptr_t arg2, uintptr_t arg3,
uintptr_t arg4, uintptr_t arg5, uintptr_t arg6, void *ssf);
"""
# defines a macro wrapper which supersedes the syscall when used
# and provides tracing enter/exit hooks while allowing per compilation unit
# enable/disable of syscall tracing. Used for value-returning functions
# Note that the last argument to the exit macro is the return value.
syscall_tracer_with_return_template = """
#ifndef DISABLE_SYSCALL_TRACING
{trace_diagnostic}
#define {func_name}({argnames}) ({{ \
{func_type} retval; \
sys_port_trace_syscall_enter({syscall_id}, {func_name}{trace_argnames}); \
retval = {func_name}({argnames}); \
sys_port_trace_syscall_exit({syscall_id}, {func_name}{trace_argnames}, retval); \
retval; \
}})
#endif
"""
# defines a macro wrapper which supersedes the syscall when used
# and provides tracing enter/exit hooks while allowing per compilation unit
# enable/disable of syscall tracing. Used for non-returning (void) functions
syscall_tracer_void_template = """
#ifndef DISABLE_SYSCALL_TRACING
{trace_diagnostic}
#define {func_name}({argnames}) do {{ \
sys_port_trace_syscall_enter({syscall_id}, {func_name}{trace_argnames}); \
{func_name}({argnames}); \
sys_port_trace_syscall_exit({syscall_id}, {func_name}{trace_argnames}); \
}} while(false)
#endif
"""
typename_regex = re.compile(r'(.*?)([A-Za-z0-9_]+)$')
class SyscallParseException(Exception):
pass
def typename_split(item):
if "[" in item:
raise SyscallParseException(
"Please pass arrays to syscalls as pointers, unable to process '%s'" %
item)
if "(" in item:
raise SyscallParseException(
"Please use typedefs for function pointers")
mo = typename_regex.match(item)
if not mo:
raise SyscallParseException("Malformed system call invocation")
m = mo.groups()
return (m[0].strip(), m[1])
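# For illustration: typename_split("struct k_sem *sem") returns
# ("struct k_sem *", "sem"), and typename_split("int count") returns
# ("int", "count").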
def need_split(argtype):
return (not args.long_registers) and (argtype in types64)
# Note: "lo" and "hi" are named in little endian conventions,
# but it doesn't matter as long as they are consistently
# generated.
def union_decl(type):
return "union { struct { uintptr_t lo, hi; } split; %s val; }" % type
def wrapper_defs(func_name, func_type, args, fn):
ret64 = need_split(func_type)
mrsh_args = [] # List of rvalue expressions for the marshalled invocation
split_args = []
nsplit = 0
for argtype, argname in args:
if need_split(argtype):
split_args.append((argtype, argname))
mrsh_args.append("parm%d.split.lo" % nsplit)
mrsh_args.append("parm%d.split.hi" % nsplit)
nsplit += 1
else:
mrsh_args.append("*(uintptr_t *)&" + argname)
if ret64:
mrsh_args.append("(uintptr_t)&ret64")
decl_arglist = ", ".join([" ".join(argrec) for argrec in args]) or "void"
syscall_id = "K_SYSCALL_" + func_name.upper()
wrap = "extern %s z_impl_%s(%s);\n" % (func_type, func_name, decl_arglist)
wrap += "\n"
wrap += "__pinned_func\n"
wrap += "static inline %s %s(%s)\n" % (func_type, func_name, decl_arglist)
wrap += "{\n"
wrap += "#ifdef CONFIG_USERSPACE\n"
wrap += ("\t" + "uint64_t ret64;\n") if ret64 else ""
wrap += "\t" + "if (z_syscall_trap()) {\n"
for parmnum, rec in enumerate(split_args):
(argtype, argname) = rec
wrap += "\t\t%s parm%d;\n" % (union_decl(argtype), parmnum)
wrap += "\t\t" + "parm%d.val = %s;\n" % (parmnum, argname)
if len(mrsh_args) > 6:
wrap += "\t\t" + "uintptr_t more[] = {\n"
wrap += "\t\t\t" + (",\n\t\t\t".join(mrsh_args[5:])) + "\n"
wrap += "\t\t" + "};\n"
mrsh_args[5:] = ["(uintptr_t) &more"]
invoke = ("arch_syscall_invoke%d(%s)"
% (len(mrsh_args),
", ".join(mrsh_args + [syscall_id])))
    # Coverity does not understand the syscall mechanism
    # and will already complain when any function argument
    # is not exactly the same size as uintptr_t. So tell Coverity
    # to ignore this particular rule here.
wrap += "\t\t/* coverity[OVERRUN] */\n"
if ret64:
wrap += "\t\t" + "(void)%s;\n" % invoke
wrap += "\t\t" + "return (%s)ret64;\n" % func_type
elif func_type == "void":
wrap += "\t\t" + "%s;\n" % invoke
wrap += "\t\t" + "return;\n"
else:
wrap += "\t\t" + "return (%s) %s;\n" % (func_type, invoke)
wrap += "\t" + "}\n"
wrap += "#endif\n"
# Otherwise fall through to direct invocation of the impl func.
# Note the compiler barrier: that is required to prevent code from
# the impl call from being hoisted above the check for user
# context.
impl_arglist = ", ".join([argrec[1] for argrec in args])
impl_call = "z_impl_%s(%s)" % (func_name, impl_arglist)
wrap += "\t" + "compiler_barrier();\n"
wrap += "\t" + "%s%s;\n" % ("return " if func_type != "void" else "",
impl_call)
wrap += "}\n"
if fn not in notracing:
argnames = ", ".join([f"{argname}" for _, argname in args])
trace_argnames = ""
if len(args) > 0:
trace_argnames = ", " + argnames
trace_diagnostic = ""
if os.getenv('TRACE_DIAGNOSTICS'):
trace_diagnostic = f"#warning Tracing {func_name}"
if func_type != "void":
wrap += syscall_tracer_with_return_template.format(func_type=func_type, func_name=func_name,
argnames=argnames, trace_argnames=trace_argnames,
syscall_id=syscall_id, trace_diagnostic=trace_diagnostic)
else:
wrap += syscall_tracer_void_template.format(func_type=func_type, func_name=func_name,
argnames=argnames, trace_argnames=trace_argnames,
syscall_id=syscall_id, trace_diagnostic=trace_diagnostic)
return wrap
# Returns an expression for the specified (zero-indexed!) marshalled
# parameter to a syscall, with handling for a final "more" parameter.
def mrsh_rval(mrsh_num, total):
if mrsh_num < 5 or total <= 6:
return "arg%d" % mrsh_num
else:
return "(((uintptr_t *)more)[%d])" % (mrsh_num - 5)
def marshall_defs(func_name, func_type, args):
mrsh_name = "z_mrsh_" + func_name
    nmrsh = 0        # number of marshalled uintptr_t parameters
vrfy_parms = [] # list of (arg_num, mrsh_or_parm_num, bool_is_split)
split_parms = [] # list of a (arg_num, mrsh_num) for each split
for i, (argtype, _) in enumerate(args):
if need_split(argtype):
vrfy_parms.append((i, len(split_parms), True))
split_parms.append((i, nmrsh))
nmrsh += 2
else:
vrfy_parms.append((i, nmrsh, False))
nmrsh += 1
# Final argument for a 64 bit return value?
if need_split(func_type):
nmrsh += 1
decl_arglist = ", ".join([" ".join(argrec) for argrec in args])
mrsh = "extern %s z_vrfy_%s(%s);\n" % (func_type, func_name, decl_arglist)
mrsh += "uintptr_t %s(uintptr_t arg0, uintptr_t arg1, uintptr_t arg2,\n" % mrsh_name
if nmrsh <= 6:
mrsh += "\t\t" + "uintptr_t arg3, uintptr_t arg4, uintptr_t arg5, void *ssf)\n"
else:
mrsh += "\t\t" + "uintptr_t arg3, uintptr_t arg4, void *more, void *ssf)\n"
mrsh += "{\n"
mrsh += "\t" + "_current->syscall_frame = ssf;\n"
for unused_arg in range(nmrsh, 6):
mrsh += "\t(void) arg%d;\t/* unused */\n" % unused_arg
if nmrsh > 6:
mrsh += ("\tZ_OOPS(Z_SYSCALL_MEMORY_READ(more, "
+ str(nmrsh - 6) + " * sizeof(uintptr_t)));\n")
for i, split_rec in enumerate(split_parms):
arg_num, mrsh_num = split_rec
arg_type = args[arg_num][0]
mrsh += "\t%s parm%d;\n" % (union_decl(arg_type), i)
mrsh += "\t" + "parm%d.split.lo = %s;\n" % (i, mrsh_rval(mrsh_num,
nmrsh))
mrsh += "\t" + "parm%d.split.hi = %s;\n" % (i, mrsh_rval(mrsh_num + 1,
nmrsh))
# Finally, invoke the verify function
out_args = []
for i, argn, is_split in vrfy_parms:
if is_split:
out_args.append("parm%d.val" % argn)
else:
out_args.append("*(%s*)&%s" % (args[i][0], mrsh_rval(argn, nmrsh)))
vrfy_call = "z_vrfy_%s(%s)\n" % (func_name, ", ".join(out_args))
if func_type == "void":
mrsh += "\t" + "%s;\n" % vrfy_call
mrsh += "\t" + "_current->syscall_frame = NULL;\n"
mrsh += "\t" + "return 0;\n"
else:
mrsh += "\t" + "%s ret = %s;\n" % (func_type, vrfy_call)
if need_split(func_type):
ptr = "((uint64_t *)%s)" % mrsh_rval(nmrsh - 1, nmrsh)
mrsh += "\t" + "Z_OOPS(Z_SYSCALL_MEMORY_WRITE(%s, 8));\n" % ptr
mrsh += "\t" + "*%s = ret;\n" % ptr
mrsh += "\t" + "_current->syscall_frame = NULL;\n"
mrsh += "\t" + "return 0;\n"
else:
mrsh += "\t" + "_current->syscall_frame = NULL;\n"
mrsh += "\t" + "return (uintptr_t) ret;\n"
mrsh += "}\n"
return mrsh, mrsh_name
def analyze_fn(match_group, fn):
func, args = match_group
try:
if args == "void":
args = []
else:
args = [typename_split(a.strip()) for a in args.split(",")]
func_type, func_name = typename_split(func)
except SyscallParseException:
sys.stderr.write("In declaration of %s\n" % func)
raise
sys_id = "K_SYSCALL_" + func_name.upper()
marshaller = None
marshaller, handler = marshall_defs(func_name, func_type, args)
invocation = wrapper_defs(func_name, func_type, args, fn)
# Entry in _k_syscall_table
table_entry = "[%s] = %s" % (sys_id, handler)
return (handler, invocation, marshaller, sys_id, table_entry)
def parse_args():
global args
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-i", "--json-file", required=True,
help="Read syscall information from json file")
parser.add_argument("-d", "--syscall-dispatch", required=True,
help="output C system call dispatch table file")
parser.add_argument("-l", "--syscall-list", required=True,
help="output C system call list header")
parser.add_argument("-o", "--base-output", required=True,
help="Base output directory for syscall macro headers")
parser.add_argument("-s", "--split-type", action="append",
help="A long type that must be split/marshalled on 32-bit systems")
parser.add_argument("-x", "--long-registers", action="store_true",
help="Indicates we are on system with 64-bit registers")
args = parser.parse_args()
def main():
parse_args()
if args.split_type is not None:
for t in args.split_type:
types64.append(t)
with open(args.json_file, 'r') as fd:
syscalls = json.load(fd)
invocations = {}
mrsh_defs = {}
mrsh_includes = {}
ids = []
table_entries = []
handlers = []
for match_group, fn in syscalls:
handler, inv, mrsh, sys_id, entry = analyze_fn(match_group, fn)
if fn not in invocations:
invocations[fn] = []
invocations[fn].append(inv)
ids.append(sys_id)
table_entries.append(entry)
handlers.append(handler)
if mrsh:
syscall = typename_split(match_group[0])[1]
mrsh_defs[syscall] = mrsh
mrsh_includes[syscall] = "#include <syscalls/%s>" % fn
with open(args.syscall_dispatch, "w") as fp:
table_entries.append("[K_SYSCALL_BAD] = handler_bad_syscall")
weak_defines = "".join([weak_template % name
for name in handlers
if not name in noweak])
# The "noweak" ones just get a regular declaration
weak_defines += "\n".join(["extern uintptr_t %s(uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, uintptr_t arg4, uintptr_t arg5, uintptr_t arg6, void *ssf);"
% s for s in noweak])
fp.write(table_template % (weak_defines,
",\n\t".join(table_entries)))
# Listing header emitted to stdout
ids.sort()
ids.extend(["K_SYSCALL_BAD", "K_SYSCALL_LIMIT"])
ids_as_defines = ""
for i, item in enumerate(ids):
ids_as_defines += "#define {} {}\n".format(item, i)
with open(args.syscall_list, "w") as fp:
fp.write(list_template % ids_as_defines)
os.makedirs(args.base_output, exist_ok=True)
for fn, invo_list in invocations.items():
out_fn = os.path.join(args.base_output, fn)
ig = re.sub("[^a-zA-Z0-9]", "_", "Z_INCLUDE_SYSCALLS_" + fn).upper()
include_guard = "#ifndef %s\n#define %s\n" % (ig, ig)
tracing_include = ""
if fn not in notracing:
tracing_include = "#include <tracing/tracing_syscall.h>"
header = syscall_template.format(include_guard=include_guard, tracing_include=tracing_include, invocations="\n\n".join(invo_list))
with open(out_fn, "w") as fp:
fp.write(header)
# Likewise emit _mrsh.c files for syscall inclusion
for fn in mrsh_defs:
mrsh_fn = os.path.join(args.base_output, fn + "_mrsh.c")
with open(mrsh_fn, "w") as fp:
fp.write("/* auto-generated by gen_syscalls.py, don't edit */\n")
fp.write("#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)\n")
fp.write("#pragma GCC diagnostic push\n")
fp.write("#endif\n")
fp.write("#ifdef __GNUC__\n")
fp.write("#pragma GCC diagnostic ignored \"-Wstrict-aliasing\"\n")
fp.write("#endif\n")
fp.write(mrsh_includes[fn] + "\n")
fp.write("\n")
fp.write(mrsh_defs[fn] + "\n")
fp.write("#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)\n")
fp.write("#pragma GCC diagnostic pop\n")
fp.write("#endif\n")
if __name__ == "__main__":
main()
| galak/zephyr | scripts/gen_syscalls.py | Python | apache-2.0 | 17,833 |
#!/usr/bin/env python
import os
import pwd
def get_username():
return pwd.getpwuid(os.getuid())[0]
| cboyce93/epitome-xl | src/util/username.py | Python | gpl-3.0 | 105 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Davivienda Account Banking',
'version': '0.1',
'license': 'AGPL-3',
'author': 'ClearCorp',
'website': 'http://www.clearcorp.co.cr',
'category': 'Accounting & Finance',
'depends': [
'account_banking_ccorp',
],
'init_xml': [],
'update_xml': [],
'demo_xml': [],
'description': 'Module intended to import Davivienda\'s statements files',
'active': False,
'installable': True,
}
| ClearCorp-dev/odoo-costa-rica | l10n_cr_account_banking_cr_davivienda/__openerp__.py | Python | agpl-3.0 | 1,500 |
# GromacsWrapper: test_example.py
# Copyright (c) 2009 Oliver Beckstein <[email protected]>
# Released under the GNU Public License 3 (or higher, your choice)
# See the file COPYING for details.
import pytest
import gromacs
def test_version():
release = gromacs.__version__
assert isinstance(release, str)
| Becksteinlab/GromacsWrapper | tests/test_version.py | Python | gpl-3.0 | 318 |
"""Standard test data.
For more information, see
- http://www.wiley.com/legacy/wileychi/pesarin/material.html
"""
import os as _os
import numpy as np
from .. import data_dir
__all__ = ['load',
'kenya', ]
def load(f):
r"""Load a data file located in the data directory.
Parameters
----------
f : string
File name.
Returns
-------
x : array like
Data loaded from permute.data_dir.
"""
return np.recfromcsv(_os.path.join(data_dir, f), delimiter=",", encoding=None)
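# For illustration: load("npc/kenya.csv") reads the Kenya dataset shipped in
# permute's data directory (equivalent to what the kenya() helper below does).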
def nsgk():
r"""NSGK test data for irr.
Notes
-----
Here is first 5 lines of `nsgk.csv`::
time_stamp,domain,video,rater
1,8,1,1
1,12,1,1
1,15,1,1
1,20,1,1
"""
nz = np.loadtxt(_os.path.join(data_dir, "nsgk.csv"),
delimiter=',', skiprows=1, dtype=np.int32)
shape = tuple(nz.max(axis=0))
x = np.zeros(shape, dtype=np.int32)
nz -= 1
for r in nz:
x[tuple(r)] = 1
# given order: time_stamp,domain,video,rater
# desired order: domain,video,rater,time_stamp
x = x.transpose(1, 2, 3, 0)
# hardcoding the number of timestamps per video
time_stamps = [36, 32, 35, 37, 31, 35, 40, 32]
    p1 = [[m[:, :time_stamps[i]] for i, m in enumerate(n)] for n in x]
## Alternatively, I could return a 2D object array with
## rater x time_stamp(video) matrices as entries
## Not sure which is better, so I will wait to see how I use it.
# p1 = np.zeros(x.shape[:2], dtype=object)
# for i, n in enumerate(x):
# for j, m in enumerate(n):
# p1[i, j] = m
return p1
def macnell2014():
r"""Data from MacNell et al. 2014
.. Lillian MacNell, Adam Driscoll, and Andrea N Hunt, "What's
in a Name: Exposing Gender Bias in Student Ratings of Teaching,"
Innovative Higher Education, pp. 1-13, 2014.
"""
return load("MacNell2014.csv")
def clinical_trial():
r"""Data from Ottoboni et al. 2018
.. Kellie Ottoboni, Fraser Lewis, and Luigi Salmaso, "An Empirical
Comparison of Parametric and Permutation Tests for Regression
Analysis of Randomized Experiments," Statistics in
Biopharmaceutical Research, 2018.
"""
return load("rb_clinical_trial.csv")
# def another_poss():
# nz = np.loadtxt(_os.path.join(data_dir, "nsgk.csv"),
# delimiter=',', skiprows=1, dtype=np.int)
# _, nd, nv, nr = tuple(nz.max(axis=0))
# dv = np.zeros((nd, nv), dtype=object)
# time_stamps = [36, 32, 35, 37, 31, 35, 40, 32]
# for n in range(nd):
# for v in range(nv):
# dv[n, v] = np.zeros((nr, time_stamps[v]), dtype=np.int)
# nz -= 1
# for _ts, _d, _v, _r in nz:
# dv[_d, _v][_r, _ts] = 1
#
def botulinum():
r"""The
"""
return load(_os.path.join("npc", "botulinum.csv"))
def chrom17m():
r"""The
"""
return load(_os.path.join("npc", "chrom17m.csv"))
def confocal():
"""The
"""
return load(_os.path.join("npc", "confocal.csv"))
def germina():
"""The
"""
return load(_os.path.join("npc", "germina.csv"))
def kenya():
"""The Kenya dataset contains 16 observations and two variables in total.
It concerns an anthropological study on the "Ol Molo" and "Kamba"
populations.
"""
return load(_os.path.join("npc", "kenya.csv"))
def massaro_blair():
"""The
"""
return load(_os.path.join("npc", "massaro_blair.csv"))
def monachus():
"""The
"""
return load(_os.path.join("npc", "monachus.csv"))
def mult():
"""The
"""
return load(_os.path.join("npc", "mult.csv"))
def perch():
"""The
"""
return load(_os.path.join("npc", "perch.csv"))
def rats():
"""The
"""
return load(_os.path.join("npc", "rats.csv"))
def setig():
"""The
"""
return load(_os.path.join("npc", "setig.csv"))
def urology():
"""The
"""
return load(_os.path.join("npc", "urology.csv"))
def washing_test():
"""The
"""
return load(_os.path.join("npc", "washing_test.csv"))
def waterfalls():
"""The
"""
return load(_os.path.join("npc", "waterfalls.csv"))
def ipat():
"""The IPAT dataset from Pesarin and Salmaso Chapter 1
"""
return load(_os.path.join("npc", "examples_chapters_1-4", "ipat.csv"))
def job():
"""The job satisfaction dataset from Pesarin and Salmaso Chapter 1
"""
return load(_os.path.join("npc", "examples_chapters_1-4", "job.csv"))
def fly():
"""The fly dataset from Pesarin and Salmaso Chapter 4
"""
return load(_os.path.join("npc", "examples_chapters_1-4", "fly.csv"))
def testosterone():
"""The testosterone dataset from Pesarin and Salmaso Chapter 2
"""
return load(_os.path.join("npc", "examples_chapters_1-4", "testosterone.csv"))
def worms():
"""The worms dataset from Pesarin and Salmaso Chapter 1
"""
return load(_os.path.join("npc", "examples_chapters_1-4", "worms.csv"))
| jarrodmillman/permute | permute/data/__init__.py | Python | bsd-2-clause | 5,034 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Information on the graph plugin."""
# This name is used as the plugin prefix route and to identify this plugin
# generally, and is also the `plugin_name` for run graphs after data-compat
# transformations.
PLUGIN_NAME = "graphs"
# The Summary API is implemented in TensorFlow because it uses TensorFlow internal APIs.
# As a result, this SummaryMetadata is a bit unconventional and uses non-public
# hardcoded name as the plugin name. Please refer to link below for the summary ops.
# https://github.com/tensorflow/tensorflow/blob/11f4ecb54708865ec757ca64e4805957b05d7570/tensorflow/python/ops/summary_ops_v2.py#L757
PLUGIN_NAME_RUN_METADATA = "graph_run_metadata"
# https://github.com/tensorflow/tensorflow/blob/11f4ecb54708865ec757ca64e4805957b05d7570/tensorflow/python/ops/summary_ops_v2.py#L788
PLUGIN_NAME_RUN_METADATA_WITH_GRAPH = "graph_run_metadata_graph"
# https://github.com/tensorflow/tensorflow/blob/565952cc2f17fdfd995e25171cf07be0f6f06180/tensorflow/python/ops/summary_ops_v2.py#L825
PLUGIN_NAME_KERAS_MODEL = "graph_keras_model"
# Plugin name used for `Event.tagged_run_metadata`. This doesn't fall into one
# of the above cases because (despite the name) `PLUGIN_NAME_RUN_METADATA` is
# _required_ to have both profile and op graphs, whereas tagged run metadata
# need only have profile data.
PLUGIN_NAME_TAGGED_RUN_METADATA = "graph_tagged_run_metadata"
# In the context of the data provider interface, tag name given to a
# graph read from the `graph_def` field of an `Event` proto, which is
# not attached to a summary and thus does not have a proper tag name of
# its own. Run level graphs always represent `GraphDef`s (graphs of
# TensorFlow ops), never conceptual graphs, profile graphs, etc. This is
# the only tag name used by the `"graphs"` plugin.
RUN_GRAPH_NAME = "__run_graph__"
| tensorflow/tensorboard | tensorboard/plugins/graph/metadata.py | Python | apache-2.0 | 2,502 |
# coding: utf-8
"""
Only use this if you want to update a bootstrapV.M.py file to a newer virtualenv!
Usage:
/path/to/specific/version/of/python gen.py
"""
import sys
import virtualenv
EXTENSION = """
# coding: utf-8
import os
from os.path import abspath, basename, dirname, join, pardir
import subprocess
# get current dir
def adjust_options(options, args):
BOOTSTRAP_PATH = abspath(dirname(__file__))
# erase args
while len(args):
args.pop()
# set virtualenv's dir
args.append(join(BOOTSTRAP_PATH, pardir))
# override default options
def extend_parser(parser):
parser.set_defaults(unzip_setuptools=True,
use_distribute=True)
# delegate the final hooks to an external script so we don't need to change this.
def after_install(options, home_dir):
from hooks import after_install
after_install(options, home_dir)
"""
# the below syntax works on both 2.6 and 2.7.
filename = "bootstrap{0}.{1}.py".format(*sys.version_info)
output = virtualenv.create_bootstrap_script(EXTENSION)
f = open(filename, 'w').write(output)
| eahneahn/free | bootstrap/gen.py | Python | agpl-3.0 | 1,091 |
#-*- coding: utf-8 -*-
from datetime import datetime
from django.template import defaultfilters
from django.utils.translation import ugettext as _
from django.utils.timezone import is_aware, utc
from .. import register
@register.filter(expects_localtime=True)
def shortnaturaltime(value):
"""
    now, 1s, 1m, 1h, 1 Jan, 1 Jan 2012
"""
tz = utc if is_aware(value) else None
now = datetime.now(tz)
if value > now: # Future
return '%(delta)s' % {'delta': defaultfilters.date(value, 'j M \'y')}
delta = now - value
if delta.days:
if defaultfilters.date(now, 'y') == defaultfilters.date(value, 'y'):
return '%(delta)s' % {'delta': defaultfilters.date(value, 'j M')}
return '%(delta)s' % {'delta': defaultfilters.date(value, 'j M \'y')}
if not delta.seconds:
return _('now')
count = delta.seconds
if count < 60:
return _('%(count)ss') % {'count': count}
count //= 60
if count < 60:
return _('%(count)sm') % {'count': count}
count //= 60
return _('%(count)sh') % {'count': count} | Si-elegans/Web-based_GUI_Tools | spirit/templatetags/tags/utils/time.py | Python | apache-2.0 | 1,103 |
# (c) 2016 Open Source Geospatial Foundation - all rights reserved
# (c) 2014 - 2015 Centre for Maritime Research and Experimentation (CMRE)
# (c) 2013 - 2014 German Aerospace Center (DLR)
# This code is licensed under the GPL 2.0 license, available at the root
# application directory.
import ftplib
import pickle
import md5
import os
import string
import logging # if not std, http://www.red-dove.com/python_logging.html
import path # http://www.jorendorff.com/articles/python/path
import upload
__author__ = "Ned Batchelder"
__copyright__ = "Copyright 2016 Open Source Geospatial Foundation - all rights reserved"
__license__ = "GPL"
"""
FtpUpload
Upload files via FTP based on their content changing.
Based on original code by
Ned Batchelder
http://www.nedbatchelder.com
version = '1.0a'
Modified by
Alessio Fabiani, GeoSolutions S.A.S.
"""
class EzFtp:
"""
A simplified interface to ftplib.
Lets you use full pathnames, with server-side
directory management handled automatically.
"""
def __init__(self, ftp):
self.ftp = ftp
self.serverDir = ''
def setRoot(self, dir):
"""
Set the remote directory that we'll call the root.
"""
self.cd(dir, create=True)
self.ftp.cwd("..")
self.ftp.cwd(dir)
def cd(self, dir, create=1):
"""
Change the directory on the server, if need be.
If create is true, directories are created if necessary to get to the full path.
Returns true if the directory is changed.
"""
if dir != self.serverDir:
# Move up to the common root.
while not dir.startswith(self.serverDir):
logging.info("ftpcd ..")
self.ftp.cwd("..")
self.serverDir = os.path.split(self.serverDir)[0]
# Move down to the right directory
doDirs = dir[len(self.serverDir):]
for d in string.split(doDirs, os.sep):
if d:
try:
logging.info("ftpcd %s" % d)
self.ftp.cwd(d)
except BaseException:
if create:
logging.info("ftpmkdir %s" % d)
self.ftp.mkd(d)
self.ftp.cwd(d)
else:
return 0
self.serverDir = os.path.join(self.serverDir, d)
return 1
def putasc(self, this, that):
"""
Put a text file to the server.
"""
thatDir, thatFile = os.path.split(that)
self.cd(thatDir)
f = open(this, "r")
logging.info("ftpstorasc %s" % that)
try:
self.ftp.storlines("STOR %s" % (thatFile), f)
except Exception as e:
logging.exception(e)
raise
def putbin(self, this, that):
"""
Put a binary file to the server.
"""
thatDir, thatFile = os.path.split(that)
self.cd(thatDir)
f = open(this, "rb")
logging.info("ftpstorbin %s" % that)
try:
self.ftp.storbinary("STOR %s" % (thatFile), f, 1024)
except Exception as e:
logging.exception(e)
raise
def delete(self, that):
"""
Delete a file on the server.
"""
thatDir, thatFile = os.path.split(that)
if self.cd(thatDir, 0):
logging.info("ftpdel %s" % that)
try:
self.ftp.delete(thatFile)
except BaseException:
pass
def quit(self):
"""
Quit.
"""
self.ftp.quit()
class FtpUpload(upload.Upload):
"""
Provides intelligent FTP uploading of files, using MD5 hashes to track
which files have to be uploaded. Each upload is recorded in a local
file so that the next upload can skip the file if its contents haven't
changed. File timestamps are ignored, allowing regenerated files to
be properly uploaded only if their contents have changed.
Call `setHost` and `setMd5File` to establish the settings for a session,
    then `upload` for each set of files to upload.  If you want files that were
    removed locally to also be deleted on the remote host, call
    `deleteOldFiles` once, then `finish` to perform the closing bookkeeping.
::
        fu = FtpUpload('ftp.myhost.com', 'myusername', 'password')
fu.setHost('ftp.myhost.com', 'myusername', 'password')
fu.setMd5File('myhost.md5') # optional
fu.upload(
hostdir='www', src='.',
text='*.html *.css', binary='*.gif *.jpg'
)
# more upload() calls can go here..
fu.deleteOldFiles()
fu.finish()
"""
def __init__(self, host, username, password, id="master"):
upload.Upload.__init__(self, host, username, password, id)
self.ftp = None
self.ezftp = None
self.md5file = None
self.md5DictIn = {}
self.md5DictOut = {}
self.md5DictUp = {}
# self.setHost(host, username, password)
def setHost(self, host, username, password):
"""
Set the host, the username and password.
"""
if not self.ftp:
try:
hoststr, portstr = host.split(':')
except BaseException:
hoststr = host
portstr = None
self.ftp = ftplib.FTP()
self.ftp.set_debuglevel(3)
if portstr:
port = int(portstr)
self.ftp.connect(hoststr, port)
else:
self.ftp.connect(hoststr, 21)
self.ftp.login(username, password)
self.ftp.set_pasv(1)
def setMd5File(self, md5file):
"""
Assign a filename to use for the MD5 tracking.
"""
self.md5file = md5file
if self.md5file:
try:
inf = open(self.md5file, "r")
self.md5DictIn = pickle.load(inf)
self.md5DictUp.update(self.md5DictIn)
inf.close()
except IOError:
self.md5DictIn = {}
def Upload(self,
hostdir='.',
text='*.*',
binary='',
src='.'
):
"""
Upload a set of files.
Source files are found in the directory named by `src`
(and its subdirectories recursively). The files are uploaded
to the directory named by `hostdir` on the remote host.
Files that match one of the space-separated patterns in `text`
are uploaded as text files, those that match the patterns in
`binary` are uploaded as binary files.
This method can be called a number of times to upload different
sets of files to or from different directories within the same
FtpUpload session.
"""
if not self.ezftp:
if not self.ftp:
self.setHost(self.host, self.username, self.password)
self.ezftp = EzFtp(self.ftp)
if hostdir != '.':
self.ezftp.setRoot('/')
self.ezftp.setRoot(hostdir)
        # patdict maps fnmatch patterns to the ftp put function to use.
patdict = {}
for pat in text.split():
patdict[pat] = self.ezftp.putasc
for pat in binary.split():
patdict[pat] = self.ezftp.putbin
# Walk the tree, putting files to the ezftp.
srcpath = path.path(src)
for thispath in srcpath.walkfiles():
thatpath = hostdir + os.sep + srcpath.relpathto(thispath)
logging.info("thatpath %s" % thatpath)
thatpathstr = str(thatpath)
# Compute this file's MD5 fingerprint
m = md5.new()
f = open(thispath, "rb")
for l in f.readlines():
m.update(l)
thisMd5 = m.hexdigest()
# What was the last MD5 fingerprint?
thatMd5 = self.md5DictIn.get(thatpathstr, '')
# If the current file is different, then put it to the server.
if thisMd5 != thatMd5:
# Find the pattern the file matches, and use the ftp function
# from the map.
for pat in patdict.keys():
if thispath.fnmatch(pat):
ftpfn = patdict[pat]
ftpfn(thispath, thatpath)
# Remember the new fingerprint.
self.md5DictOut[thatpathstr] = thisMd5
self.md5DictUp[thatpathstr] = thisMd5
def deleteOldFiles(self):
"""
Delete any remote files that we have uploaded previously but
that weren't considered in this FtpUpload session. This doesn't
touch files that exist on the remote host but were never uploaded
by this module.
"""
# Files in md5DictIn but not in md5DictOut must have been removed.
for this in self.md5DictIn:
if this not in self.md5DictOut:
self.ezftp.delete(this)
del self.md5DictUp[this]
def finish(self):
"""
Do our final bookkeeping.
"""
# Done with ftp'ing.
self.ezftp.quit()
# Write the md5 control file out for next time.
if self.md5file:
outf = open(self.md5file, "w")
pickle.dump(self.md5DictUp, outf)
outf.close()
| geoserver/wps-remote | src/wpsremote/ftpUpload.py | Python | gpl-2.0 | 9,550 |
#!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""An implementation of a data store based on mysql.
--------------->>>>>>>>>>>>>>> DEPRECATED <<<<<<<<<<<<<<<---------------
Do not use!!!!
This datastore will be removed in a future version of GRR.
"""
import Queue
import threading
import time
import MySQLdb
from MySQLdb import cursors
import logging
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import rdfvalue
from grr.lib import utils
# pylint: disable=nonstandard-exception
class Error(data_store.Error):
"""Base class for all exceptions in this module."""
# pylint: enable=nonstandard-exception
class MySQLConnection(object):
"""A Class to manage MySQL database connections."""
def __init__(self, queue=None):
self.queue = queue
try:
self._MakeConnection(database=config_lib.CONFIG["Mysql.database_name"])
except MySQLdb.OperationalError as e:
# Database does not exist
if "Unknown database" in str(e):
dbh = self._MakeConnection()
cursor = dbh.cursor()
cursor.execute("Create database `%s`" %
config_lib.CONFIG["Mysql.database_name"])
self._MakeConnection(database=config_lib.CONFIG["Mysql.database_name"])
else:
raise
def _MakeConnection(self, database=""):
try:
connection_args = dict(
user=config_lib.CONFIG["Mysql.database_username"],
db=database, charset="utf8",
passwd=config_lib.CONFIG["Mysql.database_password"],
cursorclass=cursors.DictCursor)
if config_lib.CONFIG["Mysql.host"]:
connection_args["host"] = config_lib.CONFIG["Mysql.host"]
if config_lib.CONFIG["Mysql.port"]:
connection_args["port"] = config_lib.CONFIG["Mysql.port"]
self.dbh = MySQLdb.connect(**connection_args)
self.cursor = self.dbh.cursor()
self.cursor.connection.autocommit(True)
return self.dbh
except MySQLdb.OperationalError as e:
# This is a fatal error, we just raise the top level exception here.
if "Access denied" in str(e):
raise Error(str(e))
raise
def __enter__(self):
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
if self.queue:
self.queue.put(self)
def Execute(self, *args):
"""Executes a query."""
retries = 10
for _ in range(1, retries):
try:
self.cursor.execute(*args)
return self.cursor.fetchall()
except MySQLdb.Error:
time.sleep(.2)
try:
database = config_lib.CONFIG["Mysql.database_name"]
self._MakeConnection(database=database)
except MySQLdb.OperationalError:
pass
# If something goes wrong at this point, we just let it raise.
self.cursor.execute(*args)
return self.cursor.fetchall()
class ConnectionPool(object):
"""A pool of connections to the mysql server.
Usage:
with data_store.DB.pool.GetConnection() as connection:
connection.Execute(.....)
"""
def __init__(self, pool_size=5):
self.connections = Queue.Queue()
for _ in range(pool_size):
self.connections.put(MySQLConnection(self.connections))
def GetConnection(self):
return self.connections.get(block=True)
class MySQLDataStore(data_store.DataStore):
"""A mysql based data store."""
POOL = None
def __init__(self):
logging.warning("Starting MySQLDataStore. This Datastore is DEPRECATED!")
logging.warning("This datastore will be removed!!!")
logging.warning("Recommended alternatives include MySQLAdvancedDataStore")
logging.warning("and HTTPDataStore.")
# Use the global connection pool.
if MySQLDataStore.POOL is None:
MySQLDataStore.POOL = ConnectionPool()
self.pool = self.POOL
self.lock = threading.Lock()
self.to_set = []
self.table_name = config_lib.CONFIG["Mysql.table_name"]
super(MySQLDataStore, self).__init__()
def Initialize(self):
with self.pool.GetConnection() as connection:
try:
connection.Execute("desc `%s`" % self.table_name)
except MySQLdb.Error:
self.RecreateDataBase()
def DropDatabase(self):
"""Drops the database table."""
with self.pool.GetConnection() as connection:
try:
connection.Execute("drop table `%s`" % self.table_name)
except MySQLdb.OperationalError:
pass
def RecreateDataBase(self):
"""Drops the table and creates a new one."""
self.DropDatabase()
with self.pool.GetConnection() as connection:
connection.Execute("""
CREATE TABLE IF NOT EXISTS `%s` (
hash BINARY(32) DEFAULT NULL,
subject VARCHAR(4096) CHARACTER SET utf8 DEFAULT NULL,
prefix VARCHAR(256) CHARACTER SET utf8 DEFAULT NULL,
attribute VARCHAR(4096) CHARACTER SET utf8 DEFAULT NULL,
age BIGINT(22) UNSIGNED DEFAULT NULL,
value_string TEXT CHARACTER SET utf8 NULL,
value_binary LONGBLOB NULL,
value_integer BIGINT(22) UNSIGNED DEFAULT NULL,
KEY `hash` (`hash`),
KEY `prefix` (`prefix`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT ='Table representing AFF4 objects';
""" % config_lib.CONFIG["Mysql.table_name"])
connection.Execute("CREATE INDEX attribute ON `%s` (attribute(300));" %
config_lib.CONFIG["Mysql.table_name"])
def DeleteAttributes(self, subject, attributes, start=None, end=None,
sync=True, token=None):
"""Remove some attributes from a subject."""
_ = sync # Unused
self.security_manager.CheckDataStoreAccess(token, [subject], "w")
if not attributes:
return
with self.pool.GetConnection() as cursor:
query = ("delete from `%s` where hash=md5(%%s) and "
"subject=%%s and attribute in (%s) " % (
self.table_name,
",".join(["%s"] * len(attributes))))
args = [subject, subject] + list(attributes)
      if start is not None or end is not None:
query += " and age >= %s and age <= %s"
args.append(int(start or 0))
mysql_unsigned_bigint_max = 18446744073709551615
if end is None:
end = mysql_unsigned_bigint_max
args.append(int(end))
cursor.Execute(query, args)
def DeleteSubject(self, subject, sync=False, token=None):
_ = sync
self.security_manager.CheckDataStoreAccess(token, [subject], "w")
with self.pool.GetConnection() as cursor:
query = ("delete from `%s` where hash=md5(%%s) and subject=%%s " %
self.table_name)
args = [subject, subject]
cursor.Execute(query, args)
def Flush(self):
with self.lock:
to_set = self.to_set
self.to_set = []
self._MultiSet(to_set)
def Size(self):
database_name = config_lib.CONFIG["Mysql.database_name"]
query = ("SELECT table_schema, Sum(data_length + index_length) `size` "
"FROM information_schema.tables "
"WHERE table_schema = \"%s\" GROUP by table_schema" %
database_name)
with self.pool.GetConnection() as cursor:
result = cursor.Execute(query, [])
if len(result) != 1:
return -1
return int(result[0]["size"])
def Escape(self, string):
"""Escape the string so it can be interpolated into an sql statement."""
# This needs to come from a connection object so it is escaped according to
# the current charset:
with self.pool.GetConnection() as cursor:
return cursor.dbh.escape(string)
def ResolveMulti(self, subject, attributes, timestamp=None, limit=None,
token=None):
"""Resolves multiple attributes at once for one subject."""
self.security_manager.CheckDataStoreAccess(
token, [subject], self.GetRequiredResolveAccess(attributes))
with self.pool.GetConnection() as cursor:
query = ("select * from `%s` where hash = md5(%%s) and "
"subject = %%s and attribute in (%s) " % (
self.table_name,
",".join(["%s"] * len(attributes)),
))
args = [subject, subject] + attributes[:]
query += self._TimestampToQuery(timestamp, args)
if limit:
query += " LIMIT %d" % limit
result = cursor.Execute(query, args)
for row in result:
subject = row["subject"]
value = self.DecodeValue(row)
yield row["attribute"], value, rdfvalue.RDFDatetime(row["age"])
def _TimestampToQuery(self, timestamp, args):
"""Convert the timestamp to a query fragment and add args."""
if timestamp is None or timestamp == self.NEWEST_TIMESTAMP:
query = " order by age desc "
elif timestamp == self.ALL_TIMESTAMPS:
query = " order by age desc "
elif isinstance(timestamp, (tuple, list)):
query = " and age >= %s and age <= %s order by age desc "
args.append(int(timestamp[0]))
args.append(int(timestamp[1]))
return query
def MultiResolvePrefix(self, subjects, attribute_prefix, timestamp=None,
limit=None, token=None):
self.security_manager.CheckDataStoreAccess(
token, subjects, self.GetRequiredResolveAccess(attribute_prefix))
if not subjects:
return {}
with self.pool.GetConnection() as cursor:
query = "select * from `%s` where hash in (%s) and subject in (%s) " % (
self.table_name, ",".join(["md5(%s)"] * len(subjects)),
",".join(["%s"] * len(subjects)),
)
# Allow users to specify a single string here.
if isinstance(attribute_prefix, basestring):
attribute_pattern = [attribute_prefix + "%"]
else:
attribute_pattern = [prefix + "%" for prefix in attribute_prefix]
query += "and (" + " or ".join(
["attribute like %s"] * len(attribute_pattern)) + ")"
args = list(subjects) + list(subjects) + list(attribute_pattern)
query += self._TimestampToQuery(timestamp, args)
seen = set()
result = {}
remaining_limit = limit
for row in cursor.Execute(query, args):
subject = row["subject"]
value = self.DecodeValue(row)
# Only record the latest results. This is suboptimal since it always
# returns all the results from the db. Can we do better with better SQL?
if timestamp is None or timestamp == self.NEWEST_TIMESTAMP:
if (row["attribute"], row["subject"]) in seen:
continue
else:
seen.add((row["attribute"], row["subject"]))
result.setdefault(subject, []).append((row["attribute"], value,
row["age"]))
if remaining_limit:
remaining_limit -= 1
if remaining_limit == 0:
break
return result.iteritems()
def MultiSet(self, subject, values, timestamp=None, replace=True,
sync=True, to_delete=None, token=None):
"""Set multiple attributes' values for this subject in one operation."""
self.security_manager.CheckDataStoreAccess(token, [subject], "w")
to_delete = set(to_delete or [])
if timestamp is None:
timestamp = time.time() * 1e6
# Prepare a bulk insert operation.
subject = utils.SmartUnicode(subject)
to_set = []
# Build a document for each unique timestamp.
for attribute, sequence in values.items():
for value in sequence:
entry_timestamp = None
if isinstance(value, tuple):
value, entry_timestamp = value
if entry_timestamp is None:
entry_timestamp = timestamp
attribute = utils.SmartUnicode(attribute)
prefix = attribute.split(":", 1)[0]
# Replacing means to delete all versions of the attribute first.
if replace:
to_delete.add(attribute)
to_set.extend(
[subject, subject, int(entry_timestamp), attribute, prefix] +
self._Encode(attribute, value))
if to_delete:
self.DeleteAttributes(subject, to_delete, token=token)
if to_set:
if sync:
self._MultiSet(to_set)
else:
with self.lock:
self.to_set.extend(to_set)
def _MultiSet(self, values):
if not values:
return
query = ("insert into `%s` (hash, subject, age, attribute, prefix, "
"value_string, value_integer, value_binary) values " %
self.table_name)
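    # "values" is a flat list: each logical row contributes 8 entries in the order
    # (subject for md5 hashing, subject, age, attribute, prefix, value_string,
    # value_integer, value_binary), matching the column list above and the
    # placeholder tuple built below.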
nr_items = len(values) / 8
query += ", ".join(["(md5(%s), %s, %s, %s, %s, %s, %s, %s)"] * nr_items)
with self.pool.GetConnection() as cursor:
cursor.Execute(query, values)
def _Encode(self, attribute, value):
"""Return a list encoding this value."""
try:
if isinstance(value, int):
return [None, value, None]
elif isinstance(value, unicode):
return [value, None, None]
elif attribute.attribute_type.data_store_type in (
"integer", "unsigned_integer"):
return [None, int(value), None]
elif attribute.attribute_type.data_store_type == "string":
return [utils.SmartUnicode(value), None, None]
elif attribute.attribute_type.data_store_type == "bytes":
return [None, None, utils.SmartStr(value)]
except AttributeError:
try:
return [None, None, value.SerializeToString()]
except AttributeError:
return [None, None, utils.SmartStr(value)]
def EncodeValue(self, attribute, value):
"""Returns the value encoded into the correct fields."""
result = {}
try:
if isinstance(value, int):
result["value_integer"] = value
elif isinstance(value, unicode):
result["value_string"] = value
elif attribute.attribute_type.data_store_type in (
"integer", "unsigned_integer"):
result["value_integer"] = int(value)
elif attribute.attribute_type.data_store_type == "string":
result["value_string"] = utils.SmartUnicode(value)
elif attribute.attribute_type.data_store_type == "bytes":
result["value_binary"] = utils.SmartStr(value)
except AttributeError:
try:
result["value_binary"] = value.SerializeToString()
except AttributeError:
result["value_binary"] = utils.SmartStr(value)
return result
def DecodeValue(self, row):
"""Decode the value from the row object."""
value = row["value_string"]
if value is None:
value = row["value_integer"]
if value is None:
value = row["value_binary"]
return value
def Transaction(self, subject, lease_time=None, token=None):
return MySQLTransaction(self, subject, lease_time=lease_time, token=token)
class MySQLTransaction(data_store.CommonTransaction):
"""The Mysql data store transaction object.
  This object does not aim to ensure ACID-like consistency. We only ensure that
  two simultaneous locks cannot be held on the same AFF4 subject.
This means that the first thread which grabs the lock is considered the owner
of the transaction. Any subsequent transactions on the same subject will fail
immediately with data_store.TransactionError.
A lock is considered expired after a certain time.
"""
def __init__(self, store, subject, lease_time=None, token=None):
"""Ensure we can take a lock on this subject."""
super(MySQLTransaction, self).__init__(store, subject,
lease_time=lease_time, token=token)
if lease_time is None:
lease_time = config_lib.CONFIG["Datastore.transaction_timeout"]
self.lock_time = lease_time
self.table_name = store.table_name
with store.pool.GetConnection() as connection:
self.expires_lock = int((time.time() + self.lock_time) * 1e6)
# This will take over the lock if the lock is too old.
connection.Execute(
"update `%s` set value_integer=%%s where "
"attribute='transaction' and subject=%%s and hash=md5(%%s) and "
"(value_integer < %%s)" % self.table_name,
(self.expires_lock, subject, subject, time.time() * 1e6))
self.CheckForLock(connection, subject)
def UpdateLease(self, lease_time):
self.expires_lock = int((time.time() + lease_time) * 1e6)
with self.store.pool.GetConnection() as connection:
# This will take over the lock if the lock is too old.
connection.Execute(
"update `%s` set value_integer=%%s where "
"attribute='transaction' and subject=%%s and hash=md5(%%s)" %
self.table_name, (self.expires_lock, self.subject, self.subject))
def CheckLease(self):
return max(0, self.expires_lock / 1e6 - time.time())
def CheckForLock(self, connection, subject):
"""Checks that the lock has stuck."""
for row in connection.Execute(
"select * from `%s` where subject=%%s and hash=md5(%%s) and "
"attribute='transaction'" % self.table_name, (subject, subject)):
# We own this lock now.
if row["value_integer"] == self.expires_lock:
return
# Someone else owns this lock.
else:
raise data_store.TransactionError("Subject %s is locked" % subject)
# If we get here the row does not exist:
connection.Execute(
"insert ignore into `%s` set value_integer=%%s, "
"attribute='transaction', subject=%%s, hash=md5(%%s) " %
self.table_name, (self.expires_lock, self.subject, self.subject))
self.CheckForLock(connection, subject)
def Abort(self):
self._RemoveLock()
def Commit(self):
super(MySQLTransaction, self).Commit()
self._RemoveLock()
def _RemoveLock(self):
# Remove the lock on the document. Note that this only resets the lock if
# we actually hold it (value_integer == self.expires_lock).
with self.store.pool.GetConnection() as connection:
connection.Execute(
"update `%s` set value_integer=0 where "
"attribute='transaction' and value_integer=%%s and hash=md5(%%s) and "
"subject=%%s" % self.table_name,
(self.expires_lock, self.subject, self.subject))
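# Example (sketch): transactions are normally obtained through the data store's
# Transaction() method; the subject, token and error handling here are illustrative.
#
#   try:
#     tx = data_store.DB.Transaction(u"aff4:/some/subject", lease_time=30, token=token)
#   except data_store.TransactionError:
#     pass  # someone else currently holds the lock on this subject
#   else:
#     # ... mutate the subject ...
#     tx.Commit()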
| pombredanne/grr | lib/data_stores/mysql_data_store.py | Python | apache-2.0 | 17,981 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
NED Query Tool
==============
Module containing a series of functions that execute queries to the NASA
Extragalactic Database (NED):
.. topic:: Revision History
Refactored using common API as a part of Google Summer of Code 2013.
:Originally contributed by:
K. Willett, Jun 2011
:Acknowledgements:
Based off Adam Ginsburg's Splatalogue search routine:
https://github.com/keflavich/agpy/blob/master/agpy/query_splatalogue.py
Service URLs to acquire the VO Tables are taken from Mazzarella et
al. (2007) The National Virtual Observatory: Tools and Techniques
for Astronomical Research, ASP Conference Series, Vol. 382., p.165
"""
from astropy import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astroquery.ipac.ned`.
"""
server = _config.ConfigItem(
['http://ned.ipac.caltech.edu/cgi-bin/'],
'Name of the NED server to use.')
timeout = _config.ConfigItem(
60,
'Time limit for connecting to NED server.')
# Set input parameters of choice
hubble_constant = _config.ConfigItem(
[73, 70.5],
'Value of the Hubble Constant for many NED queries.')
"""
The correct redshift for NED queries may be chosen by specifying numbers
1, 2, 3 and 4, having the following meanings:
(1) To the Reference Frame defined by the 3K CMB
(2) To the Reference Frame defined by the Virgo Infall only
(3) To the Reference Frame defined by the (Virgo + GA) only
(4) To the Reference Frame defined by the (Virgo + GA + Shapley)
"""
correct_redshift = _config.ConfigItem(
[1, 2, 3, 4],
'The correct redshift for NED queries, see comments above.')
# Set output parameters of choice
output_coordinate_frame = _config.ConfigItem(
['Equatorial',
'Ecliptic',
'Galactic',
'SuperGalactic'],
'Frame in which to display the coordinates in the output.')
output_equinox = _config.ConfigItem(
['J2000.0', 'B1950.0'],
'Equinox for the output coordinates.')
sort_output_by = _config.ConfigItem(
["RA or Longitude",
"DEC or Latitude",
"GLON",
"GLAT",
"Redshift - ascending",
"Redshift - descending"],
'Display output sorted by this criteria.')
conf = Conf()
from .core import Ned, NedClass
__all__ = ['Ned', 'NedClass',
'Conf', 'conf',
]
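# Example (sketch): the configuration items above can be overridden at runtime via
# the ``conf`` namespace; the values shown are illustrative only.
#
#   from astroquery.ipac.ned import conf
#   conf.timeout = 120
#   with conf.set_temp('hubble_constant', 70.5):
#       ...  # queries issued here use the temporary value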
| ceb8/astroquery | astroquery/ipac/ned/__init__.py | Python | bsd-3-clause | 2,571 |
import json
import logging
import hashlib
from luigi.s3 import S3Client
import os
import sys
import shutil
if sys.version_info[:2] <= (2, 6):
import unittest2 as unittest
else:
import unittest
from edx.analytics.tasks.url import url_path_join, get_target_from_url
from edx.analytics.tasks.tests.acceptance.services import fs, db, task, hive, vertica
log = logging.getLogger(__name__)
class AcceptanceTestCase(unittest.TestCase):
acceptance = 1
NUM_MAPPERS = 4
NUM_REDUCERS = 2
def setUp(self):
try:
self.s3_client = S3Client()
except Exception:
self.s3_client = None
config_json = os.getenv('ACCEPTANCE_TEST_CONFIG')
try:
with open(config_json, 'r') as config_json_file:
self.config = json.load(config_json_file)
except (IOError, TypeError):
try:
self.config = json.loads(config_json)
except TypeError:
self.config = {}
# The name of an existing job flow to run the test on
assert('job_flow_name' in self.config or 'host' in self.config)
# The git URL of the pipeline repository to check this code out from.
assert('tasks_repo' in self.config)
# The branch of the pipeline repository to test. Note this can differ from the branch that is currently
# checked out and running this code.
assert('tasks_branch' in self.config)
# Where to store logs generated by the pipeline
assert('tasks_log_path' in self.config)
# The user to connect to the job flow over SSH with.
assert('connection_user' in self.config)
# Where the pipeline should output data, should be a URL pointing to a directory.
assert('tasks_output_url' in self.config)
# Allow for parallel execution of the test by specifying a different identifier. Using an identical identifier
# allows for old virtualenvs to be reused etc, which is why a random one is not simply generated with each run.
assert('identifier' in self.config)
# A URL to a JSON file that contains most of the connection information for the MySQL database.
assert('credentials_file_url' in self.config)
# A URL to a build of the oddjob third party library
assert 'oddjob_jar' in self.config
# A URL to a maxmind compatible geolocation database file
assert 'geolocation_data' in self.config
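        # A minimal ACCEPTANCE_TEST_CONFIG covering the keys asserted above might
        # look like the following (all values are placeholders):
        #
        #   {
        #     "host": "test.example.com",
        #     "tasks_repo": "https://github.com/edx/edx-analytics-pipeline.git",
        #     "tasks_branch": "master",
        #     "tasks_log_path": "/var/log/analytics-tasks/",
        #     "connection_user": "hadoop",
        #     "tasks_output_url": "s3://example-bucket/acceptance/",
        #     "identifier": "dev",
        #     "credentials_file_url": "s3://example-bucket/credentials.json",
        #     "oddjob_jar": "s3://example-bucket/oddjob-standalone.jar",
        #     "geolocation_data": "s3://example-bucket/GeoIP.dat"
        #   }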
self.data_dir = os.path.join(os.path.dirname(__file__), 'fixtures')
url = self.config['tasks_output_url']
m = hashlib.md5()
m.update(self.config['identifier'])
self.identifier = m.hexdigest()
self.test_root = url_path_join(url, self.identifier, self.__class__.__name__)
self.test_src = url_path_join(self.test_root, 'src')
self.test_out = url_path_join(self.test_root, 'out')
self.catalog_path = 'http://acceptance.test/api/courses/v2'
database_name = 'test_' + self.identifier
schema = 'test_' + self.identifier
import_database_name = 'acceptance_import_' + database_name
export_database_name = 'acceptance_export_' + database_name
otto_database_name = 'acceptance_otto_' + database_name
self.warehouse_path = url_path_join(self.test_root, 'warehouse')
task_config_override = {
'hive': {
'database': database_name,
'warehouse_path': self.warehouse_path
},
'map-reduce': {
'marker': url_path_join(self.test_root, 'marker')
},
'manifest': {
'path': url_path_join(self.test_root, 'manifest'),
'lib_jar': self.config['oddjob_jar']
},
'database-import': {
'credentials': self.config['credentials_file_url'],
'destination': self.warehouse_path,
'database': import_database_name
},
'database-export': {
'credentials': self.config['credentials_file_url'],
'database': export_database_name
},
'otto-database-import': {
'credentials': self.config['credentials_file_url'],
'database': otto_database_name
},
'course-catalog': {
'catalog_path': self.catalog_path
},
'geolocation': {
'geolocation_data': self.config['geolocation_data']
},
'event-logs': {
'source': self.test_src
},
'course-structure': {
'api_root_url': 'acceptance.test',
'access_token': 'acceptance'
}
}
if 'vertica_creds_url' in self.config:
task_config_override['vertica-export'] = {
'credentials': self.config['vertica_creds_url'],
'schema': schema
}
if 'manifest_input_format' in self.config:
task_config_override['manifest']['input_format'] = self.config['manifest_input_format']
log.info('Running test: %s', self.id())
log.info('Using executor: %s', self.config['identifier'])
log.info('Generated Test Identifier: %s', self.identifier)
self.import_db = db.DatabaseService(self.config, import_database_name)
self.export_db = db.DatabaseService(self.config, export_database_name)
self.otto_db = db.DatabaseService(self.config, otto_database_name)
self.task = task.TaskService(self.config, task_config_override, self.identifier)
self.hive = hive.HiveService(self.task, self.config, database_name)
self.vertica = vertica.VerticaService(self.config, schema)
if os.getenv('DISABLE_RESET_STATE', 'false').lower() != 'true':
self.reset_external_state()
def reset_external_state(self):
# The machine running the acceptance test suite may not have hadoop installed on it, so convert S3 paths (which
# are normally handled by the hadoop DFS client) to S3+https paths, which are handled by the python native S3
# client.
root_target = get_target_from_url(self.test_root.replace('s3://', 's3+https://'))
if root_target.exists():
root_target.remove()
self.import_db.reset()
self.export_db.reset()
self.otto_db.reset()
self.hive.reset()
self.vertica.reset()
def upload_tracking_log(self, input_file_name, file_date):
        # Define a tracking log path on S3 that will be matched by the standard event-log pattern.
input_file_path = url_path_join(
self.test_src,
'FakeServerGroup',
'tracking.log-{0}.gz'.format(file_date.strftime('%Y%m%d'))
)
with fs.gzipped_file(os.path.join(self.data_dir, 'input', input_file_name)) as compressed_file_name:
self.upload_file(compressed_file_name, input_file_path)
def upload_file(self, local_file_name, remote_file_path):
log.debug('Uploading %s to %s', local_file_name, remote_file_path)
with get_target_from_url(remote_file_path).open('w') as remote_file:
with open(local_file_name, 'r') as local_file:
shutil.copyfileobj(local_file, remote_file)
def upload_file_with_content(self, remote_file_path, content):
log.debug('Writing %s from string', remote_file_path)
with get_target_from_url(remote_file_path).open('w') as remote_file:
remote_file.write(content)
def execute_sql_fixture_file(self, sql_file_name, database=None):
if database is None:
database = self.import_db
log.debug('Executing SQL fixture %s on %s', sql_file_name, database.database_name)
database.execute_sql_file(os.path.join(self.data_dir, 'input', sql_file_name))
| sssllliang/edx-analytics-pipeline | edx/analytics/tasks/tests/acceptance/__init__.py | Python | agpl-3.0 | 7,923 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi, Guewen Baconnier
# Copyright 2012 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
class CreditSummaryReport(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(CreditSummaryReport, self).__init__(cr, uid, name,
context=context)
self.localcontext.update({
'time': time,
'cr': cr,
'uid': uid,
})
report_sxw.report_sxw(
'report.credit_control_summary',
'credit.control.communication',
'addons/account_credit_control/report/credit_control_summary.html.mako',
parser=CreditSummaryReport
)
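# The registration above wires CreditSummaryReport in as the parser for the report
# service named 'report.credit_control_summary', rendered from the referenced mako
# template for 'credit.control.communication' records.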
| rschnapka/account-financial-tools | account_credit_control/report/credit_control_summary.py | Python | agpl-3.0 | 1,559 |
# -*- coding: utf-8 -*-
def test_assert_false(testdir):
"""Test pytest does not display captured stderr on test failure."""
# create a temporary pytest test module
testdir.makepyfile(
"""
import pytest
import sys
import datetime
import logging
import logging.handlers
log_format = '%(asctime)s : %(name)s : %(module)s : %(funcName)s : %(levelname)s : %(message)s'
formatter = logging.Formatter(fmt=log_format)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
strftime_format = '%Y%m%d-%H%M%S'
file_name = '{0}-{1}.log'.format(__name__, datetime.datetime.utcnow().strftime(strftime_format))
file_handler = logging.handlers.RotatingFileHandler(file_name, maxBytes=1000, backupCount=5)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
def test_logging():
print('PRINT DEBUG!')
logger.debug('DEBUG!')
logger.info('INFO!')
logger.warning('WARNING!')
logger.error('ERROR!')
logger.critical('CRITICAL!')
assert False
"""
)
# run pytest with no cmd args
result = testdir.runpytest()
# Assert captured stderr is not displayed
for line in result.stdout.lines:
assert "Captured stderr call" not in line
assert "test_logging : DEBUG : DEBUG!" not in line
assert "test_logging : INFO : INFO!" not in line
assert "test_logging : WARNING : WARNING!" not in line
assert "test_logging : ERROR : ERROR!" not in line
assert "test_logging : CRITICAL : CRITICAL!" not in line
assert "Captured stdout call" not in line
    # make sure that we get a '1' exit code for the testsuite
assert result.ret == 1
def test_assert_true(testdir):
"""Test pytest does not display captured stderr on test failure."""
# create a temporary pytest test module
testdir.makepyfile(
"""
import pytest
import sys
import datetime
import logging
import logging.handlers
log_format = '%(asctime)s : %(name)s : %(module)s : %(funcName)s : %(levelname)s : %(message)s'
formatter = logging.Formatter(fmt=log_format)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
strftime_format = '%Y%m%d-%H%M%S'
file_name = '{0}-{1}.log'.format(__name__, datetime.datetime.utcnow().strftime(strftime_format))
file_handler = logging.handlers.RotatingFileHandler(file_name, maxBytes=1000, backupCount=5)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
def test_logging():
print('PRINT DEBUG!')
logger.debug('DEBUG!')
logger.info('INFO!')
logger.warning('WARNING!')
logger.error('ERROR!')
logger.critical('CRITICAL!')
assert True
"""
)
# run pytest with no cmd args
result = testdir.runpytest()
    # Assert captured stderr and stdout are not displayed
for line in result.stdout.lines:
assert "Captured stderr call" not in line
assert "test_logging : DEBUG : DEBUG!" not in line
assert "test_logging : INFO : INFO!" not in line
assert "test_logging : WARNING : WARNING!" not in line
assert "test_logging : ERROR : ERROR!" not in line
assert "test_logging : CRITICAL : CRITICAL!" not in line
assert "Captured stdout call" not in line
    # make sure that we get a '0' exit code for the testsuite
assert result.ret == 0
def test_setup_assert_false(testdir):
"""Test pytest does not display captured stderr on test setup failure."""
# create a temporary pytest test module
testdir.makepyfile(
"""
import pytest
import sys
import datetime
import logging
import logging.handlers
log_format = '%(asctime)s : %(name)s : %(module)s : %(funcName)s : %(levelname)s : %(message)s'
formatter = logging.Formatter(fmt=log_format)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
strftime_format = '%Y%m%d-%H%M%S'
file_name = '{0}-{1}.log'.format(__name__, datetime.datetime.utcnow().strftime(strftime_format))
file_handler = logging.handlers.RotatingFileHandler(file_name, maxBytes=1000, backupCount=5)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
def setup_module(module):
print('PRINT DEBUG!')
logger.debug('DEBUG!')
logger.info('INFO!')
logger.warning('WARNING!')
logger.error('ERROR!')
logger.critical('CRITICAL!')
assert False
def test_logging():
assert True
"""
)
# run pytest with no cmd args
result = testdir.runpytest()
# Assert captured stderr is not displayed
for line in result.stdout.lines:
assert "Captured stderr setup" not in line
assert "test_logging : DEBUG : DEBUG!" not in line
assert "test_logging : INFO : INFO!" not in line
assert "test_logging : WARNING : WARNING!" not in line
assert "test_logging : ERROR : ERROR!" not in line
assert "test_logging : CRITICAL : CRITICAL!" not in line
assert "Captured stdout setup" not in line
    # make sure that we get a '1' exit code for the testsuite
assert result.ret == 1
def test_setup_assert_true(testdir):
"""Test pytest does not display captured stderr on test setup failure."""
# create a temporary pytest test module
testdir.makepyfile(
"""
import pytest
import sys
import datetime
import logging
import logging.handlers
log_format = '%(asctime)s : %(name)s : %(module)s : %(funcName)s : %(levelname)s : %(message)s'
formatter = logging.Formatter(fmt=log_format)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
strftime_format = '%Y%m%d-%H%M%S'
file_name = '{0}-{1}.log'.format(__name__, datetime.datetime.utcnow().strftime(strftime_format))
file_handler = logging.handlers.RotatingFileHandler(file_name, maxBytes=1000, backupCount=5)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
def setup_module(module):
print('PRINT DEBUG!')
logger.debug('DEBUG!')
logger.info('INFO!')
logger.warning('WARNING!')
logger.error('ERROR!')
logger.critical('CRITICAL!')
assert True
def test_logging():
assert True
"""
)
# run pytest with no cmd args
result = testdir.runpytest()
# Assert captured stderr is not displayed
for line in result.stdout.lines:
assert "Captured stderr setup" not in line
assert "test_logging : DEBUG : DEBUG!" not in line
assert "test_logging : INFO : INFO!" not in line
assert "test_logging : WARNING : WARNING!" not in line
assert "test_logging : ERROR : ERROR!" not in line
assert "test_logging : CRITICAL : CRITICAL!" not in line
assert "Captured stdout setup" not in line
    # make sure that we get a '0' exit code for the testsuite
assert result.ret == 0
def test_setup_function_assert_false(testdir):
"""Test pytest does not display captured stderr on test setup function failure."""
# create a temporary pytest test module
testdir.makepyfile(
"""
import pytest
import sys
import datetime
import logging
import logging.handlers
log_format = '%(asctime)s : %(name)s : %(module)s : %(funcName)s : %(levelname)s : %(message)s'
formatter = logging.Formatter(fmt=log_format)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
strftime_format = '%Y%m%d-%H%M%S'
file_name = '{0}-{1}.log'.format(__name__, datetime.datetime.utcnow().strftime(strftime_format))
file_handler = logging.handlers.RotatingFileHandler(file_name, maxBytes=1000, backupCount=5)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
def setup_function(function):
print('PRINT DEBUG!')
logger.debug('DEBUG!')
logger.info('INFO!')
logger.warning('WARNING!')
logger.error('ERROR!')
logger.critical('CRITICAL!')
assert False
def test_logging():
assert True
"""
)
# run pytest with no cmd args
result = testdir.runpytest()
# Assert captured stderr is not displayed
for line in result.stdout.lines:
assert "Captured stderr setup" not in line
assert "test_logging : DEBUG : DEBUG!" not in line
assert "test_logging : INFO : INFO!" not in line
assert "test_logging : WARNING : WARNING!" not in line
assert "test_logging : ERROR : ERROR!" not in line
assert "test_logging : CRITICAL : CRITICAL!" not in line
assert "Captured stdout setup" not in line
    # make sure that we get a '1' exit code for the testsuite
assert result.ret == 1
def test_setup_function_assert_true(testdir):
"""Test pytest does not display captured stderr on test setup function failure."""
# create a temporary pytest test module
testdir.makepyfile(
"""
import pytest
import sys
import datetime
import logging
import logging.handlers
log_format = '%(asctime)s : %(name)s : %(module)s : %(funcName)s : %(levelname)s : %(message)s'
formatter = logging.Formatter(fmt=log_format)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
strftime_format = '%Y%m%d-%H%M%S'
file_name = '{0}-{1}.log'.format(__name__, datetime.datetime.utcnow().strftime(strftime_format))
file_handler = logging.handlers.RotatingFileHandler(file_name, maxBytes=1000, backupCount=5)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
def setup_function(function):
print('PRINT DEBUG!')
logger.debug('DEBUG!')
logger.info('INFO!')
logger.warning('WARNING!')
logger.error('ERROR!')
logger.critical('CRITICAL!')
assert True
def test_logging():
assert True
"""
)
# run pytest with no cmd args
result = testdir.runpytest()
# print(result.stdout.lines)
# Assert captured stderr is not displayed
for line in result.stdout.lines:
assert "Captured stderr setup" not in line
assert "test_logging : DEBUG : DEBUG!" not in line
assert "test_logging : INFO : INFO!" not in line
assert "test_logging : WARNING : WARNING!" not in line
assert "test_logging : ERROR : ERROR!" not in line
assert "test_logging : CRITICAL : CRITICAL!" not in line
assert "Captured stdout setup" not in line
    # make sure that we get a '0' exit code for the testsuite
assert result.ret == 0
def test_teardown_assert_false(testdir):
"""Test pytest does not display captured stderr on test setup failure."""
# create a temporary pytest test module
testdir.makepyfile(
"""
import pytest
import sys
import datetime
import logging
import logging.handlers
log_format = '%(asctime)s : %(name)s : %(module)s : %(funcName)s : %(levelname)s : %(message)s'
formatter = logging.Formatter(fmt=log_format)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
strftime_format = '%Y%m%d-%H%M%S'
file_name = '{0}-{1}.log'.format(__name__, datetime.datetime.utcnow().strftime(strftime_format))
file_handler = logging.handlers.RotatingFileHandler(file_name, maxBytes=1000, backupCount=5)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
def teardown_module(module):
print('PRINT DEBUG!')
logger.debug('DEBUG!')
logger.info('INFO!')
logger.warning('WARNING!')
logger.error('ERROR!')
logger.critical('CRITICAL!')
assert False
def test_logging():
assert True
"""
)
# run pytest with no cmd args
result = testdir.runpytest()
# Assert captured stderr is not displayed
for line in result.stdout.lines:
assert "Captured stderr teardown" not in line
assert "test_logging : DEBUG : DEBUG!" not in line
assert "test_logging : INFO : INFO!" not in line
assert "test_logging : WARNING : WARNING!" not in line
assert "test_logging : ERROR : ERROR!" not in line
assert "test_logging : CRITICAL : CRITICAL!" not in line
assert "Captured stdout teardown" not in line
    # make sure that we get a '1' exit code for the testsuite
assert result.ret == 1
def test_teardown_assert_true(testdir):
"""Test pytest does not display captured stderr on test setup failure."""
# create a temporary pytest test module
testdir.makepyfile(
"""
import pytest
import sys
import datetime
import logging
import logging.handlers
log_format = '%(asctime)s : %(name)s : %(module)s : %(funcName)s : %(levelname)s : %(message)s'
formatter = logging.Formatter(fmt=log_format)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
strftime_format = '%Y%m%d-%H%M%S'
file_name = '{0}-{1}.log'.format(__name__, datetime.datetime.utcnow().strftime(strftime_format))
file_handler = logging.handlers.RotatingFileHandler(file_name, maxBytes=1000, backupCount=5)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
def teardown_module(module):
print('PRINT DEBUG!')
logger.debug('DEBUG!')
logger.info('INFO!')
logger.warning('WARNING!')
logger.error('ERROR!')
logger.critical('CRITICAL!')
assert True
def test_logging():
assert True
"""
)
# run pytest with no cmd args
result = testdir.runpytest()
# Assert captured stderr is not displayed
for line in result.stdout.lines:
assert "Captured stderr teardown" not in line
assert "test_logging : DEBUG : DEBUG!" not in line
assert "test_logging : INFO : INFO!" not in line
assert "test_logging : WARNING : WARNING!" not in line
assert "test_logging : ERROR : ERROR!" not in line
assert "test_logging : CRITICAL : CRITICAL!" not in line
assert "Captured stdout teardown" not in line
    # make sure that we get a '0' exit code for the testsuite
assert result.ret == 0
def test_teardown_function_assert_false(testdir):
"""Test pytest does not display captured stderr on test setup failure."""
# create a temporary pytest test module
testdir.makepyfile(
"""
import pytest
import sys
import datetime
import logging
import logging.handlers
log_format = '%(asctime)s : %(name)s : %(module)s : %(funcName)s : %(levelname)s : %(message)s'
formatter = logging.Formatter(fmt=log_format)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
strftime_format = '%Y%m%d-%H%M%S'
file_name = '{0}-{1}.log'.format(__name__, datetime.datetime.utcnow().strftime(strftime_format))
file_handler = logging.handlers.RotatingFileHandler(file_name, maxBytes=1000, backupCount=5)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
def teardown_function(function):
print('PRINT DEBUG!')
logger.debug('DEBUG!')
logger.info('INFO!')
logger.warning('WARNING!')
logger.error('ERROR!')
logger.critical('CRITICAL!')
assert False
def test_logging():
assert True
"""
)
# run pytest with no cmd args
result = testdir.runpytest()
# Assert captured stderr is not displayed
for line in result.stdout.lines:
assert "Captured stderr teardown" not in line
assert "test_logging : DEBUG : DEBUG!" not in line
assert "test_logging : INFO : INFO!" not in line
assert "test_logging : WARNING : WARNING!" not in line
assert "test_logging : ERROR : ERROR!" not in line
assert "test_logging : CRITICAL : CRITICAL!" not in line
assert "Captured stdout teardown" not in line
    # make sure that we get a '1' exit code for the testsuite
assert result.ret == 1
def test_teardown_function_assert_true(testdir):
"""Test pytest does not display captured stderr on test setup failure."""
# create a temporary pytest test module
testdir.makepyfile(
"""
import pytest
import sys
import datetime
import logging
import logging.handlers
log_format = '%(asctime)s : %(name)s : %(module)s : %(funcName)s : %(levelname)s : %(message)s'
formatter = logging.Formatter(fmt=log_format)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
strftime_format = '%Y%m%d-%H%M%S'
file_name = '{0}-{1}.log'.format(__name__, datetime.datetime.utcnow().strftime(strftime_format))
file_handler = logging.handlers.RotatingFileHandler(file_name, maxBytes=1000, backupCount=5)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
def teardown_function(function):
print('PRINT DEBUG!')
logger.debug('DEBUG!')
logger.info('INFO!')
logger.warning('WARNING!')
logger.error('ERROR!')
logger.critical('CRITICAL!')
assert True
def test_logging():
assert True
"""
)
# run pytest with no cmd args
result = testdir.runpytest()
# Assert captured stderr is not displayed
for line in result.stdout.lines:
assert "Captured stderr teardown" not in line
assert "test_logging : DEBUG : DEBUG!" not in line
assert "test_logging : INFO : INFO!" not in line
assert "test_logging : WARNING : WARNING!" not in line
assert "test_logging : ERROR : ERROR!" not in line
assert "test_logging : CRITICAL : CRITICAL!" not in line
assert "Captured stdout teardown" not in line
    # make sure that we get a '0' exit code for the testsuite
assert result.ret == 0
def test_hide_capture_log_call(testdir):
"""Test pytest does not display captured log call on test failure."""
# create a temporary pytest test module
testdir.makepyfile(
"""
import pytest
import daiquiri
import sys
import datetime
import logging
strftime_format = '%Y%m%d-%H%M%S'
file_name = '{0}-{1}.log'.format(__name__, datetime.datetime.utcnow().strftime(strftime_format))
daiquiri.setup(
level=logging.INFO,
outputs=(daiquiri.output.File(file_name),),
)
logger = daiquiri.getLogger(__name__)
def test_logging():
print('PRINT DEBUG!')
logger.debug('DEBUG!')
logger.info('INFO!')
logger.warning('WARNING!')
logger.error('ERROR!')
logger.critical('CRITICAL!')
assert False
"""
)
# run pytest with no cmd args
result = testdir.runpytest()
# Assert captured log call is not displayed
for line in result.stdout.lines:
assert "Captured log call" not in line
assert "test_logging : DEBUG : DEBUG!" not in line
assert "test_logging : INFO : INFO!" not in line
assert "test_logging : WARNING : WARNING!" not in line
assert "test_logging : ERROR : ERROR!" not in line
assert "test_logging : CRITICAL : CRITICAL!" not in line
assert "Captured stdout call" not in line
assert "Captured stderr call" not in line
    # make sure that we get a '1' exit code for the testsuite
assert result.ret == 1
| hamzasheikh/pytest-hidecaptured | tests/test_hidecaptured.py | Python | mit | 22,940 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import distutils.dir_util
class Masurca(Package):
"""MaSuRCA is whole genome assembly software. It combines the efficiency
of the de Bruijn graph and Overlap-Layout-Consensus (OLC)
approaches."""
homepage = "http://www.genome.umd.edu/masurca.html"
url = "ftp://ftp.genome.umd.edu/pub/MaSuRCA/latest/MaSuRCA-3.2.3.tar.gz"
version('3.2.6', 'f068f91e33fd7381de406a7a954bfe01')
version('3.2.3', 'd9b4419adfe6b64e42ce986253a50ff5')
depends_on('perl', type=('build', 'run'))
depends_on('boost')
depends_on('zlib')
def install(self, spec, prefix):
installer = Executable('./install.sh')
installer()
distutils.dir_util.copy_tree(".", prefix)
| tmerrick1/spack | var/spack/repos/builtin/packages/masurca/package.py | Python | lgpl-2.1 | 1,973 |
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_context import context
CONF = cfg.CONF
class RequestContext(context.RequestContext):
"""User security context object
Stores information about the security context under which the user
accesses the system, as well as additional request information.
"""
def __init__(self, project=None, **kwargs):
if project:
kwargs['tenant'] = project
self.project = project
super(RequestContext, self).__init__(**kwargs)
def to_dict(self):
out_dict = super(RequestContext, self).to_dict()
out_dict['roles'] = self.roles
if out_dict.get('tenant'):
out_dict['project'] = out_dict['tenant']
out_dict.pop('tenant')
return out_dict
@classmethod
def from_dict(cls, values):
return cls(**values)
def get_context():
"""A helper method to get a blank context (useful for tests)."""
return RequestContext(user_id=None,
project_id=None,
roles=[],
is_admin=False,
overwrite=False)
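# Example (sketch): round-tripping a context through the dict helpers; the keyword
# arguments follow oslo.context conventions and the values are illustrative.
#
#   ctx = RequestContext(project='demo-project', user='alice', roles=['admin'])
#   payload = ctx.to_dict()                 # exposes 'project' instead of 'tenant'
#   restored = RequestContext.from_dict(payload)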
| att-comdev/deckhand | deckhand/context.py | Python | apache-2.0 | 1,765 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._storage_accounts_operations import build_check_name_availability_request, build_create_request_initial, build_delete_request, build_failover_request_initial, build_get_properties_request, build_list_account_sas_request, build_list_by_resource_group_request, build_list_keys_request, build_list_request, build_list_service_sas_request, build_regenerate_key_request, build_restore_blob_ranges_request_initial, build_revoke_user_delegation_keys_request, build_update_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class StorageAccountsOperations:
"""StorageAccountsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2021_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def check_name_availability(
self,
account_name: "_models.StorageAccountCheckNameAvailabilityParameters",
**kwargs: Any
) -> "_models.CheckNameAvailabilityResult":
"""Checks that the storage account name is valid and is not already in use.
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name:
~azure.mgmt.storage.v2021_02_01.models.StorageAccountCheckNameAvailabilityParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CheckNameAvailabilityResult, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_02_01.models.CheckNameAvailabilityResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CheckNameAvailabilityResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(account_name, 'StorageAccountCheckNameAvailabilityParameters')
request = build_check_name_availability_request(
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.check_name_availability.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CheckNameAvailabilityResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_name_availability.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/checkNameAvailability'} # type: ignore
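    # Example (sketch): this operation is normally reached through the async
    # management client; the account name and credential below are illustrative.
    #
    #   from azure.mgmt.storage.aio import StorageManagementClient
    #   from azure.mgmt.storage.v2021_02_01.models import (
    #       StorageAccountCheckNameAvailabilityParameters)
    #
    #   client = StorageManagementClient(credential, subscription_id)
    #   result = await client.storage_accounts.check_name_availability(
    #       StorageAccountCheckNameAvailabilityParameters(name="mystorageaccount"))
    #   print(result.name_available)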
async def _create_initial(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.StorageAccountCreateParameters",
**kwargs: Any
) -> Optional["_models.StorageAccount"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.StorageAccount"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'StorageAccountCreateParameters')
request = build_create_request_initial(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._create_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccount', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'} # type: ignore
@distributed_trace_async
async def begin_create(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.StorageAccountCreateParameters",
**kwargs: Any
) -> AsyncLROPoller["_models.StorageAccount"]:
"""Asynchronously creates a new storage account with the specified parameters. If an account is
already created and a subsequent create request is issued with different properties, the
account properties will be updated. If an account is already created and a subsequent create or
update request is issued with the exact same set of properties, the request will succeed.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param parameters: The parameters to provide for the created account.
:type parameters: ~azure.mgmt.storage.v2021_02_01.models.StorageAccountCreateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either StorageAccount or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.storage.v2021_02_01.models.StorageAccount]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccount"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
account_name=account_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('StorageAccount', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'} # type: ignore
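    # Example (sketch): begin_create returns an AsyncLROPoller, so completion is
    # awaited separately; the resource names and SKU below are illustrative.
    #
    #   from azure.mgmt.storage.v2021_02_01.models import (
    #       Sku, StorageAccountCreateParameters)
    #
    #   poller = await client.storage_accounts.begin_create(
    #       "my-resource-group", "mystorageaccount",
    #       StorageAccountCreateParameters(
    #           sku=Sku(name="Standard_LRS"), kind="StorageV2", location="eastus"))
    #   account = await poller.result()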
@distributed_trace_async
async def delete(
self,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> None:
"""Deletes a storage account in Microsoft Azure.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'} # type: ignore
@distributed_trace_async
async def get_properties(
self,
resource_group_name: str,
account_name: str,
expand: Optional[Union[str, "_models.StorageAccountExpand"]] = None,
**kwargs: Any
) -> "_models.StorageAccount":
"""Returns the properties for the specified storage account including but not limited to name, SKU
name, location, and account status. The ListKeys operation should be used to retrieve storage
keys.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param expand: May be used to expand the properties within account's properties. By default,
data is not included when fetching properties. Currently we only support geoReplicationStats
and blobRestoreStatus.
:type expand: str or ~azure.mgmt.storage.v2021_02_01.models.StorageAccountExpand
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageAccount, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_02_01.models.StorageAccount
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccount"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_properties_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
expand=expand,
template_url=self.get_properties.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('StorageAccount', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_properties.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'} # type: ignore
@distributed_trace_async
async def update(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.StorageAccountUpdateParameters",
**kwargs: Any
) -> "_models.StorageAccount":
"""The update operation can be used to update the SKU, encryption, access tier, or tags for a
storage account. It can also be used to map the account to a custom domain. Only one custom
domain is supported per storage account; the replacement/change of custom domain is not
supported. In order to replace an old custom domain, the old value must be cleared/unregistered
before a new value can be set. The update of multiple properties is supported. This call does
not change the storage keys for the account. If you want to change the storage account keys,
use the regenerate keys operation. The location and name of the storage account cannot be
changed after creation.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param parameters: The parameters to provide for the updated account.
:type parameters: ~azure.mgmt.storage.v2021_02_01.models.StorageAccountUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageAccount, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_02_01.models.StorageAccount
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccount"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'StorageAccountUpdateParameters')
request = build_update_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('StorageAccount', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'} # type: ignore
@distributed_trace
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.StorageAccountListResult"]:
"""Lists all the storage accounts available under the subscription. Note that storage keys are not
returned; use the ListKeys operation for this.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either StorageAccountListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.storage.v2021_02_01.models.StorageAccountListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("StorageAccountListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/storageAccounts'} # type: ignore
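    # Hedged usage sketch (illustrative only): the pager returned by ``list`` is
    # consumed with ``async for``; ``client`` is an assumed authenticated async
    # management client.
    #
    #     async for account in client.storage_accounts.list():
    #         print(account.name, account.location)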
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.StorageAccountListResult"]:
"""Lists all the storage accounts available under the given resource group. Note that storage keys
are not returned; use the ListKeys operation for this.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either StorageAccountListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.storage.v2021_02_01.models.StorageAccountListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("StorageAccountListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts'} # type: ignore
@distributed_trace_async
async def list_keys(
self,
resource_group_name: str,
account_name: str,
expand: Optional[str] = "kerb",
**kwargs: Any
) -> "_models.StorageAccountListKeysResult":
"""Lists the access keys or Kerberos keys (if active directory enabled) for the specified storage
account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param expand: Specifies type of the key to be listed. Possible value is kerb. The default
value is "kerb".
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageAccountListKeysResult, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_02_01.models.StorageAccountListKeysResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountListKeysResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_keys_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
expand=expand,
template_url=self.list_keys.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('StorageAccountListKeysResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/listKeys'} # type: ignore
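    # Hedged usage sketch (illustrative only): fetching and reading the account
    # keys; ``client`` is an assumed authenticated async management client and
    # the deserialized result exposes a ``keys`` list of key objects.
    #
    #     result = await client.storage_accounts.list_keys(
    #         "my-resource-group", "mystorageaccount")
    #     primary_key = result.keys[0].value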
@distributed_trace_async
async def regenerate_key(
self,
resource_group_name: str,
account_name: str,
regenerate_key: "_models.StorageAccountRegenerateKeyParameters",
**kwargs: Any
) -> "_models.StorageAccountListKeysResult":
"""Regenerates one of the access keys or Kerberos keys for the specified storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param regenerate_key: Specifies name of the key which should be regenerated -- key1, key2,
kerb1, kerb2.
:type regenerate_key:
~azure.mgmt.storage.v2021_02_01.models.StorageAccountRegenerateKeyParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageAccountListKeysResult, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_02_01.models.StorageAccountListKeysResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountListKeysResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(regenerate_key, 'StorageAccountRegenerateKeyParameters')
request = build_regenerate_key_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.regenerate_key.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('StorageAccountListKeysResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
regenerate_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/regenerateKey'} # type: ignore
@distributed_trace_async
async def list_account_sas(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.AccountSasParameters",
**kwargs: Any
) -> "_models.ListAccountSasResponse":
"""List SAS credentials of a storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param parameters: The parameters to provide to list SAS credentials for the storage account.
:type parameters: ~azure.mgmt.storage.v2021_02_01.models.AccountSasParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ListAccountSasResponse, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_02_01.models.ListAccountSasResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListAccountSasResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'AccountSasParameters')
request = build_list_account_sas_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.list_account_sas.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ListAccountSasResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_account_sas.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/ListAccountSas'} # type: ignore
@distributed_trace_async
async def list_service_sas(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.ServiceSasParameters",
**kwargs: Any
) -> "_models.ListServiceSasResponse":
"""List service SAS credentials of a specific resource.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param parameters: The parameters to provide to list service SAS credentials.
:type parameters: ~azure.mgmt.storage.v2021_02_01.models.ServiceSasParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ListServiceSasResponse, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_02_01.models.ListServiceSasResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListServiceSasResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ServiceSasParameters')
request = build_list_service_sas_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.list_service_sas.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ListServiceSasResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_service_sas.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/ListServiceSas'} # type: ignore
async def _failover_initial(
self,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_failover_request_initial(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
template_url=self._failover_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_failover_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/failover'} # type: ignore
@distributed_trace_async
async def begin_failover(
self,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Failover request can be triggered for a storage account in case of availability issues. The
failover occurs from the storage account's primary cluster to secondary cluster for RA-GRS
accounts. The secondary cluster will become primary after failover.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._failover_initial(
resource_group_name=resource_group_name,
account_name=account_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_failover.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/failover'} # type: ignore
async def _restore_blob_ranges_initial(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.BlobRestoreParameters",
**kwargs: Any
) -> "_models.BlobRestoreStatus":
cls = kwargs.pop('cls', None) # type: ClsType["_models.BlobRestoreStatus"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'BlobRestoreParameters')
request = build_restore_blob_ranges_request_initial(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._restore_blob_ranges_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('BlobRestoreStatus', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('BlobRestoreStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_restore_blob_ranges_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/restoreBlobRanges'} # type: ignore
@distributed_trace_async
async def begin_restore_blob_ranges(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.BlobRestoreParameters",
**kwargs: Any
) -> AsyncLROPoller["_models.BlobRestoreStatus"]:
"""Restore blobs in the specified blob ranges.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param parameters: The parameters to provide for restore blob ranges.
:type parameters: ~azure.mgmt.storage.v2021_02_01.models.BlobRestoreParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either BlobRestoreStatus or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.storage.v2021_02_01.models.BlobRestoreStatus]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.BlobRestoreStatus"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._restore_blob_ranges_initial(
resource_group_name=resource_group_name,
account_name=account_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('BlobRestoreStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_restore_blob_ranges.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/restoreBlobRanges'} # type: ignore
@distributed_trace_async
async def revoke_user_delegation_keys(
self,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> None:
"""Revoke user delegation keys.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_revoke_user_delegation_keys_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
template_url=self.revoke_user_delegation_keys.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
revoke_user_delegation_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/revokeUserDelegationKeys'} # type: ignore
| Azure/azure-sdk-for-python | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2021_02_01/aio/operations/_storage_accounts_operations.py | Python | mit | 50,857 |
from decimal import Decimal, InvalidOperation
from django.utils.translation import ugettext as _
from corehq.apps.commtrack.dbaccessors import get_supply_point_case_by_location
from dimagi.utils.decorators.memoized import memoized
from corehq.apps.consumption.shortcuts import get_default_consumption, set_default_consumption_for_supply_point
from corehq.apps.commtrack.models import SupplyPointCase
from corehq.apps.products.models import Product
from corehq.apps.custom_data_fields.edit_entity import add_prefix
from .exceptions import LocationImportError
from .models import Location, LocationType
from .forms import LocationForm
from .util import parent_child
class LocationCache(object):
"""
Used to cache locations in memory during a bulk upload for optimization
"""
def __init__(self, domain):
self.domain = domain
# {(type,parent): {name: location}}
self._existing_by_type = {}
# {id: location}
self._existing_by_id = {}
def get(self, id):
if id not in self._existing_by_id:
self._existing_by_id[id] = Location.get(id)
return self._existing_by_id[id]
def get_by_name(self, loc_name, loc_type, parent):
key = (loc_type, parent._id if parent else None)
if key not in self._existing_by_type:
existing = list(Location.filter_by_type(self.domain, loc_type, parent))
self._existing_by_type[key] = dict((l.name, l) for l in existing)
self._existing_by_id.update(dict((l._id, l) for l in existing))
return self._existing_by_type[key].get(loc_name, None)
def add(self, location):
for id in location.path + [None]:
# this just mimics the behavior in the couch view
key = (location.location_type, id)
if key in self._existing_by_type:
self._existing_by_type[key][location.name] = location
def top_level_location_types(domain):
"""
Return all location types which do not have
any potential parents
"""
from corehq.apps.locations.util import location_hierarchy_config
hierarchy = location_hierarchy_config(domain)
return [t[0] for t in hierarchy if t[1] == [None]]
class LocationImporter(object):
def __init__(self, domain, excel_importer):
self.domain = domain
self.excel_importer = excel_importer
self.processed = 0
self.results = []
self.seen_site_codes = set()
self.parent_child_map = parent_child(self.domain)
self.total_rows = sum(
ws.worksheet.get_highest_row() for ws in self.excel_importer.worksheets
)
self.types = [ws.worksheet.title for ws in self.excel_importer.worksheets]
self.top_level_types = top_level_location_types(domain)
def run(self):
for loc_type in self.top_level_types:
self.import_loc_type(loc_type)
return self.results
def import_loc_type(self, loc_type):
if loc_type in self.types:
self.import_worksheet(
self.excel_importer.worksheets[self.types.index(loc_type)]
)
if loc_type in self.parent_child_map:
for child_type in self.parent_child_map[loc_type]:
self.import_loc_type(child_type)
def import_worksheet(self, worksheet):
location_type = worksheet.worksheet.title
if not LocationType.objects.filter(
domain=self.domain,
name=location_type,
).exists():
self.results.append(_(
"Location with type {location_type} not found, this worksheet \
will not be imported"
).format(
location_type=location_type
)
)
else:
for loc in worksheet:
if 'site_code' in loc:
# overwrite this value in the dict so we don't
                    # ever accidentally use a randomly capitalized version
loc['site_code'] = loc['site_code'].lower()
if 'site_code' in loc and loc['site_code'] in self.seen_site_codes:
self.results.append(_(
"Location {name} with site code {site_code} could not \
be imported due to duplicated site codes in the excel \
file"
).format(
name=loc['name'],
site_code=loc['site_code']
))
else:
if 'site_code' in loc:
self.seen_site_codes.add(loc['site_code'])
self.results.append(self.import_location(
location_type,
loc,
self.parent_child_map
)['message'])
self.excel_importer.add_progress()
def import_location(self, location_type, location_data, parent_child_map=None):
data = dict(location_data)
provided_code = data.pop('site_code', None)
parent_site_code = data.pop('parent_site_code', None)
if not parent_child_map:
parent_child_map = parent_child(self.domain)
form_data = {}
try:
parent_id = self._process_parent_site_code(
parent_site_code, location_type, parent_child_map
)
except LocationImportError as e:
return {
'id': None,
'message': _('Unable to import location {0}: {1}').format(
data.pop('name'), e
)
}
existing = None
parent = parent_id
if provided_code:
existing = Location.by_site_code(self.domain, provided_code)
if existing:
if existing.location_type != location_type:
return {
'id': None,
'message': _("Existing location type error, type of {0} is not {1}").format(
existing.name, location_type
)
}
parent = parent_id or existing.parent_id
form_data['site_code'] = provided_code
form_data['parent_id'] = parent
form_data['name'] = data.pop('name')
form_data['location_type'] = location_type
lat, lon = data.pop('latitude', None), data.pop('longitude', None)
if lat and lon:
form_data['coordinates'] = '%s, %s' % (lat, lon)
consumption = data.get('consumption', {}).items()
metadata = data.get('data', {})
metadata.update(data.get('uncategorized_data', {}))
form_data.update(add_prefix(metadata))
return self.submit_form(
parent,
form_data,
existing,
location_type,
consumption,
)
def _process_parent_site_code(self, parent_site_code, location_type, parent_child_map):
if not parent_site_code:
return None
parent_obj = Location.by_site_code(self.domain, parent_site_code.lower())
if parent_obj:
if invalid_location_type(location_type, parent_obj, parent_child_map):
raise LocationImportError(
_('Invalid parent type of {0} for child type {1}').format(
parent_obj.location_type,
location_type
)
)
else:
return parent_obj._id
else:
raise LocationImportError(
_('Parent with site code {0} does not exist in this project')
.format(parent_site_code)
)
@memoized
def get_product(self, code):
return Product.get_by_code(self.domain, code)
def no_changes_needed(self, existing, form_data, consumption):
if not existing:
return False
for key, val in form_data.iteritems():
if getattr(existing, key, None) != val:
return False
for product_code, val in consumption:
product = self.get_product(product_code)
if get_default_consumption(
self.domain,
product._id,
existing.location_type,
existing._id
) != val:
return False
return True
def submit_form(self, parent, form_data, existing, location_type, consumption):
location = existing or Location(domain=self.domain, parent=parent)
form = LocationForm(location, form_data)
form.strict = False # optimization hack to turn off strict validation
if form.is_valid():
# don't save if there is nothing to save
if self.no_changes_needed(existing, form_data, consumption):
return {
'id': existing._id,
'message': 'no changes for %s %s' % (location_type, existing.name)
}
loc = form.save()
sp = get_supply_point_case_by_location(loc) if consumption else None
if consumption and sp:
for product_code, value in consumption:
product = self.get_product(product_code)
if not product:
# skip any consumption column that doesn't match
# to a real product. currently there is no easy
# way to alert here, though.
continue
try:
amount = Decimal(value)
# only set it if there is a non-negative/non-null value
if amount and amount >= 0:
set_default_consumption_for_supply_point(
self.domain,
product._id,
sp._id,
amount
)
except (TypeError, InvalidOperation):
# should inform user, but failing hard due to non numbers
# being used on consumption is strange since the
# locations would be in a very inconsistent state
continue
if existing:
message = 'updated %s %s' % (location_type, loc.name)
else:
message = 'created %s %s' % (location_type, loc.name)
return {
'id': loc._id,
'message': message
}
else:
message = 'Form errors when submitting: '
for k, v in form.errors.iteritems():
if k != '__all__':
message += u'{0} {1}; {2}: {3}. '.format(
location_type, form_data.get('name', 'unknown'), k, v[0]
)
return {
'id': None,
'message': message
}
def import_locations(domain, excel_importer):
location_importer = LocationImporter(domain, excel_importer)
results = location_importer.run()
return results
def invalid_location_type(location_type, parent_obj, parent_relationships):
return (
parent_obj.location_type not in parent_relationships or
location_type not in parent_relationships[parent_obj.location_type]
)
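# Hedged usage sketch (illustrative only, not part of this module): given an
# ``excel_importer`` object exposing ``worksheets`` and ``add_progress()`` as
# used above, the per-row result messages can be collected like this:
#
#     for message in import_locations("my-domain", excel_importer):
#         print(message)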
| puttarajubr/commcare-hq | corehq/apps/locations/bulk.py | Python | bsd-3-clause | 11,525 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Views for OAuth."""
from flask import url_for
from flask_admin import Admin
from invenio_db import db
from invenio_oauthclient import InvenioOAuthClient
from invenio_oauthclient.admin import remote_account_adminview, \
remote_token_adminview, user_identity_adminview
def test_admin(app):
"""Test flask-admin interace."""
InvenioOAuthClient(app)
assert isinstance(remote_account_adminview, dict)
assert isinstance(remote_token_adminview, dict)
assert isinstance(user_identity_adminview, dict)
assert 'model' in remote_account_adminview
assert 'modelview' in remote_account_adminview
assert 'model' in remote_token_adminview
assert 'modelview' in remote_token_adminview
assert 'model' in user_identity_adminview
assert 'modelview' in user_identity_adminview
admin = Admin(app, name='Test')
user_model = remote_account_adminview.pop('model')
user_view = remote_account_adminview.pop('modelview')
admin.add_view(user_view(user_model, db.session,
**remote_account_adminview))
with app.app_context():
# create user and save url for testing
request_url = url_for('remoteaccount.index_view')
with app.app_context():
with app.test_client() as client:
res = client.get(
request_url,
follow_redirects=True
)
assert res.status_code == 200
assert 'Extra Data' in str(res.get_data())
assert 'Tokens' in str(res.get_data())
| tiborsimko/invenio-oauthclient | tests/test_admin.py | Python | mit | 1,773 |
from django.contrib import admin
from arcane.browse.models import Track, Album, Artist, Genre, Location
admin.site.register(Track)
admin.site.register(Artist)
admin.site.register(Album)
admin.site.register(Genre)
admin.site.register(Location)
# Register your models here.
| ArcaneStreaming/Arcane-Server | arcane/browse/admin.py | Python | gpl-3.0 | 273 |
from nose.tools import assert_equal, assert_true
from ggplot.themes.element_target import *
def test_element_target1():
txt = text()
y = axis_text_y()
assert_true(len(txt.__class__.__mro__) > len(y.__class__.__mro__))
def test_element_target2():
txt = text()
x = axis_text_x()
y = axis_text_y()
targets = [y, x, txt]
assert_equal(sorted_element_targets(targets), [txt, y, x])
def test_element_target3():
txt1 = text()
txt2 = text()
x1 = axis_text_x()
x2 = axis_text_x()
y1 = axis_text_y()
targets = sorted_element_targets([txt1, x1, y1, txt2, x2])
assert_equal(targets, [txt1, txt2, x1, y1, x2])
def test_element_target4():
x = axis_text_x()
y = axis_text_y()
assert_equal(len(x.__class__.__mro__), len(y.__class__.__mro__))
def test_element_target5():
txt1 = text()
txt2 = text()
x1 = axis_text_x()
x2 = axis_text_x()
y1 = axis_text_y()
targets = unique_element_targets(sorted_element_targets([txt1, x1, y1, txt2, x2]))
assert_equal(targets, [txt2, y1, x2])
def test_element_target6():
txt1 = text()
txt2 = text()
x1 = axis_text_x()
x2 = axis_text_x()
y1 = axis_text_y()
targets = unique_element_targets(sorted_element_targets([txt1, x1, y1, txt2, x2]))
assert_equal(targets, [txt2, y1, x2])
| Cophy08/ggplot | ggplot/tests/test_element_target.py | Python | bsd-2-clause | 1,333 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# __coconut_hash__ = 0x538c9a03
# Compiled with Coconut version 1.2.3-post_dev1 [Colonel]
# Coconut Header: --------------------------------------------------------
from __future__ import print_function, absolute_import, unicode_literals, division
import sys as _coconut_sys, os.path as _coconut_os_path
_coconut_file_path = _coconut_os_path.dirname(_coconut_os_path.abspath(__file__))
_coconut_sys.path.insert(0, _coconut_file_path)
from __coconut__ import _coconut, _coconut_MatchError, _coconut_tail_call, _coconut_tco, _coconut_igetitem, _coconut_compose, _coconut_pipe, _coconut_starpipe, _coconut_backpipe, _coconut_backstarpipe, _coconut_bool_and, _coconut_bool_or, _coconut_minus, _coconut_map, _coconut_partial
from __coconut__ import *
_coconut_sys.path.remove(_coconut_file_path)
# Compiled Coconut: ------------------------------------------------------
import tensorflow as tf
#####################################
# batch_norm
#####################################
def dense_batch_norm(*args, **kwargs):
name = kwargs.pop("name", None)
activation = kwargs.pop("activation", None)
batch_norm = kwargs.pop("batch_norm", {})
with tf.variable_scope(name, "DenseBatchNorm"):
net = tf.layers.dense(*args, **kwargs)
net = tf.layers.batch_normalization(net, **batch_norm)
return activation(net) if activation else net
def conv2d_batch_norm(*args, **kwargs):
name = kwargs.pop("name", None)
activation = kwargs.pop("activation", None)
batch_norm = kwargs.pop("batch_norm", {})
with tf.variable_scope(name, default_name="Conv2dBatchNorm"):
net = tf.layers.conv2d(*args, **kwargs)
net = tf.layers.batch_normalization(net, **batch_norm)
return activation(net) if activation else net
#####################################
# fire
#####################################
def fire(inputs, squeeze_filters, expand_1x1_filters, expand_3x3_filters, **kwargs):
name = kwargs.pop("name", None)
with tf.variable_scope(name, default_name="Fire"):
# squeeze
squeeze = tf.layers.conv2d(inputs, squeeze_filters, [1, 1], **kwargs)
# expand
kwargs["padding"] = "same"
expand_1x1 = tf.layers.conv2d(squeeze, expand_1x1_filters, [1, 1], **kwargs)
expand_3x3 = tf.layers.conv2d(squeeze, expand_3x3_filters, [3, 3], **kwargs)
return tf.concat([expand_1x1, expand_3x3], axis=3)
def fire_batch_norm(inputs, squeeze_filters, expand_1x1_filters, expand_3x3_filters, **kwargs):
name = kwargs.pop("name", None)
with tf.variable_scope(name, default_name="FireBatchNorm"):
# squeeze
squeeze = conv2d_batch_norm(inputs, squeeze_filters, [1, 1], **kwargs)
# expand
kwargs["padding"] = "same"
expand_1x1 = conv2d_batch_norm(squeeze, expand_1x1_filters, [1, 1], **kwargs)
expand_3x3 = conv2d_batch_norm(squeeze, expand_3x3_filters, [3, 3], **kwargs)
return tf.concat([expand_1x1, expand_3x3], axis=3)
#####################################
# dense_block
#####################################
def conv2d_densenet_layer(net, growth_rate, bottleneck, batch_norm, dropout, activation, **kwargs):
with tf.variable_scope(None, default_name="Conv2dDenseNetlayer"):
net = tf.layers.batch_normalization(net, **batch_norm)
net = activation(net) if activation else net
if bottleneck:
net = tf.layers.conv2d(net, bottleneck, [1, 1], **kwargs)
net = tf.layers.dropout(net, **dropout)
net = tf.layers.batch_normalization(net, **batch_norm)
net = activation(net) if activation else net
net = tf.layers.conv2d(net, growth_rate, [3, 3], **kwargs)
net = tf.layers.dropout(net, **dropout)
return net
def conv2d_densenet_transition(net, compression, batch_norm, dropout, activation, **kwargs):
filters = int(net.get_shape()[-1])
if compression:
if compression <= 1:
filters = int(filters * compression)
else:
filters = compression
with tf.variable_scope(None, default_name="TransitionLayer"):
net = tf.layers.batch_normalization(net, **batch_norm)
net = activation(net) if activation else net
net = tf.layers.conv2d(net, filters, [1, 1], **kwargs)
net = tf.layers.dropout(net, **dropout)
return net
def conv2d_dense_block(net, growth_rate, n_layers, **kwargs):
name = kwargs.pop("name", None)
bottleneck = kwargs.pop("bottleneck", None)
compression = kwargs.pop("compression", None)
batch_norm = kwargs.pop("batch_norm", {})
dropout = kwargs.pop("dropout", {})
activation = kwargs.pop("activation")
with tf.variable_scope(name, default_name="Conv2dDenseNetBlock"):
for layers in range(n_layers):
layer = conv2d_densenet_layer(net, growth_rate, bottleneck, batch_norm, dropout, activation, **kwargs)
net = tf.concat([net, layer], axis=3)
net = conv2d_densenet_transition(net, compression, batch_norm, dropout, activation, **kwargs)
return net
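# Hedged usage sketch (illustrative only, not part of the original module):
# stacking one dense block on an image batch. Shapes, hyperparameters and the
# TF1-style placeholder are assumptions; padding="same" is forwarded to the
# inner conv layers so the channel-axis concat keeps matching spatial dims.
#
#     images = tf.placeholder(tf.float32, [None, 32, 32, 3])
#     net = tf.layers.conv2d(images, 16, [3, 3], padding="same")
#     net = conv2d_dense_block(net, growth_rate=12, n_layers=4,
#                              bottleneck=48, compression=0.5, padding="same",
#                              activation=tf.nn.relu,
#                              batch_norm=dict(training=True),
#                              dropout=dict(rate=0.2))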
#####################################
# densefire_block
#####################################
def conv2d_densefire_layer(net, bottleneck, growth_rate_1x1, growth_rate_3x3, batch_norm, dropout, activation, **kwargs):
with tf.variable_scope(None, default_name="Conv2dDenseFireLayer"):
net = tf.layers.batch_normalization(net, **batch_norm)
net = activation(net) if activation else net
# squeeze
net = tf.layers.conv2d(net, bottleneck, [1, 1], **kwargs)
net = tf.layers.dropout(net, **dropout)
net = tf.layers.batch_normalization(net, **batch_norm)
net = activation(net) if activation else net
# expand
expand_1x1 = tf.layers.conv2d(net, growth_rate_1x1, [1, 1], **kwargs)
expand_3x3 = tf.layers.conv2d(net, growth_rate_3x3, [3, 3], **kwargs)
# concat
net = tf.concat([expand_1x1, expand_3x3], axis=3)
net = tf.layers.dropout(net, **dropout)
return net
def conv2d_densefire_block(net, bottleneck, growth_rate_1x1, growth_rate_3x3, n_layers, **kwargs):
name = kwargs.pop("name", None)
compression = kwargs.pop("compression", None)
batch_norm = kwargs.pop("batch_norm", {})
dropout = kwargs.pop("dropout", {})
activation = kwargs.pop("activation")
with tf.variable_scope(name, default_name="Conv2dDenseFireBlock"):
for layers in range(n_layers):
layer = conv2d_densefire_layer(net, bottleneck, growth_rate_1x1, growth_rate_3x3, batch_norm, dropout, activation, **kwargs)
net = tf.concat([net, layer], axis=3)
net = conv2d_densenet_transition(net, compression, batch_norm, dropout, activation, **kwargs)
return net
#####################################
# ensemble_dropout
#####################################
def layer_dropout(net, **kwargs):
name = kwargs.pop("name", None)
with tf.name_scope(name, default_name="LayerDropout"):
shape = tf.shape(net)
        batch_size = shape[0]
        ones_shape = [batch_size] + ([1] * (len(net.get_shape()) - 1))
ones = tf.ones(shape=ones_shape)
return net * tf.layers.dropout(ones, **kwargs)
def ensemble_dropout(networks, **kwargs):
return (list)((_coconut.functools.partial(map, _coconut_partial(layer_dropout, {}, 1, **kwargs)))(networks))
if __name__ == '__main__':
sess = tf.Session()
training = tf.placeholder(tf.bool, shape=())
x = tf.random_uniform(shape=(16, 3, 2))
# f = fire(x, 32, 64, 64, activation=tf.nn.relu)
# fb = fire_batch_norm(x, 32, 64, 64, activation=tf.nn.relu, batch_norm=dict(training=True))
# print(f)
# print(fb)
e = ensemble_dropout([x], rate=0.5, training=training)
print(e)
print(sess.run(e, feed_dict={training: True}))
| cgarciae/tfinterface | tfinterface/layers.py | Python | mit | 7,862 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
def get_file_text(path):
""" Returns file text by path"""
file_io = open(path, "r")
text = file_io.read()
file_io.close()
return text
def get_file_output(encoding="utf-8", path=sys.argv[-1], arg_string=""):
"""
Returns answer file output
:param encoding: to decode output in python3
:param path: path of file to execute
:return: list of strings
"""
import subprocess
proc = subprocess.Popen([sys.executable, path], stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
if arg_string:
for arg in arg_string.split("\n"):
proc.stdin.write(bytearray(str(arg) + "\n", encoding))
proc.stdin.flush()
return list(map(lambda x: str(x.decode(encoding)), proc.communicate()[0].splitlines()))
def test_file_importable():
""" Tests there is no obvious syntax errors"""
path = sys.argv[-1]
if not path.endswith(".py"):
import os
parent = os.path.abspath(os.path.join(path, os.pardir))
python_files = [f for f in os.listdir(parent) if os.path.isfile(os.path.join(parent, f)) and f.endswith(".py")]
for python_file in python_files:
if python_file == "tests.py":
continue
check_importable_path(os.path.join(parent, python_file))
return
check_importable_path(path)
def check_importable_path(path):
""" Checks that file is importable.
Reports failure otherwise.
"""
saved_input = patch_input()
try:
import_file(path)
except:
failed("The file contains syntax errors", test_file_importable.__name__)
return
finally:
revert_input(saved_input)
passed(test_file_importable.__name__)
def patch_input():
def mock_fun(_m=""):
return "mock"
if sys.version_info[0] == 3:
import builtins
save_input = builtins.input
builtins.input = mock_fun
return save_input
elif sys.version_info[0] == 2:
import __builtin__
save_input = __builtin__.raw_input
__builtin__.raw_input = mock_fun
__builtin__.input = mock_fun
return save_input
def revert_input(saved_input):
if sys.version_info[0] == 3:
import builtins
builtins.input = saved_input
elif sys.version_info[0] == 2:
import __builtin__
__builtin__.raw_input = saved_input
__builtin__.input = saved_input
def import_file(path):
""" Returns imported file """
if sys.version_info[0] == 2 or sys.version_info[1] < 3:
import imp
return imp.load_source("tmp", path)
elif sys.version_info[0] == 3:
import importlib.machinery
return importlib.machinery.SourceFileLoader("tmp", path).load_module("tmp")
def import_task_file():
""" Returns imported file.
Imports file from which check action was run
"""
path = sys.argv[-1]
return import_file(path)
def test_is_not_empty():
"""
Checks that file is not empty
"""
path = sys.argv[-1]
file_text = get_file_text(path)
if len(file_text) > 0:
passed()
else:
failed("The file is empty. Please, reload the task and try again.")
def test_text_equals(text, error_text):
"""
Checks that answer equals text.
"""
path = sys.argv[-1]
file_text = get_file_text(path)
if file_text.strip() == text:
passed()
else:
failed(error_text)
def test_answer_placeholders_text_deleted(
error_text="Solution has empty answer prompt(s)."):
"""
Checks that all answer placeholders are not empty
"""
windows = get_answer_placeholders()
for window in windows:
if len(window) == 0:
failed(error_text)
return
passed()
def set_congratulation_message(message):
""" Overrides default 'Congratulations!' message """
print("#educational_plugin CONGRATS_MESSAGE " + message)
def failed(message="Please, reload the task and try again.", name=None):
""" Reports failure """
if not name:
name = sys._getframe().f_back.f_code.co_name
print("#educational_plugin " + name + " FAILED + " + message)
def passed(name=None):
""" Reports success """
if not name:
name = sys._getframe().f_back.f_code.co_name
print("#educational_plugin " + name + " test OK")
def get_answer_placeholders():
"""
Returns all answer placeholders text
"""
prefix = "#educational_plugin_window = "
path = sys.argv[-1]
import os
file_name_without_extension = os.path.splitext(path)[0]
windows_path = file_name_without_extension + "_windows"
windows = []
f = open(windows_path, "r")
window_text = ""
first = True
for line in f.readlines():
if line.startswith(prefix):
if not first:
windows.append(window_text.strip())
else:
first = False
window_text = line[len(prefix):]
else:
window_text += line
if window_text:
windows.append(window_text.strip())
f.close()
return windows
def check_samples(samples=()):
"""
Check script output for all samples. Sample is a two element list, where the first is input and
the second is output.
"""
for sample in samples:
if len(sample) == 2:
output = get_file_output(arg_string=str(sample[0]))
if "\n".join(output) != sample[1]:
failed(
"Test from samples failed: \n \n"
"Input:\n{}"
"\n \n"
"Expected:\n{}"
"\n \n"
"Your result:\n{}".format(str.strip(sample[0]), str.strip(sample[1]), "\n".join(output)))
return
set_congratulation_message("All test from samples passed. Now we are checking your solution on Stepik server.")
passed()
def run_common_tests(error_text="Please, reload file and try again"):
test_is_not_empty()
test_answer_placeholders_text_deleted()
test_file_importable()
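# Hypothetical usage sketch (the import path and sample data are assumptions,
# not part of this helper): a task's test file would typically drive the
# helpers like this, each sample being an [input, expected_output] pair.
#
#     from test_helper import run_common_tests, check_samples
#
#     if __name__ == '__main__':
#         run_common_tests()
#         check_samples(samples=[["2 3", "5"], ["10 20", "30"]])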
| lukecwik/incubator-beam | learning/katas/python/test_helper.py | Python | apache-2.0 | 6,995 |
"""
This module contains functions for searching the data scraped from
mysupermarket.co.uk using pricesearch.scraper.
"""
from json import loads
from scrapy import log
def search_file(search_phrases, filename, match_fn=lambda item: output_match(item)):
"""Search the given JSON file for the given phrases, calling match_fn
each time a match is found
Keyword arguments:
search_phrases -- a list of lists of words to search
filename -- the full path to the scraped JSON data
match_fn -- function to call if a match is found
"""
with open(filename) as f:
content = f.readlines()
for line in content:
item = loads(line)
for phrase in search_phrases:
match = len(phrase)
for word in phrase:
match = match and 'title' in item and word.lower() in item[
'title'].lower()
if match:
match_fn(item)
def output_match(item):
"""Print the item using Scrapy's log.msg.
Requires log.start, or nothing will be output.
Keyword arguments:
item -- the item to print
"""
log.msg("%s %s %s %s" %
(item['title'], item['subtitle'], item['price'], item['unit_price']))
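# Illustrative usage sketch (the path and phrases are assumptions, not part of
# this module): report every scraped item whose title contains all words of
# any phrase, using the default output_match callback.
#
#     search_file(
#         search_phrases=[["semi", "skimmed", "milk"], ["orange", "juice"]],
#         filename="/path/to/scraped_items.json",
#     )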
| hmcc/price-search | search/search.py | Python | mit | 1,249 |
# Write a program that reads a word and prints each character of the word on a
# separate line. For example, if the user provides the input "Harry", the
# program prints
# H
# a
# r
# r
# y
inputWord = str(input("Enter a word: "))
for i in range(len(inputWord)):
print(inputWord[i]) | futurepr0n/Books-solutions | Python-For-Everyone-Horstmann/Chapter4-Loops/P4.8.py | Python | mit | 310 |
import os
DEBUG = True
TESTAPP_DIR = os.path.abspath(os.path.dirname(__file__))
SECRET_KEY = "testsecretkey"
if os.environ.get("DJANGO_DATABASE_ENGINE") == "postgresql":
DATABASES = {
"default": {"ENGINE": "django.db.backends.postgresql", "NAME": "attachments"}
}
elif os.environ.get("DJANGO_DATABASE_ENGINE") == "mysql":
DATABASES = {
"default": {"ENGINE": "django.db.backends.mysql", "NAME": "attachments"}
}
else:
DATABASES = {"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": "tests.db"}}
MEDIA_ROOT = os.path.join(TESTAPP_DIR, "uploads")
ROOT_URLCONF = "attachments.tests.testapp.urls"
INSTALLED_APPS = [
"attachments",
"attachments.tests.testapp",
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
]
MIDDLEWARE = (
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
)
MIDDLEWARE_CLASSES = (
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
)
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(TESTAPP_DIR, "templates")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.template.context_processors.i18n",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
| bartTC/django-attachments | attachments/tests/testapp/settings.py | Python | bsd-3-clause | 1,859 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2020 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
#    the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import socket
from pyos7 import *
from typing import Any, Generator
SockGenerator = Generator[Any, Any, Any]
def handle_client(client: socket.socket, addr: tuple) -> SockGenerator:
print(f"connection from {addr}")
while True:
yield ReadWait(client)
data = client.recv(1024)
if not data:
break
yield WriteWait(client)
client.send(data)
client.close()
    print("client closed")
def server(port: int) -> SockGenerator:
print(f"server starting ...")
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(("", port))
sock.listen(1024)
while True:
yield ReadWait(sock)
client, addr = sock.accept()
yield NewTask(handle_client(client, addr))
sched = Scheduler()
sched.new(server(5555))
sched.mainloop()
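# Rough way to exercise this sketch (illustrative, not part of the script;
# assumes the accompanying pyos7 module is importable): run
# `python echogood2.py`, then connect with e.g. `nc localhost 5555`; every
# line sent is echoed back until the client disconnects, at which point
# "client closed" is printed.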
| ASMlover/study | python/coroutines/echogood2.py | Python | bsd-2-clause | 2,280 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_wait
short_description: Wait for a BIG-IP condition before continuing
description:
- You can wait for BIG-IP to be "ready". By "ready", we mean that BIG-IP is ready
to accept configuration.
- This module can take into account situations where the device is in the middle
of rebooting due to a configuration change.
version_added: 2.5
options:
timeout:
description:
- Maximum number of seconds to wait for.
      - When used without other conditions it is equivalent to just sleeping.
- The default timeout is deliberately set to 2 hours because no individual
REST API.
default: 7200
delay:
description:
- Number of seconds to wait before starting to poll.
default: 0
sleep:
default: 1
description:
- Number of seconds to sleep between checks, before 2.3 this was hardcoded to 1 second.
msg:
description:
- This overrides the normal error message from a failure to meet the required conditions.
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Wait for BIG-IP to be ready to take configuration
bigip_wait:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Wait a maximum of 300 seconds for BIG-IP to be ready to take configuration
bigip_wait:
timeout: 300
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Wait for BIG-IP to be ready, don't start checking for 10 seconds
bigip_wait:
delay: 10
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
# only common fields returned
'''
import datetime
import signal
import time
from ansible.module_utils.basic import AnsibleModule
try:
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import f5_argument_spec
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
except ImportError:
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import f5_argument_spec
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
def hard_timeout(module, want, start):
elapsed = datetime.datetime.utcnow() - start
module.fail_json(
msg=want.msg or "Timeout when waiting for BIG-IP", elapsed=elapsed.seconds
)
class Parameters(AnsibleF5Parameters):
returnables = [
'elapsed'
]
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
@property
def delay(self):
if self._values['delay'] is None:
return None
return int(self._values['delay'])
@property
def timeout(self):
if self._values['timeout'] is None:
return None
return int(self._values['timeout'])
@property
def sleep(self):
if self._values['sleep'] is None:
return None
return int(self._values['sleep'])
class Changes(Parameters):
pass
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.have = None
self.want = Parameters(params=self.module.params)
self.changes = Parameters()
def exec_module(self):
result = dict()
try:
changed = self.execute()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def _get_client_connection(self):
return F5Client(**self.module.params)
def execute(self):
signal.signal(
signal.SIGALRM,
lambda sig, frame: hard_timeout(self.module, self.want, start)
)
# setup handler before scheduling signal, to eliminate a race
signal.alarm(int(self.want.timeout))
start = datetime.datetime.utcnow()
if self.want.delay:
time.sleep(float(self.want.delay))
end = start + datetime.timedelta(seconds=int(self.want.timeout))
while datetime.datetime.utcnow() < end:
time.sleep(int(self.want.sleep))
try:
# The first test verifies that the REST API is available; this is done
# by repeatedly trying to login to it.
self.client = self._get_client_connection()
if not self.client:
continue
if self._device_is_rebooting():
# Wait for the reboot to happen and then start from the beginning
# of the waiting.
continue
if self._is_mprov_running_on_device():
self._wait_for_module_provisioning()
break
except Exception:
# The types of exception's we're handling here are "REST API is not
# ready" exceptions.
#
# For example,
#
# Typically caused by device starting up:
#
# icontrol.exceptions.iControlUnexpectedHTTPError: 404 Unexpected Error:
# Not Found for uri: https://localhost:10443/mgmt/tm/sys/
# icontrol.exceptions.iControlUnexpectedHTTPError: 503 Unexpected Error:
# Service Temporarily Unavailable for uri: https://localhost:10443/mgmt/tm/sys/
#
#
# Typically caused by a device being down
#
# requests.exceptions.SSLError: HTTPSConnectionPool(host='localhost', port=10443):
# Max retries exceeded with url: /mgmt/tm/sys/ (Caused by SSLError(
# SSLError("bad handshake: SysCallError(-1, 'Unexpected EOF')",),))
#
#
# Typically caused by device still booting
#
# raise SSLError(e, request=request)\nrequests.exceptions.SSLError:
# HTTPSConnectionPool(host='localhost', port=10443): Max retries
# exceeded with url: /mgmt/shared/authn/login (Caused by
# SSLError(SSLError(\"bad handshake: SysCallError(-1, 'Unexpected EOF')\",),)),
continue
else:
elapsed = datetime.datetime.utcnow() - start
self.module.fail_json(
msg=self.want.msg or "Timeout when waiting for BIG-IP", elapsed=elapsed.seconds
)
elapsed = datetime.datetime.utcnow() - start
self.changes.update({'elapsed': elapsed.seconds})
return False
def _device_is_rebooting(self):
output = self.client.api.tm.util.bash.exec_cmd(
'run',
utilCmdArgs='-c "runlevel"'
)
        try:
            if '6' in output.commandResult:
                return True
        except AttributeError:
            pass
        return False
def _wait_for_module_provisioning(self):
# To prevent things from running forever, the hack is to check
# for mprov's status twice. If mprov is finished, then in most
# cases (not ASM) the provisioning is probably ready.
nops = 0
# Sleep a little to let provisioning settle and begin properly
time.sleep(5)
while nops < 4:
try:
if not self._is_mprov_running_on_device():
nops += 1
else:
nops = 0
except Exception:
# This can be caused by restjavad restarting.
pass
time.sleep(10)
def _is_mprov_running_on_device(self):
output = self.client.api.tm.util.bash.exec_cmd(
'run',
utilCmdArgs='-c "ps aux | grep \'[m]prov\'"'
)
if hasattr(output, 'commandResult'):
return True
return False
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
timeout=dict(default=7200, type='int'),
delay=dict(default=0, type='int'),
sleep=dict(default=1, type='int'),
msg=dict()
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
if not HAS_F5SDK:
module.fail_json(msg="The python f5-sdk module is required")
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| mheap/ansible | lib/ansible/modules/network/f5/bigip_wait.py | Python | gpl-3.0 | 10,531 |
from django.db import models
from django.contrib import admin
import datetime
from django.utils import timezone
from django.core.validators import RegexValidator
from django.contrib.auth.models import User, UserManager
from django.forms import ValidationError
from . import settings
# Create your models here.
DURATION_ZERO = datetime.time(hour=0)
DEFAULT_DURATION = datetime.time(hour=1)
DEFAULT_TIME = datetime.time(hour=12)
phoneValidator = RegexValidator(
regex=r'[0-9][0-9 ]+',
message='Not a valid phone number')
class CustomerManager(UserManager):
def get_by_natural_key(self, username):
"""
Enable serialisation without pk. Not needed ATM.
"""
return self.get(username=username)
def create_user(self,
username,
first_name,
last_name,
email,
phone,
date_of_birth,
gender,
notes,
password=None
):
"""
Creates and saves a Customer with the given particulars and password.
"""
if not username:
raise ValueError('Customers must have a username')
        user = self.model(
            username=username,
            first_name=first_name,
            last_name=last_name,
            email=self.normalize_email(email),
            phone=phone,
            date_of_birth=date_of_birth,
            gender=gender,
            notes=notes,
        )
user.set_password(password)
user.save(using=self._db)
return user
class Customer(User):
""" Customer/Client/Patient details.
"""
# gender options
MALE = 'M'
FEMALE = 'F'
GENDER_CHOICES = (
(MALE, 'Male'),
(FEMALE, 'Female'),
)
# title options
MR = 'MR'
MRS = 'MRS'
MISS = 'MISS'
MS = 'MS'
DR = 'DR'
PROF = 'PROF'
REV = 'REV'
TITLE_CHOICES = (
(MR, 'Mr'),
(MRS, 'Mrs'),
(MISS, 'Miss'),
(MS, 'Ms'),
(DR, 'Dr'),
(PROF, 'Prof'),
(REV, 'Rev'),
)
class Meta(User.Meta):
verbose_name = 'Customer'
verbose_name_plural = 'Customers'
objects = CustomerManager()
title = models.CharField(
max_length=4,
choices=TITLE_CHOICES,
default=MRS,
)
phone = models.CharField(
max_length=20,
validators=[phoneValidator],
blank=True,
null=True)
date_of_birth = models.DateField(blank=True, null=True)
gender = models.CharField(
max_length=1,
choices=GENDER_CHOICES,
default=FEMALE,
)
notes = models.TextField(blank=True)
def natural_key(self):
"""
Serialisation aid. Not needed ATM.
"""
return (self.username,)
def age(self):
"""
Age to the nearest year.
"""
if self.date_of_birth:
now = timezone.now()
return now.year - self.date_of_birth.year
return None
def __str__(self):
return '{0} {1}'.format(self.first_name, self.last_name)
class Resource(models.Model):
"""
A finite bookable resource, such as a room or piece of equipment.
TODO: May need to generalise this by adding ResourceType.
"""
name = models.CharField(max_length=40)
description = models.TextField(blank=True)
def __str__(self):
return '{0}'.format(self.name)
class Treatment(models.Model):
"""
A treatment.
Treatments are characterised by the resource(s) they need, and
the minimum time duration.
TODO: Currently the model assumes only one resource.
"""
name = models.CharField(max_length=40)
min_duration = models.DurationField(blank=True)
resource_required = models.BooleanField(default=False)
def __str__(self):
return '{0}'.format(self.name)
class Entry(models.Model):
"""
A diary entry, some event entered in the calendar.
Entries need to be able to compare times and do basic temporal arithmetic.
To do this (I think) we need to implement rich comparator methods so one
entry knows how to compare itself with another.
One possible (potentially undesirable) side-effect is that entries may
consider each other 'equal' when they have neither the same start time
nor the same duration. They will nevertheless be 'equivalent' in sharing
a portion of time.
"""
date = models.DateField(blank=False)
time = models.TimeField(blank=False, default=DEFAULT_TIME)
# kludge for duration enables using a time widget
# duration = models.DurationField(blank=True, default=DEFAULT_DURATION)
duration = models.TimeField(blank=True, default=DEFAULT_DURATION)
notes = models.TextField(blank=True)
creator = models.ForeignKey(
User,
blank=True,
null=True,
related_name='created_entries',
on_delete=models.CASCADE,
)
created = models.DateTimeField(auto_now_add=True)
editor = models.ForeignKey(
User,
blank=True,
null=True,
related_name='edited_entries',
on_delete=models.CASCADE,
)
edited = models.DateTimeField(auto_now=True)
customer = models.ForeignKey(
Customer,
blank=True,
null=True,
related_name='entries',
on_delete=models.CASCADE,
)
treatment = models.ForeignKey(
Treatment,
blank=True,
null=True,
on_delete=models.CASCADE,
)
resource = models.ForeignKey(
Resource,
blank=True,
null=True,
on_delete=models.CASCADE,
)
cancelled = models.BooleanField(default=False)
no_show = models.BooleanField(default=False)
def __str__(self):
name = '{0}'.format(
self.customer if self.customer
else self.creator
)
return ' - '.join(
[name, ('{0}'.format(self.treatment) if self.treatment else '')]
)
def short(self):
return ' - '.join([
('{0}'.format(self.resource) if self.resource else ''),
('{0}'.format(self.treatment) if self.treatment else ''),
(self.notes if self.notes else ''),
]
)
def duration_delta(self):
"""
Convert duration-as-time to duration-as-delta.
"""
the_zero = datetime.datetime.combine(self.date, DURATION_ZERO)
the_duration = datetime.datetime.combine(self.date, self.duration)
return the_duration - the_zero
def time_end(self):
"""
Calculate the time of the end of the entry from the start time and the
duration.
Sadly the naive method of adding the duration directly to the time
is not supported in python datetime arithmetic; a datetime object has
to be used.
"""
the_time = datetime.datetime.combine(self.date, self.time)
the_time_end = the_time + self.duration_delta()
return the_time_end.time()
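    # Worked example (illustrative values): with time 10:00 and a duration
    # stored as the time value 01:30, duration_delta() yields
    # timedelta(hours=1, minutes=30) and time_end() returns 11:30.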
def __eq__(self, other):
"""
        Determine if the entries are 'equivalent' (not necessarily mathematically
equal).
NOTE: time period end time is non-inclusive.
"""
# dates must be equal to start with
# TODO: note time rounding kludge
if (self.date.timetuple()[0:3] != other.date.timetuple()[0:3]):
return False
# time periods do not overlap; self happens before other
if (self.time < other.time and self.time_end() <= other.time):
return False
# time periods do not overlap; self happens after other
if (self.time > other.time and self.time >= other.time_end()):
return False
# anything else has to mean they overlap in time, right?
return True
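    # Worked example (illustrative values): entries on the same date at 10:00
    # and 10:30, each lasting 01:00, overlap and therefore compare as 'equal';
    # an entry starting at 11:00 begins exactly when the 10:00 entry ends, so
    # it does not.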
def __hash__(self):
"""
Fix for hash becoming None when __eq__() overrides the parent class.
"""
return super().__hash__()
def validateResourceRequirement(self):
"""
Context validation of resource requirements.
If a treatment requires a resource, a resource must be specified.
"""
if self.treatment and self.treatment.resource_required:
if not self.resource:
raise ValidationError(
'Resource requirement is not met.'
)
def validateDuration(self):
"""
Context validation of duration.
Duration may be invalid if it is smaller than the minimum for the
treatment.
"""
if self.treatment and self.treatment.min_duration:
if (
not self.duration
or self.treatment.min_duration > self.duration_delta()
):
raise ValidationError(
'Duration must be at least the minimum treament time.'
)
def validateNoResourceConflicts(self):
"""
Context validation of date, time, duration and resource.
The entry is invalid if it clashes in time and resource with
a pre-existing entry. Cancelled entries don't count.
"""
if self.cancelled or self.no_show:
return
if self.resource:
# get the day's uncancelled entries sharing the same resource
savedEntries = Entry.objects.filter(
date=self.date,
resource=self.resource,
cancelled=False,
no_show=False,
)
# ensure no time clashes
for other in savedEntries:
if self == other:
                    # if we are just saving the same entry, it's OK
if not self.pk or (self.pk and self.pk != other.pk):
raise ValidationError(
'Resource clash with another Entry. Please change resource or time.'
)
def validateCustomerNotDoubleBooked(self):
"""
Context validation of customer.
A named customer cannot have two entries at the same time, irrespective
of other resource criteria. Cancelled entries don't count.
"""
if self.cancelled or self.no_show:
return
if self.customer:
# get any uncancelled entries for the same customer on the same day
savedEntries = Entry.objects.filter(
date=self.date,
customer=self.customer,
cancelled=False,
no_show=False,
)
# ensure no time clashes
for other in savedEntries:
if self == other:
# if we are just saving the same entry, its OK
if not self.pk or (self.pk and self.pk != other.pk):
raise ValidationError(
'Double booking is not allowed. Please choose another time.'
)
def validateTradingHours(self):
"""
Context validation of trading times.
Staff members may add/edit an entry at any time subject to other
business rules. Customers may only add/edit entries consistent with
opening hours.
"""
if not self.editor.is_staff:
dow = self.date.weekday()
if not (
self.time >= settings.DIARY_OPENING_TIMES[dow] and
self.time_end() <= settings.DIARY_CLOSING_TIMES[dow]
):
raise ValidationError(
'Sorry, the store is closed then. Try changing the time.'
)
def validateFuture(self):
"""
Ensure customers cannot book times in the past or in the advance booking
period.
Staff can book entries whenever they like, but customers can only book
times in the future.
"""
if not self.editor.is_staff:
tz_now = timezone.localtime(timezone.now())
now = datetime.datetime(
tz_now.year, tz_now.month, tz_now.day,
tz_now.hour, tz_now.minute, tz_now.second,
)
advance_booking_date = (
datetime.datetime(
tz_now.year, tz_now.month, tz_now.day, 0, 0, 0,
) +
datetime.timedelta(days=settings.DIARY_MIN_BOOKING)
)
bookedTime = datetime.datetime.combine(self.date, self.time)
if bookedTime < now:
raise ValidationError(
'Please book a date/time in the future.'
)
if bookedTime < advance_booking_date:
raise ValidationError(
'Need to book ahead.'
)
def clean(self, *args, **kwargs):
"""
Override Model method to validate the content in context.
This applies the business rules of the Entry validateX methods.
"""
self.validateResourceRequirement()
self.validateDuration()
self.validateNoResourceConflicts()
self.validateCustomerNotDoubleBooked()
self.validateTradingHours()
self.validateFuture()
# now do the standard field validation
super(Entry, self).clean(*args, **kwargs)
def save(self, *args, **kwargs):
"""
Override the parent method to ensure custom validation in clean() is
done.
"""
self.full_clean()
super(Entry, self).save(*args, **kwargs)
class Meta:
verbose_name_plural = 'entries'
| BobBowles/django-diary | diary/models.py | Python | mit | 13,465 |
"""Infrastructure for detecting abstraction barrier violations."""
class AbstractionViolation(Exception):
pass
def datatype(obj):
return type(obj).__name__
# Generic abstract data type
class Abstract(object):
def __add__(self, other):
raise AbstractionViolation("Can't add {} object to {}".format(datatype(self), datatype(other)))
def __radd__(self, other):
raise AbstractionViolation("Can't add {} object to {}".format(datatype(self), datatype(other)))
def __eq__(self, other):
if isinstance(other, type(self)):
return other is self
raise AbstractionViolation("Can't use == on {} object and {}".format(datatype(self), datatype(other)))
def __ne__(self, other):
if isinstance(other, type(self)):
return other is not self
raise AbstractionViolation("Can't use != on {} object and {}".format(datatype(self), datatype(other)))
def __bool__(self):
raise AbstractionViolation("Can't use {} object as a boolean".format(datatype(self)))
def __getitem__(self, index):
raise AbstractionViolation("Can't use [] notation on {} object".format(datatype(self)))
def __contains__(self, other):
raise AbstractionViolation("Can't use contains notation on {} object".format(datatype(self)))
def __delitem__(self, other):
raise AbstractionViolation("Can't use del notation on {} object".format(datatype(self)))
def __iter__(self):
raise AbstractionViolation("Can't iterate on {} object".format(datatype(self)))
def __len__(self):
raise AbstractionViolation("Can't use len notation on {} object".format(datatype(self)))
def __setitem__(self, key, item):
raise AbstractionViolation("Can't use setitem notation on {} object".format(datatype(self)))
def __call__(self, *args, **kwargs):
raise AbstractionViolation("Can't call {} object".format(datatype(self)))
def __hash__(self):
return id(self)
class User(Abstract):
def __init__(self, name, reviews):
self.a, self.b = name, {review_restaurant_name(r): r for r in reviews}
def __repr__(self):
return '<User {} {}>'.format(self.a, list(map(repr, self.b)))
make_user = User
user_name = lambda u: u.a
user_reviews = lambda u: u.b
class Review(Abstract):
def __init__(self, restaurant_name, rating):
self.a, self.b = restaurant_name, rating
def __repr__(self):
return '<Review {} {}>'.format(self.a, self.b)
make_review = Review
review_restaurant_name = lambda r: r.a
review_rating = lambda r: r.b
class Restaurant(Abstract):
def __init__(self, name, location, categories, price, reviews):
self.a, self.b, self.c, self.d, self.e = name, location, categories, price, reviews
self.f = [review_rating(r) for r in reviews]
self.g = len(self.e)
self.h = sum(review_rating(r) for r in self.e) / len(self.e)
def __repr__(self):
return '<Restaurant {}>'.format(self.a)
make_restaurant = Restaurant
restaurant_name = lambda r: r.a
restaurant_location = lambda r: r.b
restaurant_categories = lambda r: r.c
restaurant_price = lambda r: r.d
restaurant_ratings = lambda r: r.f
restaurant_num_ratings = lambda r: r.g
restaurant_mean_rating = lambda r: r.h
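# Illustrative sketch (sample data is assumed, not part of this module): going
# through the accessors is fine, but peeking inside an abstract value trips
# the barrier.
#
#     r = make_review('Cafe X', 4)
#     review_rating(r)        # -> 4
#     r[0]                    # raises AbstractionViolation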
old = {}
def swap_implementations(impl, user=True, review=True, rest=True, rest_two=True):
# save other implementations
old['user'] = impl.make_user, impl.user_name, impl.user_reviews
old['review'] = impl.make_review, impl.review_restaurant_name, impl.review_rating
old['rest'] = impl.make_restaurant, impl.restaurant_name, impl.restaurant_location, impl.restaurant_categories, impl.restaurant_price, impl.restaurant_ratings
old['rest_two'] = impl.restaurant_num_ratings, impl.restaurant_mean_rating
# save our implementations
new_user = make_user, user_name, user_reviews
new_review = make_review, review_restaurant_name, review_rating
new_rest = make_restaurant, restaurant_name, restaurant_location, restaurant_categories, restaurant_price, restaurant_ratings
new_rest_two = restaurant_num_ratings, restaurant_mean_rating
# replace impl's implementations with ours
if user:
impl.make_user, impl.user_name, impl.user_reviews = new_user
if review:
impl.make_review, impl.review_restaurant_name, impl.review_rating = new_review
if rest:
impl.make_restaurant, impl.restaurant_name, impl.restaurant_location, impl.restaurant_categories, impl.restaurant_price, impl.restaurant_ratings = new_rest
if rest_two:
impl.restaurant_num_ratings, impl.restaurant_mean_rating = new_rest_two
def restore_implementations(impl):
impl.make_user, impl.user_name, impl.user_reviews = old['user']
impl.make_review, impl.review_restaurant_name, impl.review_rating = old['review']
impl.make_restaurant, impl.restaurant_name, impl.restaurant_location, impl.restaurant_categories, impl.restaurant_price, impl.restaurant_ratings = old['rest']
impl.restaurant_num_ratings, impl.restaurant_mean_rating = old['rest_two']
def check_same_elements(cluster1, cluster2):
return len(cluster1) == len(cluster2) and all(el1 == el2 for el1, el2 in zip(cluster1, cluster2))
def deep_check_same_elements(clusters1, clusters2):
return len(clusters1) == len(clusters2) and all(check_same_elements(c1, c2) for c1, c2 in zip(clusters1, clusters2))
def sample(lst, k):
return lst[:k]
def predict_multiple_ratings(user, restaurants, cluster, feature_fns):
predictions, r_squared = [3, 3.5, 4, 4.5, 5], 0.61
return [predictions[i % 5] for i in range(len(restaurants))], r_squared
| ajponte/yelpML | maps/tests/test_functions.py | Python | mit | 5,659 |
"""The Pirate Bay Module."""
import logging
import sys
from torrench.utilities.Config import Config
class ThePirateBay(Config):
"""
ThePirateBay class.
This class fetches torrents from TPB proxy,
and diplays results in tabular form.
Further, torrent details can be fetched which are
stored in dynamically-generated HTML page.
Details are fetched from tpb_details module
and stored in $HOME/.torrench/temp directory.
All activities are logged and stored in a log file.
In case of errors/unexpected output, refer logs.
"""
def __init__(self, title, page_limit):
"""Initialisations."""
Config.__init__(self)
self.proxies = self.get_proxies('tpb')
self.proxy = None
self.title = title
self.pages = page_limit
self.logger = logging.getLogger('log1')
self.class_name = self.__class__.__name__.lower()
self.index = 0
self.page = 0
self.total_fetch_time = 0
self.mylist = []
self.masterlist = []
self.mylist_crossite = []
self.masterlist_crossite = []
self.mapper = []
self.soup_dict = {}
self.soup = None
self.headers = [
'CATEG', 'NAME', 'INDEX', 'UPLOADER', 'SIZE', 'SE/LE', 'DATE', 'C']
###################################
self.non_color_name = None
self.top = "/top/all"
self.top48 = "/top/48hall"
def check_proxy(self):
"""
To check proxy availability.
Proxy is checked in two steps:
1. To see if proxy 'website' is available.
2. A test is carried out with a sample string 'hello'.
           If results are found, the test passes; otherwise it fails.
This class inherits Config class. Config class inherits
Common class. The Config class provides proxies list fetched
from config file. The Common class consists of commonly used
methods.
        In case of failure, the next proxy is tested with the same procedure.
        This continues until a working proxy is found.
        If no working proxy is found, the program exits.
"""
count = 0
for proxy in self.proxies:
print("Trying %s" % (self.colorify("yellow", proxy)))
self.logger.debug("Trying proxy: %s" % (proxy))
self.soup = self.http_request(proxy)
try:
if self.soup == -1 or self.soup.a.string != 'The Pirate Bay':
print("Bad proxy!")
count += 1
if count == len(self.proxies):
print("No more proxies found! Exiting...")
sys.exit(2)
else:
continue
else:
print("Proxy available. Performing test...")
url = proxy+"/search/hello/0/99/0"
self.logger.debug("Carrying out test for string 'hello'")
self.soup = self.http_request(url)
test = self.soup.find('div', class_='detName')
if test is not None:
self.proxy = proxy
print("Pass!")
self.logger.debug("Test passed!")
break
else:
print("Test failed!\nPossibly site not reachable. See logs.")
self.logger.debug("Test failed!")
except (AttributeError, Exception) as e:
self.logger.exception(e)
pass
def get_html(self):
"""
To get HTML page.
Once proxy is found, the HTML page for
corresponding search string is fetched.
Also, the time taken to fetch that page is returned.
Uses http_request_time() from Common.py module.
"""
try:
for self.page in range(self.pages):
print("\nFetching from page: %d" % (self.page+1))
search = "/search/%s/%d/99/0" % (self.title, self.page)
self.soup, time = self.http_request_time(self.proxy + search)
content = self.soup.find('table', id="searchResult")
if content is None:
print("[No results]")
break
self.logger.debug("fetching page %d/%d" % (self.page+1, self.pages))
print("[in %.2f sec]" % (time))
self.logger.debug("page fetched in %.2f sec!" % (time))
self.total_fetch_time += time
self.soup_dict[self.page] = self.soup
except Exception as e:
self.logger.exception(e)
print("Error message: %s" %(e))
print("Something went wrong! See logs for details. Exiting!")
sys.exit(2)
def get_top_html(self):
"""To get top torrents."""
try:
print(self.colorify("green", "\n\n*Top 100 TPB Torrents*"))
print("1. Top (ALL)\n2. Top (48H)\n")
option = int(input("Option: "))
if option == 1:
self.logger.debug("Selected [TOP-ALL] (Option: %d)" % (option))
self.soup, time = self.http_request_time(self.proxy + self.top)
elif option == 2:
self.logger.debug("Selected [TOP-48h] (Option: %d)" % (option))
self.soup, time = self.http_request_time(self.proxy + self.top48)
else:
print("Bad Input! Exiting!")
sys.exit(2)
self.total_fetch_time = time
self.soup_dict[0] = self.soup
except ValueError as e:
print("Bad input! Exiting!")
self.logger.exception(e)
sys.exit(2)
def parse_html(self):
"""
Parse HTML to get required results.
Results are fetched in masterlist list.
Also, a mapper[] is used to map 'index'
with torrent name, link and magnetic link
"""
try:
for page in self.soup_dict:
self.soup = self.soup_dict[page]
content = self.soup.find('table', id="searchResult")
if content is None:
return
data = content.find_all('tr')
for i in data[1:]:
name = i.find('a', class_='detLink').string
uploader = i.find('font', class_="detDesc").a
if name is None:
name = i.find('a', class_='detLink')['title'].split(" ")[2:]
name = " ".join(str(x) for x in name)
if uploader is None:
uploader = i.find('font', class_="detDesc").i.string
else:
uploader = uploader.string
comments = i.find(
'img', {'src': '//%s/static/img/icon_comment.gif' % (self.proxy.split('/')[2])})
# Total number of comments
if comments is None:
comment = '0'
else:
comment = comments['alt'].split(" ")[-2]
# See if uploader is VIP/Truested/Normal Uploader
self.non_color_name = name
is_vip = i.find('img', {'title': "VIP"})
is_trusted = i.find('img', {'title': 'Trusted'})
if(is_vip is not None):
name = self.colorify("green", name)
uploader = self.colorify("green", uploader)
elif(is_trusted is not None):
name = self.colorify("magenta", name)
uploader = self.colorify("magenta", uploader)
categ = i.find('td', class_="vertTh").find_all('a')[0].string
sub_categ = i.find('td', class_="vertTh").find_all('a')[1].string
seeds = i.find_all('td', align="right")[0].string
leeches = i.find_all('td', align="right")[1].string
date = i.find('font', class_="detDesc").get_text().split(' ')[1].replace(',', "")
size = i.find('font', class_="detDesc").get_text().split(' ')[3].replace(',', "")
seeds_color = self.colorify("green", seeds)
leeches_color = self.colorify("red", leeches)
# Unique torrent id
torr_id = i.find('a', {'class': 'detLink'})["href"].split('/')[2]
# Upstream torrent link
link = "%s/torrent/%s" % (self.proxy, torr_id)
magnet = i.find_all('a', {'title': 'Download this torrent using magnet'})[0]['href']
self.index += 1
self.mapper.insert(self.index, (name, magnet, link, self.class_name))
self.mylist = [categ + " > " + sub_categ, name, "--" +
str(self.index) + "--", uploader, size, (seeds_color + '/' +
leeches_color), date, comment]
self.masterlist.append(self.mylist)
self.mylist_crossite = [name+" ({})".format(uploader), self.index, size, seeds+'/'+leeches, date]
self.masterlist_crossite.append(self.mylist_crossite)
except Exception as e:
self.logger.exception(e)
print("Error message: %s" % (e))
print("Something went wrong! See logs for details. Exiting!")
sys.exit(2)
def main(title, page_limit):
"""Execution begins here."""
try:
print("\n[The Pirate Bay]\n")
print("Obtaining proxies...")
tpb = ThePirateBay(title, page_limit)
tpb.check_proxy()
if title is None:
tpb.get_top_html()
else:
tpb.get_html()
tpb.parse_html()
tpb.post_fetch()
print("\nBye!")
except KeyboardInterrupt:
tpb.logger.debug("Keyboard interupt! Exiting!")
print("\n\nAborted!")
def cross_site(title, page_limit):
tpb = ThePirateBay(title, page_limit)
return tpb
if __name__ == "__main__":
print("It's a module!")
| kryptxy/torrench | torrench/modules/thepiratebay.py | Python | gpl-3.0 | 10,265 |
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
"""
Python Kerberos GSS APIs used by spnego_kerberos_auth.py.
It is used as a place holder for kerberos.py which is not available.
"""
class KrbError(Exception):
pass
class GSSError(KrbError):
pass
def authGSSClientInit(service):
pass
def authGSSClientClean(context):
pass
def authGSSClientStep(context, challenge):
pass
def authGSSClientResponse(context):
pass
| radicalbit/ambari | ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/krberr.py | Python | apache-2.0 | 1,168 |
from dataclasses import dataclass, field
from typing import Optional
from xsdata.models.datatype import XmlDateTime
from npoapi.data.api_constraint_media import Filter
from npoapi.data.api_constraint_page import Filter as ApiConstraintPageFilter
__NAMESPACE__ = "urn:vpro:api:profile:2013"
@dataclass
class ProfileDefinitionType:
class Meta:
name = "profileDefinitionType"
filter: Optional[Filter] = field(
default=None,
metadata={
"type": "Element",
"namespace": "urn:vpro:api:constraint:media:2013",
}
)
vproApiConstraintPage2013Filter: Optional[ApiConstraintPageFilter] = field(
default=None,
metadata={
"name": "filter",
"type": "Element",
"namespace": "urn:vpro:api:constraint:page:2013",
}
)
since: Optional[XmlDateTime] = field(
default=None,
metadata={
"type": "Attribute",
}
)
@dataclass
class ProfileType:
class Meta:
name = "profileType"
pageProfile: Optional[ProfileDefinitionType] = field(
default=None,
metadata={
"type": "Element",
"namespace": "urn:vpro:api:profile:2013",
}
)
mediaProfile: Optional[ProfileDefinitionType] = field(
default=None,
metadata={
"type": "Element",
"namespace": "urn:vpro:api:profile:2013",
}
)
timestamp: Optional[XmlDateTime] = field(
default=None,
metadata={
"type": "Attribute",
}
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
}
)
@dataclass
class Profile(ProfileType):
class Meta:
name = "profile"
namespace = "urn:vpro:api:profile:2013"
| npo-poms/pyapi | npoapi/data/profile.py | Python | gpl-3.0 | 1,838 |
class Membro(object):
nome = None
cargo = None
def __init__(self, nome, cargo):
self.nome = nome
self.cargo = cargo
| TeamWorkQualityReport/TeamWorkQualityReport | Backend/Analise/entidades/membro.py | Python | gpl-3.0 | 125 |
from typing import Set
from pydantic import BaseModel, DSN, BaseSettings, PyObject
class SubModel(BaseModel):
foo = 'bar'
apple = 1
class Settings(BaseSettings):
redis_host = 'localhost'
redis_port = 6379
redis_database = 0
redis_password: str = None
auth_key: str = ...
invoicing_cls: PyObject = 'path.to.Invoice'
db_name = 'foobar'
db_user = 'postgres'
db_password: str = None
db_host = 'localhost'
db_port = '5432'
db_driver = 'postgres'
db_query: dict = None
dsn: DSN = None
# to override domains:
# export MY_PREFIX_DOMAINS = '["foo.com", "bar.com"]'
domains: Set[str] = set()
# to override more_settings:
# export MY_PREFIX_MORE_SETTINGS = '{"foo": "x", "apple": 1}'
more_settings: SubModel = SubModel()
class Config:
env_prefix = 'MY_PREFIX_' # defaults to 'APP_'
fields = {
'auth_key': {
'alias': 'my_api_key'
}
}
| petroswork/pydantic | docs/examples/settings.py | Python | mit | 990 |
from pyd.support import setup, Extension, pydexe_sanity_check
pydexe_sanity_check()
projName = 'interpcontext'
setup(
name=projName,
version='1.0',
ext_modules=[
Extension(projName, ['interpcontext.d'],
build_deimos=True, d_lump=True
)
],
)
| ariovistus/pyd | examples/interpcontext/setup.py | Python | mit | 286 |
from django.conf.urls import url, include
from django.contrib import admin
from django.views.generic import TemplateView, RedirectView
from myhome.views import *
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^favicon\.ico$', RedirectView.as_view(url='/static/favicon.ico', permanent=True)),
url(r'^medicines/$', MedicinesView.as_view(), name='medicine'),
url(r'^medicines/new/$', MedicinesCreateView.as_view(), name='medicine_add'),
url(r'^medicines/del/(?P<pk>\d+)$', MedicinesDeleteView.as_view(), name='medicine_del'),
url(r'^medicines/edit/(?P<pk>\d+)$', MedicinesUpdateView.as_view(), name='medicine_upd'),
url(r'$', TemplateView.as_view(template_name='base.html'), name='home'),
]
# static files
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns += staticfiles_urlpatterns()
| burzillibus/RobHome | RobHome/urls.py | Python | mit | 862 |
##########################################################################
#
# Copyright (c) 2018, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import Gaffer
import GafferScene
import GafferSceneTest
class CopyAttributesTest( GafferSceneTest.SceneTestCase ) :
def test( self ) :
plane = GafferScene.Plane()
customAttributes = GafferScene.CustomAttributes()
customAttributes["in"].setInput( plane["out"] )
customAttributes["attributes"].addChild( Gaffer.NameValuePlug( "a", IECore.IntData( 1 ) ) )
customAttributes["attributes"].addChild( Gaffer.NameValuePlug( "b", IECore.IntData( 2 ) ) )
# Node should do nothing without a filter applied.
copyAttributes = GafferScene.CopyAttributes()
copyAttributes["in"].setInput( plane["out"] )
copyAttributes["source"].setInput( customAttributes["out"] )
self.assertScenesEqual( plane["out"], copyAttributes["out"] )
self.assertSceneHashesEqual( plane["out"], copyAttributes["out"] )
# Applying a filter should kick it into action.
f = GafferScene.PathFilter()
f["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )
copyAttributes["filter"].setInput( f["out"] )
self.assertEqual( copyAttributes["out"].attributes( "/plane" ), customAttributes["out"].attributes( "/plane" ) )
# We should be able to copy just some attributes.
copyAttributes["attributes"].setValue( "a" )
self.assertEqual( copyAttributes["out"].attributes( "/plane" ).keys(), [ "a" ] )
self.assertEqual( copyAttributes["out"].attributes( "/plane" )["a"], customAttributes["out"].attributes( "/plane" )["a"] )
def testDeleteExisting( self ) :
plane = GafferScene.Plane()
aAttributes = GafferScene.CustomAttributes()
aAttributes["in"].setInput( plane["out"] )
a = Gaffer.NameValuePlug( "a", IECore.IntData( 1 ) )
aAttributes["attributes"].addChild( a )
bAttributes = GafferScene.CustomAttributes()
bAttributes["in"].setInput( plane["out"] )
b = Gaffer.NameValuePlug( "b", IECore.IntData( 2 ) )
bAttributes["attributes"].addChild( b )
copyAttributes = GafferScene.CopyAttributes()
copyAttributes["in"].setInput( aAttributes["out"] )
copyAttributes["source"].setInput( bAttributes["out"] )
f = GafferScene.PathFilter()
f["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )
copyAttributes["filter"].setInput( f["out"] )
# Delete existing off.
self.assertEqual(
copyAttributes["out"].attributes( "/plane" ),
IECore.CompoundObject( {
"a" : aAttributes["out"].attributes( "/plane" )["a"],
"b" : bAttributes["out"].attributes( "/plane" )["b"],
} )
)
# Delete existing on.
copyAttributes["deleteExisting"].setValue( True )
self.assertEqual(
copyAttributes["out"].attributes( "/plane" ),
IECore.CompoundObject( {
"b" : bAttributes["out"].attributes( "/plane" )["b"],
} )
)
# We shouldn't even evaluate the incoming attributes if
# we're going to delete them anyway.
a["value"].setValue( 20 ) # Invalidate cache
b["value"].setValue( 30 ) # Invalidate cache
with Gaffer.PerformanceMonitor() as pm :
copyAttributes["out"].attributes( "/plane" )
self.assertIn( bAttributes["out"]["attributes"], pm.allStatistics() )
self.assertNotIn( aAttributes["out"]["attributes"], pm.allStatistics() )
def testSourceLocation( self ) :
plane = GafferScene.Plane()
sphere = GafferScene.Sphere()
sphereAttributes = GafferScene.CustomAttributes()
sphereAttributes["in"].setInput( sphere["out"] )
sphereAttributes["attributes"].addChild( Gaffer.NameValuePlug( "a", IECore.IntData( 2 ) ) )
parent = GafferScene.Parent()
parent["parent"].setValue( "/" )
parent["in"].setInput( plane["out"] )
parent["children"][0].setInput( sphereAttributes["out"] )
copyAttributes = GafferScene.CopyAttributes()
copyAttributes["in"].setInput( parent["out"] )
copyAttributes["source"].setInput( parent["out"] )
f = GafferScene.PathFilter()
f["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )
copyAttributes["filter"].setInput( f["out"] )
self.assertEqual(
copyAttributes["out"].attributes( "/plane" ),
parent["out"].attributes( "/plane" )
)
copyAttributes["sourceLocation"].setValue( "/sphere" )
self.assertEqual(
copyAttributes["out"].attributes( "/plane" ),
parent["out"].attributes( "/sphere" )
)
def testRanges( self ) :
script = Gaffer.ScriptNode()
script["copy"] = GafferScene.CopyAttributes()
script["box"] = Gaffer.Box()
script["box"]["copy"] = GafferScene.CopyAttributes()
self.assertEqual(
list( GafferScene.CopyAttributes.Range( script ) ),
[ script["copy"] ],
)
self.assertEqual(
list( GafferScene.CopyAttributes.RecursiveRange( script ) ),
[ script["copy"], script["box"]["copy"] ],
)
if __name__ == "__main__":
unittest.main()
| appleseedhq/gaffer | python/GafferSceneTest/CopyAttributesTest.py | Python | bsd-3-clause | 6,464 |
from flask import Flask, render_template, request
from httplib2 import Http
app = Flask(__name__)
@app.route("/")
def index():
return render_template('index.html')
@app.route("/zalando", methods=['GET',])
def api():
if request.method == 'GET':
h = Http()
headers = {'Accept': 'application/json',
'Content-Type': 'application/json;charset=UTF-8'
}
gender = request.args.get('gender', 'male')
size = request.args.get('size', '')
url = 'http://disrupt-hackathon.zalando.net/search/www.zalando.de/'
if gender == 'male':
url += 'handschuhe-herren'
else:
url += 'handschuhe-damen'
resp, content = h.request(url + '?size=' + size, "GET", headers=headers)
print resp
if resp['status'] == '200':
return content
else:
return resp
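# Illustrative request against this endpoint (host/port assume the default
# Flask development server started below):
#
#     GET http://127.0.0.1:5000/zalando?gender=female&size=M
#
# which forwards the query to the hackathon search API and returns its JSON.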
if __name__ == "__main__":
app.run(debug=True) | taschik/handzando | webapp.py | Python | mit | 945 |
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup

req=requests.get("http://www.twse.com.tw/ch/trading/fund/BFI82U/BFI82U.php?report1=day&input_date=105%2F05%2F31&mSubmit=%ACd%B8%DF&yr=2016&w_date=20160530&m_date=20160501")
req.encoding='utf-8'
html=req.text.encode('utf-8')
soup=BeautifulSoup(html,"html.parser")
for td in soup.findAll("td",{"class":"basic2"}):
print td.text
| macchiang/practice-on-big-data | crawl/crawl-twse.py | Python | mit | 330 |
# Copyright (C) 2021 OpenMotics BV
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import unittest
from ioc import SetTestMode
from gateway.dto import GlobalFeedbackDTO, FeedbackLedDTO
from gateway.hal.mappers_classic import GlobalFeedbackMapper
from master.classic.eeprom_models import CanLedConfiguration
class InputCoreMapperTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
SetTestMode()
def test_mapping_basic(self):
orm = GlobalFeedbackMapper.dto_to_orm(GlobalFeedbackDTO(id=0,
can_led_1=None,
can_led_2=FeedbackLedDTO(id=10,
function=FeedbackLedDTO.Functions.FB_B5_NORMAL),
can_led_3=None,
can_led_4=None))
self.assertEqual(0, orm.id)
self.assertEqual(255, orm.can_led_1_id)
self.assertEqual('UNKNOWN', orm.can_led_1_function)
self.assertEqual(10, orm.can_led_2_id)
self.assertEqual('Fast blink B5', orm.can_led_2_function)
self.assertEqual(255, orm.can_led_3_id)
self.assertEqual('UNKNOWN', orm.can_led_3_function)
self.assertEqual(255, orm.can_led_4_id)
self.assertEqual('UNKNOWN', orm.can_led_4_function)
dto = GlobalFeedbackMapper.orm_to_dto(CanLedConfiguration.deserialize({'id': 0,
'can_led_1_id': 15,
'can_led_1_function': 'On B8 Inverted',
'can_led_2_id': 255,
'can_led_2_function': 'UNKNOWN',
'can_led_3_id': 255,
'can_led_3_function': 'UNKNOWN',
'can_led_4_id': 255,
'can_led_4_function': 'UNKNOWN'
}))
self.assertEqual(0, dto.id)
self.assertEqual(FeedbackLedDTO(id=15, function=FeedbackLedDTO.Functions.ON_B8_INVERTED), dto.can_led_1)
self.assertEqual(FeedbackLedDTO(id=None, function=FeedbackLedDTO.Functions.UNKNOWN), dto.can_led_2)
self.assertEqual(FeedbackLedDTO(id=None, function=FeedbackLedDTO.Functions.UNKNOWN), dto.can_led_3)
self.assertEqual(FeedbackLedDTO(id=None, function=FeedbackLedDTO.Functions.UNKNOWN), dto.can_led_4)
| openmotics/gateway | testing/unittests/gateway_tests/mappers/can_led_test.py | Python | agpl-3.0 | 3,548 |
# Copyright (c) 2007 Mitchell N. Charity
# Copyright (c) 2009-2012 Walter Bender
# Copyright (c) 2012 Flavio Danesse
# Copyright (c) 2013 Aneesh Dogra <[email protected]>
#
# This file is part of Ruler.
#
# Ruler is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ruler is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ruler. If not, see <http://www.gnu.org/licenses/>
import gi
gi.require_version('Gdk', '3.0')
gi.require_version('Gtk', '3.0')
gi.require_version('PangoCairo', '1.0')
from gi.repository import Gdk
from gi.repository import Gtk
from sugar3.activity import activity
from sugar3.activity.widgets import ActivityToolbarButton
from sugar3.activity.widgets import StopButton
from sugar3.graphics import style
from sugar3.graphics.radiotoolbutton import RadioToolButton
from sugar3.graphics.toolbarbox import ToolbarBox
from sugar3.graphics.toolbarbox import ToolbarButton
GRID_CELL_SIZE = style.GRID_CELL_SIZE
import logging
_logger = logging.getLogger("ruler-activity")
from gettext import gettext as _
from util import calc_dpi
import show_rulers
import show_grids
import show_checkers
import show_angles
MMPERINCH = 25.4
class MyCanvas(Gtk.DrawingArea):
''' Create a GTK+ widget on which we will draw using Cairo '''
def __init__(self):
Gtk.DrawingArea.__init__(self)
self._draw_ruler = False
self._object = None
self.connect('draw', self.__draw_cb)
self._dpi = 96
def __draw_cb(self, widget, cr):
if self._draw_ruler:
self._object.draw(cr, self._dpi)
cr.clip()
def add_a_ruler(self, r):
self._draw_ruler = True
self._object = r
self.queue_draw()
def get_dpi(self):
return self._dpi
def set_dpi(self, dpi):
self._dpi = dpi
#
# Sugar activity
#
class RulerActivity(activity.Activity):
def __init__(self, handle):
super(RulerActivity, self).__init__(handle)
self.button_dict = {}
self.callback_dict = {}
self._ready = False
font = 'helvetica 12'
font_bold = 'helvetica bold 12'
#
# We need a canvas
#
self._canvas = MyCanvas()
self.set_canvas(self._canvas)
self._canvas.show()
screen = Gdk.Screen()
width = screen.width()
height = screen.height() - GRID_CELL_SIZE
dpi, self.known_dpi = calc_dpi()
self._canvas.set_dpi(dpi)
# Create instances of our graphics
self._r = show_rulers.ScreenOfRulers(font, font_bold, width, height)
self._gcm = show_grids.ScreenGrid_cm(font, font_bold, width, height)
self._gmm = show_grids.ScreenGrid_mm(font, font_bold, width, height)
self._a90 = show_angles.Angles90(font, font_bold, width, height)
self._a360 = show_angles.Angles360(font, font_bold, width, height)
self._c = show_checkers.ScreenOfCircles(font, font_bold, width, height)
# start with a ruler
self._current = self._r
self._canvas.add_a_ruler(self._current)
# other settings
self._grids_mode = "cm"
self._angles_mode = "90"
#
# We need some toolbars
#
self.max_participants = 1
toolbar_box = ToolbarBox()
# Buttons added to the Activity toolbar
activity_button = ActivityToolbarButton(self)
toolbar_box.toolbar.insert(activity_button, 0)
activity_button.show()
self.rulers = radio_factory('ruler',
toolbar_box.toolbar,
self._rulers_cb,
tooltip=_('Ruler'),
group=None)
self.grids = radio_factory('grid-a',
toolbar_box.toolbar,
self._grids_cb,
tooltip=_('Grid'),
group=self.rulers)
self.angles = radio_factory('angles-90',
toolbar_box.toolbar,
self._angles_cb,
tooltip=_('Angles'),
group=self.rulers)
self.checker = radio_factory('checker',
toolbar_box.toolbar,
self._checker_cb,
tooltip=_('Checker'),
group=self.rulers)
self.wrapper = Gtk.ToolItem()
self.wrapper2 = Gtk.ToolItem()
self.wrapper3 = Gtk.ToolItem()
self.custom_unit_entry = Gtk.Entry()
self.txt1 = Gtk.Label()
self.txt1.set_text(_('1 custom unit equals '))
self.txt2 = Gtk.Label()
# TRANS: mm is for Milli Meters
self.txt2.set_text(_(' mm.'))
self.wrapper.add(self.txt1)
self.wrapper2.add(self.custom_unit_entry)
self.wrapper3.add(self.txt2)
self.wrapper.show_all()
self.wrapper2.show_all()
self.wrapper3.show_all()
separator = Gtk.SeparatorToolItem()
separator.props.draw = True
separator.set_expand(False)
separator.show()
toolbar_box.toolbar.insert(separator, -1)
custom_units_toolbox = ToolbarBox()
custom_units_toolbox.toolbar.insert(self.wrapper, -1)
custom_units_toolbox.toolbar.insert(self.wrapper2, -1)
custom_units_toolbox.toolbar.insert(self.wrapper3, -1)
custom_units_toolbox.show()
self.custom_units_button = ToolbarButton(icon_name='view-source',
page=custom_units_toolbox)
toolbar_box.toolbar.insert(self.custom_units_button, -1)
self.custom_unit_entry.connect('changed', self.custom_unit_change_cb)
self.custom_units_button.show()
if not self.known_dpi:
separator = Gtk.SeparatorToolItem()
separator.show()
toolbar_box.toolbar.insert(separator, -1)
dpi = self._canvas.get_dpi()
self._dpi_spin_adj = Gtk.Adjustment(dpi, 72, 200, 2, 32, 0)
self._dpi_spin = Gtk.SpinButton(self._dpi_spin_adj, 0, 0)
self._dpi_spin_id = self._dpi_spin.connect('value-changed',
self._dpi_spin_cb)
self._dpi_spin.set_numeric(True)
self._dpi_spin.show()
self.tool_item_dpi = Gtk.ToolItem()
self.tool_item_dpi.add(self._dpi_spin)
toolbar_box.toolbar.insert(self.tool_item_dpi, -1)
self.tool_item_dpi.show()
separator = Gtk.SeparatorToolItem()
separator.props.draw = False
separator.set_expand(True)
separator.show()
toolbar_box.toolbar.insert(separator, -1)
# The ever-present Stop Button
stop_button = StopButton(self)
stop_button.props.accelerator = '<Ctrl>Q'
toolbar_box.toolbar.insert(stop_button, -1)
stop_button.show()
self.set_toolbar_box(toolbar_box)
toolbar_box.show()
self.show_all()
# Restore state if previously saved
self._ready = True
if 'ruler' in self.metadata and \
self.metadata['ruler'] in self.button_dict:
_logger.debug('restoring %s', self.metadata['ruler'])
self.button_dict[self.metadata['ruler']].set_active(True)
            self.callback_dict[self.metadata['ruler']]()
else:
self._rulers_cb()
self.rulers.set_active(True)
if 'custom_unit' in self.metadata:
self.custom_unit_entry.set_text(self.metadata['custom_unit'])
else: # set the default
self.custom_unit_entry.set_text("25.4")
#
# Button callbacks
#
def _rulers_cb(self, button=None):
if self._ready:
self.custom_units_button.set_sensitive(True)
self._current = self._r
self._canvas.add_a_ruler(self._current)
_logger.debug('selecting ruler')
self.metadata['ruler'] = 'ruler'
return False
def custom_unit_change_cb(self, widget):
try:
new = float(widget.get_text())
except ValueError:
new = MMPERINCH
new = abs(new)
if new == 0:
new = MMPERINCH
        if widget.get_text() != '':
widget.set_text(str(new))
self._canvas.add_a_ruler(self._r)
self._r.custom_unit_in_mm = new
self._r.draw_custom_ruler(self._r.custom_unit_in_mm)
self.metadata['custom_unit'] = str(new)
def _grids_cb(self, button=None):
if self._ready:
self.custom_units_button.set_sensitive(False)
self.custom_units_button.set_expanded(False)
if self._grids_mode == "cm":
self._current = self._gcm
if hasattr(self, 'grids'):
self.grids.set_icon_name("grid-c")
self._grids_mode = "mm"
else:
self._current = self._gmm
if hasattr(self, 'grids'):
self.grids.set_icon_name("grid-a")
self._grids_mode = "cm"
self._canvas.add_a_ruler(self._current)
_logger.debug('selecting grids')
self.metadata['ruler'] = 'grids'
return False
def _angles_cb(self, button=None):
if self._ready:
self.custom_units_button.set_sensitive(False)
self.custom_units_button.set_expanded(False)
if self._angles_mode == "90":
self._current = self._a90
if hasattr(self, 'angles'):
self.angles.set_icon_name("angles-360")
self._angles_mode = "360"
else:
self._current = self._a360
if hasattr(self, 'angles'):
self.angles.set_icon_name("angles-90")
self._angles_mode = "90"
self._canvas.add_a_ruler(self._current)
_logger.debug('selecting angles')
self.metadata['ruler'] = 'angles'
return False
def _checker_cb(self, button=None):
if self._ready:
self.custom_units_button.set_sensitive(False)
self.custom_units_button.set_expanded(False)
self._current = self._c
self._canvas.add_a_ruler(self._current)
_logger.debug('selecting checker')
self.metadata['ruler'] = 'checker'
return False
def _dpi_spin_cb(self, button):
self._canvas.set_dpi(self._dpi_spin.get_value_as_int())
self._canvas.add_a_ruler(self._current)
return
def write_file(self, file_path):
''' Write the dpi to the Journal '''
dpi = self._canvas.get_dpi()
_logger.debug("Write dpi: " + str(dpi))
self.metadata['dpi'] = str(dpi)
def radio_factory(icon_name, toolbar, callback, cb_arg=None,
tooltip=None, group=None):
''' Add a radio button to a toolbar '''
button = RadioToolButton(group=group)
button.set_icon_name(icon_name)
if tooltip is not None:
button.set_tooltip(tooltip)
if cb_arg is None:
button.connect('clicked', callback)
else:
button.connect('clicked', callback, cb_arg)
if hasattr(toolbar, 'insert'): # the main toolbar
toolbar.insert(button, -1)
else: # or a secondary toolbar
toolbar.props.page.insert(button, -1)
button.show()
return button
| walterbender/ruler | RulerActivity.py | Python | gpl-3.0 | 12,067 |
# -*- coding: utf-8 -*-
"""
werkzeug._internal
~~~~~~~~~~~~~~~~~~
This module provides internally used helpers and constants.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import inspect
from weakref import WeakKeyDictionary
from cStringIO import StringIO
from Cookie import SimpleCookie, Morsel, CookieError
from time import gmtime
from datetime import datetime, date
_logger = None
_empty_stream = StringIO('')
_signature_cache = WeakKeyDictionary()
_epoch_ord = date(1970, 1, 1).toordinal()
HTTP_STATUS_CODES = {
100: 'Continue',
101: 'Switching Protocols',
102: 'Processing',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
207: 'Multi Status',
226: 'IM Used', # see RFC 3229
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required', # unused
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
418: 'I\'m a teapot', # see RFC 2324
422: 'Unprocessable Entity',
423: 'Locked',
424: 'Failed Dependency',
426: 'Upgrade Required',
428: 'Precondition Required', # see RFC 6585
429: 'Too Many Requests',
431: 'Request Header Fields Too Large',
449: 'Retry With', # proprietary MS extension
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported',
507: 'Insufficient Storage',
510: 'Not Extended'
}
class _Missing(object):
def __repr__(self):
return 'no value'
def __reduce__(self):
return '_missing'
_missing = _Missing()
def _proxy_repr(cls):
def proxy_repr(self):
return '%s(%s)' % (self.__class__.__name__, cls.__repr__(self))
return proxy_repr
def _get_environ(obj):
env = getattr(obj, 'environ', obj)
assert isinstance(env, dict), \
'%r is not a WSGI environment (has to be a dict)' % type(obj).__name__
return env
def _log(type, message, *args, **kwargs):
"""Log into the internal werkzeug logger."""
global _logger
if _logger is None:
import logging
_logger = logging.getLogger('werkzeug')
# Only set up a default log handler if the
# end-user application didn't set anything up.
if not logging.root.handlers and _logger.level == logging.NOTSET:
_logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
_logger.addHandler(handler)
getattr(_logger, type)(message.rstrip(), *args, **kwargs)
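# Illustrative call (editor's note, not in the original module):
#   _log('warning', 'unexpected value: %r', value)
# forwards to logging.getLogger('werkzeug').warning(...), installing a plain
# StreamHandler the first time if the application has not configured logging.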
def _parse_signature(func):
"""Return a signature object for the function."""
if hasattr(func, 'im_func'):
func = func.im_func
# if we have a cached validator for this function, return it
parse = _signature_cache.get(func)
if parse is not None:
return parse
# inspect the function signature and collect all the information
positional, vararg_var, kwarg_var, defaults = inspect.getargspec(func)
defaults = defaults or ()
arg_count = len(positional)
arguments = []
for idx, name in enumerate(positional):
if isinstance(name, list):
raise TypeError('cannot parse functions that unpack tuples '
'in the function signature')
try:
default = defaults[idx - arg_count]
except IndexError:
param = (name, False, None)
else:
param = (name, True, default)
arguments.append(param)
arguments = tuple(arguments)
def parse(args, kwargs):
new_args = []
missing = []
extra = {}
# consume as many arguments as positional as possible
for idx, (name, has_default, default) in enumerate(arguments):
try:
new_args.append(args[idx])
except IndexError:
try:
new_args.append(kwargs.pop(name))
except KeyError:
if has_default:
new_args.append(default)
else:
missing.append(name)
else:
if name in kwargs:
extra[name] = kwargs.pop(name)
# handle extra arguments
extra_positional = args[arg_count:]
if vararg_var is not None:
new_args.extend(extra_positional)
extra_positional = ()
        if kwargs and kwarg_var is None:
extra.update(kwargs)
kwargs = {}
return new_args, kwargs, missing, extra, extra_positional, \
arguments, vararg_var, kwarg_var
_signature_cache[func] = parse
return parse
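# Editor's sketch (illustrative, not part of the original module): an uncalled
# helper showing what the cached ``parse`` callable reports; the function ``f``
# and every value below are hypothetical.
def _parse_signature_demo():
    def f(a, b=2, *args, **kw):
        pass
    parse = _parse_signature(f)
    new_args, kwargs, missing, extra, extra_positional, \
        arguments, vararg_var, kwarg_var = parse((1, 2, 3), {'x': 4})
    # new_args == [1, 2, 3]: the surplus positional value is folded into
    # new_args because ``f`` accepts *args; kwargs stays {'x': 4} because
    # ``f`` accepts **kw; missing == [] and extra == {}.
    return new_args, kwargs, missing, extra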
def _patch_wrapper(old, new):
"""Helper function that forwards all the function details to the
decorated function."""
try:
new.__name__ = old.__name__
new.__module__ = old.__module__
new.__doc__ = old.__doc__
new.__dict__ = old.__dict__
except Exception:
pass
return new
def _decode_unicode(value, charset, errors):
"""Like the regular decode function but this one raises an
`HTTPUnicodeError` if errors is `strict`."""
fallback = None
if errors.startswith('fallback:'):
fallback = errors[9:]
errors = 'strict'
try:
return value.decode(charset, errors)
except UnicodeError, e:
if fallback is not None:
return value.decode(fallback, 'replace')
from werkzeug.exceptions import HTTPUnicodeError
raise HTTPUnicodeError(str(e))
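# Worked example (editor's illustration): with errors='fallback:latin-1' the
# value is first decoded strictly, and on failure the fallback charset is used
# with 'replace', so _decode_unicode('\xfc', 'utf-8', 'fallback:latin-1')
# returns u'\xfc' instead of raising HTTPUnicodeError.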
def _iter_modules(path):
"""Iterate over all modules in a package."""
import os
import pkgutil
if hasattr(pkgutil, 'iter_modules'):
for importer, modname, ispkg in pkgutil.iter_modules(path):
yield modname, ispkg
return
from inspect import getmodulename
from pydoc import ispackage
found = set()
    for package_path in path:
        for filename in os.listdir(package_path):
            p = os.path.join(package_path, filename)
            modname = getmodulename(filename)
            if modname and modname != '__init__':
                if modname not in found:
                    found.add(modname)
                    yield modname, ispackage(p)
def _dump_date(d, delim):
"""Used for `http_date` and `cookie_date`."""
if d is None:
d = gmtime()
elif isinstance(d, datetime):
d = d.utctimetuple()
elif isinstance(d, (int, long, float)):
d = gmtime(d)
return '%s, %02d%s%s%s%s %02d:%02d:%02d GMT' % (
('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')[d.tm_wday],
d.tm_mday, delim,
('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
'Oct', 'Nov', 'Dec')[d.tm_mon - 1],
delim, str(d.tm_year), d.tm_hour, d.tm_min, d.tm_sec
)
def _date_to_unix(arg):
"""Converts a timetuple, integer or datetime object into the seconds from
epoch in utc.
"""
if isinstance(arg, datetime):
arg = arg.utctimetuple()
elif isinstance(arg, (int, long, float)):
return int(arg)
year, month, day, hour, minute, second = arg[:6]
days = date(year, month, 1).toordinal() - _epoch_ord + day - 1
hours = days * 24 + hour
minutes = hours * 60 + minute
seconds = minutes * 60 + second
return seconds
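# Worked example (editor's illustration): datetime(1970, 1, 2) yields days = 1,
# hours = 24, minutes = 1440 and therefore 86400 seconds since the epoch, while
# plain numbers are returned truncated, e.g. _date_to_unix(12.7) == 12.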
class _ExtendedMorsel(Morsel):
_reserved = {'httponly': 'HttpOnly'}
_reserved.update(Morsel._reserved)
def __init__(self, name=None, value=None):
Morsel.__init__(self)
if name is not None:
self.set(name, value, value)
def OutputString(self, attrs=None):
httponly = self.pop('httponly', False)
result = Morsel.OutputString(self, attrs).rstrip('\t ;')
if httponly:
result += '; HttpOnly'
return result
class _ExtendedCookie(SimpleCookie):
"""Form of the base cookie that doesn't raise a `CookieError` for
malformed keys. This has the advantage that broken cookies submitted
by nonstandard browsers don't cause the cookie to be empty.
"""
def _BaseCookie__set(self, key, real_value, coded_value):
morsel = self.get(key, _ExtendedMorsel())
try:
morsel.set(key, real_value, coded_value)
except CookieError:
pass
dict.__setitem__(self, key, morsel)
class _DictAccessorProperty(object):
"""Baseclass for `environ_property` and `header_property`."""
read_only = False
def __init__(self, name, default=None, load_func=None, dump_func=None,
read_only=None, doc=None):
self.name = name
self.default = default
self.load_func = load_func
self.dump_func = dump_func
if read_only is not None:
self.read_only = read_only
self.__doc__ = doc
def __get__(self, obj, type=None):
if obj is None:
return self
storage = self.lookup(obj)
if self.name not in storage:
return self.default
rv = storage[self.name]
if self.load_func is not None:
try:
rv = self.load_func(rv)
except (ValueError, TypeError):
rv = self.default
return rv
def __set__(self, obj, value):
if self.read_only:
raise AttributeError('read only property')
if self.dump_func is not None:
value = self.dump_func(value)
self.lookup(obj)[self.name] = value
def __delete__(self, obj):
if self.read_only:
raise AttributeError('read only property')
self.lookup(obj).pop(self.name, None)
def __repr__(self):
return '<%s %s>' % (
self.__class__.__name__,
self.name
)
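# Editor's sketch (illustrative, not part of the original module): concrete
# descriptors are expected to provide a ``lookup()`` method that returns the
# backing mapping.  ``_demo_environ_property`` shows one hypothetical way an
# environ-backed property could be built on this base class.
class _demo_environ_property(_DictAccessorProperty):
    """Expose a key of ``obj.environ`` as a read/write attribute."""
    def lookup(self, obj):
        return obj.environ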
def _easteregg(app):
"""Like the name says. But who knows how it works?"""
gyver = '\n'.join([x + (77 - len(x)) * ' ' for x in '''
eJyFlzuOJDkMRP06xRjymKgDJCDQStBYT8BCgK4gTwfQ2fcFs2a2FzvZk+hvlcRvRJD148efHt9m
9Xz94dRY5hGt1nrYcXx7us9qlcP9HHNh28rz8dZj+q4rynVFFPdlY4zH873NKCexrDM6zxxRymzz
4QIxzK4bth1PV7+uHn6WXZ5C4ka/+prFzx3zWLMHAVZb8RRUxtFXI5DTQ2n3Hi2sNI+HK43AOWSY
jmEzE4naFp58PdzhPMdslLVWHTGUVpSxImw+pS/D+JhzLfdS1j7PzUMxij+mc2U0I9zcbZ/HcZxc
q1QjvvcThMYFnp93agEx392ZdLJWXbi/Ca4Oivl4h/Y1ErEqP+lrg7Xa4qnUKu5UE9UUA4xeqLJ5
jWlPKJvR2yhRI7xFPdzPuc6adXu6ovwXwRPXXnZHxlPtkSkqWHilsOrGrvcVWXgGP3daXomCj317
8P2UOw/NnA0OOikZyFf3zZ76eN9QXNwYdD8f8/LdBRFg0BO3bB+Pe/+G8er8tDJv83XTkj7WeMBJ
v/rnAfdO51d6sFglfi8U7zbnr0u9tyJHhFZNXYfH8Iafv2Oa+DT6l8u9UYlajV/hcEgk1x8E8L/r
XJXl2SK+GJCxtnyhVKv6GFCEB1OO3f9YWAIEbwcRWv/6RPpsEzOkXURMN37J0PoCSYeBnJQd9Giu
LxYQJNlYPSo/iTQwgaihbART7Fcyem2tTSCcwNCs85MOOpJtXhXDe0E7zgZJkcxWTar/zEjdIVCk
iXy87FW6j5aGZhttDBoAZ3vnmlkx4q4mMmCdLtnHkBXFMCReqthSGkQ+MDXLLCpXwBs0t+sIhsDI
tjBB8MwqYQpLygZ56rRHHpw+OAVyGgaGRHWy2QfXez+ZQQTTBkmRXdV/A9LwH6XGZpEAZU8rs4pE
1R4FQ3Uwt8RKEtRc0/CrANUoes3EzM6WYcFyskGZ6UTHJWenBDS7h163Eo2bpzqxNE9aVgEM2CqI
GAJe9Yra4P5qKmta27VjzYdR04Vc7KHeY4vs61C0nbywFmcSXYjzBHdiEjraS7PGG2jHHTpJUMxN
Jlxr3pUuFvlBWLJGE3GcA1/1xxLcHmlO+LAXbhrXah1tD6Ze+uqFGdZa5FM+3eHcKNaEarutAQ0A
QMAZHV+ve6LxAwWnXbbSXEG2DmCX5ijeLCKj5lhVFBrMm+ryOttCAeFpUdZyQLAQkA06RLs56rzG
8MID55vqr/g64Qr/wqwlE0TVxgoiZhHrbY2h1iuuyUVg1nlkpDrQ7Vm1xIkI5XRKLedN9EjzVchu
jQhXcVkjVdgP2O99QShpdvXWoSwkp5uMwyjt3jiWCqWGSiaaPAzohjPanXVLbM3x0dNskJsaCEyz
DTKIs+7WKJD4ZcJGfMhLFBf6hlbnNkLEePF8Cx2o2kwmYF4+MzAxa6i+6xIQkswOqGO+3x9NaZX8
MrZRaFZpLeVTYI9F/djY6DDVVs340nZGmwrDqTCiiqD5luj3OzwpmQCiQhdRYowUYEA3i1WWGwL4
GCtSoO4XbIPFeKGU13XPkDf5IdimLpAvi2kVDVQbzOOa4KAXMFlpi/hV8F6IDe0Y2reg3PuNKT3i
RYhZqtkQZqSB2Qm0SGtjAw7RDwaM1roESC8HWiPxkoOy0lLTRFG39kvbLZbU9gFKFRvixDZBJmpi
Xyq3RE5lW00EJjaqwp/v3EByMSpVZYsEIJ4APaHmVtpGSieV5CALOtNUAzTBiw81GLgC0quyzf6c
NlWknzJeCsJ5fup2R4d8CYGN77mu5vnO1UqbfElZ9E6cR6zbHjgsr9ly18fXjZoPeDjPuzlWbFwS
pdvPkhntFvkc13qb9094LL5NrA3NIq3r9eNnop9DizWOqCEbyRBFJTHn6Tt3CG1o8a4HevYh0XiJ
sR0AVVHuGuMOIfbuQ/OKBkGRC6NJ4u7sbPX8bG/n5sNIOQ6/Y/BX3IwRlTSabtZpYLB85lYtkkgm
p1qXK3Du2mnr5INXmT/78KI12n11EFBkJHHp0wJyLe9MvPNUGYsf+170maayRoy2lURGHAIapSpQ
krEDuNoJCHNlZYhKpvw4mspVWxqo415n8cD62N9+EfHrAvqQnINStetek7RY2Urv8nxsnGaZfRr/
nhXbJ6m/yl1LzYqscDZA9QHLNbdaSTTr+kFg3bC0iYbX/eQy0Bv3h4B50/SGYzKAXkCeOLI3bcAt
mj2Z/FM1vQWgDynsRwNvrWnJHlespkrp8+vO1jNaibm+PhqXPPv30YwDZ6jApe3wUjFQobghvW9p
7f2zLkGNv8b191cD/3vs9Q833z8t'''.decode('base64').decode('zlib').splitlines()])
def easteregged(environ, start_response):
def injecting_start_response(status, headers, exc_info=None):
headers.append(('X-Powered-By', 'Werkzeug'))
return start_response(status, headers, exc_info)
if environ.get('QUERY_STRING') != 'macgybarchakku':
return app(environ, injecting_start_response)
injecting_start_response('200 OK', [('Content-Type', 'text/html')])
return ['''
<!DOCTYPE html>
<html>
<head>
<title>About Werkzeug</title>
<style type="text/css">
body { font: 15px Georgia, serif; text-align: center; }
a { color: #333; text-decoration: none; }
h1 { font-size: 30px; margin: 20px 0 10px 0; }
p { margin: 0 0 30px 0; }
pre { font: 11px 'Consolas', 'Monaco', monospace; line-height: 0.95; }
</style>
</head>
<body>
<h1><a href="http://werkzeug.pocoo.org/">Werkzeug</a></h1>
<p>the Swiss Army knife of Python web development.</p>
<pre>%s\n\n\n</pre>
</body>
</html>''' % gyver]
return easteregged
| Chitrank-Dixit/werkzeug | werkzeug/_internal.py | Python | bsd-3-clause | 14,214 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import re
import copy
from collections import OrderedDict
from . import core
from astropy.table import Table
from . import cparser
from astropy.utils.misc import _set_locale
class FastBasic(metaclass=core.MetaBaseReader):
"""
This class is intended to handle the same format addressed by the
ordinary :class:`Basic` writer, but it acts as a wrapper for underlying C
code and is therefore much faster. Unlike the other ASCII readers and
writers, this class is not very extensible and is restricted
by optimization requirements.
"""
_format_name = 'fast_basic'
_description = 'Basic table with custom delimiter using the fast C engine'
_fast = True
fill_extra_cols = False
guessing = False
strict_names = False
def __init__(self, default_kwargs={}, **user_kwargs):
# Make sure user does not set header_start to None for a reader
# that expects a non-None value (i.e. a number >= 0). This mimics
# what happens in the Basic reader.
if (default_kwargs.get('header_start', 0) is not None
and user_kwargs.get('header_start', 0) is None):
raise ValueError('header_start cannot be set to None for this Reader')
# Set up kwargs and copy any user kwargs. Use deepcopy user kwargs
# since they may contain a dict item which would end up as a ref to the
# original and get munged later (e.g. in cparser.pyx validation of
# fast_reader dict).
kwargs = copy.deepcopy(default_kwargs)
kwargs.update(copy.deepcopy(user_kwargs))
delimiter = kwargs.pop('delimiter', ' ')
self.delimiter = str(delimiter) if delimiter is not None else None
self.write_comment = kwargs.get('comment', '# ')
self.comment = kwargs.pop('comment', '#')
if self.comment is not None:
self.comment = str(self.comment)
self.quotechar = str(kwargs.pop('quotechar', '"'))
self.header_start = kwargs.pop('header_start', 0)
# If data_start is not specified, start reading
# data right after the header line
data_start_default = user_kwargs.get('data_start', self.header_start
+ 1 if self.header_start is not None else 1)
self.data_start = kwargs.pop('data_start', data_start_default)
self.kwargs = kwargs
self.strip_whitespace_lines = True
self.strip_whitespace_fields = True
def _read_header(self):
# Use the tokenizer by default -- this method
# can be overridden for specialized headers
self.engine.read_header()
def read(self, table):
"""
Read input data (file-like object, filename, list of strings, or
single string) into a Table and return the result.
"""
if self.comment is not None and len(self.comment) != 1:
raise core.ParameterError("The C reader does not support a comment regex")
elif self.data_start is None:
raise core.ParameterError("The C reader does not allow data_start to be None")
elif self.header_start is not None and self.header_start < 0 and \
not isinstance(self, FastCommentedHeader):
raise core.ParameterError("The C reader does not allow header_start to be "
"negative except for commented-header files")
elif self.data_start < 0:
raise core.ParameterError("The C reader does not allow data_start to be negative")
elif len(self.delimiter) != 1:
raise core.ParameterError("The C reader only supports 1-char delimiters")
elif len(self.quotechar) != 1:
raise core.ParameterError("The C reader only supports a length-1 quote character")
elif 'converters' in self.kwargs:
raise core.ParameterError("The C reader does not support passing "
"specialized converters")
elif 'encoding' in self.kwargs:
raise core.ParameterError("The C reader does not use the encoding parameter")
elif 'Outputter' in self.kwargs:
raise core.ParameterError("The C reader does not use the Outputter parameter")
elif 'Inputter' in self.kwargs:
raise core.ParameterError("The C reader does not use the Inputter parameter")
elif 'data_Splitter' in self.kwargs or 'header_Splitter' in self.kwargs:
raise core.ParameterError("The C reader does not use a Splitter class")
self.strict_names = self.kwargs.pop('strict_names', False)
# Process fast_reader kwarg, which may or may not exist (though ui.py will always
# pass this as a dict with at least 'enable' set).
fast_reader = self.kwargs.get('fast_reader', True)
if not isinstance(fast_reader, dict):
fast_reader = {}
fast_reader.pop('enable', None)
self.return_header_chars = fast_reader.pop('return_header_chars', False)
# Put fast_reader dict back into kwargs.
self.kwargs['fast_reader'] = fast_reader
self.engine = cparser.CParser(table, self.strip_whitespace_lines,
self.strip_whitespace_fields,
delimiter=self.delimiter,
header_start=self.header_start,
comment=self.comment,
quotechar=self.quotechar,
data_start=self.data_start,
fill_extra_cols=self.fill_extra_cols,
**self.kwargs)
conversion_info = self._read_header()
self.check_header()
if conversion_info is not None:
try_int, try_float, try_string = conversion_info
else:
try_int = {}
try_float = {}
try_string = {}
with _set_locale('C'):
data, comments = self.engine.read(try_int, try_float, try_string)
out = self.make_table(data, comments)
if self.return_header_chars:
out.meta['__ascii_fast_reader_header_chars__'] = self.engine.header_chars
return out
def make_table(self, data, comments):
"""Actually make the output table give the data and comments."""
meta = OrderedDict()
if comments:
meta['comments'] = comments
names = core._deduplicate_names(self.engine.get_names())
return Table(data, names=names, meta=meta)
def check_header(self):
names = self.engine.get_header_names() or self.engine.get_names()
if self.strict_names:
# Impose strict requirements on column names (normally used in guessing)
bads = [" ", ",", "|", "\t", "'", '"']
for name in names:
if (core._is_number(name)
or len(name) == 0
or name[0] in bads
or name[-1] in bads):
raise ValueError('Column name {!r} does not meet strict name requirements'
.format(name))
# When guessing require at least two columns
if self.guessing and len(names) <= 1:
raise ValueError('Table format guessing requires at least two columns, got {}'
.format(names))
def write(self, table, output):
"""
Use a fast Cython method to write table data to output,
where output is a filename or file-like object.
"""
self._write(table, output, {})
def _write(self, table, output, default_kwargs,
header_output=True, output_types=False):
# Fast writer supports only 1-d columns
core._check_multidim_table(table, max_ndim=1)
write_kwargs = {'delimiter': self.delimiter,
'quotechar': self.quotechar,
'strip_whitespace': self.strip_whitespace_fields,
'comment': self.write_comment
}
write_kwargs.update(default_kwargs)
# user kwargs take precedence over default kwargs
write_kwargs.update(self.kwargs)
writer = cparser.FastWriter(table, **write_kwargs)
writer.write(output, header_output, output_types)
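# Editor's usage sketch (not part of the original astropy module): the fast
# readers are normally reached through the high-level ``ascii.read`` interface
# rather than instantiated directly.  This uncalled helper shows the assumed
# route for a plain space-delimited table.
def _fast_basic_usage_sketch():
    from astropy.io import ascii
    lines = ['a b', '1 2', '3 4']
    # format='fast_basic' dispatches to FastBasic above; guess=False skips the
    # slower format-guessing machinery.
    return ascii.read(lines, format='fast_basic', guess=False)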
class FastCsv(FastBasic):
"""
A faster version of the ordinary :class:`Csv` writer that uses the
optimized C parsing engine. Note that this reader will append empty
field values to the end of any row with not enough columns, while
:class:`FastBasic` simply raises an error.
"""
_format_name = 'fast_csv'
_description = 'Comma-separated values table using the fast C engine'
_fast = True
fill_extra_cols = True
def __init__(self, **kwargs):
super().__init__({'delimiter': ',', 'comment': None}, **kwargs)
def write(self, table, output):
"""
Override the default write method of `FastBasic` to
output masked values as empty fields.
"""
self._write(table, output, {'fill_values': [(core.masked, '')]})
class FastTab(FastBasic):
"""
A faster version of the ordinary :class:`Tab` reader that uses
the optimized C parsing engine.
"""
_format_name = 'fast_tab'
_description = 'Tab-separated values table using the fast C engine'
_fast = True
def __init__(self, **kwargs):
super().__init__({'delimiter': '\t'}, **kwargs)
self.strip_whitespace_lines = False
self.strip_whitespace_fields = False
class FastNoHeader(FastBasic):
"""
This class uses the fast C engine to read tables with no header line. If
the names parameter is unspecified, the columns will be autonamed with
"col{}".
"""
_format_name = 'fast_no_header'
_description = 'Basic table with no headers using the fast C engine'
_fast = True
def __init__(self, **kwargs):
super().__init__({'header_start': None, 'data_start': 0}, **kwargs)
def write(self, table, output):
"""
Override the default writing behavior in `FastBasic` so
that columns names are not included in output.
"""
self._write(table, output, {}, header_output=None)
class FastCommentedHeader(FastBasic):
"""
A faster version of the :class:`CommentedHeader` reader, which looks for
column names in a commented line. ``header_start`` denotes the index of
the header line among all commented lines and is 0 by default.
"""
_format_name = 'fast_commented_header'
    _description = 'Column names in a commented line using the fast C engine'
_fast = True
def __init__(self, **kwargs):
super().__init__({}, **kwargs)
# Mimic CommentedHeader's behavior in which data_start
# is relative to header_start if unspecified; see #2692
if 'data_start' not in kwargs:
self.data_start = 0
def make_table(self, data, comments):
"""
        Actually make the output table given the data and comments. This is
slightly different from the base FastBasic method in the way comments
are handled.
"""
meta = OrderedDict()
if comments:
idx = self.header_start
if idx < 0:
idx = len(comments) + idx
meta['comments'] = comments[:idx] + comments[idx+1:] # noqa
if not meta['comments']:
del meta['comments']
names = core._deduplicate_names(self.engine.get_names())
return Table(data, names=names, meta=meta)
def _read_header(self):
tmp = self.engine.source
commented_lines = []
for line in tmp.splitlines():
line = line.lstrip()
if line and line[0] == self.comment: # line begins with a comment
commented_lines.append(line[1:])
if len(commented_lines) == self.header_start + 1:
break
if len(commented_lines) <= self.header_start:
raise cparser.CParserError('not enough commented lines')
self.engine.setup_tokenizer([commented_lines[self.header_start]])
self.engine.header_start = 0
self.engine.read_header()
self.engine.setup_tokenizer(tmp)
def write(self, table, output):
"""
Override the default writing behavior in `FastBasic` so
that column names are commented.
"""
self._write(table, output, {}, header_output='comment')
class FastRdb(FastBasic):
"""
A faster version of the :class:`Rdb` reader. This format is similar to
tab-delimited, but it also contains a header line after the column
name line denoting the type of each column (N for numeric, S for string).
"""
_format_name = 'fast_rdb'
_description = 'Tab-separated with a type definition header line'
_fast = True
def __init__(self, **kwargs):
super().__init__({'delimiter': '\t', 'data_start': 2}, **kwargs)
self.strip_whitespace_lines = False
self.strip_whitespace_fields = False
def _read_header(self):
tmp = self.engine.source
line1 = ''
line2 = ''
for line in tmp.splitlines():
# valid non-comment line
if not line1 and line.strip() and line.lstrip()[0] != self.comment:
line1 = line
elif not line2 and line.strip() and line.lstrip()[0] != self.comment:
line2 = line
break
else: # less than 2 lines in table
raise ValueError('RDB header requires 2 lines')
# Tokenize the two header lines separately.
# Each call to self.engine.read_header by default
# - calls _deduplicate_names to ensure unique header_names
# - sets self.names from self.header_names if not provided as kwarg
# - applies self.include_names/exclude_names to self.names.
# For parsing the types disable 1+3, but self.names needs to be set.
self.engine.setup_tokenizer([line2])
self.engine.header_start = 0
self.engine.read_header(deduplicate=False, filter_names=False)
types = self.engine.get_header_names()
# If no kwarg names have been passed, reset to have column names read from header line 1.
if types == self.engine.get_names():
self.engine.set_names([])
self.engine.setup_tokenizer([line1])
# Get full list of column names prior to applying include/exclude_names,
# which have to be applied to the unique name set after deduplicate.
self.engine.read_header(deduplicate=True, filter_names=False)
col_names = self.engine.get_names()
self.engine.read_header(deduplicate=False)
if len(col_names) != len(types):
raise core.InconsistentTableError('RDB header mismatch between number of '
'column names and column types')
# If columns have been removed via include/exclude_names, extract matching types.
if len(self.engine.get_names()) != len(types):
types = [types[col_names.index(n)] for n in self.engine.get_names()]
if any(not re.match(r'\d*(N|S)$', x, re.IGNORECASE) for x in types):
raise core.InconsistentTableError('RDB type definitions do not all match '
'[num](N|S): {}'.format(types))
try_int = {}
try_float = {}
try_string = {}
for name, col_type in zip(self.engine.get_names(), types):
if col_type[-1].lower() == 's':
try_int[name] = 0
try_float[name] = 0
try_string[name] = 1
else:
try_int[name] = 1
try_float[name] = 1
try_string[name] = 0
self.engine.setup_tokenizer(tmp)
return (try_int, try_float, try_string)
def write(self, table, output):
"""
Override the default writing behavior in `FastBasic` to
output a line with column types after the column name line.
"""
self._write(table, output, {}, output_types=True)
| pllim/astropy | astropy/io/ascii/fastbasic.py | Python | bsd-3-clause | 16,348 |
# -*- coding: utf-8 -*-
from module.plugins.internal.DeadHoster import DeadHoster, create_getInfo
class PrzeklejPl(DeadHoster):
__name__ = "PrzeklejPl"
__type__ = "hoster"
__version__ = "0.13"
__status__ = "stable"
__pattern__ = r'http://(?:www\.)?przeklej\.pl/plik/.+'
__config__ = [] #@TODO: Remove in 0.4.10
__description__ = """Przeklej.pl hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("zoidberg", "[email protected]")]
getInfo = create_getInfo(PrzeklejPl)
| fzimmermann89/pyload | module/plugins/hoster/PrzeklejPl.py | Python | gpl-3.0 | 532 |
#
# jython examples for jas.
# $Id$
#
## \begin{PossoExample}
## \Name{Hawes2}
## \Parameters{a;b;c}
## \Variables{x;y[2];z[2]}
## \begin{Equations}
## x+2y_1z_1+3ay_1^2+5y_1^4+2cy_1 \&
## x+2y_2z_2+3ay_2^2+5y_2^4+2cy_2 \&
## 2 z_2+6ay_2+20 y_2^3+2c \&
## 3 z_1^2+y_1^2+b \&
## 3z_2^2+y_2^2+b \&
## \end{Equations}
## \end{PossoExample}
import sys;
from jas import Ring, PolyRing, QQ, ZZ, RF
from jas import startLog, terminate
# Hawes & Gibson example 2
# integral function coefficients, lexi
#r = Ring( "IntFunc(a, c, b) (y2, y1, z1, z2, x) L" );
#r = PolyRing( RF(PolyRing(QQ(),"a, c, b",PolyRing.lex)), "y2, y1, z1, z2, x", PolyRing.lex );
r = PolyRing( RF(PolyRing(ZZ(),"a, c, b",PolyRing.lex)), "y2, y1, z1, z2, x", PolyRing.lex );
print "Ring: " + str(r);
print;
ps = """
(
( x + 2 y1 z1 + { 3 a } y1^2 + 5 y1^4 + { 2 c } y1 ),
( x + 2 y2 z2 + { 3 a } y2^2 + 5 y2^4 + { 2 c } y2 ),
( 2 z2 + { 6 a } y2 + 20 y2^3 + { 2 c } ),
( 3 z1^2 + y1^2 + { b } ),
( 3 z2^2 + y2^2 + { b } )
)
""";
p1 = x + 2 * y1 * z1 + 3 * a * y1**2 + 5 * y1**4 + 2 * c * y1;
p2 = x + 2 * y2 * z2 + 3 * a * y2**2 + 5 * y2**4 + 2 * c * y2;
p3 = 2 * z2 + 6 * a * y2 + 20 * y2**3 + 2 * c;
p4 = 3 * z1**2 + y1**2 + b;
p5 = 3 * z2**2 + y2**2 + b;
F = [p1,p2,p3,p4,p5];
f = r.ideal( list=F );
#f = r.ideal( ps );
print "Ideal: " + str(f);
print;
#startLog();
rgl = f.GB();
print "GB:", rgl;
print;
bg = rgl.isGB();
print "isGB:", bg;
print;
startLog();
terminate();
#sys.exit();
| breandan/java-algebra-system | examples/hawes2_int.py | Python | gpl-2.0 | 1,478 |
import json
from google.protobuf.descriptor import FieldDescriptor as FD
class ParseError(Exception): pass
def json2pb(pb, js):
    ''' fill a google.protobuf message instance from a parsed JSON dict '''
for field in pb.DESCRIPTOR.fields:
if field.name not in js:
continue
if field.type == FD.TYPE_MESSAGE:
pass
elif field.type in _js2ftype:
ftype = _js2ftype[field.type]
else:
raise ParseError("Field %s.%s of type '%d' is not supported" % (pb.__class__.__name__, field.name, field.type, ))
value = js[field.name]
if field.label == FD.LABEL_REPEATED:
pb_value = getattr(pb, field.name, None)
for v in value:
if field.type == FD.TYPE_MESSAGE:
json2pb(pb_value.add(), v)
else:
pb_value.append(ftype(v))
else:
if field.type == FD.TYPE_MESSAGE:
json2pb(getattr(pb, field.name, None), value)
else:
setattr(pb, field.name, ftype(value))
return pb
def pb2json(pb):
    ''' convert a google.protobuf message instance to a JSON-serializable dict '''
js = {}
# fields = pb.DESCRIPTOR.fields #all fields
fields = pb.ListFields() #only filled (including extensions)
for field,value in fields:
if field.type == FD.TYPE_MESSAGE:
ftype = pb2json
elif field.type in _ftype2js:
ftype = _ftype2js[field.type]
else:
raise ParseError("Field %s.%s of type '%d' is not supported" % (pb.__class__.__name__, field.name, field.type, ))
if field.label == FD.LABEL_REPEATED:
js_value = []
for v in value:
js_value.append(ftype(v))
else:
js_value = ftype(value)
js[field.name] = js_value
return js
_ftype2js = {
FD.TYPE_DOUBLE: float,
FD.TYPE_FLOAT: float,
FD.TYPE_INT64: long,
FD.TYPE_UINT64: long,
FD.TYPE_INT32: int,
FD.TYPE_FIXED64: float,
FD.TYPE_FIXED32: float,
FD.TYPE_BOOL: bool,
FD.TYPE_STRING: unicode,
#FD.TYPE_MESSAGE: pb2json, #handled specially
FD.TYPE_BYTES: lambda x: x.encode('string_escape'),
FD.TYPE_UINT32: int,
FD.TYPE_ENUM: int,
FD.TYPE_SFIXED32: float,
FD.TYPE_SFIXED64: float,
FD.TYPE_SINT32: int,
FD.TYPE_SINT64: long,
}
_js2ftype = {
FD.TYPE_DOUBLE: float,
FD.TYPE_FLOAT: float,
FD.TYPE_INT64: long,
FD.TYPE_UINT64: long,
FD.TYPE_INT32: int,
FD.TYPE_FIXED64: float,
FD.TYPE_FIXED32: float,
FD.TYPE_BOOL: bool,
FD.TYPE_STRING: unicode,
# FD.TYPE_MESSAGE: json2pb, #handled specially
FD.TYPE_BYTES: lambda x: x.decode('string_escape'),
FD.TYPE_UINT32: int,
FD.TYPE_ENUM: int,
FD.TYPE_SFIXED32: float,
FD.TYPE_SFIXED64: float,
FD.TYPE_SINT32: int,
FD.TYPE_SINT64: long,
}
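# Editor's usage sketch (not part of the original module).  ``msg`` stands for
# any generated protobuf message instance; the helper below is illustrative
# only and is never called by this module.
def _json_roundtrip_demo(msg, json_text):
    ''' fill msg from a JSON string, then serialize it back to a JSON string '''
    filled = json2pb(msg, json.loads(json_text))
    return json.dumps(pb2json(filled))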
| cookingkode/gorpc | test/protobuf_json.py | Python | apache-2.0 | 2,497 |