# Code-sample dump: each section below is one source file from a public
# repository, introduced by a banner giving its repo_name | path | license.
# -----------------------------------------------------------------------------
# Yukarumya/Yukarum-Redfoxes | testing/marionette/harness/marionette_harness/tests/unit/test_screen_orientation.py | license: mpl-2.0
# -----------------------------------------------------------------------------
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from marionette_driver import errors
from mozrunner.devices.emulator_screen import EmulatorScreen
from marionette_harness import MarionetteTestCase, skip_if_desktop, skip_if_mobile
default_orientation = "portrait-primary"
unknown_orientation = "Unknown screen orientation: {}"
class TestScreenOrientation(MarionetteTestCase):
def setUp(self):
MarionetteTestCase.setUp(self)
self.is_mobile = self.marionette.session_capabilities.get("rotatable", False)
def tearDown(self):
if self.is_mobile:
self.marionette.set_orientation(default_orientation)
self.assertEqual(self.marionette.orientation, default_orientation, "invalid state")
MarionetteTestCase.tearDown(self)
@skip_if_desktop("Not supported in Firefox")
def test_set_orientation_to_portrait_primary(self):
self.marionette.set_orientation("portrait-primary")
new_orientation = self.marionette.orientation
self.assertEqual(new_orientation, "portrait-primary")
@skip_if_desktop("Not supported in Firefox")
def test_set_orientation_to_landscape_primary(self):
self.marionette.set_orientation("landscape-primary")
new_orientation = self.marionette.orientation
self.assertEqual(new_orientation, "landscape-primary")
@skip_if_desktop("Not supported in Firefox")
def test_set_orientation_to_portrait_secondary(self):
self.marionette.set_orientation("portrait-secondary")
new_orientation = self.marionette.orientation
self.assertEqual(new_orientation, "portrait-secondary")
@skip_if_desktop("Not supported in Firefox")
def test_set_orientation_to_landscape_secondary(self):
self.marionette.set_orientation("landscape-secondary")
new_orientation = self.marionette.orientation
self.assertEqual(new_orientation, "landscape-secondary")
@skip_if_desktop("Not supported in Firefox")
def test_set_orientation_to_shorthand_portrait(self):
# Set orientation to something other than portrait-primary first, since the default is
# portrait-primary.
self.marionette.set_orientation("landscape-primary")
self.assertEqual(self.marionette.orientation, "landscape-primary", "invalid state")
self.marionette.set_orientation("portrait")
new_orientation = self.marionette.orientation
self.assertEqual(new_orientation, "portrait-primary")
@skip_if_desktop("Not supported in Firefox")
def test_set_orientation_to_shorthand_landscape(self):
self.marionette.set_orientation("landscape")
new_orientation = self.marionette.orientation
self.assertEqual(new_orientation, "landscape-primary")
@skip_if_desktop("Not supported in Firefox")
def test_set_orientation_with_mixed_casing(self):
self.marionette.set_orientation("lAnDsCaPe")
new_orientation = self.marionette.orientation
self.assertEqual(new_orientation, "landscape-primary")
@skip_if_desktop("Not supported in Firefox")
def test_set_invalid_orientation(self):
with self.assertRaisesRegexp(errors.MarionetteException, unknown_orientation.format("cheese")):
self.marionette.set_orientation("cheese")
@skip_if_desktop("Not supported in Firefox")
def test_set_null_orientation(self):
with self.assertRaisesRegexp(errors.MarionetteException, unknown_orientation.format("null")):
self.marionette.set_orientation(None)
@skip_if_mobile("Specific test for Firefox")
def test_unsupported_operation_on_desktop(self):
with self.assertRaises(errors.UnsupportedOperationException):
self.marionette.set_orientation("landscape-primary")
# -----------------------------------------------------------------------------
# crodjer/atg | tests/unit/test_activities.py | license: gpl-3.0
# -----------------------------------------------------------------------------
# atg: a small timezone utility
# Copyright (C) 2015 Rohan Jain
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from atg.activities import Activities
from datetime import time
A = Activities
class ActivitiesTest(unittest.TestCase):
EXPECTED_ACTIVITIES = (
(0, A.sleep),
(1, A.sleep),
(2, A.sleep),
(3, A.sleep),
(4, A.sleep),
(5, A.sleep),
(6, A.sleep),
(7, A.sleep),
(8, A.available),
(9, A.available),
(10, A.work),
(11, A.work),
(12, A.work),
(13, A.work),
(14, A.work),
(15, A.work),
(16, A.work),
(17, A.work),
(18, A.available),
(19, A.available),
(20, A.available),
(21, A.available),
(22, A.available),
(23, A.sleep)
)
def test_is_current(self):
'''
Current hour check is working
'''
activities = set(Activities)
for hour, activity in self.EXPECTED_ACTIVITIES:
t = time(hour=hour)
self.assertTrue(
activity.value.is_current(t),
'{} should be {} time'.format(t, str(activity.name))
)
for o_activity in activities - {activity}:
self.assertFalse(
o_activity.value.is_current(t),
'{} should not be {} time'.format(t, str(o_activity.name))
)
def test_get_activity_at_time(self):
'''
Check if correct activity is guessed given a time.
'''
self.assertEqual(Activities.at(time(hour=1)), A.sleep.value)
self.assertEqual(Activities.at(time(hour=10)), A.work.value)
self.assertEqual(Activities.at(time(hour=20)), A.available.value)
def test_status(self):
'''
Activities have correct statuses
'''
self.assertEqual(Activities.work.value.status, 'working')
self.assertEqual(Activities.sleep.value.status, 'sleeping')
self.assertEqual(Activities.available.value.status, 'available')
# -----------------------------------------------------------------------------
# YannThorimbert/ThorPy-1.4.1 | thorpy/elements/browser.py | license: mit
# -----------------------------------------------------------------------------
# -*- coding: utf-8 -*-
import os
import pygame
from thorpy.elements.browserlight import BrowserLight
from thorpy.elements._explorerutils._pathelement import PathElement
from thorpy.elements.element import Element
from thorpy.elements.inserter import Inserter
from thorpy.elements.ddlf import DropDownListFast
from thorpy.elements.text import OneLineText
from thorpy.miscgui.storage import Storer
from thorpy.miscgui.reaction import Reaction
from thorpy.miscgui import constants, functions, parameters, style, painterstyle
class Browser(BrowserLight):
"""Either use it dynamically (create a menu based on an explorer that is
created "on fly", or statically adding an explorer to the main element"""
def __init__(self,
path="./",
ddl_size=None,
normal_params=None,
folders=True,
files=True,
file_types=None,
text=""):
ddl_size = style.BROWSERLIGHT_DDL_SIZE if ddl_size is None else ddl_size
super(BrowserLight, self).__init__(normal_params=normal_params)
self.path = path
self._ddl_size = ddl_size
self.file_types = file_types
self.folders = folders
self.files = files
# DropDownListFast
actual_folders, actual_files = self._get_folders_and_files()
actual_files = self._filter_files(actual_files)
if not folders:
actual_folders = None
if not files:
actual_files = []
self._ddlf = DropDownListFast(self._ddl_size, titles=actual_files,
folders=actual_folders, has_lift=True)
## self._ddlf.finish()
# selection button
inserter_width = 3*ddl_size[0]//4
## if inserter_width > style.MAX_INSERTER_WIDTH:
## inserter_width = style.MAX_INSERTER_WIDTH
self._selected = Inserter("Selected : ", size=(inserter_width, None))
self._selected.finish()
if isinstance(text, str):
self.text_element = OneLineText(text)
self.text_element.finish()
else:
self.text_element = text
self._path_element = PathElement(self, True)
self._path_element.finish()
self.add_elements([self.text_element, self._path_element, self._ddlf,
self._selected])
reac_pressed = Reaction(parameters.BUTTON_UNPRESS_EVENT,
self._reaction_press,
reac_name=constants.REAC_PRESSED)
## self._ddlf._force_lift = True
self._ddlf.finish()
self.add_reaction(reac_pressed)
self._clicked = None
self._something_selected = False
painter = functions.obtain_valid_painter(painterstyle.BOX_PAINTER,
pressed=True,
radius=style.BOX_RADIUS)
self.set_painter(painter)
self._last_click = -2 * parameters.DOUBLE_CLICK_DELAY
def finish(self):
self._path_element._set_path_elements()
Element.finish(self)
self.store()
centerx = self.get_fus_rect().centerx
self.text_element.set_center((centerx, None))
ycoord = self._path_element._elements[0].get_storer_rect().centery
self._path_element._set_path_elements(ycoord)
self.set_prison()
## def store(self):
#### r = self.get_family_rect()
#### self.set_size((r.width, r.height))
## storer = Storer(self, margins=style.BROWSERLIGHT_STORE_MARGINS,
## gaps=style.BROWSERLIGHT_STORE_GAPS)
## storer.autoset_framesize()
## def _refresh_ddlf_lift(self):
## if self._ddlf._lift:
## functions.remove_element(self._ddlf._lift)
## if self._ddlf.get_family_rect().height > self._ddlf.get_fus_rect().height:
## self._ddlf.add_lift()
## functions.refresh_current_menu()
def _refresh_ddlf(self):
self.path = self._path_element._path
actual_folders, actual_files = self._get_folders_and_files()
actual_files = self._filter_files(actual_files)
if not self.folders:
actual_folders = None
if not self.files:
actual_files = []
self._ddlf._dv = self._ddlf._get_dirviewer(titles=actual_files,
size=self._ddl_size,
folders=actual_folders)
self._refresh_ddlf_lift()
def _go_to_dir(self, selected):
self._path_element._path = selected
self._path_element._path_list = self._path_element._get_strs()
ycoord = self._path_element._elements[0].get_storer_rect().centery
self._path_element._set_path_elements(ycoord)
functions.refresh_current_menu()
self._refresh_ddlf()
self.unblit()
self.blit()
self.update()
def _reaction_press(self, event):
hit_lift = False
if self._ddlf._lift:
if self._ddlf._lift.get_fus_rect().collidepoint(event.pos):
hit_lift = True
if not hit_lift:
BrowserLight._reaction_press(self, event)
selected = self.get_value()
tick = pygame.time.get_ticks()
if os.path.isdir(selected):
if tick - self._last_click < parameters.DOUBLE_CLICK_DELAY:
self._go_to_dir(selected)
                self._last_click = tick
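
# A minimal usage sketch for the Browser element above (hypothetical values; it
# assumes a ThorPy application and menu are already running, which this file
# does not show):
#
#     browser = Browser(path="./", file_types=[".png", ".jpg"], text="Pick a file")
#     browser.finish()
#     # browser.get_value() returns the currently selected path once the user
#     # clicks an entry in the list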
# -----------------------------------------------------------------------------
# avinet/adaptive3-qgis-plugin | dlgNewProjectBase.py | license: mit
# -----------------------------------------------------------------------------
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'dlgNewProject.ui'
#
# Created: Fri Apr 01 12:27:36 2016
# by: PyQt4 UI code generator 4.10.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_NewProjectDialogBase(object):
def setupUi(self, NewProjectDialogBase):
NewProjectDialogBase.setObjectName(_fromUtf8("NewProjectDialogBase"))
NewProjectDialogBase.resize(228, 86)
self.buttonBox = QtGui.QDialogButtonBox(NewProjectDialogBase)
self.buttonBox.setGeometry(QtCore.QRect(20, 50, 201, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.splitter = QtGui.QSplitter(NewProjectDialogBase)
self.splitter.setGeometry(QtCore.QRect(20, 10, 196, 20))
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName(_fromUtf8("splitter"))
self.labelProjectName = QtGui.QLabel(self.splitter)
self.labelProjectName.setEnabled(True)
self.labelProjectName.setLayoutDirection(QtCore.Qt.LeftToRight)
self.labelProjectName.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.labelProjectName.setObjectName(_fromUtf8("labelProjectName"))
self.lineProjectName = QtGui.QLineEdit(self.splitter)
self.lineProjectName.setObjectName(_fromUtf8("lineProjectName"))
self.retranslateUi(NewProjectDialogBase)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), NewProjectDialogBase.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), NewProjectDialogBase.reject)
QtCore.QMetaObject.connectSlotsByName(NewProjectDialogBase)
def retranslateUi(self, NewProjectDialogBase):
NewProjectDialogBase.setWindowTitle(_translate("NewProjectDialogBase", "New project", None))
self.labelProjectName.setText(_translate("NewProjectDialogBase", "Project name", None))
# -----------------------------------------------------------------------------
# TAMU-CPT/galaxy-tools | tools/webapollo/create_or_update_organism.py | license: gpl-3.0
# -----------------------------------------------------------------------------
#!/usr/bin/env python
import sys
import json
import argparse
import time
from webapollo import WAAuth, WebApolloInstance, OrgOrGuess, GuessOrg, AssertUser
import logging
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Create or update an organism in an Apollo instance"
)
WAAuth(parser)
parser.add_argument("jbrowse", help="JBrowse Data Directory")
parser.add_argument("email", help="User Email")
OrgOrGuess(parser)
parser.add_argument("--genus", help="Organism Genus")
parser.add_argument("--species", help="Organism Species")
parser.add_argument("--public", action="store_true", help="Make organism public")
parser.add_argument("--group", help="Give access to a user group")
args = parser.parse_args()
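    # Hypothetical invocation (a sketch only: the connection arguments come from
    # WAAuth and the organism-name arguments from OrgOrGuess, so the exact
    # positional/flag names are defined by those helpers and are not shown here):
    #
    #   python create_or_update_organism.py <apollo_url> <admin_user> <admin_pass> \
    #       /path/to/jbrowse/data owner@example.org \
    #       --genus Escherichia --species coli --public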
wa = WebApolloInstance(args.apollo, args.username, args.password)
org_cn = GuessOrg(args, wa)
if isinstance(org_cn, list):
org_cn = org_cn[0]
# User must have an account
gx_user = AssertUser(wa.users.loadUsers(email=args.email))
log.info("Determining if add or update required")
try:
org = wa.organisms.findOrganismByCn(org_cn)
except Exception:
org = None
if org:
has_perms = False
for user_owned_organism in gx_user.organismPermissions:
if "WRITE" in user_owned_organism["permissions"]:
has_perms = True
break
if not has_perms:
print(
"Naming Conflict. You do not have permissions to access this organism. Either request permission from the owner, or choose a different name for your organism."
)
sys.exit(2)
log.info("\tUpdating Organism")
data = wa.organisms.updateOrganismInfo(
org["id"],
org_cn,
args.jbrowse,
# mandatory
genus=args.genus,
species=args.species,
public=args.public,
)
time.sleep(20)
data = [wa.organisms.findOrganismById(org["id"])]
else:
# New organism
log.info("Adding Organism")
try:
data = wa.organisms.addOrganism(
org_cn,
args.jbrowse,
genus=args.genus,
species=args.species,
public=args.public,
)
except Exception as errorOut:
log.info("Exception on Organism Common Name '" + org_cn + "':")
log.info(errorOut)
if str(errorOut)[-3:] == "504":
log.info("\nThe Galaxy server timed out while waiting for Apollo to finish. Your organism was most likely created, but will need to be manually assigned to your account by an administrator. Please submit a bug report for this job and we will get back to you shortly.\n")
exit(2)
# Must sleep before we're ready to handle
time.sleep(20)
log.info("Updating permissions for %s on %s", gx_user, org_cn)
wa.users.updateOrganismPermission(
gx_user, org_cn, write=True, export=True, read=True
)
# Group access
if args.group:
group = wa.groups.loadGroupByName(name=args.group)
res = wa.groups.updateOrganismPermission(
group, org_cn, administrate=False, write=True, read=True, export=True
)
data = [o for o in data if o["commonName"] == org_cn]
print(json.dumps(data, indent=2))
# -----------------------------------------------------------------------------
# team-hdnet/hdnet | tests/test_spikes.py | license: gpl-3.0
# -----------------------------------------------------------------------------
# -*- coding: utf-8 -*-
# This file is part of the hdnet package
# Copyright 2014 the authors, see file AUTHORS.
# Licensed under the GPLv3, see file LICENSE for details
import os
import numpy as np
from hdnet.spikes import Spikes
from hdnet.util import hdlog
from test_tmppath import TestTmpPath
class TestSpikes(TestTmpPath):
def setUp(self):
super(TestSpikes, self).setUp()
import logging
logging.disable(level=logging.WARNING)
def tearDown(self):
super(TestSpikes, self).tearDown()
def test_basic(self):
file_contents = np.load(os.path.join(os.path.dirname(__file__), 'test_data/tiny_spikes.npz'))
spikes = Spikes(file_contents[file_contents.keys()[0]])
self.assertEqual(spikes._spikes.sum(), 10)
self.assertEqual(spikes.rasterize(stop=5).sum(), 8)
spikes.rasterize(save_png_name=os.path.join(self.TMP_PATH, 'spikes'))
self.assertTrue(os.path.exists(os.path.join(self.TMP_PATH, 'spikes.png')))
file_contents = np.load(os.path.join(os.path.dirname(__file__), 'test_data/spikes_trials.npz'))
spikes = Spikes(file_contents[file_contents.keys()[0]])
spikes.rasterize(save_png_name=os.path.join(self.TMP_PATH, 'spikes'))
self.assertTrue(os.path.exists(os.path.join(self.TMP_PATH, 'spikes.png')))
file_contents = np.load(os.path.join(os.path.dirname(__file__), 'test_data/spikes_trials.npz'))
spikes = Spikes(file_contents[file_contents.keys()[0]])
spikes.restrict_to_most_active_neurons(top_neurons=2)
self.assertEqual(spikes._N, 2)
def test_saving(self):
spikes = Spikes(spikes=np.array([[1, 1, 1, 0, 1, 0], [1, 1, 1, 1, 1, 1], [0, 0, 0, 1, 0, 0]]))
hdlog.info(spikes.spikes)
spikes.save(os.path.join(self.TMP_PATH, 'spikes'))
spikes2 = Spikes.load(os.path.join(self.TMP_PATH, 'spikes'))
hdlog.info(spikes2.spikes)
self.assertTrue((spikes.spikes == spikes2.spikes).all())
# end of source
# -----------------------------------------------------------------------------
# Com-Mean/MLinAcition | chapter2/numpyIntro.py | license: gpl-3.0
# -----------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#########################################################################
# File Name: numpyIntro.py
# Author: lpqiu
# mail: [email protected]
# Created Time: 2014-09-06 Saturday 16:33:05
#########################################################################
import numpy as np
def triangleWave(x, c, c0, hc=1.0):
x = x - int(x)
ret = 0
if x >= c:
ret = 0
elif x < c0:
ret = (hc/c0)*x
else:
ret = (hc/(c0 -c))*(x - c)
return ret
def triangleFunc(x, c, c0, hc=1.0):
def trgFun(x):
x = x - int(x)
ret = 0
if x >= c:
ret = 0
elif x < c0:
ret = (hc/c0)*x
else:
ret = (hc/(c0 -c))*(x - c)
return ret
return np.frompyfunc(trgFun, 1, 1)
if __name__=="__main__":
x = np.linspace(0, 2, 1000)
y = np.array([triangleWave(t, 0.6, 0.4, 1.0) for t in x])
    triangleFun = np.frompyfunc(lambda x: triangleWave(x, 0.6, 0.4, 1.0), 1, 1)
y2 = triangleFun(x)
y3 = triangleFunc(0.6, 0.4, 1.0)(x)
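    # Note: np.frompyfunc always returns arrays with dtype=object; for plotting
    # or further numeric work a cast is usually wanted, e.g. (a sketch):
    #     y2 = y2.astype(np.float64)
    #     y3 = y3.astype(np.float64)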
# -----------------------------------------------------------------------------
# Southpaw-TACTIC/Team | src/python/Lib/site-packages/PySide/examples/graphicsview/elasticnodes.py | license: epl-1.0
# -----------------------------------------------------------------------------
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2006-2006 Trolltech ASA. All rights reserved.
##
## This file is part of the example classes of the Qt Toolkit.
##
## Licensees holding a valid Qt License Agreement may use this file in
## accordance with the rights, responsibilities and obligations
## contained therein. Please consult your licensing agreement or
## contact [email protected] if any conditions of this licensing
## agreement are not clear to you.
##
## Further information about Qt licensing is available at:
## http://www.trolltech.com/products/qt/licensing.html or by
## contacting [email protected].
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#############################################################################
import sys
import weakref
import math
from PySide import QtCore, QtGui
class Edge(QtGui.QGraphicsItem):
Pi = math.pi
TwoPi = 2.0 * Pi
Type = QtGui.QGraphicsItem.UserType + 2
def __init__(self, sourceNode, destNode):
QtGui.QGraphicsItem.__init__(self)
self.arrowSize = 10.0
self.sourcePoint = QtCore.QPointF()
self.destPoint = QtCore.QPointF()
self.setAcceptedMouseButtons(QtCore.Qt.NoButton)
self.source = weakref.ref(sourceNode)
self.dest = weakref.ref(destNode)
self.source().addEdge(self)
self.dest().addEdge(self)
self.adjust()
def type(self):
return Edge.Type
def sourceNode(self):
return self.source()
def setSourceNode(self, node):
self.source = weakref.ref(node)
self.adjust()
def destNode(self):
return self.dest()
def setDestNode(self, node):
self.dest = weakref.ref(node)
self.adjust()
def adjust(self):
if not self.source() or not self.dest():
return
line = QtCore.QLineF(self.mapFromItem(self.source(), 0, 0), self.mapFromItem(self.dest(), 0, 0))
length = line.length()
if length == 0.0:
return
edgeOffset = QtCore.QPointF((line.dx() * 10) / length, (line.dy() * 10) / length)
self.prepareGeometryChange()
self.sourcePoint = line.p1() + edgeOffset
self.destPoint = line.p2() - edgeOffset
def boundingRect(self):
if not self.source() or not self.dest():
return QtCore.QRectF()
penWidth = 1
extra = (penWidth + self.arrowSize) / 2.0
return QtCore.QRectF(self.sourcePoint,
QtCore.QSizeF(self.destPoint.x() - self.sourcePoint.x(),
self.destPoint.y() - self.sourcePoint.y())).normalized().adjusted(-extra, -extra, extra, extra)
def paint(self, painter, option, widget):
if not self.source() or not self.dest():
return
# Draw the line itself.
line = QtCore.QLineF(self.sourcePoint, self.destPoint)
if line.length() == 0.0:
return
painter.setPen(QtGui.QPen(QtCore.Qt.black, 1, QtCore.Qt.SolidLine, QtCore.Qt.RoundCap, QtCore.Qt.RoundJoin))
painter.drawLine(line)
# Draw the arrows if there's enough room.
angle = math.acos(line.dx() / line.length())
if line.dy() >= 0:
angle = Edge.TwoPi - angle
sourceArrowP1 = self.sourcePoint + QtCore.QPointF(math.sin(angle + Edge.Pi / 3) * self.arrowSize,
math.cos(angle + Edge.Pi / 3) * self.arrowSize)
sourceArrowP2 = self.sourcePoint + QtCore.QPointF(math.sin(angle + Edge.Pi - Edge.Pi / 3) * self.arrowSize,
math.cos(angle + Edge.Pi - Edge.Pi / 3) * self.arrowSize);
destArrowP1 = self.destPoint + QtCore.QPointF(math.sin(angle - Edge.Pi / 3) * self.arrowSize,
math.cos(angle - Edge.Pi / 3) * self.arrowSize)
destArrowP2 = self.destPoint + QtCore.QPointF(math.sin(angle - Edge.Pi + Edge.Pi / 3) * self.arrowSize,
math.cos(angle - Edge.Pi + Edge.Pi / 3) * self.arrowSize)
painter.setBrush(QtCore.Qt.black)
painter.drawPolygon(QtGui.QPolygonF([line.p1(), sourceArrowP1, sourceArrowP2]))
painter.drawPolygon(QtGui.QPolygonF([line.p2(), destArrowP1, destArrowP2]))
class Node(QtGui.QGraphicsItem):
Type = QtGui.QGraphicsItem.UserType + 1
def __init__(self, graphWidget):
QtGui.QGraphicsItem.__init__(self)
self.graph = weakref.ref(graphWidget)
self.edgeList = []
self.newPos = QtCore.QPointF()
self.setFlag(QtGui.QGraphicsItem.ItemIsMovable)
self.setFlag(QtGui.QGraphicsItem.ItemSendsGeometryChanges)
self.setCacheMode(self.DeviceCoordinateCache)
self.setZValue(-1)
def type(self):
return Node.Type
def addEdge(self, edge):
self.edgeList.append(weakref.ref(edge))
edge.adjust()
def edges(self):
return self.edgeList
def calculateForces(self):
if not self.scene() or self.scene().mouseGrabberItem() is self:
self.newPos = self.pos()
return
# Sum up all forces pushing this item away.
xvel = 0.0
yvel = 0.0
for item in self.scene().items():
if not isinstance(item, Node):
continue
line = QtCore.QLineF(self.mapFromItem(item, 0, 0), QtCore.QPointF(0, 0))
dx = line.dx()
dy = line.dy()
l = 2.0 * (dx * dx + dy * dy)
if l > 0:
xvel += (dx * 150.0) / l
yvel += (dy * 150.0) / l
# Now subtract all forces pulling items together.
weight = (len(self.edgeList) + 1) * 10.0
for edge in self.edgeList:
if edge().sourceNode() is self:
pos = self.mapFromItem(edge().destNode(), 0, 0)
else:
pos = self.mapFromItem(edge().sourceNode(), 0, 0)
xvel += pos.x() / weight
yvel += pos.y() / weight
if QtCore.qAbs(xvel) < 0.1 and QtCore.qAbs(yvel) < 0.1:
xvel = yvel = 0.0
sceneRect = self.scene().sceneRect()
self.newPos = self.pos() + QtCore.QPointF(xvel, yvel)
self.newPos.setX(min(max(self.newPos.x(), sceneRect.left() + 10), sceneRect.right() - 10))
self.newPos.setY(min(max(self.newPos.y(), sceneRect.top() + 10), sceneRect.bottom() - 10))
def advance(self):
if self.newPos == self.pos():
return False
self.setPos(self.newPos)
return True
def boundingRect(self):
adjust = 2.0
return QtCore.QRectF(-10 - adjust, -10 - adjust,
23 + adjust, 23 + adjust)
def shape(self):
path = QtGui.QPainterPath()
path.addEllipse(-10, -10, 20, 20)
return path
def paint(self, painter, option, widget):
painter.setPen(QtCore.Qt.NoPen)
painter.setBrush(QtCore.Qt.darkGray)
painter.drawEllipse(-7, -7, 20, 20)
gradient = QtGui.QRadialGradient(-3, -3, 10)
if option.state & QtGui.QStyle.State_Sunken:
gradient.setCenter(3, 3)
gradient.setFocalPoint(3, 3)
gradient.setColorAt(1, QtGui.QColor(QtCore.Qt.yellow).lighter(120))
gradient.setColorAt(0, QtGui.QColor(QtCore.Qt.darkYellow).lighter(120))
else:
gradient.setColorAt(0, QtCore.Qt.yellow)
gradient.setColorAt(1, QtCore.Qt.darkYellow)
painter.setBrush(QtGui.QBrush(gradient))
painter.setPen(QtGui.QPen(QtCore.Qt.black, 0))
painter.drawEllipse(-10, -10, 20, 20)
def itemChange(self, change, value):
if change == QtGui.QGraphicsItem.ItemPositionChange:
for edge in self.edgeList:
edge().adjust()
self.graph().itemMoved()
return QtGui.QGraphicsItem.itemChange(self, change, value)
def mousePressEvent(self, event):
self.update()
QtGui.QGraphicsItem.mousePressEvent(self, event)
def mouseReleaseEvent(self, event):
self.update()
QtGui.QGraphicsItem.mouseReleaseEvent(self, event)
class GraphWidget(QtGui.QGraphicsView):
def __init__(self):
QtGui.QGraphicsView.__init__(self)
self.timerId = 0
scene = QtGui.QGraphicsScene(self)
scene.setItemIndexMethod(QtGui.QGraphicsScene.NoIndex)
scene.setSceneRect(-200, -200, 400, 400)
self.setScene(scene)
self.setCacheMode(QtGui.QGraphicsView.CacheBackground)
self.setRenderHint(QtGui.QPainter.Antialiasing)
self.setTransformationAnchor(QtGui.QGraphicsView.AnchorUnderMouse)
self.setResizeAnchor(QtGui.QGraphicsView.AnchorViewCenter)
node1 = Node(self)
node2 = Node(self)
node3 = Node(self)
node4 = Node(self)
self.centerNode = Node(self)
node6 = Node(self)
node7 = Node(self)
node8 = Node(self)
node9 = Node(self)
scene.addItem(node1)
scene.addItem(node2)
scene.addItem(node3)
scene.addItem(node4)
scene.addItem(self.centerNode)
scene.addItem(node6)
scene.addItem(node7)
scene.addItem(node8)
scene.addItem(node9)
scene.addItem(Edge(node1, node2))
scene.addItem(Edge(node2, node3))
scene.addItem(Edge(node2, self.centerNode))
scene.addItem(Edge(node3, node6))
scene.addItem(Edge(node4, node1))
scene.addItem(Edge(node4, self.centerNode))
scene.addItem(Edge(self.centerNode, node6))
scene.addItem(Edge(self.centerNode, node8))
scene.addItem(Edge(node6, node9))
scene.addItem(Edge(node7, node4))
scene.addItem(Edge(node8, node7))
scene.addItem(Edge(node9, node8))
node1.setPos(-50, -50)
node2.setPos(0, -50)
node3.setPos(50, -50)
node4.setPos(-50, 0)
self.centerNode.setPos(0, 0)
node6.setPos(50, 0)
node7.setPos(-50, 50)
node8.setPos(0, 50)
node9.setPos(50, 50)
self.scale(0.8, 0.8)
self.setMinimumSize(400, 400)
self.setWindowTitle(self.tr("Elastic Nodes"))
def itemMoved(self):
if not self.timerId:
self.timerId = self.startTimer(1000 / 25)
def keyPressEvent(self, event):
key = event.key()
if key == QtCore.Qt.Key_Up:
self.centerNode.moveBy(0, -20)
elif key == QtCore.Qt.Key_Down:
self.centerNode.moveBy(0, 20)
elif key == QtCore.Qt.Key_Left:
self.centerNode.moveBy(-20, 0)
elif key == QtCore.Qt.Key_Right:
self.centerNode.moveBy(20, 0)
elif key == QtCore.Qt.Key_Plus:
self.scaleView(1.2)
elif key == QtCore.Qt.Key_Minus:
self.scaleView(1 / 1.2)
elif key == QtCore.Qt.Key_Space or key == QtCore.Qt.Key_Enter:
for item in self.scene().items():
if isinstance(item, Node):
item.setPos(-150 + QtCore.qrand() % 300, -150 + QtCore.qrand() % 300)
else:
QtGui.QGraphicsView.keyPressEvent(self, event)
def timerEvent(self, event):
nodes = [item for item in self.scene().items() if isinstance(item, Node)]
for node in nodes:
node.calculateForces()
itemsMoved = False
for node in nodes:
if node.advance():
itemsMoved = True
if not itemsMoved:
self.killTimer(self.timerId)
self.timerId = 0
def wheelEvent(self, event):
self.scaleView(math.pow(2.0, -event.delta() / 240.0))
def drawBackground(self, painter, rect):
# Shadow.
sceneRect = self.sceneRect()
rightShadow = QtCore.QRectF(sceneRect.right(), sceneRect.top() + 5, 5, sceneRect.height())
bottomShadow = QtCore.QRectF(sceneRect.left() + 5, sceneRect.bottom(), sceneRect.width(), 5)
if rightShadow.intersects(rect) or rightShadow.contains(rect):
painter.fillRect(rightShadow, QtCore.Qt.darkGray)
if bottomShadow.intersects(rect) or bottomShadow.contains(rect):
painter.fillRect(bottomShadow, QtCore.Qt.darkGray)
# Fill.
gradient = QtGui.QLinearGradient(sceneRect.topLeft(), sceneRect.bottomRight())
gradient.setColorAt(0, QtCore.Qt.white)
gradient.setColorAt(1, QtCore.Qt.lightGray)
painter.fillRect(rect.intersect(sceneRect), QtGui.QBrush(gradient))
painter.setBrush(QtCore.Qt.NoBrush)
painter.drawRect(sceneRect)
# Text.
textRect = QtCore.QRectF(sceneRect.left() + 4, sceneRect.top() + 4,
sceneRect.width() - 4, sceneRect.height() - 4)
message = self.tr("Click and drag the nodes around, and zoom with the "
"mouse wheel or the '+' and '-' keys")
font = painter.font()
font.setBold(True)
font.setPointSize(14)
painter.setFont(font)
painter.setPen(QtCore.Qt.lightGray)
painter.drawText(textRect.translated(2, 2), message)
painter.setPen(QtCore.Qt.black)
painter.drawText(textRect, message)
def scaleView(self, scaleFactor):
factor = self.matrix().scale(scaleFactor, scaleFactor).mapRect(QtCore.QRectF(0, 0, 1, 1)).width()
if factor < 0.07 or factor > 100:
return
self.scale(scaleFactor, scaleFactor)
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
QtCore.qsrand(QtCore.QTime(0,0,0).secsTo(QtCore.QTime.currentTime()))
widget = GraphWidget()
widget.show()
sys.exit(app.exec_())
# -----------------------------------------------------------------------------
# meisamhe/GPLshared | ML/HW4Q2RunAlgorithms.py | license: gpl-3.0
# -----------------------------------------------------------------------------
__author__ = 'MeisamHe'
#=================================Start with the first data set farm ads=======================================================
import csv
import sys
import math
import random
import numpy as np
import os
#-----------------------------------------------------------------------------
# Read the Ham file
#-----------------------------------------------------------------------------
currentDatasetName = "BankData"
# "FeatureSelectedDatasetARFF"
#"DresssSales"
#
numiter = 30
# test also for 30
# test also for 100
# test also for 150
newDatasetPath = "C:\\Users\\MeisamHe\\Desktop\\BackupToRestoreComputerApril15\\MachineLearning\\HW\\HW4\\DataSets\\ARFF\\tenfold\\"
#classifier = 'weka.classifiers.trees.J48 -C 0.25' # options -C 0.25
#classifier = 'weka.classifiers.functions.Logistic'
#classifier = 'weka.classifiers.bayes.NaiveBayes'
classifier = 'weka.classifiers.trees.DecisionStump'
if classifier== 'weka.classifiers.trees.J48 -C 0.25':
testclassifier = 'weka.classifiers.trees.J48'
else:
testclassifier = classifier
baggingclassifier = 'weka.classifiers.meta.Bagging -I %s -W %s'%(numiter, classifier) #options -W Full name of base classifier. (default: weka.classifiers.trees.REPTree)
# option -I number of iterations
#e.g. weka.classifiers.meta.Bagging -P 100 -S 1 -num-slots 1 -I 10 -W weka.classifiers.trees.J48 -- -C 0.25 -M 2
boostingclassifier = 'weka.classifiers.meta.AdaBoostM1 -I %s -P 100000 -W %s'%(numiter, classifier) # option -I number of iterations
# option -P 100000 (percentage of weight mass used for training)
#for Bagging
# classifier = baggingclassifier#'weka.classifiers.meta.Bagging -I %s -W %s'%(numiter, classifier) #-- -C 0.25
# testclassifier = 'weka.classifiers.meta.Bagging'
# for boosting
classifier = boostingclassifier
testclassifier = 'weka.classifiers.meta.AdaBoostM1'
# classifier = boostingclassifier
Wekapath = "C:/Python27/jars/weka.jar"
for i in range(1,11,1):
validFile = newDatasetPath+"valid"+currentDatasetName+"%s"%i+".arff"
trainFile = newDatasetPath+"train"+currentDatasetName+"%s"%i+".arff"
inputFile = trainFile
modelFile = "C:\\Users\\MeisamHe\\Desktop\\BackupToRestoreComputerApril15\\MachineLearning\\HW\\HW4\\DataSets\\ARFF\\models\\modelFile%s%s.wkm"%(currentDatasetName,i)
trainingOutput = "C:\\Users\\MeisamHe\\Desktop\\BackupToRestoreComputerApril15\\MachineLearning\\HW\\HW4\\DataSets\\ARFF\\trainingResults\\trainingOutput%s%s.txt"%(currentDatasetName,i)
trainingCommand = 'java -cp "%s" %s -t "%s" -d "%s" > "%s"'% (Wekapath, classifier,inputFile,modelFile,trainingOutput)
#print 'training command is%s'%trainingCommand
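    # For illustration, with the settings above (AdaBoostM1 over DecisionStump,
    # numiter = 30) the command expands to roughly this (paths abbreviated):
    #   java -cp "C:/Python27/jars/weka.jar" weka.classifiers.meta.AdaBoostM1 -I 30 -P 100000 \
    #        -W weka.classifiers.trees.DecisionStump -t "...trainBankData1.arff" \
    #        -d "...modelFileBankData1.wkm" > "...trainingOutputBankData1.txt"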
print "running the classifier \"%s\" on the training data set %s in the %s fold"%(classifier,currentDatasetName,i)
os.system(trainingCommand)
#---------------------running validation------------------------------
testInputFile = validFile
testOutput = "C:\\Users\\MeisamHe\\Desktop\\BackupToRestoreComputerApril15\\MachineLearning\\HW\\HW4\\DataSets\\ARFF\\testOutput\\testOutput%s%s.txt"%(currentDatasetName,i)
testCommand = 'java -cp "%s" %s -l "%s" -T "%s" > "%s"'% (Wekapath, testclassifier,modelFile,testInputFile,testOutput)
print "running the classifier \"%s\" on the validation data set %s in the %s fold"%(classifier,currentDatasetName,i)
os.system(testCommand)
# #Calculate Accuracy of Perceptron Algorithm
# f = open(testOutput,'r')
# temp = f.read()
# weights0 = re.findall('Threshold\s*([-0-9.E]+)', temp)
# print 'weight 0 is:%s'%weights0
# weightVector = re.findall('Attrib something[0-9]*\s*([-0-9.E]+)', temp)
# print 'the weight vector is:%s'%weightVector
# -----------------------------------------------------------------------------
# stryder199/RyarkAssignments | Assignment2/web2py/gluon/decoder.py | license: mit
# -----------------------------------------------------------------------------
import codecs, encodings
"""Caller will hand this library a buffer and ask it to either convert
it or auto-detect the type.
Based on http://code.activestate.com/recipes/52257/
Licensed under the PSF License
"""
# None represents a potentially variable byte. "##" in the XML spec...
autodetect_dict={ # bytepattern : ("name",
(0x00, 0x00, 0xFE, 0xFF) : ("ucs4_be"),
(0xFF, 0xFE, 0x00, 0x00) : ("ucs4_le"),
(0xFE, 0xFF, None, None) : ("utf_16_be"),
(0xFF, 0xFE, None, None) : ("utf_16_le"),
(0x00, 0x3C, 0x00, 0x3F) : ("utf_16_be"),
(0x3C, 0x00, 0x3F, 0x00) : ("utf_16_le"),
(0x3C, 0x3F, 0x78, 0x6D): ("utf_8"),
(0x4C, 0x6F, 0xA7, 0x94): ("EBCDIC")
}
def autoDetectXMLEncoding(buffer):
""" buffer -> encoding_name
The buffer should be at least 4 bytes long.
Returns None if encoding cannot be detected.
Note that encoding_name might not have an installed
decoder (e.g. EBCDIC)
"""
# a more efficient implementation would not decode the whole
# buffer at once but otherwise we'd have to decode a character at
# a time looking for the quote character...that's a pain
encoding = "utf_8" # according to the XML spec, this is the default
# this code successively tries to refine the default
# whenever it fails to refine, it falls back to
# the last place encoding was set.
if len(buffer)>=4:
bytes = (byte1, byte2, byte3, byte4) = tuple(map(ord, buffer[0:4]))
enc_info = autodetect_dict.get(bytes, None)
if not enc_info: # try autodetection again removing potentially
# variable bytes
bytes = (byte1, byte2, None, None)
enc_info = autodetect_dict.get(bytes)
else:
enc_info = None
if enc_info:
encoding = enc_info # we've got a guess... these are
#the new defaults
# try to find a more precise encoding using xml declaration
secret_decoder_ring = codecs.lookup(encoding)[1]
(decoded,length) = secret_decoder_ring(buffer)
first_line = decoded.split("\n")[0]
if first_line and first_line.startswith(u"<?xml"):
encoding_pos = first_line.find(u"encoding")
if encoding_pos!=-1:
# look for double quote
quote_pos=first_line.find('"', encoding_pos)
if quote_pos==-1: # look for single quote
quote_pos=first_line.find("'", encoding_pos)
if quote_pos>-1:
quote_char,rest=(first_line[quote_pos],
first_line[quote_pos+1:])
encoding=rest[:rest.find(quote_char)]
return encoding
def decoder(buffer):
encoding = autoDetectXMLEncoding(buffer)
return buffer.decode(encoding).encode('utf8')
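
# Minimal usage sketch (hypothetical file name; both helpers expect a byte
# string of at least 4 bytes):
#
#     raw = open('feed.xml', 'rb').read()
#     print autoDetectXMLEncoding(raw)   # e.g. 'utf_8' or 'utf_16_le'
#     utf8_bytes = decoder(raw)          # same document re-encoded as UTF-8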
# -----------------------------------------------------------------------------
# FreeON/spammpack | src-C/prototype/stream_multiply/generate_SSE_assembly.py | license: bsd-3-clause
# -----------------------------------------------------------------------------
#!/usr/bin/python
#
# Generate SSE assembly code for a kernel operating on a 4x4 blocks.
import math, optparse, sys
class box:
def __init__ (self, i_1, i_2, j_1, j_2):
self.i_1 = i_1
self.i_2 = i_2
self.j_1 = j_1
self.j_2 = j_2
def __str__ (self):
return "box: [%d-%d][%d-%d]" % (self.i_1, self.i_2, self.j_1, self.j_2)
class counter:
def __init__ (self):
self.counter = 0
def __init__ (self, initial_value):
self.counter = initial_value
def increment (self):
self.counter += 1
def get (self):
return self.counter
# Generate matrix product with Z-curve ordering.
def generate_Z_curve (A, B, C, block_counter):
if A.i_2-A.i_1 == 1 and A.j_2-A.j_1 == 1:
i = C.i_1
j = C.j_1
k = A.j_1
if options.generate_checks:
print
print padding + ".align 16"
print "block_%d:" % (block_counter.get())
block_counter.increment()
print
print padding + "# Check norm of product A(%d,%d)*B(%d,%d)." % (i+1, k+1, k+1, j+1)
print padding + "movss 0x%x(multiply_stream, base_pointer), B1" % ((i*options.N+k)*4+24)
print padding + "mulss 0x%x(multiply_stream, base_pointer), B1" % ((k*options.N+j+options.N**2)*4+24)
print padding + "comiss tolerance, B1"
print padding + "jb block_%d" % (block_counter.get())
print
print padding + "# Reset C(%d,%d) matrix block accumulators." % (i+1, j+1)
print padding + "xorps C1, C1"
print padding + "xorps C2, C2"
print padding + "xorps C3, C3"
print padding + "xorps C4, C4"
print
print padding + "# Calculate C(%d,%d) = A(%d,%d)*B(%d,%d)." % (i+1, j+1, i+1, k+1, k+1, j+1)
print padding + "movaps 0x%x+B_OFFSET_%d%d(B), B1" % (0*4*4, k+1, j+1)
print padding + "movaps 0x%x+B_OFFSET_%d%d(B), B2" % (1*4*4, k+1, j+1)
print padding + "movaps 0x%x+B_OFFSET_%d%d(B), B3" % (2*4*4, k+1, j+1)
print padding + "movaps 0x%x+B_OFFSET_%d%d(B), B4" % (3*4*4, k+1, j+1)
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((0*4+0)*4*4, i+1, k+1, 1, 1)
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((0*4+1)*4*4, i+1, k+1, 1, 2)
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((0*4+2)*4*4, i+1, k+1, 1, 3)
print padding + "mulps B1, A11"
print padding + "mulps B2, A12"
print padding + "addps A11, C1"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((0*4+3)*4*4, i+1, k+1, 1, 4)
print padding + "mulps B3, A13"
print padding + "addps A12, C1"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((1*4+0)*4*4, i+1, k+1, 2, 1)
print padding + "mulps B4, A14"
print padding + "addps A13, C1"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((1*4+1)*4*4, i+1, k+1, 2, 2)
print padding + "mulps B1, A21"
print padding + "addps A14, C1"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((1*4+2)*4*4, i+1, k+1, 2, 3)
print padding + "mulps B2, A22"
print padding + "addps A21, C2"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((1*4+3)*4*4, i+1, k+1, 2, 4)
print padding + "mulps B3, A23"
print padding + "addps A22, C2"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((2*4+0)*4*4, i+1, k+1, 3, 1)
print padding + "mulps B4, A24"
print padding + "addps A23, C2"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((2*4+1)*4*4, i+1, k+1, 3, 2)
print padding + "mulps B1, A31"
print padding + "addps A24, C2"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((2*4+2)*4*4, i+1, k+1, 3, 3)
print padding + "mulps B2, A32"
print padding + "addps A31, C3"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((2*4+3)*4*4, i+1, k+1, 3, 4)
print padding + "mulps B3, A33"
print padding + "addps A32, C3"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((3*4+0)*4*4, i+1, k+1, 4, 1)
print padding + "mulps B4, A34"
print padding + "addps A33, C3"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((3*4+1)*4*4, i+1, k+1, 4, 2)
print padding + "mulps B1, A41"
print padding + "addps A34, C3"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((3*4+2)*4*4, i+1, k+1, 4, 3)
print padding + "mulps B2, A42"
print padding + "addps A41, C4"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((3*4+3)*4*4, i+1, k+1, 4, 4)
print padding + "mulps B3, A43"
print padding + "addps A42, C4"
print padding + "mulps B4, A44"
print padding + "addps A43, C4"
print padding + "addps A44, C4"
print
print padding + "# Multiply C(%d,%d) by alpha." % (i+1, j+1)
print padding + "mulps alpha, C1"
print padding + "mulps alpha, C2"
print padding + "mulps alpha, C3"
print padding + "mulps alpha, C4"
print
print padding + "# Add accumulated C(%d,%d) to already existing." % (i+1, j+1)
print padding + "addps 0x0+C_OFFSET_%d%d(C), C1" % (i+1, j+1)
print padding + "addps 0x10+C_OFFSET_%d%d(C), C2" % (i+1, j+1)
print padding + "addps 0x20+C_OFFSET_%d%d(C), C3" % (i+1, j+1)
print padding + "addps 0x30+C_OFFSET_%d%d(C), C4" % (i+1, j+1)
print
print padding + "# Write out C(%d,%d) submatrix block." % (i+1, j+1)
print padding + "movaps C1, 0x0+C_OFFSET_%d%d(C)" % (i+1, j+1)
print padding + "movaps C2, 0x10+C_OFFSET_%d%d(C)" % (i+1, j+1)
print padding + "movaps C3, 0x20+C_OFFSET_%d%d(C)" % (i+1, j+1)
print padding + "movaps C4, 0x30+C_OFFSET_%d%d(C)" % (i+1, j+1)
else:
A_11 = box(A.i_1, A.i_1+(A.i_2-A.i_1)/2, A.j_1, A.j_1+(A.j_2-A.j_1)/2)
A_12 = box(A.i_1, A.i_1+(A.i_2-A.i_1)/2, A.j_1+(A.j_2-A.j_1)/2, A.j_2)
A_21 = box(A.i_1+(A.i_2-A.i_1)/2, A.i_2, A.j_1, A.j_1+(A.j_2-A.j_1)/2)
A_22 = box(A.i_1+(A.i_2-A.i_1)/2, A.i_2, A.j_1+(A.j_2-A.j_1)/2, A.j_2)
B_11 = box(B.i_1, B.i_1+(B.i_2-B.i_1)/2, B.j_1, B.j_1+(B.j_2-B.j_1)/2)
B_12 = box(B.i_1, B.i_1+(B.i_2-B.i_1)/2, B.j_1+(B.j_2-B.j_1)/2, B.j_2)
B_21 = box(B.i_1+(B.i_2-B.i_1)/2, B.i_2, B.j_1, B.j_1+(B.j_2-B.j_1)/2)
B_22 = box(B.i_1+(B.i_2-B.i_1)/2, B.i_2, B.j_1+(B.j_2-B.j_1)/2, B.j_2)
C_11 = box(C.i_1, C.i_1+(C.i_2-C.i_1)/2, C.j_1, C.j_1+(C.j_2-C.j_1)/2)
C_12 = box(C.i_1, C.i_1+(C.i_2-C.i_1)/2, C.j_1+(C.j_2-C.j_1)/2, C.j_2)
C_21 = box(C.i_1+(C.i_2-C.i_1)/2, C.i_2, C.j_1, C.j_1+(C.j_2-C.j_1)/2)
C_22 = box(C.i_1+(C.i_2-C.i_1)/2, C.i_2, C.j_1+(C.j_2-C.j_1)/2, C.j_2)
generate_Z_curve(A_11, B_11, C_11, block_counter)
generate_Z_curve(A_12, B_21, C_11, block_counter)
generate_Z_curve(A_11, B_12, C_12, block_counter)
generate_Z_curve(A_12, B_22, C_12, block_counter)
generate_Z_curve(A_21, B_11, C_21, block_counter)
generate_Z_curve(A_22, B_21, C_21, block_counter)
generate_Z_curve(A_21, B_12, C_22, block_counter)
generate_Z_curve(A_22, B_22, C_22, block_counter)
# Main program.
parser = optparse.OptionParser(description =
"""This script generates a stream element kernel operating on 4x4 matrix
blocks. The kernel generated is written using assembly instructions assuming a
processor with SSE2.""")
parser.add_option("-N",
metavar = "N",
help = "generate fully unrolled kernel for NxN matrix of 4x4 matrix blocks [default: %default]",
dest = "N",
type = "int",
default = 1)
parser.add_option("--unroll",
metavar = "N",
help = "fully unroll loops only at and below a matrix size of NxN [default: %default]",
dest = "N_unroll",
type = "int",
default = 1)
parser.add_option("--name",
metavar = "func",
help = "set function name to \"func\" [default: %default]",
dest = "functionName",
type = "string",
default = "stream_kernel")
parser.add_option("--no-checks",
action = "store_false",
default = True,
help = "generate code without any norm checks [default: %default]",
dest = "generate_checks")
parser.add_option("--Z-curve",
action = "store_true",
default = False,
help = """layout the multiply along a Z-curve as opposed to regular
row-major ordering [default: %default]""",
dest = "Z_curve_ordering")
( options, arguments ) = parser.parse_args()
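
# Example invocation (a sketch; the output file name is arbitrary):
#   ./generate_SSE_assembly.py -N 4 --name stream_kernel_4x4 > kernel.S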
# Check N.
if options.N <= 0:
print "N needs to be a positive number > 0"
sys.exit(1)
d = int(math.log(options.N)/math.log(2))
if 2**d != options.N:
print "N needs to be a power of 2"
sys.exit(1)
# Check loop unrolling.
if options.N_unroll <= 0:
options.N_unroll = 1
if options.N_unroll > options.N:
options.N_unroll = options.N
# Assembly code indentation.
padding = " "
# Generate assembly code.
print "# This code was auto-generated by %s." % (sys.argv[0])
print "# The command line given was:"
print "#"
sys.stdout.write("# ")
for i in range(len(sys.argv)):
sys.stdout.write(" %s" % (sys.argv[i]))
sys.stdout.write("\n")
# Define some things.
print
print "# Function ABI."
print "#define number_stream_elements %rdi"
print "#define alpha %xmm0"
print "#define tolerance %xmm1"
print "#define multiply_stream %rsi"
print
print "# Define SSE registers used for C matrix"
print "#define C1 %xmm2"
print "#define C2 %xmm3"
print "#define C3 %xmm4"
print "#define C4 %xmm5"
print
print "# Define SSE registeres used for B matrix"
print "#define B1 %xmm6"
print "#define B2 %xmm7"
print "#define B3 %xmm8"
print "#define B4 %xmm9"
print
print "# Define SSE registeres used for A matrix"
print "#define A11 %xmm10"
print "#define A12 %xmm11"
print "#define A13 %xmm12"
print "#define A14 %xmm13"
print "#define A21 %xmm14"
print "#define A22 %xmm15"
print "#define A23 %xmm10"
print "#define A24 %xmm11"
print "#define A31 %xmm12"
print "#define A32 %xmm13"
print "#define A33 %xmm14"
print "#define A34 %xmm15"
print "#define A41 %xmm10"
print "#define A42 %xmm11"
print "#define A43 %xmm12"
print "#define A44 %xmm13"
print
print "# Define loop variables."
print "#define index %rax"
print "#define base_pointer %rdx"
#print "#define i_outer %r10"
#print "#define j_outer %r11"
print
print "# Define pointers to matrix blocks in stream."
print "#define A %r8"
print "#define B %rcx"
print "#define C %r9"
# Generate offsets.
print
print "# Define offsets into matrix blocks."
print
for i in range(options.N):
for j in range(options.N):
print "#define A_OFFSET_%d%d (%d*%d+%d)*64*4 // %d = 0x%x" % (i+1, j+1, i, options.N, j, (i*options.N+j)*64, (i*options.N+j)*64)
print
for i in range(options.N):
for j in range(options.N):
print "#define B_OFFSET_%d%d (%d*%d+%d)*16*4 // %d = 0x%x" % (i+1, j+1, i, options.N, j, (i*options.N+j)*16, (i*options.N+j)*16)
print
for i in range(options.N):
for j in range(options.N):
print "#define C_OFFSET_%d%d (%d*%d+%d)*16*4 // %d = 0x%x" % (i+1, j+1, i, options.N, j, (i*options.N+j)*16, (i*options.N+j)*16)
# Print some C function declarations.
print
print "# C function declaration"
print "#"
print "# struct multiply_stream_t"
print "# {"
print "# float *A_block;"
print "# float *B_block;"
print "# float *C_block;"
print "# float norm[%d];" % (2*options.N**2)
print "# };"
print "#"
print "# void"
print "# %s (const unsigned int number_stream_elements," % (options.functionName)
print "# float alpha,"
print "# float tolerance,"
print "# struct multiply_stream_t *multiply_stream);"
# Start the function prolog.
print
print padding + "# Function prolog."
print padding + ".text"
print padding + ".align 256"
print padding + ".global %s" % (options.functionName)
print padding + ".type %s, @function" % (options.functionName)
print
print "%s:" % (options.functionName)
print
print padding + "# Push used registers on stack."
print padding + "push index"
print padding + "push base_pointer"
print padding + "push A"
print padding + "push B"
print padding + "push C"
#print padding + "push i_outer"
#print padding + "push j_outer"
print
print padding + "# Copy alpha into all 4 elements of SSE register."
print padding + "shufps $0x0, alpha, alpha"
print
print padding + "# Divide number of stream elements by %d to simulate stride of %d." % (options.N**3, options.N**3)
print padding + "shr $%i, number_stream_elements" % (3*math.log(options.N)/math.log(2))
print
print padding + "# Test whether number_stream_elements is zero."
print padding + "test number_stream_elements, number_stream_elements"
print padding + "jbe done"
print
print padding + "# Set loop index to zero."
print padding + "xor base_pointer, base_pointer"
print padding + "xor index, index"
block_counter = counter(1)
# Beginning of loop.
print
print padding + ".align 16"
print "loop:"
print
print padding + "# Set the base pointer using sizeof(multiply_stream_t) = 0x98."
print padding + "imul $0x98, base_pointer, base_pointer"
print
print padding + "# Load pointers to stream matrix blocks."
print padding + "mov (multiply_stream, base_pointer, 1), A"
print padding + "mov 0x8(multiply_stream, base_pointer, 1), B"
print padding + "mov 0x10(multiply_stream, base_pointer, 1), C"
if options.Z_curve_ordering:
generate_Z_curve(box(0, options.N, 0, options.N),
box(0, options.N, 0, options.N),
box(0, options.N, 0, options.N),
block_counter)
if options.generate_checks:
print
print padding + ".align 16"
print "block_%d:" % (block_counter.get())
block_counter.increment()
else:
#if options.N_unroll < options.N:
# # Generate outer loop code.
# print
# print padding + ".align 16"
# print "outer_i:"
#if options.N_unroll < options.N:
# # Generate outer loop code.
# print
# print padding + ".align 16"
# print "outer_j:"
for i in range(options.N):
for j in range(options.N):
print
print padding + "# Reset C(%d,%d) matrix block accumulators." % (i+1, j+1)
print padding + "xorps C1, C1"
print padding + "xorps C2, C2"
print padding + "xorps C3, C3"
print padding + "xorps C4, C4"
for k in range(options.N):
if options.generate_checks:
print
print padding + ".align 16"
print "block_%d:" % (block_counter.get())
block_counter.increment()
print
print padding + "# Check norm of product."
print padding + "movss 0x%x(multiply_stream, base_pointer), B1" % ((i*options.N+k)*4+24)
print padding + "mulss 0x%x(multiply_stream, base_pointer), B1" % ((k*options.N+j+options.N**2)*4+24)
print padding + "comiss tolerance, B1"
print padding + "jb block_%d" % (block_counter.get())
print
print padding + "# Calculate C(%d,%d) = A(%d,%d)*B(%d,%d)." % (i+1, j+1, i+1, k+1, k+1, j+1)
print padding + "movaps 0x%x+B_OFFSET_%d%d(B), B1" % (0*4*4, k+1, j+1)
print padding + "movaps 0x%x+B_OFFSET_%d%d(B), B2" % (1*4*4, k+1, j+1)
print padding + "movaps 0x%x+B_OFFSET_%d%d(B), B3" % (2*4*4, k+1, j+1)
print padding + "movaps 0x%x+B_OFFSET_%d%d(B), B4" % (3*4*4, k+1, j+1)
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((0*4+0)*4*4, i+1, k+1, 1, 1)
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((0*4+1)*4*4, i+1, k+1, 1, 2)
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((0*4+2)*4*4, i+1, k+1, 1, 3)
print padding + "mulps B1, A11"
print padding + "mulps B2, A12"
print padding + "addps A11, C1"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((0*4+3)*4*4, i+1, k+1, 1, 4)
print padding + "mulps B3, A13"
print padding + "addps A12, C1"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((1*4+0)*4*4, i+1, k+1, 2, 1)
print padding + "mulps B4, A14"
print padding + "addps A13, C1"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((1*4+1)*4*4, i+1, k+1, 2, 2)
print padding + "mulps B1, A21"
print padding + "addps A14, C1"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((1*4+2)*4*4, i+1, k+1, 2, 3)
print padding + "mulps B2, A22"
print padding + "addps A21, C2"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((1*4+3)*4*4, i+1, k+1, 2, 4)
print padding + "mulps B3, A23"
print padding + "addps A22, C2"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((2*4+0)*4*4, i+1, k+1, 3, 1)
print padding + "mulps B4, A24"
print padding + "addps A23, C2"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((2*4+1)*4*4, i+1, k+1, 3, 2)
print padding + "mulps B1, A31"
print padding + "addps A24, C2"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((2*4+2)*4*4, i+1, k+1, 3, 3)
print padding + "mulps B2, A32"
print padding + "addps A31, C3"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((2*4+3)*4*4, i+1, k+1, 3, 4)
print padding + "mulps B3, A33"
print padding + "addps A32, C3"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((3*4+0)*4*4, i+1, k+1, 4, 1)
print padding + "mulps B4, A34"
print padding + "addps A33, C3"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((3*4+1)*4*4, i+1, k+1, 4, 2)
print padding + "mulps B1, A41"
print padding + "addps A34, C3"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((3*4+2)*4*4, i+1, k+1, 4, 3)
print padding + "mulps B2, A42"
print padding + "addps A41, C4"
print padding + "movaps 0x%x+A_OFFSET_%d%d(A), A%d%d" % ((3*4+3)*4*4, i+1, k+1, 4, 4)
print padding + "mulps B3, A43"
print padding + "addps A42, C4"
print padding + "mulps B4, A44"
print padding + "addps A43, C4"
print padding + "addps A44, C4"
if options.generate_checks:
print
print padding + ".align 16"
print "block_%d:" % (block_counter.get())
block_counter.increment()
print
print padding + "# Multiply C(%d,%d) by alpha." % (i+1, j+1)
print padding + "mulps alpha, C1"
print padding + "mulps alpha, C2"
print padding + "mulps alpha, C3"
print padding + "mulps alpha, C4"
print
print padding + "# Add accumulated C(%d,%d) to already existing." % (i+1, j+1)
print padding + "addps 0x0+C_OFFSET_%d%d(C), C1" % (i+1, j+1)
print padding + "addps 0x10+C_OFFSET_%d%d(C), C2" % (i+1, j+1)
print padding + "addps 0x20+C_OFFSET_%d%d(C), C3" % (i+1, j+1)
print padding + "addps 0x30+C_OFFSET_%d%d(C), C4" % (i+1, j+1)
print
print padding + "# Write out C(%d,%d) submatrix block." % (i+1, j+1)
print padding + "movaps C1, 0x0+C_OFFSET_%d%d(C)" % (i+1, j+1)
print padding + "movaps C2, 0x10+C_OFFSET_%d%d(C)" % (i+1, j+1)
print padding + "movaps C3, 0x20+C_OFFSET_%d%d(C)" % (i+1, j+1)
print padding + "movaps C4, 0x30+C_OFFSET_%d%d(C)" % (i+1, j+1)
# End of loop.
print
print padding + "# Loop end."
print padding + "inc index"
print padding + "mov index, base_pointer"
print padding + "cmp number_stream_elements, index"
print padding + "jb loop"
# Leave function.
print
print padding + ".align 16"
print "done:"
print
print padding + "# Pop registers from stack."
#print padding + "pop j_outer"
#print padding + "pop i_outer"
print padding + "pop C"
print padding + "pop B"
print padding + "pop A"
print padding + "pop base_pointer"
print padding + "pop index"
print
print padding + "# Return from function."
print padding + "ret"
# Start function epilog.
print
print padding + "# Function epilog."
print padding + ".size %s, .-%s" % (options.functionName, options.functionName)
# -----------------------------------------------------------------------------
# smjurcak/csm | csmserver/views/download_dashboard.py
# -----------------------------------------------------------------------------
# =============================================================================
# Copyright (c) 2016, Cisco Systems, Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
from flask import Blueprint
from flask import abort
from flask import jsonify
from flask import render_template
from flask.ext.login import login_required
from flask.ext.login import current_user
from database import DBSession
from common import can_install
from common import can_delete_install
from common import get_server_by_id
from models import logger
from models import SystemOption
from models import DownloadJob
from models import DownloadJobHistory
from constants import UNKNOWN
from constants import NAME_CSM
from constants import NAME_CSMSERVER
from constants import NAME_CSM_DATA
from constants import NAME_REPOSITORY
from constants import JobStatus
from constants import UserPrivilege
from constants import get_repository_directory
from utils import is_empty
from utils import get_file_list
from utils import get_tarfile_file_list
from utils import datetime_from_local_to_utc
from tarfile import ReadError
import os
import datetime
download_dashboard = Blueprint('download_dashboard', __name__, url_prefix='/download_dashboard')
@download_dashboard.route('/')
@login_required
def home():
if not can_install(current_user):
abort(401)
absolute_path = os.path.abspath('.')
csm_repository_path = absolute_path.replace(NAME_CSM + '/' + NAME_CSMSERVER,
NAME_CSM_DATA + '/' + NAME_REPOSITORY)
return render_template('host/download_dashboard.html',
csm_repository_path=csm_repository_path,
system_option=SystemOption.get(DBSession()))
def get_download_job_json_dict(db_session, download_jobs):
rows = []
for download_job in download_jobs:
if isinstance(download_job, DownloadJob) or isinstance(download_job, DownloadJobHistory):
row = dict()
row['download_job_id'] = download_job.id
row['image_name'] = download_job.cco_filename
row['scheduled_time'] = download_job.scheduled_time
server = get_server_by_id(db_session, download_job.server_id)
if server is not None:
row['server_repository'] = server.hostname
if not is_empty(download_job.server_directory):
row['server_repository'] = row['server_repository'] + \
'<br><span style="color: Gray;"><b>Sub-directory:</b></span> ' + \
download_job.server_directory
else:
row['server_repository'] = UNKNOWN
row['status'] = download_job.status
row['status_time'] = download_job.status_time
row['created_by'] = download_job.created_by
if download_job.trace is not None:
row['trace'] = download_job.id
rows.append(row)
return {'data': rows}
@download_dashboard.route('/api/get_files_from_csm_repository/')
@login_required
def api_get_files_from_csm_repository():
rows = []
file_list = get_file_list(get_repository_directory())
for filename in file_list:
if filename.endswith('.tar'):
statinfo = os.stat(get_repository_directory() + filename)
row = dict()
row['image_name'] = filename
row['image_size'] = str(statinfo.st_size)
row['downloaded_time'] = datetime_from_local_to_utc(datetime.datetime.fromtimestamp(statinfo.st_mtime))
rows.append(row)
return jsonify(**{'data': rows})
@download_dashboard.route('/api/image/<image_name>/delete/', methods=['DELETE'])
@login_required
def api_delete_image_from_repository(image_name):
if current_user.privilege != UserPrivilege.ADMIN and current_user.privilege != UserPrivilege.NETWORK_ADMIN:
abort(401)
tar_image_path = get_repository_directory() + image_name
try:
# Remove the tar file contents
file_list = get_tarfile_file_list(tar_image_path)
for filename in file_list:
try:
file_path = get_repository_directory() + filename
if os.path.exists(file_path):
os.remove(file_path)
except:
logger.exception('api_delete_image_from_repository() hit exception filename=' + file_path)
except ReadError:
# In case, it is a partial downloaded TAR.
pass
try:
# Remove the actual tar file
file_path = tar_image_path
if os.path.exists(file_path):
os.remove(file_path)
# Remove the auxiliary file for the tar file
file_path = tar_image_path + '.size'
if os.path.exists(file_path):
os.remove(file_path)
except:
logger.exception('api_delete_image_from_repository() hit exception filename=' + file_path)
return jsonify({'status': 'Failed'})
return jsonify({'status': 'OK'})
@download_dashboard.route('/hosts/delete_all_failed_downloads/', methods=['DELETE'])
@login_required
def delete_all_failed_downloads():
if not can_delete_install(current_user):
abort(401)
return delete_all_downloads(status=JobStatus.FAILED)
@download_dashboard.route('/hosts/delete_all_scheduled_downloads/', methods=['DELETE'])
@login_required
def delete_all_scheduled_downloads():
if not can_delete_install(current_user):
abort(401)
return delete_all_downloads()
def delete_all_downloads(status=None):
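    # Note: with status=None the filter below matches jobs whose status column is
    # NULL, i.e. scheduled downloads that have not started yet (see the callers above).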
db_session = DBSession()
try:
download_jobs = db_session.query(DownloadJob).filter(DownloadJob.status == status)
for download_job in download_jobs:
db_session.delete(download_job)
db_session.commit()
return jsonify({'status': 'OK'})
except:
logger.exception('delete_download_job() hit exception')
return jsonify({'status': 'Failed: check system logs for details'})
@download_dashboard.route('/delete_download_job/<int:id>/', methods=['DELETE'])
@login_required
def delete_download_job(id):
if not can_delete_install(current_user):
abort(401)
db_session = DBSession()
download_job = db_session.query(DownloadJob).filter(DownloadJob.id == id).first()
if download_job is None:
abort(404)
try:
# Download jobs that are in progress cannot be deleted.
if download_job.status is None or download_job.status == JobStatus.FAILED:
db_session.delete(download_job)
db_session.commit()
return jsonify({'status': 'OK'})
except:
logger.exception('delete_download_job() hit exception')
return jsonify({'status': 'Failed: check system logs for details'})
@download_dashboard.route('/resubmit_download_job/<int:id>/', methods=['POST'])
@login_required
def resubmit_download_job(id):
if not can_install(current_user):
abort(401)
db_session = DBSession()
download_job = db_session.query(DownloadJob).filter(DownloadJob.id == id).first()
if download_job is None:
abort(404)
try:
        # Reset the job status so the downloader picks it up and retries it.
download_job.status = None
download_job.status_time = None
db_session.commit()
return jsonify({'status': 'OK'})
except:
logger.exception('resubmit_download_job() hit exception')
return jsonify({'status': 'Failed: check system logs for details'})
| apache-2.0 | -7,126,437,171,829,432,000 | 34.670683 | 115 | 0.654582 | false |
51reboot/actual_13_homework | 05/sxq/build.py | 1 | 4185 | #!/usr/bin/env python
#coding:utf-8
# os/sys/math/time/random
import os,sys,math,time,random,datetime
# print os.sep # path separator
# print os.name # platform / OS name
# print os.getcwd() # get the current working directory
# print os.getenv() # get an environment variable
# print os.putenv() # set an environment variable
# print os.listdir('/Users') # list files and directories under a given path
# print os.remove() # delete a file
# print os.removedirs() # remove directories
# print os.system('ls') # run a shell command
# print os.linesep # line terminator
# print os.listdir('/Users/test') # list files and directories in a directory
# print os.curdir # current directory (relative path)
# print os.path.split('/Users/test/agent') # split into directory path and file name
# print os.path.isdir('/Users/test/agent') # is it a directory
# print os.path.isfile('/Users/test/agent') # is it a regular file
# print os.path.exists('/Users/test/agent') # does the path exist
# print os.path.getsize('/Users/test/agent') # get the file size
# print os.path.getatime('/Users/test/agent') # access time
# print os.path.getctime('/Users/test/agent') # creation time
# print os.path.getmtime('/Users/test/agent') # modification time
# print os.path.abspath('/Users/test/agent') # get the absolute path
# print os.path.splitext('/Users/test/agent') # file name and extension
# print os.path.join('/Users/test','agent') # join directory and file name
# print os.path.basename('/Users/test/agent') # return the file name
# print os.path.dirname('/Users/test/agent') # return the directory path
# print os.uname() # get system information
# print os.chmod(path,mode) # change file permissions
# print os.chown(path,uid,gid) # change file owner and group
# print os.getegid() # effective group id
# print os.geteuid() # effective user id
# print os.getuid() # user id
# print os.getgid() # group id
# print os.getgroups() # all group ids of the current user
# print os.getloadavg() # system load average
# print os.getlogin() # login user name
# print os.getpgid()
# print os.getpgrp() # process group id
# print os.getpid() # current process id
# print os.getppid() # parent process id
# print os.getsid()
'''
abort
access
altsep
chdir
chflags
chroot
close
closerange
confstr
confstr_names
ctermid
curdir
defpath
devnull
dup
dup2
environ
errno
error
execl
execle
execlp
execlpe
execv
execve
execvp
execvpe
extsep
fchdir
fchmod
fchown
fdopen
fork
forkpty
fpathconf
fstat
fstatvfs
fsync
ftruncate
initgroups
isatty
kill
killpg
lchflags
lchmod
lchown
linesep
link
lseek
lstat
major
makedev
makedirs
minor
mkdir
mkfifo
mknod
name
nice
open
openpty
pardir
pathconf
pathconf_names
pathsep
pipe
popen
popen2
popen3
popen4
read
readlink
rename
renames
rmdir
sep
setegid
seteuid
setgid
setgroups
setpgid
setpgrp
setregid
setreuid
setsid
setuid
spawnl
spawnle
spawnlp
spawnlpe
spawnv
spawnve
spawnvp
spawnvpe
stat
stat_float_times
stat_result
statvfs
statvfs_result
strerror
symlink
sys
sysconf
sysconf_names
system
tcgetpgrp
tcsetpgrp
tempnam
times
tmpfile
tmpnam
ttyname
umask
uname
unlink
unsetenv
urandom
utime
wait
wait3
wait4
waitpid
walk
write
'''
'''
api_version
argv
builtin_module_names
byteorder
call_tracing
callstats
copyright
displayhook
dont_write_bytecode
exc_clear
exc_info
exc_type
excepthook
exec_prefix
executable
exit
flags
float_info
float_repr_style
getcheckinterval
getdefaultencoding
getdlopenflags
getfilesystemencoding
getprofile
getrecursionlimit
getrefcount
getsizeof
gettrace
hexversion
long_info
maxint
maxsize
maxunicode
meta_path
modules
path
path_hooks
path_importer_cache
platform
prefix
py3kwarning
setcheckinterval
setdlopenflags
setprofile
setrecursionlimit
settrace
stderr
stdin
stdout
subversion
version
version_info
warnoptions
'''
'''
acos
acosh
asin
asinh
atan
atan2
atanh
ceil
copysign
cos
cosh
degrees
e
erf
erfc
exp
expm1
fabs
factorial
floor
fmod
frexp
fsum
gamma
hypot
isinf
isnan
ldexp
lgamma
log
log10
log1p
modf
pi
pow
radians
sin
sinh
sqrt
tan
tanh
trunc
'''
'''
accept2dyear
altzone
asctime
clock
ctime
daylight
gmtime
localtime
mktime
sleep
strftime
strptime
struct_time
time
timezone
tzname
tzset
'''
'''
betavariate
choice
division
expovariate
gammavariate
gauss
getrandbits
getstate
jumpahead
lognormvariate
normalvariate
paretovariate
randint
random
randrange
sample
seed
setstate
shuffle
triangular
uniform
vonmisesvariate
weibullvariate
''' | mit | -9,219,017,705,434,372,000 | 10.732919 | 54 | 0.787397 | false |
tdickers/mitmproxy | mitmproxy/utils.py | 1 | 1071 | from __future__ import absolute_import, print_function, division
import netlib.utils
pkg_data = netlib.utils.Data(__name__)
class LRUCache:
"""
A simple LRU cache for generated values.
"""
def __init__(self, size=100):
self.size = size
self.cache = {}
self.cacheList = []
def get(self, gen, *args):
"""
gen: A (presumably expensive) generator function. The identity of
gen is NOT taken into account by the cache.
            *args: A list of immutable arguments, used to establish identity
            by the cache, and passed to gen to generate values.
"""
if args in self.cache:
self.cacheList.remove(args)
self.cacheList.insert(0, args)
return self.cache[args]
else:
ret = gen(*args)
self.cacheList.insert(0, args)
self.cache[args] = ret
if len(self.cacheList) > self.size:
d = self.cacheList.pop()
self.cache.pop(d)
return ret
| mit | 1,121,692,843,674,526,200 | 27.184211 | 78 | 0.549953 | false |
Cladis/wikilabels | wikilabels/database/worksets.py | 1 | 8760 | import psycopg2
from psycopg2.extras import Json
from .collection import Collection
from .errors import IntegrityError, NotFoundError
class Worksets(Collection):
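    """
    Data-access helpers for worksets: batches of labeling tasks assigned to a
    user within a campaign (lookup, per-workset stats, assignment, abandonment).
    """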
def get(self, workset_id, stats=False):
with self.db.conn.cursor() as cursor:
cursor.execute("""
SELECT
id, user_id,
campaign_id,
EXTRACT(EPOCH FROM created) AS created,
EXTRACT(EPOCH FROM expires) AS expires
FROM workset
WHERE id = %(workset_id)s
ORDER BY id
""", {'workset_id': workset_id})
try:
doc = next(cursor)
if stats: doc['stats'] = self.stats_for(workset_id)
return doc
except StopIteration:
raise NotFoundError("workset_id={0}".format(workset_id))
def stats_for(self, workset_id):
with self.db.conn.cursor() as cursor:
cursor.execute("""
SELECT
COUNT(workset_task.task_id) AS tasks,
COALESCE(SUM(label.task_id IS NOT NULL::int), 0) AS labeled
FROM workset
INNER JOIN workset_task ON workset_task.workset_id = workset.id
LEFT JOIN label ON
label.task_id = workset_task.task_id AND
label.user_id = workset.user_id
WHERE workset.id = %(workset_id)s
""", {'workset_id': workset_id})
try:
return next(cursor)
except StopIteration:
raise NotFoundError("workset_id={0}".format(workset_id))
def for_campaign(self, campaign_id, stats=False):
with self.db.conn.cursor() as cursor:
cursor.execute("""
SELECT
id, user_id,
campaign_id,
EXTRACT(EPOCH FROM created) AS created,
EXTRACT(EPOCH FROM expires) AS expires
FROM workset
WHERE campaign_id = %(campaign_id)s
ORDER BY id
""", {'campaign_id': campaign_id})
rows = []
for row in cursor:
if stats: row['stats'] = self.stats_for(row['id'])
rows.append(row)
return rows
def for_user(self, user_id, campaign_id=None, stats=False):
with self.db.conn.cursor() as cursor:
conditions = ["workset.user_id = %(user_id)s"]
if campaign_id is not None:
conditions.append("workset.campaign_id = %(campaign_id)s")
where = "\nWHERE " + " AND ".join(conditions) + "\n"
cursor.execute("""
SELECT
id, user_id,
campaign_id,
EXTRACT(EPOCH FROM created) AS created,
EXTRACT(EPOCH FROM expires) AS expires
FROM workset
""" + where + """
ORDER BY id
""", {'user_id': user_id,
'campaign_id': campaign_id})
rows = []
for row in cursor:
if stats: row['stats'] = self.stats_for(row['id'])
rows.append(row)
return rows
def open_workset_for_user(self, campaign_id, user_id):
with self.db.conn.cursor() as cursor:
# Check if this user already has an open workset
cursor.execute("""
SELECT
workset.id
FROM workset
INNER JOIN workset_task ON workset.id = workset_task.workset_id
INNER JOIN task ON workset_task.task_id = task.id
LEFT JOIN label ON
task.id = label.task_id AND
workset.user_id = label.user_id
WHERE workset.user_id = %(user_id)s AND
workset.campaign_id = %(campaign_id)s AND
label.task_id IS NULL
LIMIT 1;
""", {'user_id': user_id,
'campaign_id': campaign_id})
rows = cursor.fetchall()
if len(rows) > 0:
return rows[0]['id']
else:
return None
def assign(self, campaign_id, user_id, stats=False):
with self.db.conn.cursor() as cursor:
campaign = self.db.campaigns.get(campaign_id)
if not campaign['active']:
raise IntegrityError("Campaign {0} not active." \
.format(campaign_id))
workset_id = self.open_workset_for_user(campaign_id, user_id)
if workset_id is not None:
raise IntegrityError(("Incomplete workset_id={0} already " +
"assigned to user_id={1}") \
.format(workset_id, user_id))
if not self.db.campaigns.has_open_tasks(campaign_id, user_id):
raise IntegrityError(("No tasks available for user_id={0} " +
"in campaign_id={1}") \
.format(user_id, campaign_id))
try:
# Create a new workset
cursor.execute("""
INSERT INTO workset VALUES
(DEFAULT, %(campaign_id)s, %(user_id)s, NOW(),
NOW() + INTERVAL '1 DAY') RETURNING id;
""", {'campaign_id': campaign_id,
'user_id': user_id})
workset_id = cursor.fetchone()['id']
# Assign tasks to the workset
cursor.execute("""
INSERT INTO workset_task
SELECT
%(workset_id)s AS workset_id,
task.id AS task_id
FROM campaign
INNER JOIN task ON task.campaign_id = campaign.id
LEFT JOIN label ON
label.task_id = task.id
WHERE campaign.id = %(campaign_id)s
GROUP BY task.id, campaign.labels_per_task
HAVING
COUNT(label.task_id) < campaign.labels_per_task AND
SUM((label.user_id IS NOT NULL AND
label.user_id = %(user_id)s)::int) = 0
ORDER BY RANDOM()
LIMIT %(tasks_per_assignment)s
""", {'campaign_id': campaign_id,
'workset_id': workset_id,
'user_id': user_id,
'tasks_per_assignment': campaign['tasks_per_assignment']})
self.db.conn.commit()
except Exception:
self.db.conn.rollback()
raise
        return self.get(workset_id, stats)
def users(self):
with self.db.conn.cursor() as cursor:
cursor.execute("""
SELECT DISTINCT user_id
FROM workset
ORDER BY user_id
""")
return [row['user_id'] for row in cursor]
def abandon(self, workset_id, user_id):
        with self.db.conn.cursor() as cursor:
# Check if this user owns this workset
cursor.execute("""
SELECT 1 FROM workset
WHERE id = %(workset_id)s AND
user_id = %(user_id)s
""", {'workset_id': workset_id, 'user_id': user_id})
if len(cursor.fetchall()) == 0:
raise IntegrityError("workset_id={0} does not belong to user_id={1}" \
.format(workset_id, user_id))
# Clear incomplete assignements
try:
cursor.execute("""
DELETE FROM workset_task
WHERE
workset_id = %(workset_id)s AND
task_id IN (
SELECT workset_task.task_id
FROM workset_task
LEFT JOIN label ON
workset_task.task_id = label.task_id AND
label.user_id = %(user_id)s
WHERE
workset_id = %(workset_id)s AND
label.task_id IS NULL
)
""", {'workset_id': workset_id, 'user_id': user_id})
self.db.conn.commit()
except Exception:
self.db.conn.rollback()
raise
return self.get(workset_id)
| mit | 8,908,630,188,383,749,000 | 37.086957 | 86 | 0.457648 | false |
ljhandlwt/flask-study | flask1/app/models.py | 1 | 3601 | #coding=utf-8
from flask import url_for
from flask.ext.sqlalchemy import SQLAlchemy
from app import db,app
from hashlib import md5
from datetime import datetime
import os
user_user_concern=db.Table('user_user_concern',
db.Column('user1_id',db.Integer,db.ForeignKey('user.id'),primary_key=True),
db.Column('user2_id',db.Integer,db.ForeignKey('user.id'),primary_key=True))
user_user_blacklist=db.Table('user_user_blacklist',
db.Column('user1_id',db.Integer,db.ForeignKey('user.id'),primary_key=True),
db.Column('user2_id',db.Integer,db.ForeignKey('user.id'),primary_key=True))
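# The two association tables above give User self-referential many-to-many
# relations: user_user_concern backs the follow/concern relationship and
# user_user_blacklist backs the blacklist relationship defined on User below.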
class User(db.Model):
id=db.Column(db.Integer,primary_key=True)
nickname=db.Column(db.String(80),unique=True)
password=db.Column(db.String(120))
email=db.Column(db.String(120),unique=True)
information=db.Column(db.String(250))
posts=db.relationship('Post',backref='author',lazy='dynamic')
has_avatar=db.Column(db.Integer,default=0)
create_time=db.Column(db.DateTime,default=datetime.now())
messages=db.relationship('Message',backref='people',lazy='dynamic')
concern=db.relationship('User',secondary=user_user_concern,primaryjoin=id==user_user_concern.c.user1_id,
secondaryjoin=id==user_user_concern.c.user2_id,backref='concerned')
blacklist=db.relationship('User',secondary=user_user_blacklist,primaryjoin=id==user_user_blacklist.c.user1_id,
secondaryjoin=id==user_user_blacklist.c.user2_id,backref='blackedlist')
is_authenticated=True
is_active=True
is_anonymous=False
def __init__(self,nickname,password,email):
self.nickname=nickname
self.password=password
self.email=email
        self.information=u'这个人很懒,什么都没有写...'  # default bio: "This user is lazy and has written nothing..."
def get_id(self):
try:
return unicode(self.id)
except NameError:
return str(self.id)
def avatar(self):
if self.has_avatar:
return '/static/avatar/'+self.nickname+'.jpg'
else:
return url_for('static',filename='favicon.ico')
def has_concern(self,user):
return self.concern.count(user)
def has_concerned(self,user):
return self.concerned.count(user)
def has_black(self,user):
return self.blacklist.count(user)
def count_new_message(self):
return Message.query.filter_by(user_id=self.id,has_showed=0).count()
def is_admin(self):
return Admin.query.filter_by(nickname=self.nickname).first() is not None
def __repr__(self):
return '<User %r>' % self.nickname
class Post(db.Model):
id=db.Column(db.Integer,primary_key=True)
title=db.Column(db.String(80))
body=db.Column(db.Text)
date=db.Column(db.DateTime)
user_id=db.Column(db.Integer,db.ForeignKey('user.id'))
def __init__(self,title,body,author):
self.title=title
self.body=body
self.date=datetime.now()
self.author=author
def __repr__(self):
return '<Post %r>' % self.body
class Message(db.Model):
id=db.Column(db.Integer,primary_key=True)
content=db.Column(db.Text)
user_id=db.Column(db.Integer,db.ForeignKey('user.id'))
date=db.Column(db.DateTime)
has_showed=db.Column(db.Integer,default=0)
def __init__(self,content,people):
self.content=content
self.people=people
self.date=datetime.now()
def __repr__(self):
return '<%r>' % self.content
class Admin(db.Model):
id=db.Column(db.Integer,primary_key=True)
nickname=db.Column(db.String(80),unique=True)
def __repr__(self):
return '<User %r>' % self.nickname
class PublicConfig(db.Model):
id=db.Column(db.Integer,primary_key=True)
sicily_message=db.Column(db.String(500))
def __repr__(self):
return '<'+self.sicily_message+'>' | mit | 1,049,869,579,345,582,300 | 29.13913 | 111 | 0.706063 | false |
JohnLZeller/dd-agent | tests/test_cassandra.py | 1 | 5263 | import logging
import unittest
from dogstream.cassandra import parse_cassandra
logger = logging.getLogger(__name__)
class TestCassandraDogstream(unittest.TestCase):
def testStart(self):
events = parse_cassandra(logger, " INFO [main] 2012-12-11 21:46:26,995 StorageService.java (line 687) Bootstrap/Replace/Move completed! Now serving reads.")
self.assertTrue(events is None)
def testInfo(self):
events = parse_cassandra(logger, " INFO [CompactionExecutor:35] 2012-12-02 21:15:03,738 AutoSavingCache.java (line 268) Saved KeyCache (5 items) in 3 ms")
self.assertTrue(events is None)
def testWarn(self):
events = parse_cassandra(logger, " WARN [MemoryMeter:1] 2012-12-03 20:07:47,158 Memtable.java (line 197) setting live ratio to minimum of 1.0 instead of 0.9416553595658074")
self.assertTrue(events is None)
def testError(self):
for line in """\
ERROR [CompactionExecutor:518] 2012-12-11 21:35:29,686 AbstractCassandraDaemon.java (line 135) Exception in thread Thread[CompactionExecutor:518,1,RMI Runtime]
java.util.concurrent.RejectedExecutionException
at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:1768)
at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:767)
at java.util.concurrent.ScheduledThreadPoolExecutor.delayedExecute(ScheduledThreadPoolExecutor.java:215)
at java.util.concurrent.ScheduledThreadPoolExecutor.schedule(ScheduledThreadPoolExecutor.java:397)
at java.util.concurrent.ScheduledThreadPoolExecutor.submit(ScheduledThreadPoolExecutor.java:470)
at org.apache.cassandra.io.sstable.SSTableDeletingTask.schedule(SSTableDeletingTask.java:67)
at org.apache.cassandra.io.sstable.SSTableReader.releaseReference(SSTableReader.java:806)
at org.apache.cassandra.db.DataTracker.removeOldSSTablesSize(DataTracker.java:358)
at org.apache.cassandra.db.DataTracker.postReplace(DataTracker.java:330)
at org.apache.cassandra.db.DataTracker.replace(DataTracker.java:324)
at org.apache.cassandra.db.DataTracker.replaceCompactedSSTables(DataTracker.java:253)
at org.apache.cassandra.db.ColumnFamilyStore.replaceCompactedSSTables(ColumnFamilyStore.java:992)
at org.apache.cassandra.db.compaction.CompactionTask.execute(CompactionTask.java:200)
at org.apache.cassandra.db.compaction.CompactionManager$1.runMayThrow(CompactionManager.java:154)
at org.apache.cassandra.utils.WrappedRunnable.run(WrappedRunnable.java:30)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:441)
at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:303)
at java.util.concurrent.FutureTask.run(FutureTask.java:138)
at java.util.concurrent.ThreadPoolExecutor$Worker.runTask(ThreadPoolExecutor.java:886)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:908)
at java.lang.Thread.run(Thread.java:662)""".splitlines():
events = parse_cassandra(logger, line)
self.assertTrue(events is None)
def testCompactionStart(self):
events = parse_cassandra(logger, " INFO [CompactionExecutor:2] 2012-12-11 21:46:27,012 CompactionTask.java (line 109) Compacting [SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-11-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-9-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-12-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-10-Data.db')]")
self.assertEquals(events, [{'alert_type': 'info', 'event_type': 'cassandra.compaction', 'timestamp': 1355262387, 'msg_title': "Compacting [SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-1", 'msg_text': "Compacting [SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-11-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-9-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-12-Data.db'), SSTableReader(path='/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-10-Data.db')]", 'auto_priority': 0}])
def testCompactionEnd(self):
events = parse_cassandra(logger, "INFO [CompactionExecutor:2] 2012-12-11 21:46:27,095 CompactionTask.java (line 221) Compacted to [/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-13-Data.db,]. 880 to 583 (~66% of original) bytes for 4 keys at 0.007831MB/s. Time: 71ms.")
self.assertEquals(events, [{'alert_type': 'info', 'event_type': 'cassandra.compaction', 'timestamp': 1355262387, 'msg_title': 'Compacted to [/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-13-Data.db,]. 880 ', 'msg_text': 'Compacted to [/var/lib/cassandra/data/system/LocationInfo/system-LocationInfo-he-13-Data.db,]. 880 to 583 (~66% of original) bytes for 4 keys at 0.007831MB/s. Time: 71ms.', 'auto_priority': 0}])
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -4,113,671,884,750,322,700 | 86.716667 | 689 | 0.758123 | false |
stanleykylee/worldbank-world-development-indicators-distributed-tf | src/build_sequences.py | 1 | 1618 | import csv
import json
# configuration
DATA_FILE = 'WDI_Data.csv'
INDICATORS_FILE = 'indicators.config'
OUTPUT_FILE = 'population-data.csv.csv'
def make_country_dict():
country = {}
for i in range(0,57):
country[i] = {}
return country
# extract selected indicators and write time series entries of them to csv
def flush(dict):
out_str = ''
for entry in dict:
if len(dict[entry]) < len(selected_indicators):
continue
out_str = ''
for key in dict[entry]:
out_str += dict[entry][key] + ','
out_str = out_str[:-1] + '\n'
with open(OUTPUT_FILE, 'a') as f:
f.write(out_str)
f.flush()
return
# create list of indicators selected from dataset
with open(INDICATORS_FILE) as f:
selected_indicators = f.readlines()
selected_indicators = [elem.strip() for elem in selected_indicators]
with open(DATA_FILE, 'rb') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
idx = 0
for row in csv_reader:
if (idx == 0):
idx += 1
continue;
if (idx == 1):
country_dict = make_country_dict()
country = row[0]
if (row[0] != country):
country = row[0]
flush(country_dict)
country_dict = make_country_dict()
row_idx = 0
row_name = row[3]
if row_name in selected_indicators:
for item in row:
if (row_idx > 3 and item != ''):
country_dict[row_idx - 4][row_name] = item
row_idx += 1
idx += 1
| gpl-3.0 | -3,085,287,364,212,459,000 | 27.892857 | 74 | 0.543263 | false |
whitehorse-io/encarnia | Encarnia/typeclasses/statue.py | 1 | 1228 | """
Statues
A readable statue that players can read text from.
"""
from evennia import DefaultObject, Command, CmdSet
from world import english_utils
from evennia.utils import list_to_string
from random import randint
import time
from typeclasses.objects import Object
# the "read" command
class CmdReadStatue(Command):
"""
    Read the text inscribed on the statue.
    Usage:
      read statue
    Displays the statue's stored text to the reader.
"""
key = "read statue"
#aliases = ["hit", "break box", "break"]
locks = "cmd:all()"
help_category = "General"
def func(self):
# this Command sits on the box, so we don't need to search for it
self.caller.msg(self.obj.db.text)
class StatueCmdSet(CmdSet):
key = "read_statue_cmdset"
def at_cmdset_creation(self):
self.add(CmdReadStatue())
class Statue(DefaultObject):
"""
"""
def at_object_creation(self):
# Inherit the object properties.
        super(Statue, self).at_object_creation()
self.aliases.add([])
self.db.desc = False
self.db.text = "Beware: Lions..."
self.cmdset.add(StatueCmdSet, permanent=True) | mit | -988,572,104,984,226,600 | 19.578947 | 73 | 0.614821 | false |
shastah/spacewalk | backend/server/importlib/packageImport.py | 1 | 22007 | #
# Copyright (c) 2008--2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
# Package import process
#
import rpm
import sys
import os.path
from importLib import GenericPackageImport, IncompletePackage, \
Import, InvalidArchError, InvalidChannelError, \
IncompatibleArchError
from mpmSource import mpmBinaryPackage
from spacewalk.common import rhn_pkg
from spacewalk.common.rhnConfig import CFG
from spacewalk.server import taskomatic
from spacewalk.server.rhnServer import server_packages
class ChannelPackageSubscription(GenericPackageImport):
def __init__(self, batch, backend, caller=None, strict=0, repogen=True):
# If strict, the set of packages that was passed in will be the only
# one in the channels - everything else will be unlinked
GenericPackageImport.__init__(self, batch, backend)
self.affected_channels = []
# A hash keyed on the channel id, and with tuples
# (added_packages, removed_packages) as values (packages are package
# ids)
self.affected_channel_packages = {}
if not caller:
self.caller = "backend.(unknown)"
else:
self.caller = caller
self._strict_subscription = strict
self.repogen = repogen
def preprocess(self):
# Processes the package batch to a form more suitable for database
# operations
for package in self.batch:
# if package object doesn't have multiple checksums (like satellite-sync objects)
# then let's fake it
if 'checksums' not in package:
package['checksums'] = {package['checksum_type']: package['checksum']}
if not isinstance(package, IncompletePackage):
raise TypeError("Expected an IncompletePackage instance, "
"got %s" % package.__class__.__name__)
self._processPackage(package)
def fix(self):
# Look up arches and channels
self.backend.lookupPackageArches(self.package_arches)
self.backend.lookupChannels(self.channels)
# Initialize self.channel_package_arch_compat
self.channel_package_arch_compat = {}
for channel, channel_row in self.channels.items():
if not channel_row:
# Unsupported channel
continue
self.channel_package_arch_compat[channel_row['channel_arch_id']] = None
self.backend.lookupChannelPackageArchCompat(self.channel_package_arch_compat)
self.backend.lookupPackageNames(self.names)
self.backend.lookupEVRs(self.evrs)
self.backend.lookupChecksums(self.checksums)
# Fix the package information up, and uniquify the packages too
uniqdict = {}
for package in self.batch:
if package.ignored:
continue
self._postprocessPackageNEVRA(package)
if not CFG.ENABLE_NVREA:
# nvrea disabled, skip checksum
nevrao = (
package['name_id'],
package['evr_id'],
package['package_arch_id'],
package['org_id'])
else:
# As nvrea is enabled uniquify based on checksum
nevrao = (
package['name_id'],
package['evr_id'],
package['package_arch_id'],
package['org_id'],
package['checksum_id'])
if nevrao not in uniqdict:
# Uniquify the channel names
package['channels'] = {}
# Initialize the channels
# This is a handy way of checking arch compatibility for this
# package with its channels
self.__copyChannels(package, package)
uniqdict[nevrao] = package
else:
# Package is found twice in the same batch
# Are the packages the same?
self._comparePackages(package, uniqdict[nevrao])
# Invalidate it
package.ignored = 1
firstpackage = uniqdict[nevrao]
# Copy any new channels
self.__copyChannels(package, firstpackage)
# Knowing the id of the referenced package
package.first_package = firstpackage
def _comparePackages(self, package1, package2):
# XXX This should probably do a deep compare of the two packages
pass
def submit(self):
self.backend.lookupPackages(self.batch, self.checksums)
try:
affected_channels = self.backend.subscribeToChannels(self.batch,
strict=self._strict_subscription)
except:
self.backend.rollback()
raise
self.compute_affected_channels(affected_channels)
if len(self.batch) < 10:
# update small batch per package
name_ids = [pkg['name_id'] for pkg in self.batch]
else:
# update bigger batch at once
name_ids = []
self.backend.update_newest_package_cache(caller=self.caller,
affected_channels=self.affected_channel_packages, name_ids=name_ids)
# Now that channel is updated, schedule the repo generation
if self.repogen:
taskomatic.add_to_repodata_queue_for_channel_package_subscription(
self.affected_channels, self.batch, self.caller)
self.backend.commit()
def compute_affected_channels(self, affected_channels):
# Fill the list of affected channels
self.affected_channel_packages.clear()
self.affected_channel_packages.update(affected_channels)
for channel_label, channel_row in list(self.channels.items()):
channel_id = channel_row['id']
if channel_id in affected_channels:
affected_channels[channel_id] = channel_label
self.affected_channels = list(affected_channels.values())
def _processPackage(self, package):
GenericPackageImport._processPackage(self, package)
# Process channels
channels = []
channelHash = {}
for channel in package['channels']:
channelName = channel['label']
if channelName not in channelHash:
channels.append(channelName)
channelHash[channelName] = None
self.channels[channelName] = None
# Replace the channel list with the uniquified list
package.channels = channels
# Copies the channels from one package to the other
def __copyChannels(self, sourcePackage, destPackage):
dpHash = destPackage['channels']
for schannelName in sourcePackage.channels:
# Check if the package is compatible with the channel
channel = self.channels[schannelName]
if not channel:
# Unknown channel
sourcePackage.ignored = 1
raise InvalidChannelError(channel,
"Unsupported channel %s" % schannelName)
# Check channel-package compatibility
charch = channel['channel_arch_id']
archCompat = self.channel_package_arch_compat[charch]
if not archCompat:
# Invalid architecture
sourcePackage.ignored = 1
raise InvalidArchError(charch,
"Invalid channel architecture %s" % charch)
# Now check if the source package's arch is compatible with the
# current channel
if sourcePackage['package_arch_id'] not in archCompat:
sourcePackage.ignored = 1
raise IncompatibleArchError(sourcePackage.arch, charch,
"Package arch %s incompatible with channel %s" %
(sourcePackage.arch, schannelName))
dpHash[channel['id']] = schannelName
destPackage.channels = list(dpHash.values())
class PackageImport(ChannelPackageSubscription):
def __init__(self, batch, backend, caller=None, update_last_modified=0):
ChannelPackageSubscription.__init__(self, batch, backend,
caller=caller)
self.ignoreUploaded = 1
self._update_last_modified = update_last_modified
self.capabilities = {}
self.groups = {}
self.sourceRPMs = {}
self.changelog_data = {}
def _rpm_knows(self, tag):
# See if the installed version of RPM understands a given tag
# Assumed attr-format in RPM is 'RPMTAG_<UPPERCASETAG>'
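        # e.g. _rpm_knows('recommends') checks whether rpm.RPMTAG_RECOMMENDS exists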
return hasattr(rpm, 'RPMTAG_'+tag.upper())
def _processPackage(self, package):
ChannelPackageSubscription._processPackage(self, package)
# Process package groups
group = package['package_group']
if group not in self.groups:
self.groups[group] = None
sourceRPM = package['source_rpm']
if (sourceRPM is not None) and (sourceRPM not in self.sourceRPMs):
self.sourceRPMs[sourceRPM] = None
# Change copyright to license
# XXX
package['copyright'] = self._fix_encoding(package['license'])
for tag in ('recommends', 'suggests', 'supplements', 'enhances', 'breaks', 'predepends'):
if not self._rpm_knows(tag) or tag not in package or type(package[tag]) != type([]):
# older spacewalk server do not export weak deps.
# and older RPM doesn't know about them either
# lets create an empty list
package[tag] = []
# Creates all the data structures needed to insert capabilities
for tag in ('provides', 'requires', 'conflicts', 'obsoletes', 'recommends', 'suggests', 'supplements', 'enhances', 'breaks', 'predepends'):
depList = package[tag]
if type(depList) != type([]):
sys.stderr.write("!!! packageImport.PackageImport._processPackage: "
"erronous depList for '%s', converting to []\n" % tag)
depList = []
for dep in depList:
nv = []
for f in ('name', 'version'):
nv.append(dep[f])
del dep[f]
nv = tuple(nv)
dep['capability'] = nv
if nv not in self.capabilities:
self.capabilities[nv] = None
# Process files too
fileList = package['files']
for f in fileList:
filename = self._fix_encoding(f['name'])
nv = (filename, '')
del f['name']
f['capability'] = nv
if nv not in self.capabilities:
self.capabilities[nv] = None
fchecksumTuple = (f['checksum_type'], f['checksum'])
if fchecksumTuple not in self.checksums:
self.checksums[fchecksumTuple] = None
# Uniquify changelog entries
unique_package_changelog_hash = {}
unique_package_changelog = []
for changelog in package['changelog']:
key = (changelog['name'], changelog['time'], changelog['text'])
if key not in unique_package_changelog_hash:
self.changelog_data[key] = None
unique_package_changelog.append(changelog)
unique_package_changelog_hash[key] = 1
package['changelog'] = unique_package_changelog
# fix encoding issues in package summary and description
package['description'] = self._fix_encoding(package['description'])
package['summary'] = self._fix_encoding(package['summary'])
def fix(self):
# If capabilities are available, process them
if self.capabilities:
try:
self.backend.processCapabilities(self.capabilities)
except:
# Oops
self.backend.rollback()
raise
# Since this is the bulk of the work, commit
self.backend.commit()
self.backend.processChangeLog(self.changelog_data)
ChannelPackageSubscription.fix(self)
self.backend.lookupSourceRPMs(self.sourceRPMs)
self.backend.lookupPackageGroups(self.groups)
# Postprocess the gathered information
self.__postprocess()
def submit(self):
upload_force = self.uploadForce
if not upload_force and self._update_last_modified:
# # Force it just a little bit - kind of hacky
upload_force = 0.5
try:
self.backend.processPackages(self.batch,
uploadForce=upload_force,
forceVerify=self.forceVerify,
ignoreUploaded=self.ignoreUploaded,
transactional=self.transactional)
self._import_signatures()
except:
# Oops
self.backend.rollback()
raise
self.backend.commit()
if not self._update_last_modified:
# Go though the list of objects and clear out the ones that have a
# force of 0.5
for p in self.batch:
if p.diff and p.diff.level == 0.5:
# Ignore this difference completely
p.diff = None
# Leave p.diff_result in place
def subscribeToChannels(self):
affected_channels = self.backend.subscribeToChannels(self.batch)
# Fill the list of affected channels
self.compute_affected_channels(affected_channels)
name_ids = [pkg['name_id'] for pkg in self.batch]
self.backend.update_newest_package_cache(caller=self.caller,
affected_channels=self.affected_channel_packages, name_ids=name_ids)
taskomatic.add_to_repodata_queue_for_channel_package_subscription(
self.affected_channels, self.batch, self.caller)
self.backend.commit()
def __postprocess(self):
# Gather the IDs we've found
for package in self.batch:
if package.ignored:
# Skip it
continue
# Only deal with packages
self.__postprocessPackage(package)
def __postprocessPackage(self, package):
""" populate the columns foo_id with id numbers from appropriate hashes """
package['package_group'] = self.groups[package['package_group']]
source_rpm = package['source_rpm']
if source_rpm is not None:
source_rpm = self.sourceRPMs[source_rpm]
else:
source_rpm = ''
package['source_rpm_id'] = source_rpm
package['checksum_id'] = self.checksums[(package['checksum_type'], package['checksum'])]
# Postprocess the dependency information
for tag in ('provides', 'requires', 'conflicts', 'obsoletes', 'files', 'recommends', 'suggests', 'supplements', 'enhances', 'breaks', 'predepends'):
for entry in package[tag]:
nv = entry['capability']
entry['capability_id'] = self.capabilities[nv]
for c in package['changelog']:
c['changelog_data_id'] = self.changelog_data[(c['name'], c['time'], c['text'])]
fileList = package['files']
for f in fileList:
f['checksum_id'] = self.checksums[(f['checksum_type'], f['checksum'])]
def _comparePackages(self, package1, package2):
if (package1['checksum_type'] == package2['checksum_type']
and package1['checksum'] == package2['checksum']):
return
# XXX Handle this better
raise Exception("Different packages in the same batch")
def _cleanup_object(self, object):
ChannelPackageSubscription._cleanup_object(self, object)
if object.ignored:
object.id = object.first_package.id
def _import_signatures(self):
for package in self.batch:
# skip missing files and mpm packages
if package['path'] and not isinstance(package, mpmBinaryPackage):
full_path = os.path.join(CFG.MOUNT_POINT, package['path'])
if os.path.exists(full_path):
header = rhn_pkg.get_package_header(filename=full_path)
server_packages.processPackageKeyAssociations(header,
package['checksum_type'], package['checksum'])
def _fix_encoding(self, text):
if text is None:
return None
try:
return text.decode('utf8')
except UnicodeDecodeError:
return text.decode('iso8859-1')
class SourcePackageImport(Import):
def __init__(self, batch, backend, caller=None, update_last_modified=0):
Import.__init__(self, batch, backend)
self._update_last_modified = update_last_modified
self.ignoreUploaded = 1
self.sourceRPMs = {}
self.groups = {}
self.checksums = {}
def preprocess(self):
for package in self.batch:
self._processPackage(package)
def fix(self):
self.backend.lookupSourceRPMs(self.sourceRPMs)
self.backend.lookupPackageGroups(self.groups)
self.backend.lookupChecksums(self.checksums)
self.__postprocess()
# Uniquify the packages
uniqdict = {}
for package in self.batch:
# Unique key
key = (package['org_id'], package['source_rpm_id'])
if key not in uniqdict:
uniqdict[key] = package
continue
else:
self._comparePackages(package, uniqdict[key])
# And invalidate it
package.ignored = 1
package.first_package = uniqdict[key]
def submit(self):
upload_force = self.uploadForce
if not upload_force and self._update_last_modified:
# # Force it just a little bit - kind of hacky
upload_force = 0.5
try:
self.backend.processSourcePackages(self.batch,
uploadForce=upload_force,
forceVerify=self.forceVerify,
ignoreUploaded=self.ignoreUploaded,
transactional=self.transactional)
except:
# Oops
self.backend.rollback()
raise
self.backend.commit()
if not self._update_last_modified:
# Go though the list of objects and clear out the ones that have a
# force of 0.5
for p in self.batch:
if p.diff and p.diff.level == 0.5:
# Ignore this difference completely
p.diff = None
# Leave p.diff_result in place
def _comparePackages(self, package1, package2):
if (package1['checksum_type'] == package2['checksum_type']
and package1['checksum'] == package2['checksum']):
return
# XXX Handle this better
raise Exception("Different packages in the same batch")
def _processPackage(self, package):
Import._processPackage(self, package)
# Fix the arch
package.arch = 'src'
package.source_rpm = package['source_rpm']
sourceRPM = package['source_rpm']
if not sourceRPM:
# Should not happen
raise Exception("Source RPM %s does not exist")
self.sourceRPMs[sourceRPM] = None
self.groups[package['package_group']] = None
checksumTuple = (package['checksum_type'], package['checksum'])
if checksumTuple not in self.checksums:
self.checksums[checksumTuple] = None
sigchecksumTuple = (package['sigchecksum_type'], package['sigchecksum'])
if sigchecksumTuple not in self.checksums:
self.checksums[sigchecksumTuple] = None
def __postprocess(self):
# Gather the IDs we've found
for package in self.batch:
if package.ignored:
# Skip it
continue
# Only deal with packages
self.__postprocessPackage(package)
def __postprocessPackage(self, package):
# Set the ids
package['package_group'] = self.groups[package['package_group']]
package['source_rpm_id'] = self.sourceRPMs[package['source_rpm']]
package['checksum_id'] = self.checksums[(package['checksum_type'],
package['checksum'])]
package['sigchecksum_id'] = self.checksums[(package['sigchecksum_type'],
package['sigchecksum'])]
def _cleanup_object(self, object):
Import._cleanup_object(self, object)
if object.ignored:
object.id = object.first_package.id
def packageImporter(batch, backend, source=0, caller=None):
if source:
return SourcePackageImport(batch, backend, caller=caller)
return PackageImport(batch, backend, caller=caller)
| gpl-2.0 | -7,587,173,102,851,040,000 | 40.998092 | 156 | 0.577907 | false |
geertj/bluepass | bluepass/frontends/qt/passwordbutton.py | 1 | 10929 | #
# This file is part of Bluepass. Bluepass is Copyright (c) 2012-2013
# Geert Jansen.
#
# Bluepass is free software available under the GNU General Public License,
# version 3. See the file LICENSE distributed with this file for the exact
# licensing terms.
from __future__ import absolute_import, print_function
from PyQt4.QtCore import QTimer, Signal, Slot, Property, Qt, QPoint
from PyQt4.QtGui import (QPushButton, QStylePainter, QStyleOptionButton,
QStyle, QGridLayout, QWidget, QLabel, QSpinBox, QLineEdit, QFrame,
QApplication, QCheckBox, QFontMetrics)
class NoSelectSpinbox(QSpinBox):
"""This is a SpinBox that:
* Will not select the displayed text when the value changes.
* Does not accept keyboard input.
"""
def __init__(self, parent=None):
super(NoSelectSpinbox, self).__init__(parent)
self.setFocusPolicy(Qt.NoFocus)
def stepBy(self, amount):
super(NoSelectSpinbox, self).stepBy(amount)
self.lineEdit().deselect()
class StrengthIndicator(QLabel):
"""A password strength indicator.
This is a label that gives feedback on the strength of a password.
"""
Poor, Good, Excellent = range(3)
stylesheet = """
StrengthIndicator { border: 1px solid black; }
StrengthIndicator[strength="0"] { background-color: #ff2929; }
StrengthIndicator[strength="1"] { background-color: #4dd133; }
StrengthIndicator[strength="2"] { background-color: #4dd133; }
"""
def __init__(self, parent=None):
super(StrengthIndicator, self).__init__(parent)
self._strength = 0
self.setStyleSheet(self.stylesheet)
def getStrength(self):
return self._strength
def setStrength(self, strength):
self._strength = strength
if strength == self.Poor:
self.setText('Poor')
elif strength == self.Good:
self.setText('Good')
elif strength == self.Excellent:
self.setText('Excellent')
self.setStyleSheet(self.stylesheet)
strength = Property(int, getStrength, setStrength)
class PasswordConfiguration(QFrame):
"""Base class for password configuration popups.
A password popup is installed in a GeneratePasswordButton, and allows
the user to customize the parameters of password generation.
"""
def __init__(self, method, parent=None):
super(PasswordConfiguration, self).__init__(parent)
self.method = method
self.parameters = []
parametersChanged = Signal(str, list)
class DicewarePasswordConfiguration(PasswordConfiguration):
"""Configuration for Diceware password generation."""
stylesheet = """
PasswordConfiguration { border: 1px solid grey; }
"""
def __init__(self, parent=None):
super(DicewarePasswordConfiguration, self).__init__('diceware', parent)
self.parameters = [5]
self.addWidgets()
self.setFixedSize(self.sizeHint())
self.setStyleSheet(self.stylesheet)
def addWidgets(self):
grid = QGridLayout()
self.setLayout(grid)
grid.setColumnMinimumWidth(1, 10)
label = QLabel('Length', self)
grid.addWidget(label, 0, 0)
spinbox = NoSelectSpinbox(self)
spinbox.setSuffix(' words')
spinbox.setMinimum(4)
spinbox.setMaximum(8)
grid.addWidget(spinbox, 0, 2)
label = QLabel('Security', self)
grid.addWidget(label, 1, 0)
strength = StrengthIndicator(self)
grid.addWidget(strength, 1, 2)
self.strength = strength
spinbox.valueChanged.connect(self.setParameters)
spinbox.setValue(self.parameters[0])
@Slot(int)
def setParameters(self, words):
self.parameters[0] = words
self.updateStrength()
@Slot()
def updateStrength(self):
backend = QApplication.instance().backend()
strength = backend.password_strength(self.method, *self.parameters)
# We use Diceware only for locking our vaults. Because we know we
# do proper salting and key stretching, we add 20 extra bits.
strength += 20
if strength < 70:
strength = StrengthIndicator.Poor
elif strength < 94:
strength = StrengthIndicator.Good
else:
strength = StrengthIndicator.Excellent
self.strength.setStrength(strength)
class RandomPasswordConfiguration(PasswordConfiguration):
"""Configuration for random password generation."""
stylesheet = """
PasswordConfiguration { border: 1px solid grey; }
"""
def __init__(self, parent=None):
super(RandomPasswordConfiguration, self).__init__('random', parent)
self.parameters = [12, '[a-z][A-Z][0-9]']
self.addWidgets()
self.setFixedSize(self.sizeHint())
self.setStyleSheet(self.stylesheet)
def addWidgets(self):
grid = QGridLayout()
self.setLayout(grid)
grid.setColumnMinimumWidth(1, 10)
label = QLabel('Length', self)
grid.addWidget(label, 0, 0)
spinbox = NoSelectSpinbox(self)
spinbox.setSuffix(' characters')
spinbox.setMinimum(6)
spinbox.setMaximum(20)
grid.addWidget(spinbox, 0, 2, 1, 2)
label = QLabel('Characters')
grid.addWidget(label, 1, 0)
def updateInclude(s):
def stateChanged(state):
self.updateInclude(state, s)
return stateChanged
lower = QCheckBox('Lower')
grid.addWidget(lower, 1, 2)
lower.stateChanged.connect(updateInclude('[a-z]'))
upper = QCheckBox('Upper')
grid.addWidget(upper, 1, 3)
upper.stateChanged.connect(updateInclude('[A-Z]'))
digits = QCheckBox('Digits')
grid.addWidget(digits, 2, 2)
digits.stateChanged.connect(updateInclude('[0-9]'))
special = QCheckBox('Special')
grid.addWidget(special, 2, 3)
special.stateChanged.connect(updateInclude('[!-/]'))
label = QLabel('Security', self)
grid.addWidget(label, 3, 0)
strength = StrengthIndicator(self)
grid.addWidget(strength, 3, 2)
self.strength = strength
spinbox.valueChanged.connect(self.setLength)
spinbox.setValue(self.parameters[0])
lower.setChecked('[a-z]' in self.parameters[1])
upper.setChecked('[A-Z]' in self.parameters[1])
digits.setChecked('[0-9]' in self.parameters[1])
special.setChecked('[!-/]' in self.parameters[1])
@Slot(int)
def setLength(self, length):
self.parameters[0] = length
self.parametersChanged.emit(self.method, self.parameters)
self.updateStrength()
@Slot()
def updateInclude(self, enable, s):
if enable and s not in self.parameters[1]:
self.parameters[1] += s
elif not enable:
self.parameters[1] = self.parameters[1].replace(s, '')
self.parametersChanged.emit(self.method, self.parameters)
self.updateStrength()
@Slot()
def updateStrength(self):
backend = QApplication.instance().backend()
strength = backend.password_strength(self.method, *self.parameters)
# We do not know if the remote site does key stretching or salting.
# So we only give a Good rating if the entropy takes the password
# out of reach of the largest Rainbow tables.
if strength < 60:
strength = StrengthIndicator.Poor
elif strength < 84:
strength = StrengthIndicator.Good
else:
strength = StrengthIndicator.Excellent
self.strength.setStrength(strength)
class PopupButton(QPushButton):
"""A button with a popup.
The popup will be displayed just below the button after the user
keeps the button pressed for 500 msecs.
"""
def __init__(self, text, parent=None):
super(PopupButton, self).__init__(text, parent)
timer = QTimer()
timer.setSingleShot(True)
timer.setInterval(500)
timer.timeout.connect(self.showPopup)
self.timer = timer
self.popup = None
# I would have preferred to implement the menu indicator by overriding
# initStyleOption(), and nothing else, but it doesn't work. The C++
# ::paintEvent() and ::sizeHint() are not able to call into it. So we need
# to provide our own paintEvent() and sizeHint() too.
def initStyleOption(self, option):
super(PopupButton, self).initStyleOption(option)
option.features |= option.HasMenu
def paintEvent(self, event):
p = QStylePainter(self)
opts = QStyleOptionButton()
self.initStyleOption(opts)
p.drawControl(QStyle.CE_PushButton, opts)
def sizeHint(self):
size = super(PopupButton, self).sizeHint()
fm = QFontMetrics(QApplication.instance().font())
width = fm.width(self.text())
opts = QStyleOptionButton()
self.initStyleOption(opts)
style = self.style()
dw = style.pixelMetric(QStyle.PM_MenuButtonIndicator, opts, self)
size.setWidth(width + dw + 10)
return size
def mousePressEvent(self, event):
self.timer.start()
super(PopupButton, self).mousePressEvent(event)
def mouseReleaseEvent(self, event):
self.timer.stop()
super(PopupButton, self).mouseReleaseEvent(event)
def setPopup(self, popup):
popup.setParent(None)
popup.setWindowFlags(Qt.Popup)
popup.hide()
# Install a closeEvent() on the popup that raises the button.
def closeEvent(*args):
self.setDown(False)
popup.closeEvent = closeEvent
self.popup = popup
@Slot()
def showPopup(self):
if not self.popup:
return
pos = QPoint(self.width(), self.height())
pos = self.mapToGlobal(pos)
size = self.popup.size()
self.popup.move(pos.x() - size.width(), pos.y())
self.popup.show()
class GeneratePasswordButton(PopupButton):
"""A password generation button.
A password is generated each time the user clicks the button.
"""
def __init__(self, text, popup, parent=None):
super(GeneratePasswordButton, self).__init__(text, parent)
self.method = popup.method
self.parameters = popup.parameters
self.setPopup(popup)
popup.parametersChanged.connect(self.parametersChanged)
self.clicked.connect(self.generate)
@Slot(str, list)
def parametersChanged(self, method, parameters):
self.method = method
self.parameters = parameters
self.generate()
@Slot()
def generate(self):
backend = QApplication.instance().backend()
password = backend.generate_password(self.method, *self.parameters)
self.passwordGenerated.emit(password)
passwordGenerated = Signal(str)
| gpl-3.0 | -703,691,127,010,999,800 | 33.046729 | 79 | 0.637753 | false |
apophys/freeipa | ipalib/__init__.py | 1 | 34622 | # Authors:
# Jason Gerard DeRose <[email protected]>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Package containing the core library.
=============================
Tutorial for Plugin Authors
=============================
This tutorial will introduce you to writing plugins for freeIPA v2. It does
not cover every detail, but it provides enough to get you started and is
heavily cross-referenced with further documentation that (hopefully) fills
in the missing details.
In addition to this tutorial, the many built-in plugins in `ipalib.plugins`
and `ipaserver.plugins` provide real-life examples of how to write good
plugins.
----------------------------
How this tutorial is written
----------------------------
The code examples in this tutorial are presented as if entered into a Python
interactive interpreter session. As such, when you create a real plugin in
a source file, a few details will be different (in addition to the fact that
you will never include the ``>>>`` nor ``...`` that the interpreter places at
the beginning of each line of code).
The tutorial examples all have this pattern:
::
>>> from ipalib import Command, create_api
>>> api = create_api()
>>> class my_command(Command):
... pass
...
>>> api.add_plugin(my_command)
>>> api.finalize()
In the tutorial we call `create_api()` to create an *example* instance
of `plugable.API` to work with. But a real plugin will simply use
``ipalib.api``, the standard run-time instance of `plugable.API`.
A real plugin will have this pattern:
::
from ipalib import Command, Registry, api
register = Registry()
@register()
class my_command(Command):
pass
As seen above, also note that in a real plugin you will *not* call
`plugable.API.finalize()`. When in doubt, look at some of the built-in
plugins for guidance, like those in `ipalib.plugins`.
If you don't know what the Python *interactive interpreter* is, or are
confused about what this *Python* is in the first place, then you probably
should start with the Python tutorial:
http://docs.python.org/tutorial/index.html
------------------------------------
First steps: A simple command plugin
------------------------------------
Our first example will create the most basic command plugin possible. This
command will be seen in the list of command plugins, but it won't be capable
of actually doing anything yet.
A command plugin simultaneously adds a new command that can be called through
the command-line ``ipa`` script *and* adds a new XML-RPC method... the two are
one and the same, simply invoked in different ways.
A freeIPA plugin is a Python class, and when you create a plugin, you register
this class itself (instead of an instance of the class). To be a command
plugin, your plugin must subclass from `frontend.Command` (or from a subclass
thereof). Here is our first example:
>>> from ipalib import Command, create_api
>>> api = create_api()
>>> class my_command(Command): # Step 1, define class
... """My example plugin."""
...
>>> api.add_plugin(my_command) # Step 2, register class
Notice that we are registering the ``my_command`` class itself, not an
instance of ``my_command``.
Until `plugable.API.finalize()` is called, your plugin class has not been
instantiated nor does the ``Command`` namespace yet exist. For example:
>>> hasattr(api, 'Command')
False
>>> api.finalize() # plugable.API.finalize()
>>> hasattr(api.Command, 'my_command')
True
>>> api.Command.my_command.doc
Gettext('My example plugin.', domain='ipa', localedir=None)
Notice that your plugin instance is accessed through an attribute named
``my_command``, the same name as your plugin class name.
------------------------------
Make your command do something
------------------------------
The simplest way to make your example command plugin do something is to
implement a ``run()`` method, like this:
>>> class my_command(Command):
... """My example plugin with run()."""
...
... def run(self, **options):
... return dict(result='My run() method was called!')
...
>>> api = create_api()
>>> api.add_plugin(my_command)
>>> api.finalize()
>>> api.Command.my_command(version=u'2.47') # Call your command
{'result': 'My run() method was called!'}
When `frontend.Command.__call__()` is called, it first validates any arguments
and options your command plugin takes (if any) and then calls its ``run()``
method.
------------------------
Forwarding vs. execution
------------------------
However, unlike the example above, a typical command plugin will implement an
``execute()`` method instead of a ``run()`` method. Your command plugin can
be loaded in two distinct contexts:
1. In a *client* context - Your command plugin is only used to validate
any arguments and options it takes, and then ``self.forward()`` is
called, which forwards the call over XML-RPC to an IPA server where
the actual work is done.
2. In a *server* context - Your same command plugin validates any
arguments and options it takes, and then ``self.execute()`` is called,
which you should implement to perform whatever work your plugin does.
The base `frontend.Command.run()` method simply dispatches the call to
``self.execute()`` if ``self.env.in_server`` is True, or otherwise
dispatches the call to ``self.forward()``.
For example, say you have a command plugin like this:
>>> class my_command(Command):
... """Forwarding vs. execution."""
...
... def forward(self, **options):
... return dict(
... result='forward(): in_server=%r' % self.env.in_server
... )
...
... def execute(self, **options):
... return dict(
... result='execute(): in_server=%r' % self.env.in_server
... )
...
The ``options`` will contain a dict of command options. One option is added
automatically: ``version``. It contains the API version of the client.
In order to maintain forward compatibility, you should always specify the
API version current at the time you're writing your client.
If ``my_command`` is loaded in a *client* context, ``forward()`` will be
called:
>>> api = create_api()
>>> api.env.in_server = False # run() will dispatch to forward()
>>> api.add_plugin(my_command)
>>> api.finalize()
>>> api.Command.my_command(version=u'2.47') # Call your command plugin
{'result': 'forward(): in_server=False'}
On the other hand, if ``my_command`` is loaded in a *server* context,
``execute()`` will be called:
>>> api = create_api()
>>> api.env.in_server = True # run() will dispatch to execute()
>>> api.add_plugin(my_command)
>>> api.finalize()
>>> api.Command.my_command(version=u'2.47') # Call your command plugin
{'result': 'execute(): in_server=True'}
Normally there should be no reason to override `frontend.Command.forward()`,
but, as above, it can be done for demonstration purposes. In contrast, there
*is* a reason you might want to override `frontend.Command.run()`: if it only
makes sense to execute your command locally, i.e. if it should never be forwarded
to the server. In this case, you should implement your *do-stuff* in the
``run()`` method instead of in the ``execute()`` method.
For example, the ``ipa`` command line script has a ``help`` command
(`ipalib.cli.help`) that is specific to the command-line-interface and should
never be forwarded to the server.
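As a further illustration (a sketch for this tutorial only, not one of the
built-in plugins), such a local-only command could simply override ``run()``:
>>> class local_greeting(Command):
...     """Runs locally and is never forwarded to the server."""
...
...     def run(self, **options):
...         return dict(result='Hello from this machine!')
...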
---------------
Backend plugins
---------------
There are two types of plugins:
1. *Frontend plugins* - These are loaded in both the *client* and *server*
contexts. These need to be installed with any application built atop
the `ipalib` library. The built-in frontend plugins can be found in
`ipalib.plugins`. The ``my_command`` example above is a frontend
plugin.
2. *Backend plugins* - These are only loaded in a *server* context and
only need to be installed on the IPA server. The built-in backend
plugins can be found in `ipaserver.plugins`.
Backend plugins should provide a set of methods that standardize how IPA
interacts with some external system or library. For example, all interaction
with LDAP is done through the ``ldap`` backend plugin defined in
`ipaserver.plugins.b_ldap`. As a good rule of thumb, anytime you need to
import some package that is not part of the Python standard library, you
should probably interact with that package via a corresponding backend
plugin you implement.
Backend plugins are much more free-form than command plugins. Aside from a
few reserved attribute names, you can define arbitrary public methods on your
backend plugin.
Here is a simple example:
>>> from ipalib import Backend
>>> class my_backend(Backend):
... """My example backend plugin."""
...
... def do_stuff(self):
... """Part of your API."""
... return 'Stuff got done.'
...
>>> api = create_api()
>>> api.add_plugin(my_backend)
>>> api.finalize()
>>> api.Backend.my_backend.do_stuff()
'Stuff got done.'
-------------------------------
How your command should do work
-------------------------------
We now return to our ``my_command`` plugin example.
Plugins are separated into frontend and backend plugins so that there are not
unnecessary dependencies required by an application that only uses `ipalib` and
its built-in frontend plugins (and then forwards over XML-RPC for execution).
But how do we avoid introducing additional dependencies? For example, the
``user_add`` command needs to talk to LDAP to add the user, yet we want to
somehow load the ``user_add`` plugin on client machines without requiring the
``python-ldap`` package (Python bindings to openldap) to be installed. To
answer that, we consult our golden rule:
**The golden rule:** A command plugin should implement its ``execute()``
method strictly via calls to methods on one or more backend plugins.
So the module containing the ``user_add`` command does not itself import the
Python LDAP bindings, only the module containing the ``ldap`` backend plugin
does that, and the backend plugins are only installed on the server. The
``user_add.execute()`` method, which is only called when in a server context,
is implemented as a series of calls to methods on the ``ldap`` backend plugin.
When `plugable.Plugin.__init__()` is called, each plugin stores a reference to
the `plugable.API` instance it has been loaded into. So your plugin can
access the ``my_backend`` plugin as ``self.api.Backend.my_backend``.
Additionally, convenience attributes are set for each namespace, so your
plugin can also access the ``my_backend`` plugin as simply
``self.Backend.my_backend``.
This next example will tie everything together. First we create our backend
plugin:
>>> api = create_api()
>>> api.env.in_server = True # We want to execute, not forward
>>> class my_backend(Backend):
... """My example backend plugin."""
...
... def do_stuff(self):
... """my_command.execute() calls this."""
... return 'my_backend.do_stuff() indeed did do stuff!'
...
>>> api.add_plugin(my_backend)
Second, we have our frontend plugin, the command:
>>> class my_command(Command):
... """My example command plugin."""
...
... def execute(self, **options):
... """Implemented against Backend.my_backend"""
... return dict(result=self.Backend.my_backend.do_stuff())
...
>>> api.add_plugin(my_command)
Lastly, we call ``api.finalize()`` and see what happens when we call
``my_command()``:
>>> api.finalize()
>>> api.Command.my_command(version=u'2.47')
{'result': 'my_backend.do_stuff() indeed did do stuff!'}
When not in a server context, ``my_command.execute()`` never gets called, so
it never tries to access the non-existent backend plugin at
``self.Backend.my_backend``. To emphasize this point, here is one last
example:
>>> api = create_api()
>>> api.env.in_server = False # We want to forward, not execute
>>> class my_command(Command):
... """My example command plugin."""
...
... def execute(self, **options):
... """Same as above."""
... return dict(result=self.Backend.my_backend.do_stuff())
...
... def forward(self, **options):
... return dict(result='Just my_command.forward() getting called here.')
...
>>> api.add_plugin(my_command)
>>> api.finalize()
Notice that the ``my_backend`` plugin has certainly not been registered:
>>> hasattr(api.Backend, 'my_backend')
False
And yet we can call ``my_command()``:
>>> api.Command.my_command(version=u'2.47')
{'result': 'Just my_command.forward() getting called here.'}
----------------------------------------
Calling other commands from your command
----------------------------------------
It can be useful to have your ``execute()`` method call other command plugins.
Among other things, this allows for meta-commands that conveniently call
several other commands in a single operation. For example:
>>> api = create_api()
>>> api.env.in_server = True # We want to execute, not forward
>>> class meta_command(Command):
... """My meta-command plugin."""
...
... def execute(self, **options):
... """Calls command_1(), command_2()"""
... msg = '%s; %s.' % (
... self.Command.command_1()['result'],
... self.Command.command_2()['result'],
... )
... return dict(result=msg)
>>> class command_1(Command):
... def execute(self, **options):
... return dict(result='command_1.execute() called')
...
>>> class command_2(Command):
... def execute(self, **options):
... return dict(result='command_2.execute() called')
...
>>> api.add_plugin(meta_command)
>>> api.add_plugin(command_1)
>>> api.add_plugin(command_2)
>>> api.finalize()
>>> api.Command.meta_command(version=u'2.47')
{'result': 'command_1.execute() called; command_2.execute() called.'}
Because this is quite useful, we are going to revise our golden rule somewhat:
**The revised golden rule:** A command plugin should implement its
``execute()`` method strictly via what it can access through ``self.api``,
most likely via the backend plugins in ``self.api.Backend`` (which can also
be conveniently accessed as ``self.Backend``).
-----------------------------------------------
Defining arguments and options for your command
-----------------------------------------------
You can define a command that will accept specific arguments and options.
For example:
>>> from ipalib import Str
>>> class nudge(Command):
... """Takes one argument, one option"""
...
... takes_args = ('programmer',)
...
... takes_options = (Str('stuff', default=u'documentation'))
...
... def execute(self, programmer, **kw):
... return dict(
... result='%s, go write more %s!' % (programmer, kw['stuff'])
... )
...
>>> api = create_api()
>>> api.env.in_server = True
>>> api.add_plugin(nudge)
>>> api.finalize()
>>> api.Command.nudge(u'Jason', version=u'2.47')
{'result': u'Jason, go write more documentation!'}
>>> api.Command.nudge(u'Jason', stuff=u'unit tests', version=u'2.47')
{'result': u'Jason, go write more unit tests!'}
The ``args`` and ``options`` attributes are `plugable.NameSpace` instances
containing a command's arguments and options, respectively, as you can see:
>>> list(api.Command.nudge.args) # Iterates through argument names
['programmer']
>>> api.Command.nudge.args.programmer
Str('programmer')
>>> list(api.Command.nudge.options) # Iterates through option names
['stuff', 'version']
>>> api.Command.nudge.options.stuff
Str('stuff', default=u'documentation')
>>> api.Command.nudge.options.stuff.default
u'documentation'
The 'version' option is added to commands automatically.
The arguments and options must not contain colliding names. They are both
merged together into the ``params`` attribute, another `plugable.NameSpace`
instance, as you can see:
>>> api.Command.nudge.params
NameSpace(<3 members>, sort=False)
>>> list(api.Command.nudge.params) # Iterates through the param names
['programmer', 'stuff', 'version']
When calling a command, its positional arguments can also be provided as
keyword arguments, and in any order. For example:
>>> api.Command.nudge(stuff=u'lines of code', programmer=u'Jason', version=u'2.47')
{'result': u'Jason, go write more lines of code!'}
When a command plugin is called, the values supplied for its parameters are
put through a sophisticated processing pipeline that includes steps for
normalization, type conversion, validation, and dynamically constructing
the defaults for missing values. The details won't be covered here; however,
here is a quick teaser:
>>> from ipalib import Int
>>> class create_player(Command):
... takes_options = (
... 'first',
... 'last',
... Str('nick',
... normalizer=lambda value: value.lower(),
... default_from=lambda first, last: first[0] + last,
... ),
... Int('points', default=0),
... )
...
>>> cp = create_player()
>>> cp.finalize()
>>> cp.convert(points=u' 1000 ')
{'points': 1000}
>>> cp.normalize(nick=u'NickName')
{'nick': u'nickname'}
>>> cp.get_default(first=u'Jason', last=u'DeRose')
{'nick': u'jderose', 'points': 0}
For the full details on the parameter system, see the
`frontend.parse_param_spec()` function, and the `frontend.Param` and
`frontend.Command` classes.
---------------------------------------
Allowed return values from your command
---------------------------------------
The return values from your command can be rendered by different user
interfaces (CLI, web-UI); furthermore, a call to your command can be
transparently forwarded over the network (XML-RPC, JSON). As such, the return
values from your command must be usable by the least common denominator.
Your command should return only simple data types and simple data structures,
the kinds that can be represented in an XML-RPC request or in the JSON format.
The return values from your command's ``execute()`` method can include only
the following:
Simple scalar values:
These can be ``str``, ``unicode``, ``int``, and ``float`` instances,
plus the ``True``, ``False``, and ``None`` constants.
Simple compound values:
These can be ``dict``, ``list``, and ``tuple`` instances. These
compound values must contain only the simple scalar values above or
other simple compound values. These compound values can also be empty.
For our purposes here, the ``list`` and ``tuple`` types are equivalent
and can be used interchangeably.
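For instance, a return value built only from these types (a purely
illustrative structure, not the output of any built-in command) is safe to
return from ``execute()``:
>>> safe_result = dict(
...     result=dict(uid=u'jdoe', active=True, uidnumber=1001),
...     messages=[u'first message', u'second message'],
...     truncated=False,
... )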
Also note that your ``execute()`` method should not contain any ``print``
statements or otherwise cause any output on ``sys.stdout``. Your command can
(and should) produce log messages by using a module-level logger (see below).
To learn more about XML-RPC (XML Remote Procedure Call), see:
http://docs.python.org/library/xmlrpclib.html
http://en.wikipedia.org/wiki/XML-RPC
To learn more about JSON (JavaScript Object Notation), see:
http://docs.python.org/library/json.html
http://www.json.org/
---------------------------------------
How your command should print to stdout
---------------------------------------
As noted above, your command should not print anything while in its
``execute()`` method. So how does your command format its output when
called from the ``ipa`` script?
After the `cli.CLI.run_cmd()` method calls your command, it will call your
command's ``output_for_cli()`` method (if you have implemented one).
If you implement an ``output_for_cli()`` method, it must have the following
signature:
::
output_for_cli(textui, result, *args, **options)
textui
An object implementing methods for outputting to the console.
Currently the `ipalib.cli.textui` plugin is passed, which your method
can also access as ``self.Backend.textui``. However, in case this
changes in the future, your method should use the instance passed to
it in this first argument.
result
This is the return value from calling your command plugin. Depending
upon how your command is implemented, this is probably the return
value from your ``execute()`` method.
args
The arguments your command was called with. If your command takes no
arguments, you can omit this. You can also explicitly list your
arguments rather than using the generic ``*args`` form.
options
The options your command was called with. If your command takes no
options, you can omit this. If your command takes any options, you
must use the ``**options`` form as they will be provided strictly as
keyword arguments.
For example, say we set up a command like this (note that ``Flag`` is
imported first, since it has not appeared in the earlier examples):
>>> from ipalib import Flag
>>> class show_items(Command):
...
... takes_args = ('key?',)
...
... takes_options = (Flag('reverse'),)
...
... def execute(self, key, **options):
... items = dict(
... fruit=u'apple',
... pet=u'dog',
... city=u'Berlin',
... )
... if key in items:
... return dict(result=items[key])
... items = [
... (k, items[k]) for k in sorted(items, reverse=options['reverse'])
... ]
... return dict(result=items)
...
... def output_for_cli(self, textui, result, key, **options):
... result = result['result']
... if key is not None:
... textui.print_plain('%s = %r' % (key, result))
... else:
... textui.print_name(self.name)
... textui.print_keyval(result)
... format = '%d items'
... if options['reverse']:
... format += ' (in reverse order)'
... textui.print_count(result, format)
...
>>> api = create_api()
>>> api.bootstrap(in_server=True) # We want to execute, not forward
>>> api.add_plugin(show_items)
>>> api.finalize()
Normally when you invoke the ``ipa`` script, `cli.CLI.load_plugins()` will
register the `cli.textui` backend plugin, but for the sake of our example,
we will just create an instance here:
>>> from ipalib import cli
>>> textui = cli.textui() # We'll pass this to output_for_cli()
Now for what we are concerned with in this example, calling your command
through the ``ipa`` script basically will do the following:
>>> result = api.Command.show_items()
>>> api.Command.show_items.output_for_cli(textui, result, None, reverse=False)
-----------
show-items:
-----------
city = u'Berlin'
fruit = u'apple'
pet = u'dog'
-------
3 items
-------
Similarly, calling it with ``reverse=True`` would result in the following:
>>> result = api.Command.show_items(reverse=True)
>>> api.Command.show_items.output_for_cli(textui, result, None, reverse=True)
-----------
show-items:
-----------
pet = u'dog'
fruit = u'apple'
city = u'Berlin'
--------------------------
3 items (in reverse order)
--------------------------
Lastly, providing a ``key`` would result in the following:
>>> result = api.Command.show_items(u'city')
>>> api.Command.show_items.output_for_cli(textui, result, 'city', reverse=False)
city = u'Berlin'
See the `ipalib.cli.textui` plugin for a description of its methods.
------------------------
Logging from your plugin
------------------------
Plugins should log through a module-level logger.
For example:
>>> import logging
>>> logger = logging.getLogger(__name__)
>>> class paint_house(Command):
...
... takes_args = 'color'
...
... def execute(self, color, **options):
... """Uses logger.error()"""
... if color not in ('red', 'blue', 'green'):
... logger.error("I don't have %s paint!", color) # Log error
... return
... return 'I painted the house %s.' % color
...
Some basic knowledge of the Python ``logging`` module might be helpful. See:
http://docs.python.org/library/logging.html
The important thing to remember is that your plugin should not configure
logging itself, but should instead simply use the module-level logger.
Also see the `plugable.API.bootstrap()` method for details on how the logging
is configured.
---------------------
Environment variables
---------------------
Plugins access configuration variables and run-time information through
``self.api.env`` (or for convenience, ``self.env`` is equivalent). This
attribute is a reference to the `ipalib.config.Env` instance created in
`plugable.API.__init__()`.
After `API.bootstrap()` has been called, the `Env` instance will be populated
with all the environment information used by the built-in plugins.
This will be called before any plugins are registered, so plugin authors can
assume these variables will all exist by the time the module containing their
plugin (or plugins) is imported.
`Env._bootstrap()`, which is called by `API.bootstrap()`, will create several
run-time variables that cannot be overridden in configuration files or through
command-line options. Here is an overview of this run-time information:
============= ============================= =======================
Key Example value Description
============= ============================= =======================
bin '/usr/bin' Dir. containing script
dot_ipa '/home/jderose/.ipa' User config directory
home os.environ['HOME'] User home dir.
ipalib '.../site-packages/ipalib' Dir. of ipalib package
mode 'unit_test' The mode ipalib is in
script sys.argv[0] Path of script
site_packages '.../python2.5/site-packages' Dir. containing ipalib/
============= ============================= =======================
If your plugin requires new environment variables *and* will be included in
the freeIPA built-in plugins, you should add the defaults for your variables
in `ipalib.constants.DEFAULT_CONFIG`. Also, you should consider whether your
new environment variables should have any auto-magic logic to determine their
values if they haven't already been set by the time `config.Env._bootstrap()`,
`config.Env._finalize_core()`, or `config.Env._finalize()` is called.
On the other hand, if your plugin requires new environment variables and will
be installed in a 3rd-party package, your plugin should set these variables
in the module it is defined in.
`config.Env` values work on a first-one-wins basis... after a value has been
set, it cannot be overridden with a new value. Since any variable can be set
using the command-line ``-e`` global option or in a configuration file,
your module must check whether a variable has already been set before
setting its default value. For example:
>>> if 'message_of_the_day' not in api.env:
... api.env.message_of_the_day = 'Hello, world!'
...
Your plugin can access any environment variables via ``self.env``.
For example:
>>> class motd(Command):
... """Print message of the day."""
...
... def execute(self, **options):
... return dict(result=self.env.message)
...
>>> api = create_api()
>>> api.bootstrap(in_server=True, message='Hello, world!')
>>> api.add_plugin(motd)
>>> api.finalize()
>>> api.Command.motd(version=u'2.47')
{'result': u'Hello, world!'}
Also see the `plugable.API.bootstrap_with_global_options()` method.
---------------------------------------------
Indispensable ipa script commands and options
---------------------------------------------
The ``console`` command will launch a custom interactive Python interpreter
session. The global environment will have an ``api`` variable, which is the
standard `plugable.API` instance found at ``ipalib.api``. All plugins will
have been loaded (well, except the backend plugins if ``in_server`` is False)
and ``api`` will be fully initialized. To launch the console from within the
top-level directory in the source tree, just run ``ipa console`` from a
terminal, like this:
::
$ ./ipa console
By default, ``in_server`` is False. If you want to start the console in a
server context (so that all the backend plugins are loaded), you can use the
``-e`` option to set the ``in_server`` environment variable, like this:
::
$ ./ipa -e in_server=True console
You can specify multiple environment variables by including the ``-e`` option
multiple times, like this:
::
$ ./ipa -e in_server=True -e mode=dummy console
The space after the ``-e`` is optional. This is equivalent to the above command:
::
$ ./ipa -ein_server=True -emode=dummy console
The ``env`` command will print out the full environment in key=value pairs,
like this:
::
$ ./ipa env
If you use the ``--server`` option, it will forward the call to the server
over XML-RPC and print out what the environment is on the server, like this:
::
$ ./ipa env --server
The ``plugins`` command will show details of all the plugins that are loaded,
like this:
::
$ ./ipa plugins
-----------------------------------
Learning more about freeIPA plugins
-----------------------------------
To learn more about writing freeIPA plugins, you should:
1. Look at some of the built-in plugins, like the frontend plugins in
`ipalib.plugins.f_user` and the backend plugins in
`ipaserver.plugins.b_ldap`.
2. Learn about the base classes for frontend plugins in `ipalib.frontend`.
3. Learn about the core plugin framework in `ipalib.plugable`.
Furthermore, the freeIPA plugin architecture was inspired by the Bazaar plugin
architecture. Although the two are different enough that learning how to
write plugins for Bazaar will not particularly help you write plugins for
freeIPA, some might be interested in the documentation on writing plugins for
Bazaar, available here:
http://bazaar-vcs.org/WritingPlugins
If nothing else, we just want to give credit where credit is deserved!
However, freeIPA does not use any *code* from Bazaar... it merely borrows a
little inspiration.
--------------------------
A note on docstring markup
--------------------------
Lastly, a quick note on markup: All the Python docstrings in freeIPA v2
(including this tutorial) use the *reStructuredText* markup language. For
information on reStructuredText, see:
http://docutils.sourceforge.net/rst.html
For information on using reStructuredText markup with epydoc, see:
http://epydoc.sourceforge.net/manual-othermarkup.html
--------------------------------------------------
Next steps: get involved with freeIPA development!
--------------------------------------------------
The freeIPA team is always interested in feedback and contribution from the
community. To get involved with freeIPA, see the *Contribute* page on
freeIPA.org:
http://freeipa.org/page/Contribute
'''
from ipapython.version import VERSION as __version__
def _enable_warnings(error=False):
"""Enable additional warnings during development
"""
import ctypes
import warnings
# get reference to Py_BytesWarningFlag from Python CAPI
byteswarnings = ctypes.c_int.in_dll( # pylint: disable=no-member
ctypes.pythonapi, 'Py_BytesWarningFlag')
if byteswarnings.value >= 2:
# bytes warnings flag already set to error
return
# default warning mode for all modules: warn once per location
warnings.simplefilter('default', BytesWarning)
if error:
byteswarnings.value = 2
action = 'error'
else:
byteswarnings.value = 1
action = 'default'
module = '(ipa.*|__main__)'
warnings.filterwarnings(action, category=BytesWarning, module=module)
warnings.filterwarnings(action, category=DeprecationWarning,
module=module)
# call this as early as possible
if 'git' in __version__:
_enable_warnings(False)
# noqa: E402
from ipalib import plugable
from ipalib.backend import Backend
from ipalib.frontend import Command, LocalOrRemote, Updater
from ipalib.frontend import Object, Method
from ipalib.crud import Create, Retrieve, Update, Delete, Search
from ipalib.parameters import DefaultFrom, Bool, Flag, Int, Decimal, Bytes, Str, IA5Str, Password, DNParam
from ipalib.parameters import (BytesEnum, StrEnum, IntEnum, AccessTime, File,
DateTime, DNSNameParam)
from ipalib.errors import SkipPluginModule
from ipalib.text import _, ngettext, GettextFactory, NGettextFactory
Registry = plugable.Registry
class API(plugable.API):
bases = (Command, Object, Method, Backend, Updater)
@property
def packages(self):
if self.env.in_server:
# pylint: disable=import-error,ipa-forbidden-import
import ipaserver.plugins
# pylint: enable=import-error,ipa-forbidden-import
result = (
ipaserver.plugins,
)
else:
import ipaclient.remote_plugins
import ipaclient.plugins
result = (
ipaclient.remote_plugins.get_package(self),
ipaclient.plugins,
)
if self.env.context in ('installer', 'updates'):
# pylint: disable=import-error,ipa-forbidden-import
import ipaserver.install.plugins
# pylint: enable=import-error,ipa-forbidden-import
result += (ipaserver.install.plugins,)
return result
def create_api(mode='dummy'):
"""
Return standard `plugable.API` instance.
This standard instance allows plugins that subclass from the following
base classes:
- `frontend.Command`
- `frontend.Object`
- `frontend.Method`
- `backend.Backend`
"""
api = API()
if mode is not None:
api.env.mode = mode
assert mode != 'production'
return api
api = create_api(mode=None)
| gpl-3.0 | 9,104,041,449,937,767,000 | 34.220753 | 106 | 0.658136 | false |
lnls-fac/scripts | bin/delete_dups.py | 1 | 3559 | #!/usr/bin/python
import subprocess
import sys
import os
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
@staticmethod
def purple(string):
return bcolors.HEADER + string + bcolors.ENDC
@staticmethod
def blue(string):
return bcolors.OKBLUE + string + bcolors.ENDC
@staticmethod
def green(string):
return bcolors.OKGREEN + string + bcolors.ENDC
@staticmethod
def yellow(string):
return bcolors.WARNING + string + bcolors.ENDC
@staticmethod
def red(string):
return bcolors.FAIL + string + bcolors.ENDC
def get_size_str(size):
if size < 1024:
return '{0} bytes'.format(size)
elif size < 1024*1024:
return '{0:.1f} Kb'.format(1.0*size/1024.0)
elif size < 1024*1024*1024:
return '{0:.1f} Mb'.format(1.0*size/1024.0/1024.0)
else:
return '{0:.1f} Gb'.format(1.0*size/1024.0/1024.0/1024.0)
def grab_duplicates(folder, pname):
try:
p = subprocess.Popen(['fdupes', '-r', '-S', folder],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
print(''.join([pname, ': could not run fdupes command! Is package installed?']))
sys.exit()
output, err = p.communicate(b"input data that is passed to subprocess' stdin")
rc = p.returncode
lines = output.split('\n')
sizes, files = [],[]
size = None
for line in lines:
if 'bytes each:' in line:
if size is not None:
sizes.append(size)
files.append(dupfiles)
words = line.split(' ')
size = int(words[0])
dupfiles = []
elif len(line)>0:
dupfiles.append(line)
if size is not None:
sizes.append(size)
files.append(dupfiles)
idx = sorted(range(len(sizes)), key=lambda k: sizes[k]*(len(files[k])-1), reverse = True)
dups = [(sizes[i],files[i]) for i in idx]
return dups
def select_files_simple(dups):
size = 0
for dup in dups:
print(bcolors.yellow('size of each file: ' + get_size_str(dup[0])))
for fname in dup[1]:
print(fname)
size += dup[0] * (len(dup[1]) - 1)
print('')
print(bcolors.yellow('selection has ' + get_size_str(size) + ' of duplicates.'))
def select_files_substring(dups, substring):
size = 0
files = []
for dup in dups:
''' checks how many of duplicate files are selected '''
nr_included = 0
for fname in dup[1]:
if substring in fname:
nr_included += 1
if nr_included == 0:
continue
''' loops over files of duplicates that has at least one selection '''
print(bcolors.yellow('size of each file: ' + get_size_str(dup[0])))
for fname in dup[1]:
if substring in fname:
print(bcolors.blue(fname))
files.append(fname)
size += dup[0]
else:
print(fname)
''' in case all duplicate files are selected warns and exits '''
if nr_included == len(dup[1]):
print('')
print(bcolors.red('selection of all files in duplicate is not allowed!'))
sys.exit()
print('')
''' prints size of selection and returns list '''
print(bcolors.yellow('selection has ' + get_size_str(size) + ' of duplicates.'))
return files
def main():
pname = sys.argv[0]
folder = sys.argv[1]
dups = grab_duplicates(folder, pname)
if len(sys.argv) == 2:
select_files_simple(dups)
elif len(sys.argv) == 3:
substring = sys.argv[2]
substring = substring.strip('"')
files = select_files_substring(dups, substring)
elif (len(sys.argv) == 4) and (sys.argv[3] == 'delete'):
substring = sys.argv[2]
files = select_files_substring(dups, substring)
for fname in files:
os.remove(fname)
main()
| mit | 112,618,110,065,499,700 | 23.210884 | 90 | 0.650745 | false |
belokop/indico_bare | doc/dev/source/conf.py | 1 | 6719 | # -*- coding: utf-8 -*-
#
# cds-indico documentation build configuration file, created by
# sphinx-quickstart on Sun Nov 29 13:19:24 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path = [os.path.abspath('../../../'), os.path.abspath('../../../indico'), os.path.abspath('.')] + sys.path
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'repoze.sphinx.autointerface',
'exec_directive']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Indico'
copyright = u'2015, Indico Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.9.7'
# The full version, including alpha/beta/rc tags.
release = '1.9.7'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to the source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# include __init__() as well
autoclass_content = "both"
autodoc_default_flags = ['members', 'show-inheritance']
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'indicodoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'indico.tex', u'Indico Documentation',
u'Indico Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| gpl-3.0 | 8,301,756,260,777,005,000 | 31.936275 | 110 | 0.701444 | false |
akuendig/RxPython | rx/linq/takeLast.py | 1 | 4061 | from rx.disposable import CompositeDisposable, SingleAssignmentDisposable
from rx.internal import Struct
from rx.observable import Producer
import rx.linq.sink
from collections import deque
class TakeLastCount(Producer):
def __init__(self, source, count, scheduler):
self.source = source
self.count = count
self.scheduler = scheduler
def run(self, observer, cancel, setSink):
sink = self.Sink(self, observer, cancel)
setSink(sink)
return sink.run()
class Sink(rx.linq.sink.Sink):
def __init__(self, parent, observer, cancel):
super(TakeLastCount.Sink, self).__init__(observer, cancel)
self.parent = parent
self.queue = deque()
def run(self):
self.subscription = SingleAssignmentDisposable()
self.loopDisposable = SingleAssignmentDisposable()
self.subscription.disposable = self.parent.source.subscribeSafe(self)
return CompositeDisposable(self.subscription, self.loopDisposable)
def onNext(self, value):
self.queue.append(value)
if len(self.queue) > self.parent.count:
self.queue.popleft()
def onError(self, exception):
self.observer.onError(exception)
self.dispose()
def onCompleted(self):
self.subscription.dispose()
scheduler = self.parent.scheduler
if scheduler.isLongRunning:
self.loopDisposable.disposable = scheduler.scheduleLongRunning(self.loop)
else:
self.loopDisposable.disposable = scheduler.scheduleRecursive(self.loopRec)
def loopRec(self, recurse):
if len(self.queue) > 0:
self.observer.onNext(self.queue.popleft())
recurse()
else:
self.observer.onCompleted()
self.dispose()
def loop(self, cancel):
while not cancel.isDisposed:
if len(self.queue) == 0:
self.observer.onCompleted()
break
else:
self.observer.onNext(self.queue.popleft())
self.dispose()
class TakeLastTime(Producer):
def __init__(self, source, duration, scheduler):
self.source = source
self.duration = duration
self.scheduler = scheduler
def run(self, observer, cancel, setSink):
sink = self.Sink(self, observer, cancel)
setSink(sink)
return sink.run()
class Sink(rx.linq.sink.Sink):
def __init__(self, parent, observer, cancel):
super(TakeLastTime.Sink, self).__init__(observer, cancel)
self.parent = parent
def run(self):
self.subscription = SingleAssignmentDisposable()
self.loop = SingleAssignmentDisposable()
self.startTime = self.parent.scheduler.now()
self.subscription.disposable = self.parent.source.subscribeSafe(self)
return CompositeDisposable(self.subscription, self.loop)
def elapsed(self):
return self.parent.scheduler.now() - self.startTime
    def trim(self, now):
      # drop queued elements that fall outside the trailing time window
      while len(self.queue) > 0:
        current = self.queue.popleft()
        if now - current.interval < self.parent.duration:
          self.queue.appendleft(current)
          break
def onNext(self, value):
now = self.elapsed()
self.queue.append(Struct(value=value,interval=now))
self.trim(now)
def onError(self, exception):
self.observer.onError(exception)
self.dispose()
def onCompleted(self):
self.subscription.dispose()
now = self.elapsed()
self.trim(now)
scheduler = self.parent.scheduler
      if scheduler.isLongRunning:
        self.loopDisposable.disposable = scheduler.scheduleLongRunning(self.loop)
      else:
        self.loopDisposable.disposable = scheduler.scheduleRecursive(self.loopRec)
def loopRec(self, recurse):
if len(self.queue) > 0:
self.observer.onNext(self.queue.popleft().value)
recurse()
else:
self.observer.onCompleted()
self.dispose()
def loop(self, cancel):
while not cancel.isDisposed:
if len(self.queue) == 0:
self.observer.onCompleted()
break
else:
self.observer.onNext(self.queue.popleft().value)
self.dispose() | mit | -6,765,602,060,497,232,000 | 27.013793 | 82 | 0.658951 | false |
stoivo/GitSavvy | core/commands/remote.py | 1 | 1346 | import sublime
from sublime_plugin import TextCommand
from ..git_command import GitCommand
from ...common import util
from ..ui_mixins.quick_panel import show_remote_panel
class GsRemoteAddCommand(TextCommand, GitCommand):
"""
Add remotes
"""
def run(self, edit):
# Get remote name from user
self.view.window().show_input_panel("Remote URL", "", self.on_enter_remote, None, None)
def on_enter_remote(self, input_url):
self.url = input_url
owner = self.username_from_url(input_url)
self.view.window().show_input_panel("Remote name", owner, self.on_enter_name, None, None)
def on_enter_name(self, remote_name):
self.git("remote", "add", remote_name, self.url)
if sublime.ok_cancel_dialog("Your remote was added successfully. Would you like to fetch from this remote?"):
self.view.window().run_command("gs_fetch", {"remote": remote_name})
class GsRemoteRemoveCommand(TextCommand, GitCommand):
"""
Remove remotes
"""
def run(self, edit):
show_remote_panel(self.on_remote_selection)
def on_remote_selection(self, remote):
if not remote:
return
@util.actions.destructive(description="remove a remote")
def remove():
self.git("remote", "remove", remote)
remove()
| mit | -3,694,500,971,462,770,700 | 28.26087 | 118 | 0.645617 | false |
gem/oq-engine | openquake/hazardlib/near_fault.py | 1 | 18706 | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2012-2021 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module :mod:`openquake.hazardlib.nearfault` provides methods for near fault
PSHA calculation.
"""
import math
import numpy as np
from openquake.hazardlib.geo import geodetic as geod
import scipy.spatial.distance as dst
def get_xyz_from_ll(projected, reference):
"""
This method computes the x, y and z coordinates of a set of points
provided a reference point
:param projected:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the coordinates of target point to be projected
:param reference:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the coordinates of the reference point.
:returns:
x
y
z
"""
azims = geod.azimuth(reference.longitude, reference.latitude,
projected.longitude, projected.latitude)
depths = np.subtract(reference.depth, projected.depth)
dists = geod.geodetic_distance(reference.longitude,
reference.latitude,
projected.longitude,
projected.latitude)
return (dists * math.sin(math.radians(azims)),
dists * math.cos(math.radians(azims)),
depths)
def get_plane_equation(p0, p1, p2, reference):
'''
Define the equation of target fault plane passing through 3 given points
which includes two points on the fault trace and one point on the
    fault plane but away from the fault trace. Note: in order to maintain the
    consistency of the fault normal vector direction definition, the order
    of the three given points is strictly defined.
:param p0:
        The point on the fault trace that is closer to the starting point
        of the fault trace.
        :class:`~openquake.hazardlib.geo.point.Point` object
        representing the location of one vertex of the fault patch.
:param p1:
        The point on the fault trace that is further from the starting point
        of the fault trace.
        :class:`~openquake.hazardlib.geo.point.Point` object
        representing the location of one vertex of the fault patch.
:param p2:
The point on the fault plane but away from the fault trace.
:class:`~openquake.hazardlib.geo.point.Point` object
        representing the location of one vertex of the fault patch.
:param reference:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the origin of the cartesian system used the represent
objects in a projected reference
:returns:
normal: normal vector of the plane (a,b,c)
dist_to_plane: d in the plane equation, ax + by + cz = d
'''
p0_xyz = get_xyz_from_ll(p0, reference)
p1_xyz = get_xyz_from_ll(p1, reference)
p2_xyz = get_xyz_from_ll(p2, reference)
p0 = np.array(p0_xyz)
p1 = np.array(p1_xyz)
p2 = np.array(p2_xyz)
u = p1 - p0
v = p2 - p0
    # vector normal to plane, ax+by+cz = d, normal=(a,b,c)
normal = np.cross(u, v)
# Define the d for the plane equation
dist_to_plane = np.dot(p0, normal)
return normal, dist_to_plane
def projection_pp(site, normal, dist_to_plane, reference):
'''
This method finds the projection of the site onto the plane containing
the slipped area, defined as the Pp(i.e. 'perpendicular projection of
site location onto the fault plane' Spudich et al. (2013) - page 88)
given a site.
:param site:
Location of the site, [lon, lat, dep]
:param normal:
Normal to the plane including the fault patch,
describe by a normal vector[a, b, c]
:param dist_to_plane:
D in the plane equation, ax + by + cz = d
:param reference:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of project reference point
:returns:
pp, the projection point, [ppx, ppy, ppz], in xyz domain
, a numpy array.
'''
# Transform to xyz coordinate
[site_x, site_y, site_z] = get_xyz_from_ll(site, reference)
a = np.array([(1, 0, 0, -normal[0]),
(0, 1, 0, -normal[1]),
(0, 0, 1, -normal[2]),
(normal[0], normal[1], normal[2], 0)])
b = np.array([site_x, site_y, site_z, dist_to_plane])
x = np.linalg.solve(a, b)
pp = np.array([x[0], x[1], x[2]])
return pp
def vectors2angle(v1, v2):
""" Returns the angle in radians between vectors 'v1' and 'v2'.
:param v1:
vector, a numpy array
:param v2:
vector, a numpy array
:returns:
the angle in radians between the two vetors
"""
cosang = np.dot(v1, v2)
sinang = np.linalg.norm(np.cross(v1, v2))
return np.arctan2(sinang, cosang)
def average_s_rad(site, hypocenter, reference, pp,
normal, dist_to_plane, e, p0, p1, delta_slip):
"""
Gets the average S-wave radiation pattern given an e-path as described in:
Spudich et al. (2013) "Final report of the NGA-West2 directivity working
group", PEER report, page 90- 92 and computes: the site to the direct point
distance, rd, and the hypocentral distance, r_hyp.
:param site:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of the target site
:param hypocenter:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of hypocenter
:param reference:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location
of the reference point for coordinate projection within the
calculation. The suggested reference point is Epicentre.
:param pp:
the projection point pp on the patch plane,
a numpy array
:param normal:
normal of the plane, describe by a normal vector[a, b, c]
:param dist_to_plane:
d is the constant term in the plane equation, e.g., ax + by + cz = d
:param e:
a float defining the E-path length, which is the distance from
Pd(direction) point to hypocentre. In km.
:param p0:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of the starting point on fault segment
:param p1:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of the ending point on fault segment.
:param delta_slip:
slip direction away from the strike direction, in decimal degrees.
A positive angle is generated by a counter-clockwise rotation.
:returns:
fs, float value of the average S-wave radiation pattern.
rd, float value of the distance from site to the direct point.
r_hyp, float value of the hypocetre distance.
"""
    # Obtain the distance between the site (Ps) and Pp. zs is positive when
    # Ps is above the fault plane, and negative when Ps is below it
site_xyz = get_xyz_from_ll(site, reference)
zs = dst.pdist([pp, site_xyz])
if site_xyz[0] * normal[0] + site_xyz[1] * normal[1] + site_xyz[2] * \
normal[2] - dist_to_plane > 0:
zs = -zs
# Obtain the distance of Pp and hypocentre
hyp_xyz = get_xyz_from_ll(hypocenter, reference)
hyp_xyz = np.array(hyp_xyz).reshape(1, 3).flatten()
l2 = dst.pdist([pp, hyp_xyz])
rd = ((l2 - e) ** 2 + zs ** 2) ** 0.5
r_hyp = (l2 ** 2 + zs ** 2) ** 0.5
p0_xyz = get_xyz_from_ll(p0, reference)
p1_xyz = get_xyz_from_ll(p1, reference)
u = (np.array(p1_xyz) - np.array(p0_xyz))
v = pp - hyp_xyz
phi = vectors2angle(u, v) - np.deg2rad(delta_slip)
ix = np.cos(phi) * (2 * zs * (l2 / r_hyp - (l2 - e) / rd) -
zs * np.log((l2 + r_hyp) / (l2 - e + rd)))
inn = np.cos(phi) * (-2 * zs ** 2 * (1 / r_hyp - 1 / rd)
- (r_hyp - rd))
iphi = np.sin(phi) * (zs * np.log((l2 + r_hyp) / (l2 - e + rd)))
# Obtain the final average radiation pattern value
fs = (ix ** 2 + inn ** 2 + iphi ** 2) ** 0.5 / e
return fs, rd, r_hyp
def isochone_ratio(e, rd, r_hyp):
"""
Get the isochone ratio as described in Spudich et al. (2013) PEER
report, page 88.
:param e:
a float defining the E-path length, which is the distance from
Pd(direction) point to hypocentre. In km.
:param rd:
float, distance from the site to the direct point.
:param r_hyp:
float, the hypocentre distance.
:returns:
c_prime, a float defining the isochone ratio
"""
if e == 0.:
c_prime = 0.8
elif e > 0.:
c_prime = 1. / ((1. / 0.8) - ((r_hyp - rd) / e))
return c_prime
def _intersection(seg1_start, seg1_end, seg2_start, seg2_end):
"""
Get the intersection point between two segments. The calculation is in
    the Cartesian coordinate system.
:param seg1_start:
A numpy array,
        representing one end point of the first segment (e.g. segment1).
:param seg1_end:
A numpy array,
        representing the other end point of the first segment (e.g. segment1)
:param seg2_start:
A numpy array,
        representing one end point of the second segment (e.g. segment2).
:param seg2_end:
A numpy array,
        representing the other end point of the second segment (e.g. segment2)
:returns:
p_intersect, :a numpy ndarray.
representing the location of intersection point of the two
given segments
vector1, a numpy array, vector defined by intersection point and
seg2_end
vector2, a numpy array, vector defined by seg2_start and seg2_end
vector3, a numpy array, vector defined by seg1_start and seg1_end
vector4, a numpy array, vector defined by intersection point
and seg1_start
"""
pa = np.array([seg1_start, seg2_start])
pb = np.array([seg1_end, seg2_end])
si = pb - pa
ni = si / np.power(
np.dot(np.sum(si ** 2, axis=1).reshape(2, 1),
np.ones((1, 3))), 0.5)
nx = ni[:, 0].reshape(2, 1)
ny = ni[:, 1].reshape(2, 1)
nz = ni[:, 2].reshape(2, 1)
sxx = np.sum(nx ** 2 - 1)
syy = np.sum(ny ** 2 - 1)
szz = np.sum(nz ** 2 - 1)
sxy = np.sum(nx * ny)
sxz = np.sum(nx * nz)
syz = np.sum(ny * nz)
s = np.array([sxx, sxy, sxz, sxy, syy, syz, sxz, syz,
szz]).reshape(3, 3)
cx = np.sum(pa[:, 0].reshape(2, 1) * (nx ** 2 - 1) +
pa[:, 1].reshape(2, 1) * [nx * ny] +
pa[:, 2].reshape(2, 1) * (nx * nz))
cy = np.sum(pa[:, 0].reshape(2, 1) * [nx * ny] +
pa[:, 1].reshape(2, 1) * [ny ** 2 - 1] +
pa[:, 2].reshape(2, 1) * [ny * nz])
cz = np.sum(pa[:, 0].reshape(2, 1) * [nx * nz] +
pa[:, 1].reshape(2, 1) * [ny * nz] +
pa[:, 2].reshape(2, 1) * [nz ** 2 - 1])
c = np.array([cx, cy, cz]).reshape(3, 1)
p_intersect = np.linalg.solve(s, c)
vector1 = (p_intersect.flatten() - seg2_end) / \
sum((p_intersect.flatten() - seg2_end) ** 2) ** 0.5
vector2 = (seg2_start - seg2_end) / \
sum((seg2_start - seg2_end) ** 2) ** 0.5
vector3 = (seg1_end - seg1_start) / \
sum((seg1_end - seg1_start) ** 2) ** 0.5
vector4 = (p_intersect.flatten() - seg1_start) / \
sum((p_intersect.flatten() - seg1_start) ** 2) ** 0.5
return p_intersect, vector1, vector2, vector3, vector4
def directp(node0, node1, node2, node3, hypocenter, reference, pp):
"""
Get the Direct Point and the corresponding E-path as described in
Spudich et al. (2013). This method also provides a logical variable
stating if the DPP calculation must consider the neighbouring patch.
To define the intersection point(Pd) of PpPh line segment and fault plane,
    we obtain the intersection points (Pd) with each side of the fault plane, and
check which intersection point(Pd) is the one fitting the definition in
the Chiou and Spudich(2014) directivity model.
    There are two possible locations for Pd. In the first case, Pd is located
    on a side of the fault patch, when Pp is not inside the fault patch. In
    the second case, Pp is inside the fault patch, and then Pd = Pp.
For the first case, it follows three conditions:
1. the PpPh and PdPh line vector are the same,
2. PpPh >= PdPh,
3. Pd is not inside the fault patch.
    If we cannot find a solution among the four possible intersection points
    for the first case, we check whether the point fits the second case by
    checking if Pp is inside the fault patch.
    Because of the coordinate system mapping (from the geographic system to
    the Cartesian system), we allow a tolerance when we check the location.
    The allowed tolerance keeps increasing after each loop in which no
    solution for either case is found, until a solution is obtained.
:param node0:
:class:`~openquake.hazardlib.geo.point.Point` object
        representing the location of one vertex of the target fault
segment.
:param node1:
:class:`~openquake.hazardlib.geo.point.Point` object
        representing the location of one vertex of the target fault
segment. Note, the order should be clockwise.
:param node2:
:class:`~openquake.hazardlib.geo.point.Point` object
        representing the location of one vertex of the target fault
segment. Note, the order should be clockwise.
:param node3:
:class:`~openquake.hazardlib.geo.point.Point` object
        representing the location of one vertex of the target fault
segment. Note, the order should be clockwise.
:param hypocenter:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of floating hypocenter on each segment
calculation. In the method, we take the direction point of the
previous fault patch as hypocentre for the current fault patch.
:param reference:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of reference point for projection
:param pp:
the projection of the site onto the plane containing the fault
slipped area. A numpy array.
:returns:
Pd, a numpy array, representing the location of direction point
E, the distance from direction point to hypocentre.
go_next_patch, flag indicates if the calculation goes on the next
fault patch. 1: yes, 0: no.
"""
# Find the intersection point Pd by checking that PdPh shares the
# same direction vector with PpPh and that PpPh >= PdPh
# Transform to xyz coordinate
node0_xyz = get_xyz_from_ll(node0, reference)
node1_xyz = get_xyz_from_ll(node1, reference)
node2_xyz = get_xyz_from_ll(node2, reference)
node3_xyz = get_xyz_from_ll(node3, reference)
hypocenter_xyz = get_xyz_from_ll(hypocenter, reference)
hypocenter_xyz = np.array(hypocenter_xyz).flatten()
pp_xyz = pp
e = []
# Loop each segments on the patch to find Pd
segment_s = [node0_xyz, node1_xyz, node2_xyz, node3_xyz]
segment_e = [node1_xyz, node2_xyz, node3_xyz, node0_xyz]
# set the buffer distance used when testing against the patch bounds
buf = 0.0001
atol = 0.0001
loop = True
exit_flag = False
looptime = 0.
while loop:
x_min = np.min(np.array([node0_xyz[0], node1_xyz[0], node2_xyz[0],
node3_xyz[0]])) - buf
x_max = np.max(np.array([node0_xyz[0], node1_xyz[0], node2_xyz[0],
node3_xyz[0]])) + buf
y_min = np.min(np.array([node0_xyz[1], node1_xyz[1], node2_xyz[1],
node3_xyz[1]])) - buf
y_max = np.max(np.array([node0_xyz[1], node1_xyz[1], node2_xyz[1],
node3_xyz[1]])) + buf
n_seg = 0
exit_flag = False
for (seg_s, seg_e) in zip(segment_s, segment_e):
seg_s = np.array(seg_s).flatten()
seg_e = np.array(seg_e).flatten()
p_intersect, vector1, vector2, vector3, vector4 = _intersection(
seg_s, seg_e, pp_xyz, hypocenter_xyz)
ppph = dst.pdist([pp, hypocenter_xyz])
pdph = dst.pdist([p_intersect.flatten(), hypocenter_xyz])
n_seg = n_seg + 1
# Check that the directions of the hyp-pp and hyp-pd vectors
# are the same.
if (np.allclose(vector1.flatten(), vector2,
atol=atol, rtol=0.)):
if ((np.allclose(vector3.flatten(), vector4, atol=atol,
rtol=0.))):
# Check if ppph >= pdph.
if (ppph >= pdph):
if (p_intersect[0] >= x_min) & (p_intersect[0] <=
x_max):
if (p_intersect[1] >= y_min) & (p_intersect[1]
<= y_max):
e = pdph
pd = p_intersect
exit_flag = True
break
# when pp is located within the fault rupture plane, e = ppph
if not e:
if (pp_xyz[0] >= x_min) & (pp_xyz[0] <= x_max):
if (pp_xyz[1] >= y_min) & (pp_xyz[1] <= y_max):
pd = pp_xyz
e = ppph
exit_flag = True
if exit_flag:
break
if not e:
looptime += 1
atol = 0.0001 * looptime
buf = 0.0001 * looptime
# if pd is located on the 2nd fault segment, the DPP calculation
# continues on the next fault patch
if n_seg == 2:
go_next_patch = True
else:
go_next_patch = False
return pd, e, go_next_patch
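# Hypothetical usage sketch (the variable names are placeholders): node0..node3
# are the clockwise corners of one fault patch, `reference` is the projection
# origin and `pp` the site projected onto the rupture plane.
#
#     pd, e, go_next_patch = directp(node0, node1, node2, node3,
#                                    hypocenter, reference, pp)
#     if go_next_patch:
#         pass  # continue the E-path calculation on the neighbouring patch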
| agpl-3.0 | -4,038,285,033,445,288,400 | 37.64876 | 79 | 0.599326 | false |
MySportsFeeds/mysportsfeeds-python | ohmysportsfeedspy/v1_0.py | 1 | 7330 | import os
import csv
import requests
from datetime import datetime
import simplejson as json
import platform
import base64
import ohmysportsfeedspy
# API class for dealing with v1.0 of the API
class API_v1_0(object):
# Constructor
def __init__(self, verbose, store_type=None, store_location=None):
self.base_url = "https://api.mysportsfeeds.com/v1.0/pull"
self.headers = {
'Accept-Encoding': 'gzip',
'User-Agent': 'MySportsFeeds Python/{} ({})'.format(ohmysportsfeedspy.__version__, platform.platform())
}
self.verbose = verbose
self.store_type = store_type
self.store_location = store_location
self.valid_feeds = [
'cumulative_player_stats',
'full_game_schedule',
'daily_game_schedule',
'daily_player_stats',
'game_boxscore',
'scoreboard',
'game_playbyplay',
'player_gamelogs',
'team_gamelogs',
'roster_players',
'game_startinglineup',
'active_players',
'overall_team_standings',
'conference_team_standings',
'division_team_standings',
'playoff_team_standings',
'player_injuries',
'daily_dfs',
'current_season',
'latest_updates',
]
# Verify a feed
def __verify_feed(self, feedName):
is_valid = False
for feed in self.valid_feeds:
if feed == feedName:
is_valid = True
break
return is_valid
# Verify output format
def __verify_format(self, format):
is_valid = True
if format != 'json' and format != 'xml' and format != 'csv':
is_valid = False
return is_valid
# Feed URL
def determine_url(self, league, season, feed, output_format, params):
if feed == "current_season":
return "{base_url}/{league}/{feed}.{output}".format(base_url=self.base_url, feed=feed, league=league, season=season, output=output_format)
else:
return "{base_url}/{league}/{season}/{feed}.{output}".format(base_url=self.base_url, feed=feed, league=league, season=season, output=output_format)
# Generate the appropriate filename for a feed request
def __make_output_filename(self, league, season, feed, output_format, params):
filename = "{feed}-{league}-{season}".format(league=league.lower(),
season=season,
feed=feed)
if "gameid" in params:
filename += "-" + params["gameid"]
if "fordate" in params:
filename += "-" + params["fordate"]
filename += "." + output_format
return filename
# Save a feed response based on the store_type
def __save_feed(self, response, league, season, feed, output_format, params):
# Save to memory regardless of selected method
if output_format == "json":
store_output = response.json()
elif output_format == "xml":
store_output = response.text
elif output_format == "csv":
#store_output = response.content.split('\n')
store_output = response.content.decode('utf-8')
store_output = csv.reader(store_output.splitlines(), delimiter=',')
store_output = list(store_output)
if self.store_type == "file":
if not os.path.isdir(self.store_location):
os.mkdir(self.store_location)
filename = self.__make_output_filename(league, season, feed, output_format, params)
with open(self.store_location + filename, "w") as outfile:
if output_format == "json": # This is JSON
json.dump(store_output, outfile)
elif output_format == "xml": # This is xml
outfile.write(store_output)
elif output_format == "csv": # This is csv
writer = csv.writer(outfile)
for row in store_output:
writer.writerow([row])
else:
raise AssertionError("Could not interpret feed output format")
# Indicate this version does support BASIC auth
def supports_basic_auth(self):
return True
# Establish BASIC auth credentials
def set_auth_credentials(self, username, password):
self.auth = (username, password)
self.headers['Authorization'] = 'Basic ' + base64.b64encode('{}:{}'.format(username,password).encode('utf-8')).decode('ascii')
# Request data (and store it if applicable)
def get_data(self, **kwargs):
if not getattr(self, 'auth', None):  # auth is only set by set_auth_credentials()
raise AssertionError("You must authenticate() before making requests.")
# establish defaults for all variables
league = ""
season = ""
feed = ""
output_format = ""
params = {}
# iterate over args and assign vars
for key, value in kwargs.items():
if str(key) == 'league':
league = value
elif str(key) == 'season':
if kwargs['feed'] == 'players':
params['season'] = value
else:
season = value
elif str(key) == 'feed':
feed = value
elif str(key) == 'format':
output_format = value
else:
params[key] = value
# add force=false parameter (helps prevent unnecessary bandwidth use)
if not "force" in params:
params['force'] = 'false'
if self.__verify_feed(feed) == False:
raise ValueError("Unknown feed '" + feed + "'. Known values are: " + str(self.valid_feeds))
if self.__verify_format(output_format) == False:
raise ValueError("Unsupported format '" + output_format + "'.")
url = self.determine_url(league, season, feed, output_format, params)
if self.verbose:
print("Making API request to '{}'.".format(url))
print(" with headers:")
print(self.headers)
print(" and params:")
print(params)
r = requests.get(url, params=params, headers=self.headers)
if r.status_code == 200:
if self.store_type != None:
self.__save_feed(r, league, season, feed, output_format, params)
if output_format == "json":
data = json.loads(r.content)
elif output_format == "xml":
data = str(r.content)
else:
data = r.content.splitlines()
elif r.status_code == 304:
if self.verbose:
print("Data hasn't changed since last call")
filename = self.__make_output_filename(league, season, feed, output_format, params)
with open(self.store_location + filename) as f:
if output_format == "json":
data = json.load(f)
elif output_format == "xml":
data = str(f.readlines()[0])
else:
data = f.read().splitlines()
else:
raise Warning("API call failed with error:", r.status_code)
return data
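# Minimal usage sketch (not from the library; the credentials, league, season
# and feed values are placeholders -- see get_data() above for the accepted
# keyword arguments):
#
#     api = API_v1_0(verbose=True, store_type='file', store_location='results/')
#     api.set_auth_credentials('example-user', 'example-password')
#     data = api.get_data(league='nba', season='2016-2017-regular',
#                         feed='daily_game_schedule', format='json',
#                         fordate='20170101')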
| mit | 4,933,645,844,157,687,000 | 33.413146 | 159 | 0.54543 | false |
ua-snap/downscale | snap_scripts/epscor_sc/older_epscor_sc_scripts_archive/downscaled_data_to_netcdf_epscor_se.py | 1 | 9999 | # convert the downscaled data archive
def run( x ):
''' simple wrapper to open and return a 2-D array from a geotiff '''
import rasterio
return rasterio.open(x).read(1)
def sort_files( files, split_on='_', elem_month=-2, elem_year=-1 ):
'''
sort a list of files properly using the month and year parsed
from the filename. This is useful with SNAP data since the standard
is to name files like '<prefix>_MM_YYYY.tif'. If sorted using base
Python's sort/sorted functions, things will be sorted by the first char
of the month, which makes things go 1, 11, ... which is wrong for a timeseries.
This sorts them properly, following SNAP standards with the default settings.
ARGUMENTS:
----------
files = [list] list of `str` pathnames to be sorted by month and year. usually from glob.glob.
split_on = [str] `str` character to split the filename on. default:'_', SNAP standard.
elem_month = [int] slice element from resultant split filename list. Follows Python slicing syntax.
default:-2. For SNAP standard.
elem_year = [int] slice element from resultant split filename list. Follows Python slicing syntax.
default:-1. For SNAP standard.
RETURNS:
--------
sorted `list` by month and year ascending.
'''
import pandas as pd
months = [ int(fn.split('.')[0].split( split_on )[elem_month]) for fn in files ]
years = [ int(fn.split('.')[0].split( split_on )[elem_year]) for fn in files ]
df = pd.DataFrame( {'fn':files, 'month':months, 'year':years} )
df_sorted = df.sort_values( ['year', 'month' ] )
return df_sorted.fn.tolist()
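# A small illustrative example (the file names are hypothetical):
#
#     files = ['tas_01_2001.tif', 'tas_11_2000.tif', 'tas_02_2001.tif']
#     sort_files(files)
#     # -> ['tas_11_2000.tif', 'tas_01_2001.tif', 'tas_02_2001.tif']
#
# A plain sorted(files) would put '..._01_2001' before '..._11_2000' because
# it compares the names as strings.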
def only_years( files, begin=1901, end=2100, split_on='_', elem_year=-1 ):
'''
return a new list of filenames truncated to the range begin:end
ARGUMENTS:
----------
files = [list] list of `str` pathnames to be sorted by month and year. usually from glob.glob.
begin = [int] four digit integer year of the begin time default:1901
end = [int] four digit integer year of the end time default:2100
split_on = [str] `str` character to split the filename on. default:'_', SNAP standard.
elem_year = [int] slice element from resultant split filename list. Follows Python slicing syntax.
default:-1. For SNAP standard.
RETURNS:
--------
sliced `list` to begin and end year.
'''
import pandas as pd
years = [ int(fn.split('.')[0].split( split_on )[elem_year]) for fn in files ]
df = pd.DataFrame( { 'fn':files, 'year':years } )
df_slice = df[ (df.year >= begin ) & (df.year <= end ) ]
return df_slice.fn.tolist()
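# Continuing the hypothetical example above:
#
#     only_years(files, begin=2001, end=2100)
#     # -> keeps only 'tas_01_2001.tif' and 'tas_02_2001.tif'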
# seasonal calculations
def coordinates( fn=None, meta=None, numpy_array=None, input_crs=None, to_latlong=False ):
'''
take a raster file as input and return the centroid coords for each
of the grid cells as a pair of numpy 2d arrays (longitude, latitude)
'''
import rasterio
import numpy as np
from affine import Affine
from pyproj import Proj, transform
if fn:
# Read raster
with rasterio.open( fn ) as r:
T0 = r.affine # upper-left pixel corner affine transform
p1 = Proj( r.crs )
A = r.read( 1 ) # pixel values
elif (meta is not None) & (numpy_array is not None):
A = numpy_array
if input_crs != None:
p1 = Proj( input_crs )
T0 = meta[ 'affine' ]
else:
p1 = None
T0 = meta[ 'affine' ]
else:
BaseException( 'check inputs' )
# All rows and columns
cols, rows = np.meshgrid(np.arange(A.shape[1]), np.arange(A.shape[0]))
# Get affine transform for pixel centres
T1 = T0 * Affine.translation( 0.5, 0.5 )
# Function to convert pixel row/column index (from 0) to easting/northing at centre
rc2en = lambda r, c: ( c, r ) * T1
# All eastings and northings (there is probably a faster way to do this)
eastings, northings = np.vectorize(rc2en, otypes=[np.float, np.float])(rows, cols)
if to_latlong == False:
return eastings, northings
elif (to_latlong == True) & (input_crs != None):
# Project all longitudes, latitudes
longs, lats = transform(p1, p1.to_latlong(), eastings, northings)
return longs, lats
else:
BaseException( 'cant reproject to latlong without an input_crs' )
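# Minimal sketch of the pixel-centre affine logic used in coordinates()
# (the transform values are made up): for a grid whose upper-left corner is
# at (0, 100) with 10-unit square pixels,
#
#     from affine import Affine
#     T0 = Affine(10.0, 0.0, 0.0,
#                 0.0, -10.0, 100.0)
#     T1 = T0 * Affine.translation(0.5, 0.5)
#     x, y = T1 * (0, 0)   # column 0, row 0
#     # (x, y) == (5.0, 95.0), the centre of the upper-left cell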
# def cf_attrs( scenario, model, contact='Michael Lindgren - [email protected]', ):
# '''
# generate the cf_metadata convention attributes for the NC file
# CONVENTION SPEC HERE:
# http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html
# '''
# {'institution': 'Scenarios Network for Alaska + Arctic Planning' ,
# 'institute_id': 'SNAP',
# 'experiment_id':scenario,
# 'source':model,
# 'model_id':model,
# 'forcing':,
# 'parent_experiment_id': ,
# 'parent_experiment_rip': ,
# 'branch_time': ,
# 'contact':contact,
# 'references': ,
# 'initialization_method': ,
# 'physics_version': ,
# 'tracking_id': ,
# 'acknowledgements': ,
# 'cesm_casename': ,
# 'cesm_repotag': ,
# 'cesm_compset': ,
# 'resolution': ,
# 'forcing_note': ,
# 'processed_by': ,
# 'processing_code_information': ,
# 'product': ,
# 'experiment': ,
# 'frequency': ,
# 'creation_date': ,
# 'history': ,
# 'Conventions':'CF-1.6' ,
# 'project_id': ,
# 'table_id': ,
# 'title': ,
# 'parent_experiment': ,
# 'modeling_realm': ,
# 'realization': ,
# 'cmor_version': }
def generate_nc( model, variable, scenario, base_path, output_base_path, begin, end ):
'''
main function to output a netcdf file from a group of
GeoTiff files of downscaled SNAP data.
[MORE DOCS TO COME]
'''
# from pathos.multiprocessing import Pool
from multiprocessing import Pool
import numpy as np
import pandas as pd
import os, glob, rasterio, time, itertools
import xarray as xr
print( 'working on: {} {} {}'.format( variable, model, scenario ) )
# set up pathing
input_path = os.path.join( base_path, model, scenario, variable )
output_path = os.path.join( output_base_path, model, scenario, variable )
try: # try:except to overcome some multiprocessing collision issues
if not os.path.exists( output_path ):
os.makedirs( output_path )
except:
pass
# list the data
l = sort_files( glob.glob( os.path.join( input_path, '*.tif' ) ) )
l = only_years( l, begin=begin, end=end )
# open a pool and turn the list of arrays into an ndarray
pool = Pool( ncpus )
arr = np.array( pool.map( run, l ) )
pool.close()
pool.join()
# mask it
arr = np.ma.masked_where( arr <= np.min( arr ), arr )
# [RECENT ADDITION] swap the axes so we are (lat, lon, time)
arr = np.swapaxes( np.swapaxes(arr, 0, 2), 0, 1)
# get the lons and lats for the NetCDF
lons, lats = coordinates( l[0] )
rst = rasterio.open( l[0] )
# THIS IS A TEST AREA FOR PRODUCING THE *_bnds variables -- NOT IMPLEMENTED
# # the res is standard in both directions.
# res = 2000.0
# half_res = 2000.0 / 2
# lon_bnds = [ [i-half_res,i+half_res ] for i in lons.ravel() ]
# # the lat_bnds variable appears to be the same as the above, but it is
# # forced to the extent of the map so the lat_bnds at the top and bottom are
# # different resolution (half) of the remainder of the rectilinear grid cells.
# # this needs to be taken into account in this calculation.
# # MAYBE JUST HOLD IT TO THE EXTENT FOR THESE LATITUDES?
# lat_bnds = [ [i-half_res,i+half_res ] for i in lats.ravel() ]
# lat_mins, lat_max = rst.bounds
# get some time and date stuff
t = time.time()
# OGC WKT for EPSG:3338 which is the CF standard.
crs_wkt = 'PROJCS["NAD83 / Alaska Albers",GEOGCS["NAD83",DATUM["North_American_Datum_1983",\
SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],TOWGS84[0,0,0,0,0,0,0],\
AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],\
AUTHORITY["EPSG","4269"]],PROJECTION["Albers_Conic_Equal_Area"],PARAMETER["standard_parallel_1",55],\
PARAMETER["standard_parallel_2",65],PARAMETER["latitude_of_center",50],PARAMETER["longitude_of_center",-154],\
PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],\
AXIS["X",EAST],AXIS["Y",NORTH],AUTHORITY["EPSG","3338"]]'
# create the dataset in xarray
ds = xr.Dataset( { variable:(['x','y','time'], arr) },
coords={ 'lon': (['x', 'y'], lons),
'lat': (['x', 'y'], lats),
'time': pd.date_range( str(begin), str(end + 1), freq='M' ) },
attrs={ 'units':'Celsius', 'time_interval':'monthly',
'variable':variable, 'model':model, 'scenario':scenario,
'crs_wkt':crs_wkt,
'creation_date':time.ctime( t ), 'creation_date_UTC':t,
'created by':'Michael Lindgren - [email protected]',
'nodata_value':'-3.39999995e+38',
'cell_resolution':'2000 meters' } )
# write it out to disk
encoding = { variable: { '_FillValue':-3.39999995e+38, 'zlib':True } }
output_filename = os.path.join( output_path, '_'.join([ variable, model, scenario, str( begin ), str( end ) ]) + '.nc' )
ds.to_netcdf( output_filename, mode='w', encoding=encoding )
ds.close() # close it
return output_filename
if __name__ == '__main__':
import os, glob
import argparse
# parse the commandline arguments
parser = argparse.ArgumentParser( description='downscale the AR5-CMIP5 data to the AKCAN extent required by SNAP' )
parser.add_argument( "-m", "--model", action='store', dest='model', type=str, help="cmip5 model name (exact)" )
parser.add_argument( "-v", "--variable", action='store', dest='variable', type=str, help="cmip5 variable name (exact)" )
parser.add_argument( "-s", "--scenario", action='store', dest='scenario', type=str, help="cmip5 scenario name (exact)" )
args = parser.parse_args()
# setup args
base_path = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/downscaled_cmip5'
output_base_path = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/downscaled_cmip5_netcdf'
units = 'C'
time_interval = 'monthly'
ncpus = 32
if args.scenario == 'historical':
begin = 1900
end = 2005
else:
begin = 2006
end = 2100
# main
_ = generate_nc( args.model, args.variable, args.scenario, base_path, output_base_path, begin, end )
| mit | 2,371,635,134,616,050,000 | 36.037037 | 137 | 0.667767 | false |
apache/airflow | tests/plugins/test_plugins_manager.py | 2 | 14930 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import importlib
import logging
import os
import sys
import tempfile
from unittest import mock
import pytest
from airflow.hooks.base import BaseHook
from airflow.plugins_manager import AirflowPlugin
from airflow.www import app as application
from tests.test_utils.config import conf_vars
from tests.test_utils.mock_plugins import mock_plugin_manager
importlib_metadata_string = 'importlib_metadata'
try:
import importlib_metadata
# If importlib_metadata is installed, it takes precedence over built-in importlib.metadata in PY39
# so we should use the default declared above
except ImportError:
try:
import importlib.metadata
# importlib.metadata is only actually used when importlib_metadata is not available
importlib_metadata_string = 'importlib.metadata'
except ImportError:
raise Exception(
"Either importlib_metadata must be installed or importlib.metadata must be"
" available in system libraries (Python 3.9+). We seem to have neither."
)
ON_LOAD_EXCEPTION_PLUGIN = """
from airflow.plugins_manager import AirflowPlugin
class AirflowTestOnLoadExceptionPlugin(AirflowPlugin):
name = 'preload'
def on_load(self, *args, **kwargs):
raise Exception("oops")
"""
class TestPluginsRBAC:
@pytest.fixture(autouse=True)
def _set_attrs(self, app):
self.app = app
self.appbuilder = app.appbuilder
def test_flaskappbuilder_views(self):
from tests.plugins.test_plugin import v_appbuilder_package
appbuilder_class_name = str(v_appbuilder_package['view'].__class__.__name__)
plugin_views = [
view for view in self.appbuilder.baseviews if view.blueprint.name == appbuilder_class_name
]
assert len(plugin_views) == 1
# view should have a menu item matching category of v_appbuilder_package
links = [
menu_item
for menu_item in self.appbuilder.menu.menu
if menu_item.name == v_appbuilder_package['category']
]
assert len(links) == 1
# menu link should also have a link matching the name of the package.
link = links[0]
assert link.name == v_appbuilder_package['category']
assert link.childs[0].name == v_appbuilder_package['name']
def test_flaskappbuilder_menu_links(self):
from tests.plugins.test_plugin import appbuilder_mitem, appbuilder_mitem_toplevel
# menu item (category) should exist matching appbuilder_mitem.category
categories = [
menu_item
for menu_item in self.appbuilder.menu.menu
if menu_item.name == appbuilder_mitem['category']
]
assert len(categories) == 1
# menu link should be a child in the category
category = categories[0]
assert category.name == appbuilder_mitem['category']
assert category.childs[0].name == appbuilder_mitem['name']
assert category.childs[0].href == appbuilder_mitem['href']
# a top level link isn't nested in a category
top_levels = [
menu_item
for menu_item in self.appbuilder.menu.menu
if menu_item.name == appbuilder_mitem_toplevel['name']
]
assert len(top_levels) == 1
link = top_levels[0]
assert link.href == appbuilder_mitem_toplevel['href']
assert link.label == appbuilder_mitem_toplevel['label']
def test_app_blueprints(self):
from tests.plugins.test_plugin import bp
# Blueprint should be present in the app
assert 'test_plugin' in self.app.blueprints
assert self.app.blueprints['test_plugin'].name == bp.name
def test_flaskappbuilder_nomenu_views():
from tests.plugins.test_plugin import v_nomenu_appbuilder_package
class AirflowNoMenuViewsPlugin(AirflowPlugin):
appbuilder_views = [v_nomenu_appbuilder_package]
appbuilder_class_name = str(v_nomenu_appbuilder_package['view'].__class__.__name__)
with mock_plugin_manager(plugins=[AirflowNoMenuViewsPlugin()]):
appbuilder = application.create_app(testing=True).appbuilder
plugin_views = [view for view in appbuilder.baseviews if view.blueprint.name == appbuilder_class_name]
assert len(plugin_views) == 1
class TestPluginsManager:
def test_no_log_when_no_plugins(self, caplog):
with mock_plugin_manager(plugins=[]):
from airflow import plugins_manager
plugins_manager.ensure_plugins_loaded()
assert caplog.record_tuples == []
def test_should_load_plugins_from_property(self, caplog):
class AirflowTestPropertyPlugin(AirflowPlugin):
name = "test_property_plugin"
@property
def hooks(self):
class TestPropertyHook(BaseHook):
pass
return [TestPropertyHook]
with mock_plugin_manager(plugins=[AirflowTestPropertyPlugin()]):
from airflow import plugins_manager
caplog.set_level(logging.DEBUG, "airflow.plugins_manager")
plugins_manager.ensure_plugins_loaded()
assert 'AirflowTestPropertyPlugin' in str(plugins_manager.plugins)
assert 'TestPropertyHook' in str(plugins_manager.registered_hooks)
assert caplog.records[-1].levelname == 'DEBUG'
assert caplog.records[-1].msg == 'Loading %d plugin(s) took %.2f seconds'
def test_loads_filesystem_plugins(self, caplog):
from airflow import plugins_manager
with mock.patch('airflow.plugins_manager.plugins', []):
plugins_manager.load_plugins_from_plugin_directory()
assert 5 == len(plugins_manager.plugins)
for plugin in plugins_manager.plugins:
if 'AirflowTestOnLoadPlugin' not in str(plugin):
continue
assert 'postload' == plugin.name
break
else:
pytest.fail("Wasn't able to find a registered `AirflowTestOnLoadPlugin`")
assert caplog.record_tuples == []
def test_loads_filesystem_plugins_exception(self, caplog):
from airflow import plugins_manager
with mock.patch('airflow.plugins_manager.plugins', []):
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'testplugin.py'), "w") as f:
f.write(ON_LOAD_EXCEPTION_PLUGIN)
with conf_vars({('core', 'plugins_folder'): tmpdir}):
plugins_manager.load_plugins_from_plugin_directory()
assert plugins_manager.plugins == []
received_logs = caplog.text
assert 'Failed to import plugin' in received_logs
assert 'testplugin.py' in received_logs
def test_should_warning_about_incompatible_plugins(self, caplog):
class AirflowAdminViewsPlugin(AirflowPlugin):
name = "test_admin_views_plugin"
admin_views = [mock.MagicMock()]
class AirflowAdminMenuLinksPlugin(AirflowPlugin):
name = "test_menu_links_plugin"
menu_links = [mock.MagicMock()]
with mock_plugin_manager(
plugins=[AirflowAdminViewsPlugin(), AirflowAdminMenuLinksPlugin()]
), caplog.at_level(logging.WARNING, logger='airflow.plugins_manager'):
from airflow import plugins_manager
plugins_manager.initialize_web_ui_plugins()
assert caplog.record_tuples == [
(
"airflow.plugins_manager",
logging.WARNING,
"Plugin 'test_admin_views_plugin' may not be compatible with the current Airflow version. "
"Please contact the author of the plugin.",
),
(
"airflow.plugins_manager",
logging.WARNING,
"Plugin 'test_menu_links_plugin' may not be compatible with the current Airflow version. "
"Please contact the author of the plugin.",
),
]
def test_should_not_warning_about_fab_plugins(self, caplog):
class AirflowAdminViewsPlugin(AirflowPlugin):
name = "test_admin_views_plugin"
appbuilder_views = [mock.MagicMock()]
class AirflowAdminMenuLinksPlugin(AirflowPlugin):
name = "test_menu_links_plugin"
appbuilder_menu_items = [mock.MagicMock()]
with mock_plugin_manager(
plugins=[AirflowAdminViewsPlugin(), AirflowAdminMenuLinksPlugin()]
), caplog.at_level(logging.WARNING, logger='airflow.plugins_manager'):
from airflow import plugins_manager
plugins_manager.initialize_web_ui_plugins()
assert caplog.record_tuples == []
def test_should_not_warning_about_fab_and_flask_admin_plugins(self, caplog):
class AirflowAdminViewsPlugin(AirflowPlugin):
name = "test_admin_views_plugin"
admin_views = [mock.MagicMock()]
appbuilder_views = [mock.MagicMock()]
class AirflowAdminMenuLinksPlugin(AirflowPlugin):
name = "test_menu_links_plugin"
menu_links = [mock.MagicMock()]
appbuilder_menu_items = [mock.MagicMock()]
with mock_plugin_manager(
plugins=[AirflowAdminViewsPlugin(), AirflowAdminMenuLinksPlugin()]
), caplog.at_level(logging.WARNING, logger='airflow.plugins_manager'):
from airflow import plugins_manager
plugins_manager.initialize_web_ui_plugins()
assert caplog.record_tuples == []
def test_entrypoint_plugin_errors_dont_raise_exceptions(self, caplog):
"""
Test that Airflow does not raise an error if there is any Exception because of a plugin.
"""
from airflow.plugins_manager import import_errors, load_entrypoint_plugins
mock_dist = mock.Mock()
mock_entrypoint = mock.Mock()
mock_entrypoint.name = 'test-entrypoint'
mock_entrypoint.group = 'airflow.plugins'
mock_entrypoint.module = 'test.plugins.test_plugins_manager'
mock_entrypoint.load.side_effect = ImportError('my_fake_module not found')
mock_dist.entry_points = [mock_entrypoint]
with mock.patch(
f'{importlib_metadata_string}.distributions', return_value=[mock_dist]
), caplog.at_level(logging.ERROR, logger='airflow.plugins_manager'):
load_entrypoint_plugins()
received_logs = caplog.text
# Assert Traceback is shown too
assert "Traceback (most recent call last):" in received_logs
assert "my_fake_module not found" in received_logs
assert "Failed to import plugin test-entrypoint" in received_logs
assert ("test.plugins.test_plugins_manager", "my_fake_module not found") in import_errors.items()
def test_registering_plugin_macros(self, request):
"""
Tests whether macros that originate from plugins are being registered correctly.
"""
from airflow import macros
from airflow.plugins_manager import integrate_macros_plugins
def cleanup_macros():
"""Reloads the airflow.macros module such that the symbol table is reset after the test."""
# We're explicitly deleting the module from sys.modules and importing it again
# using import_module() as opposed to using importlib.reload() because the latter
# does not undo the changes to the airflow.macros module that are being caused by
# invoking integrate_macros_plugins()
del sys.modules['airflow.macros']
importlib.import_module('airflow.macros')
request.addfinalizer(cleanup_macros)
def custom_macro():
return 'foo'
class MacroPlugin(AirflowPlugin):
name = 'macro_plugin'
macros = [custom_macro]
with mock_plugin_manager(plugins=[MacroPlugin()]):
# Ensure the macros for the plugin have been integrated.
integrate_macros_plugins()
# Test whether the modules have been created as expected.
plugin_macros = importlib.import_module(f"airflow.macros.{MacroPlugin.name}")
for macro in MacroPlugin.macros:
# Verify that the macros added by the plugin are being set correctly
# on the plugin's macro module.
assert hasattr(plugin_macros, macro.__name__)
# Verify that the symbol table in airflow.macros has been updated with an entry for
# this plugin, this is necessary in order to allow the plugin's macros to be used when
# rendering templates.
assert hasattr(macros, MacroPlugin.name)
class TestPluginsDirectorySource:
def test_should_return_correct_path_name(self):
from airflow import plugins_manager
source = plugins_manager.PluginsDirectorySource(__file__)
assert "test_plugins_manager.py" == source.path
assert "$PLUGINS_FOLDER/test_plugins_manager.py" == str(source)
assert "<em>$PLUGINS_FOLDER/</em>test_plugins_manager.py" == source.__html__()
class TestEntryPointSource:
def test_should_return_correct_source_details(self):
from airflow import plugins_manager
mock_entrypoint = mock.Mock()
mock_entrypoint.name = 'test-entrypoint-plugin'
mock_entrypoint.module = 'module_name_plugin'
mock_dist = mock.Mock()
mock_dist.metadata = {'name': 'test-entrypoint-plugin'}
mock_dist.version = '1.0.0'
mock_dist.entry_points = [mock_entrypoint]
with mock.patch(f'{importlib_metadata_string}.distributions', return_value=[mock_dist]):
plugins_manager.load_entrypoint_plugins()
source = plugins_manager.EntryPointSource(mock_entrypoint, mock_dist)
assert str(mock_entrypoint) == source.entrypoint
assert "test-entrypoint-plugin==1.0.0: " + str(mock_entrypoint) == str(source)
assert "<em>test-entrypoint-plugin==1.0.0:</em> " + str(mock_entrypoint) == source.__html__()
| apache-2.0 | -9,114,536,456,285,706,000 | 37.981723 | 110 | 0.649565 | false |
danielholmstrom/nose-pyversion | setup.py | 1 | 1770 | """
~~~~~~~~~~~~~~~~~~~~~
Nose-Pyversion-Plugin
~~~~~~~~~~~~~~~~~~~~~
"""
import os
import sys
from setuptools import find_packages, setup
# Required for nose.collector, see http://bugs.python.org/issue15881#msg170215
try:
import multiprocessing
except ImportError:
pass
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
# Requirements for the package
install_requires = [
'nose',
]
# Requirement for running tests
test_requires = install_requires
extra = {}
if sys.version_info >= (3,):
extra['use_2to3'] = True
setup(name='Nose-PyVersion',
version='0.1b1',
description="Nose plugin for excluding files based on python version",
long_description=README,
url='http://github.com/danielholmstrom/nose-pyversion/',
license='MIT',
author='Daniel Holmstrom',
author_email='[email protected]',
platforms='any',
classifiers=['Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: '
'Libraries :: Python Modules'],
py_modules=['nose_pyversion'],
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
tests_require=test_requires,
test_suite='nose.collector',
entry_points="""
[nose.plugins]
pyversion=nose_pyversion:PyVersion
""",
**extra
)
| mit | -7,525,720,973,531,597,000 | 27.548387 | 78 | 0.596045 | false |
egbertbouman/tribler-g | Tribler/Tools/dirtrackerseeder.py | 1 | 6798 | # Written by Arno Bakker
# see LICENSE.txt for license information
#
# Razvan Deaconescu, 2008:
# * corrected problem when running in background
# * added usage and print_version functions
# * uses getopt for command line argument parsing
import sys
import shutil
import time
import tempfile
import random
import os
import getopt
from traceback import print_exc
from Tribler.__init__ import LIBRARYNAME
from Tribler.Core.API import *
from Tribler.Core.BitTornado.__init__ import version, report_email
MAXUPLOAD = 1000 # KB/s or None
checkpointedwhenseeding = False
sesjun = None
def usage():
print "Usage: python dirseeder.py [options] directory"
print "Options:"
print "\t--port <port>"
print "\t-p <port>\t\tuse <port> to listen for connections"
print "\t\t\t\t(default is random value)"
print "\tdirectory (default is current)"
print "\t--seeder\t\t\tseeder only"
print "\t--version"
print "\t-v\t\t\tprint version and exit"
print "\t--help"
print "\t-h\t\t\tprint this help screen"
print
print "Report bugs to <" + report_email + ">"
def print_version():
print version, "<" + report_email + ">"
def states_callback(dslist):
allseeding = True
for ds in dslist:
state_callback(ds)
if ds.get_status() != DLSTATUS_SEEDING:
allseeding = False
global checkpointedwhenseeding
global sesjun
if len(dslist) > 0 and allseeding and not checkpointedwhenseeding:
checkpointedwhenseeding = True
print >>sys.stderr,"All seeding, checkpointing Session to enable quick restart"
sesjun.checkpoint()
return (1.0, False)
def state_callback(ds):
d = ds.get_download()
# print >>sys.stderr,`d.get_def().get_name()`,dlstatus_strings[ds.get_status()],ds.get_progress(),"%",ds.get_error(),"up",ds.get_current_speed(UPLOAD),"down",ds.get_current_speed(DOWNLOAD)
print >>sys.stderr, '%s %s %5.2f%% %s up %8.2fKB/s down %8.2fKB/s' % \
(`d.get_def().get_name()`, \
dlstatus_strings[ds.get_status()], \
ds.get_progress() * 100, \
ds.get_error(), \
ds.get_current_speed(UPLOAD), \
ds.get_current_speed(DOWNLOAD))
return (1.0, False)
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hvp:", ["help", "version", "port", "seeder"])
except getopt.GetoptError, err:
print str(err)
usage()
sys.exit(2)
# init to default values
port = 6969
tracking = True
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit(0)
elif o in ("-p", "--port"):
port = int(a)
elif o in ("--seeder"):
tracking = False
elif o in ("-v", "--version"):
print_version()
sys.exit(0)
else:
assert False, "unhandled option"
if len(args) > 1:
print "Too many arguments"
usage()
sys.exit(2)
elif len(args) == 0:
torrentsdir = os.getcwd()
else:
torrentsdir = os.path.abspath(args[0])
print "Press Ctrl-C or send SIGKILL or WM_DESTROY to stop seeding"
# setup session
sscfg = SessionStartupConfig()
statedir = os.path.join(torrentsdir,"."+LIBRARYNAME)
sscfg.set_state_dir(statedir)
sscfg.set_listen_port(port)
sscfg.set_megacache(False)
sscfg.set_overlay(False)
sscfg.set_dialback(False)
if tracking:
sscfg.set_internal_tracker(True)
# log full
logfilename = "tracker-"+str(int(time.time()))+".log"
sscfg.set_tracker_logfile(logfilename)
sscfg.set_tracker_log_nat_checks(True)
s = Session(sscfg)
global sesjun
sesjun = s
s.set_download_states_callback(states_callback, getpeerlist=False)
# Restore previous Session
s.load_checkpoint()
# setup and start downloads
dscfg = DownloadStartupConfig()
dscfg.set_dest_dir(torrentsdir)
# Arno, 2010-04-16: STBSPEED: complete BITFIELDS are processed much faster
dscfg.set_breakup_seed_bitfield(False)
if MAXUPLOAD is not None:
dscfg.set_max_speed(UPLOAD,MAXUPLOAD)
##dscfg.set_max_uploads(32)
#
# Scan dir, until exit by CTRL-C (or any other signal/interrupt)
#
try:
while True:
try:
print >>sys.stderr,"Rescanning",`torrentsdir`
for torrent_file in os.listdir(torrentsdir):
if torrent_file.endswith(".torrent") or torrent_file.endswith(".tstream") or torrent_file.endswith(".url"):
print >>sys.stderr,"Found file",`torrent_file`
tfullfilename = os.path.join(torrentsdir,torrent_file)
if torrent_file.endswith(".url"):
f = open(tfullfilename,"rb")
url = f.read()
f.close()
tdef = TorrentDef.load_from_url(url)
else:
tdef = TorrentDef.load(tfullfilename)
# See if already running:
dlist = s.get_downloads()
existing = False
for d in dlist:
existinfohash = d.get_def().get_infohash()
if existinfohash == tdef.get_infohash():
existing = True
break
if existing:
print >>sys.stderr,"Ignoring existing Download",`tdef.get_name()`
if MAXUPLOAD is not None:
d.set_max_speed(UPLOAD,MAXUPLOAD)
else:
if tracking:
s.add_to_internal_tracker(tdef)
d = s.start_download(tdef, dscfg)
# Checkpoint again when new are seeding
global checkpointedwhenseeding
checkpointedwhenseeding = False
except KeyboardInterrupt,e:
raise e
except Exception, e:
print_exc()
time.sleep(30.0)
except Exception, e:
print_exc()
if __name__ == "__main__":
main()
| lgpl-2.1 | -6,706,108,623,727,171,000 | 32.333333 | 191 | 0.521624 | false |
pstiasny/bigos | bigos/__init__.py | 1 | 1522 | #!/usr/bin/env python2
# encoding: utf8
__version__ = '0.0.3'
import re
import itertools
from bigos.backend import generate_events
watchlist = []
class EventHandler:
def __init__(self, function, regex, dirs=False, types=['created', 'modified']):
'''
:param function: function to run when the event is matched
:param regex: regular expression string to match the
path against
:param dirs: should the handler be run for directory events,
None to run for both dirs and files
:param types: list of types of events to match, or None for
any event
'''
self.f = function
self.regex = re.compile(regex)
self.dirs = dirs
self.types = types
def match(self, ev):
dir_match = self.dirs is None or (ev.is_dir == self.dirs)
types_match = self.types is None or (ev.type in self.types)
return dir_match and types_match and self.regex.match(ev.path)
def __call__(self, *args, **kwargs):
return self.f(*args, **kwargs)
def on(*args, **kwargs):
def decorate(f):
watchlist.append(EventHandler(f, *args, **kwargs))
return f
return decorate
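# Minimal usage sketch (the handler below is made up):
#
#     @on(r'.*\.py$', types=['modified'])
#     def on_py_change(ev):
#         print('python file changed: ' + ev.path)
#
# main('/some/dir') would then call on_py_change() for every matching event.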
def handle_event(watchlist, ev):
for handler in watchlist:
if handler.match(ev):
handler(ev)
def main(dirpath, watchlist=watchlist):
for ev in itertools.chain.from_iterable(generate_events(dirpath)):
handle_event(watchlist, ev)
| gpl-3.0 | 4,430,752,800,128,339,500 | 28.843137 | 83 | 0.603811 | false |
blooparksystems/odoo | addons/account/models/chart_template.py | 1 | 43914 | # -*- coding: utf-8 -*-
import time
import math
from openerp.osv import expression
from openerp.tools.float_utils import float_round as round
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.exceptions import AccessError, UserError, ValidationError
import openerp.addons.decimal_precision as dp
from openerp import api, fields, models, _
# ---------------------------------------------------------------
# Account Templates: Account, Tax, Tax Code and chart. + Wizard
# ---------------------------------------------------------------
class AccountAccountTemplate(models.Model):
_name = "account.account.template"
_description = 'Templates for Accounts'
_order = "code"
name = fields.Char(required=True, index=True)
currency_id = fields.Many2one('res.currency', string='Account Currency', help="Forces all moves for this account to have this secondary currency.")
code = fields.Char(size=64, required=True, index=True)
user_type_id = fields.Many2one('account.account.type', string='Type', required=True, oldname='user_type',
help="These types are defined according to your country. The type contains more information "\
"about the account and its specificities.")
reconcile = fields.Boolean(string='Allow Invoices & payments Matching', default=False,
help="Check this option if you want the user to reconcile entries in this account.")
note = fields.Text()
tax_ids = fields.Many2many('account.tax.template', 'account_account_template_tax_rel', 'account_id', 'tax_id', string='Default Taxes')
nocreate = fields.Boolean(string='Optional Create', default=False,
help="If checked, the new chart of accounts will not contain this by default.")
chart_template_id = fields.Many2one('account.chart.template', string='Chart Template',
help="This optional field allow you to link an account template to a specific chart template that may differ from the one its root parent belongs to. This allow you "
"to define chart templates that extend another and complete it with few new accounts (You don't need to define the whole structure that is common to both several times).")
tag_ids = fields.Many2many('account.account.tag', 'account_account_template_account_tag', string='Account tag', help="Optional tags you may want to assign for custom reporting")
@api.multi
@api.depends('name', 'code')
def name_get(self):
res = []
for record in self:
name = record.name
if record.code:
name = record.code + ' ' + name
res.append((record.id, name))
return res
class AccountChartTemplate(models.Model):
_name = "account.chart.template"
_description = "Templates for Account Chart"
name = fields.Char(required=True)
company_id = fields.Many2one('res.company', string='Company')
parent_id = fields.Many2one('account.chart.template', string='Parent Chart Template')
code_digits = fields.Integer(string='# of Digits', required=True, default=6, help="No. of Digits to use for account code")
visible = fields.Boolean(string='Can be Visible?', default=True,
help="Set this to False if you don't want this template to be used actively in the wizard that generate Chart of Accounts from "
"templates, this is useful when you want to generate accounts of this template only when loading its child template.")
currency_id = fields.Many2one('res.currency', string='Currency', required=True)
use_anglo_saxon = fields.Boolean(string="Use Anglo-Saxon accounting", default=False)
complete_tax_set = fields.Boolean(string='Complete Set of Taxes', default=True,
help="This boolean helps you to choose if you want to propose to the user to encode the sale and purchase rates or choose from list "
"of taxes. This last choice assumes that the set of tax defined on this template is complete")
account_ids = fields.One2many('account.account.template', 'chart_template_id', string='Associated Account Templates')
tax_template_ids = fields.One2many('account.tax.template', 'chart_template_id', string='Tax Template List',
help='List of all the taxes that have to be installed by the wizard')
bank_account_code_prefix = fields.Char(string='Prefix of the bank accounts', oldname="bank_account_code_char")
cash_account_code_prefix = fields.Char(string='Prefix of the main cash accounts')
transfer_account_id = fields.Many2one('account.account.template', string='Transfer Account', required=True,
domain=lambda self: [('reconcile', '=', True), ('user_type_id.id', '=', self.env.ref('account.data_account_type_current_assets').id)],
help="Intermediary account used when moving money from a liquidity account to another")
income_currency_exchange_account_id = fields.Many2one('account.account.template',
string="Gain Exchange Rate Account", domain=[('internal_type', '=', 'other'), ('deprecated', '=', False)])
expense_currency_exchange_account_id = fields.Many2one('account.account.template',
string="Loss Exchange Rate Account", domain=[('internal_type', '=', 'other'), ('deprecated', '=', False)])
property_account_receivable_id = fields.Many2one('account.account.template', string='Receivable Account', oldname="property_account_receivable")
property_account_payable_id = fields.Many2one('account.account.template', string='Payable Account', oldname="property_account_payable")
property_account_expense_categ_id = fields.Many2one('account.account.template', string='Category of Expense Account', oldname="property_account_expense_categ")
property_account_income_categ_id = fields.Many2one('account.account.template', string='Category of Income Account', oldname="property_account_income_categ")
property_account_expense_id = fields.Many2one('account.account.template', string='Expense Account on Product Template', oldname="property_account_expense")
property_account_income_id = fields.Many2one('account.account.template', string='Income Account on Product Template', oldname="property_account_income")
property_stock_account_input_categ_id = fields.Many2one('account.account.template', string="Input Account for Stock Valuation", oldname="property_stock_account_input_categ")
property_stock_account_output_categ_id = fields.Many2one('account.account.template', string="Output Account for Stock Valuation", oldname="property_stock_account_output_categ")
property_stock_valuation_account_id = fields.Many2one('account.account.template', string="Account Template for Stock Valuation")
@api.one
def try_loading_for_current_company(self):
self.ensure_one()
company = self.env.user.company_id
# If we don't have any chart of account on this company, install this chart of account
if not company.chart_template_id:
wizard = self.env['wizard.multi.charts.accounts'].create({
'company_id': self.env.user.company_id.id,
'chart_template_id': self.id,
'code_digits': self.code_digits,
'transfer_account_id': self.transfer_account_id.id,
'currency_id': self.currency_id.id,
'bank_account_code_prefix': self.bank_account_code_prefix,
'cash_account_code_prefix': self.cash_account_code_prefix,
})
wizard.onchange_chart_template_id()
wizard.execute()
@api.multi
def open_select_template_wizard(self):
# Add action to open wizard to select between several templates
if not self.company_id.chart_template_id:
todo = self.env['ir.actions.todo']
action_rec = self.env['ir.model.data'].xmlid_to_object('account.action_wizard_multi_chart')
if action_rec:
todo.create({'action_id': action_rec.id, 'name': _('Choose Accounting Template'), 'type': 'automatic'})
return True
@api.model
def generate_journals(self, acc_template_ref, company, journals_dict=None):
"""
This method is used for creating journals.
:param chart_temp_id: Chart Template Id.
:param acc_template_ref: Account templates reference.
:param company_id: company_id selected from wizard.multi.charts.accounts.
:returns: True
"""
JournalObj = self.env['account.journal']
for vals_journal in self._prepare_all_journals(acc_template_ref, company, journals_dict=journals_dict):
journal = JournalObj.create(vals_journal)
if vals_journal['type'] == 'general' and vals_journal['code'] == _('EXCH'):
company.write({'currency_exchange_journal_id': journal.id})
return True
@api.multi
def _prepare_all_journals(self, acc_template_ref, company, journals_dict=None):
def _get_default_account(journal_vals, type='debit'):
# Get the default accounts
default_account = False
if journal['type'] == 'sale':
default_account = acc_template_ref.get(self.property_account_income_categ_id.id)
elif journal['type'] == 'purchase':
default_account = acc_template_ref.get(self.property_account_expense_categ_id.id)
elif journal['type'] == 'general' and journal['code'] == _('EXCH'):
if type=='credit':
default_account = acc_template_ref.get(self.income_currency_exchange_account_id.id)
else:
default_account = acc_template_ref.get(self.expense_currency_exchange_account_id.id)
return default_account
journals = [{'name': _('Customer Invoices'), 'type': 'sale', 'code': _('INV'), 'favorite': True, 'sequence': 5},
{'name': _('Vendor Bills'), 'type': 'purchase', 'code': _('BILL'), 'favorite': True, 'sequence': 6},
{'name': _('Miscellaneous Operations'), 'type': 'general', 'code': _('MISC'), 'favorite': False, 'sequence': 7},
{'name': _('Exchange Difference'), 'type': 'general', 'code': _('EXCH'), 'favorite': False, 'sequence': 9},]
if journals_dict != None:
journals.extend(journals_dict)
self.ensure_one()
journal_data = []
for journal in journals:
vals = {
'type': journal['type'],
'name': journal['name'],
'code': journal['code'],
'company_id': company.id,
'default_credit_account_id': _get_default_account(journal, 'credit'),
'default_debit_account_id': _get_default_account(journal, 'debit'),
'refund_sequence': True,
'show_on_dashboard': journal['favorite'],
}
journal_data.append(vals)
return journal_data
@api.multi
def generate_properties(self, acc_template_ref, company):
"""
This method is used for creating properties.
:param self: chart templates for which we need to create properties
:param acc_template_ref: Mapping between ids of account templates and real accounts created from them
:param company_id: company_id selected from wizard.multi.charts.accounts.
:returns: True
"""
self.ensure_one()
PropertyObj = self.env['ir.property']
todo_list = [
('property_account_receivable_id', 'res.partner', 'account.account'),
('property_account_payable_id', 'res.partner', 'account.account'),
('property_account_expense_categ_id', 'product.category', 'account.account'),
('property_account_income_categ_id', 'product.category', 'account.account'),
('property_account_expense_id', 'product.template', 'account.account'),
('property_account_income_id', 'product.template', 'account.account'),
]
for record in todo_list:
account = getattr(self, record[0])
value = account and 'account.account,' + str(acc_template_ref[account.id]) or False
if value:
field = self.env['ir.model.fields'].search([('name', '=', record[0]), ('model', '=', record[1]), ('relation', '=', record[2])], limit=1)
vals = {
'name': record[0],
'company_id': company.id,
'fields_id': field.id,
'value': value,
}
properties = PropertyObj.search([('name', '=', record[0]), ('company_id', '=', company.id)])
if properties:
#the property exist: modify it
properties.write(vals)
else:
#create the property
PropertyObj.create(vals)
stock_properties = [
'property_stock_account_input_categ_id',
'property_stock_account_output_categ_id',
'property_stock_valuation_account_id',
]
for stock_property in stock_properties:
account = getattr(self, stock_property)
value = account and acc_template_ref[account.id] or False
if value:
company.write({stock_property: value})
return True
@api.multi
def _install_template(self, company, code_digits=None, transfer_account_id=None, obj_wizard=None, acc_ref=None, taxes_ref=None):
""" Recursively load the template objects and create the real objects from them.
:param company: company the wizard is running for
:param code_digits: number of digits the accounts code should have in the COA
:param transfer_account_id: reference to the account template that will be used as intermediary account for transfers between 2 liquidity accounts
:param obj_wizard: the current wizard for generating the COA from the templates
:param acc_ref: Mapping between ids of account templates and real accounts created from them
:param taxes_ref: Mapping between ids of tax templates and real taxes created from them
:returns: tuple with a dictionary containing
* the mapping between the account template ids and the ids of the real accounts that have been generated
from them, as first item,
* a similar dictionary for mapping the tax templates and taxes, as second item,
:rtype: tuple(dict, dict, dict)
"""
self.ensure_one()
if acc_ref is None:
acc_ref = {}
if taxes_ref is None:
taxes_ref = {}
if self.parent_id:
tmp1, tmp2 = self.parent_id._install_template(company, code_digits=code_digits, transfer_account_id=transfer_account_id, acc_ref=acc_ref, taxes_ref=taxes_ref)
acc_ref.update(tmp1)
taxes_ref.update(tmp2)
tmp1, tmp2 = self._load_template(company, code_digits=code_digits, transfer_account_id=transfer_account_id, account_ref=acc_ref, taxes_ref=taxes_ref)
acc_ref.update(tmp1)
taxes_ref.update(tmp2)
return acc_ref, taxes_ref
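# Sketch of the parent-first recursion above: for a hypothetical template
# chain root <- localization <- variant, calling
# variant._install_template(company) loads root first, then localization,
# then variant, accumulating the template->record ids in acc_ref / taxes_ref.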
@api.multi
def _load_template(self, company, code_digits=None, transfer_account_id=None, account_ref=None, taxes_ref=None):
""" Generate all the objects from the templates
:param company: company the wizard is running for
:param code_digits: number of digits the accounts code should have in the COA
:param transfer_account_id: reference to the account template that will be used as intermediary account for transfers between 2 liquidity accounts
:param acc_ref: Mapping between ids of account templates and real accounts created from them
:param taxes_ref: Mapping between ids of tax templates and real taxes created from them
:returns: tuple with a dictionary containing
* the mapping between the account template ids and the ids of the real accounts that have been generated
from them, as first item,
* a similar dictionary for mapping the tax templates and taxes, as second item,
:rtype: tuple(dict, dict, dict)
"""
self.ensure_one()
if account_ref is None:
account_ref = {}
if taxes_ref is None:
taxes_ref = {}
if not code_digits:
code_digits = self.code_digits
if not transfer_account_id:
transfer_account_id = self.transfer_account_id
AccountTaxObj = self.env['account.tax']
# Generate taxes from templates.
generated_tax_res = self.tax_template_ids._generate_tax(company)
taxes_ref.update(generated_tax_res['tax_template_to_tax'])
# Generating Accounts from templates.
account_template_ref = self.generate_account(taxes_ref, account_ref, code_digits, company)
account_ref.update(account_template_ref)
# writing account values after creation of accounts
company.transfer_account_id = account_template_ref[transfer_account_id.id]
for key, value in generated_tax_res['account_dict'].items():
if value['refund_account_id'] or value['account_id']:
AccountTaxObj.browse(key).write({
'refund_account_id': account_ref.get(value['refund_account_id'], False),
'account_id': account_ref.get(value['account_id'], False),
})
# Create Journals
self.generate_journals(account_ref, company)
# generate properties function
self.generate_properties(account_ref, company)
# Generate Fiscal Position , Fiscal Position Accounts and Fiscal Position Taxes from templates
self.generate_fiscal_position(taxes_ref, account_ref, company)
return account_ref, taxes_ref
@api.multi
def generate_account(self, tax_template_ref, acc_template_ref, code_digits, company):
""" This method for generating accounts from templates.
:param tax_template_ref: Taxes templates reference for write taxes_id in account_account.
:param acc_template_ref: dictionary with the mappping between the account templates and the real accounts.
:param code_digits: number of digits got from wizard.multi.charts.accounts, this is use for account code.
:param company_id: company_id selected from wizard.multi.charts.accounts.
:returns: return acc_template_ref for reference purpose.
:rtype: dict
"""
self.ensure_one()
account_tmpl_obj = self.env['account.account.template']
acc_template = account_tmpl_obj.search([('nocreate', '!=', True), ('chart_template_id', '=', self.id)], order='id')
for account_template in acc_template:
tax_ids = []
for tax in account_template.tax_ids:
tax_ids.append(tax_template_ref[tax.id])
code_main = account_template.code and len(account_template.code) or 0
code_acc = account_template.code or ''
if code_main > 0 and code_main <= code_digits:
code_acc = str(code_acc) + (str('0'*(code_digits-code_main)))
vals = {
'name': account_template.name,
'currency_id': account_template.currency_id and account_template.currency_id.id or False,
'code': code_acc,
'user_type_id': account_template.user_type_id and account_template.user_type_id.id or False,
'reconcile': account_template.reconcile,
'note': account_template.note,
'tax_ids': [(6, 0, tax_ids)],
'company_id': company.id,
'tag_ids': [(6, 0, [t.id for t in account_template.tag_ids])],
}
new_account = self.env['account.account'].create(vals)
acc_template_ref[account_template.id] = new_account.id
return acc_template_ref
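# Illustration of the code padding above (hypothetical values):
#
#     code_digits = 6
#     account_template.code = '4001'       # code_main == 4
#     code_acc = '4001' + '0' * (6 - 4)    # -> '400100'
#
# Codes that already have code_digits or more characters are kept unchanged.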
@api.multi
def generate_fiscal_position(self, tax_template_ref, acc_template_ref, company):
""" This method generate Fiscal Position, Fiscal Position Accounts and Fiscal Position Taxes from templates.
:param chart_temp_id: Chart Template Id.
:param taxes_ids: Taxes templates reference for generating account.fiscal.position.tax.
:param acc_template_ref: Account templates reference for generating account.fiscal.position.account.
:param company_id: company_id selected from wizard.multi.charts.accounts.
:returns: True
"""
self.ensure_one()
positions = self.env['account.fiscal.position.template'].search([('chart_template_id', '=', self.id)])
for position in positions:
new_fp = self.env['account.fiscal.position'].create({'company_id': company.id, 'name': position.name, 'note': position.note})
for tax in position.tax_ids:
self.env['account.fiscal.position.tax'].create({
'tax_src_id': tax_template_ref[tax.tax_src_id.id],
'tax_dest_id': tax.tax_dest_id and tax_template_ref[tax.tax_dest_id.id] or False,
'position_id': new_fp.id
})
for acc in position.account_ids:
self.env['account.fiscal.position.account'].create({
'account_src_id': acc_template_ref[acc.account_src_id.id],
'account_dest_id': acc_template_ref[acc.account_dest_id.id],
'position_id': new_fp.id
})
return True
class AccountTaxTemplate(models.Model):
_name = 'account.tax.template'
_description = 'Templates for Taxes'
_order = 'id'
chart_template_id = fields.Many2one('account.chart.template', string='Chart Template', required=True)
name = fields.Char(string='Tax Name', required=True)
type_tax_use = fields.Selection([('sale', 'Sales'), ('purchase', 'Purchases'), ('none', 'None')], string='Tax Scope', required=True, default="sale",
help="Determines where the tax is selectable. Note : 'None' means a tax can't be used by itself, however it can still be used in a group.")
amount_type = fields.Selection(default='percent', string="Tax Computation", required=True,
selection=[('group', 'Group of Taxes'), ('fixed', 'Fixed'), ('percent', 'Percentage of Price'), ('division', 'Percentage of Price Tax Included')])
active = fields.Boolean(default=True, help="Set active to false to hide the tax without removing it.")
company_id = fields.Many2one('res.company', string='Company', required=True, default=lambda self: self.env.user.company_id)
children_tax_ids = fields.Many2many('account.tax.template', 'account_tax_template_filiation_rel', 'parent_tax', 'child_tax', string='Children Taxes')
sequence = fields.Integer(required=True, default=1,
help="The sequence field is used to define order in which the tax lines are applied.")
amount = fields.Float(required=True, digits=(16, 4))
account_id = fields.Many2one('account.account.template', string='Tax Account', ondelete='restrict',
help="Account that will be set on invoice tax lines for invoices. Leave empty to use the expense account.", oldname='account_collected_id')
refund_account_id = fields.Many2one('account.account.template', string='Tax Account on Refunds', ondelete='restrict',
help="Account that will be set on invoice tax lines for refunds. Leave empty to use the expense account.", oldname='account_paid_id')
description = fields.Char(string='Display on Invoices')
price_include = fields.Boolean(string='Included in Price', default=False,
help="Check this if the price you use on the product and invoices includes this tax.")
include_base_amount = fields.Boolean(string='Affect Subsequent Taxes', default=False,
help="If set, taxes which are computed after this one will be computed based on the price tax included.")
analytic = fields.Boolean(string="Analytic Cost", help="If set, the amount computed by this tax will be assigned to the same analytic account as the invoice line (if any)")
tag_ids = fields.Many2many('account.account.tag', string='Account tag', help="Optional tags you may want to assign for custom reporting")
_sql_constraints = [
('name_company_uniq', 'unique(name, company_id, type_tax_use)', 'Tax names must be unique !'),
]
@api.multi
@api.depends('name', 'description')
def name_get(self):
res = []
for record in self:
name = record.description and record.description or record.name
res.append((record.id, name))
return res
def _get_tax_vals(self, company):
""" This method generates a dictionnary of all the values for the tax that will be created.
"""
self.ensure_one()
return {
'name': self.name,
'type_tax_use': self.type_tax_use,
'amount_type': self.amount_type,
'active': self.active,
'company_id': company.id,
'sequence': self.sequence,
'amount': self.amount,
'description': self.description,
'price_include': self.price_include,
'include_base_amount': self.include_base_amount,
'analytic': self.analytic,
'tag_ids': [(6, 0, [t.id for t in self.tag_ids])],
}
@api.multi
def _generate_tax(self, company):
""" This method generate taxes from templates.
:param company: the company for which the taxes should be created from templates in self
:returns: {
'tax_template_to_tax': mapping between tax template and the newly generated taxes corresponding,
'account_dict': dictionary containing a to-do list with all the accounts to assign on new taxes
}
"""
todo_dict = {}
tax_template_to_tax = {}
for tax in self:
# Compute children tax ids
children_ids = []
for child_tax in tax.children_tax_ids:
if tax_template_to_tax.get(child_tax.id):
children_ids.append(tax_template_to_tax[child_tax.id])
vals_tax = tax._get_tax_vals(company)
vals_tax['children_tax_ids'] = children_ids and [(6, 0, children_ids)] or []
new_tax = self.env['account.tax'].create(vals_tax)
tax_template_to_tax[tax.id] = new_tax.id
# Since the accounts have not been created yet, we have to wait before filling these fields
todo_dict[new_tax.id] = {
'account_id': tax.account_id.id,
'refund_account_id': tax.refund_account_id.id,
}
return {
'tax_template_to_tax': tax_template_to_tax,
'account_dict': todo_dict
}
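    # Illustrative sketch of the structure returned by _generate_tax (ids are made up):
    #   {'tax_template_to_tax': {12: 45},  # tax template id -> newly created account.tax id
    #    'account_dict': {45: {'account_id': 7, 'refund_account_id': 8}}}
    # The account ids in 'account_dict' are still *template* ids; they are resolved to real
    # accounts later, once the accounts have been generated.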
# Fiscal Position Templates
class AccountFiscalPositionTemplate(models.Model):
_name = 'account.fiscal.position.template'
_description = 'Template for Fiscal Position'
name = fields.Char(string='Fiscal Position Template', required=True)
chart_template_id = fields.Many2one('account.chart.template', string='Chart Template', required=True)
account_ids = fields.One2many('account.fiscal.position.account.template', 'position_id', string='Account Mapping')
tax_ids = fields.One2many('account.fiscal.position.tax.template', 'position_id', string='Tax Mapping')
note = fields.Text(string='Notes')
class AccountFiscalPositionTaxTemplate(models.Model):
_name = 'account.fiscal.position.tax.template'
_description = 'Template Tax Fiscal Position'
_rec_name = 'position_id'
position_id = fields.Many2one('account.fiscal.position.template', string='Fiscal Position', required=True, ondelete='cascade')
tax_src_id = fields.Many2one('account.tax.template', string='Tax Source', required=True)
tax_dest_id = fields.Many2one('account.tax.template', string='Replacement Tax')
class AccountFiscalPositionAccountTemplate(models.Model):
_name = 'account.fiscal.position.account.template'
_description = 'Template Account Fiscal Mapping'
_rec_name = 'position_id'
position_id = fields.Many2one('account.fiscal.position.template', string='Fiscal Mapping', required=True, ondelete='cascade')
account_src_id = fields.Many2one('account.account.template', string='Account Source', required=True)
account_dest_id = fields.Many2one('account.account.template', string='Account Destination', required=True)
# ---------------------------------------------------------
# Account generation from template wizards
# ---------------------------------------------------------
class WizardMultiChartsAccounts(models.TransientModel):
"""
Create a new account chart for a company.
Wizards ask for:
* a company
* an account chart template
* a number of digits for formatting code of non-view accounts
* a list of bank accounts owned by the company
Then, the wizard:
* generates all accounts from the template and assigns them to the right company
* generates all taxes and tax codes, changing account assignations
* generates all accounting properties and assigns them correctly
"""
_name = 'wizard.multi.charts.accounts'
_inherit = 'res.config'
company_id = fields.Many2one('res.company', string='Company', required=True)
currency_id = fields.Many2one('res.currency', string='Currency', help="Currency as per company's country.", required=True)
only_one_chart_template = fields.Boolean(string='Only One Chart Template Available')
chart_template_id = fields.Many2one('account.chart.template', string='Chart Template', required=True)
bank_account_ids = fields.One2many('account.bank.accounts.wizard', 'bank_account_id', string='Cash and Banks', required=True, oldname="bank_accounts_id")
bank_account_code_prefix = fields.Char('Bank Accounts Prefix', oldname="bank_account_code_char")
cash_account_code_prefix = fields.Char('Cash Accounts Prefix')
code_digits = fields.Integer(string='# of Digits', required=True, help="No. of Digits to use for account code")
sale_tax_id = fields.Many2one('account.tax.template', string='Default Sales Tax', oldname="sale_tax")
purchase_tax_id = fields.Many2one('account.tax.template', string='Default Purchase Tax', oldname="purchase_tax")
sale_tax_rate = fields.Float(string='Sales Tax(%)')
use_anglo_saxon = fields.Boolean(string='Use Anglo-Saxon Accounting', related='chart_template_id.use_anglo_saxon')
transfer_account_id = fields.Many2one('account.account.template', required=True, string='Transfer Account',
domain=lambda self: [('reconcile', '=', True), ('user_type_id.id', '=', self.env.ref('account.data_account_type_current_assets').id)],
help="Intermediary account used when moving money from a liquidity account to another")
purchase_tax_rate = fields.Float(string='Purchase Tax(%)')
complete_tax_set = fields.Boolean('Complete Set of Taxes',
help="This boolean helps you to choose if you want to propose to the user to encode the sales and purchase rates or use "
"the usual m2o fields. This last choice assumes that the set of tax defined for the chosen template is complete")
@api.model
def _get_chart_parent_ids(self, chart_template):
""" Returns the IDs of all ancestor charts, including the chart itself.
(inverse of child_of operator)
:param browse_record chart_template: the account.chart.template record
        :return: the IDs of all ancestor charts, including the chart itself.
"""
result = [chart_template.id]
while chart_template.parent_id:
chart_template = chart_template.parent_id
result.append(chart_template.id)
return result
@api.onchange('sale_tax_rate')
def onchange_tax_rate(self):
self.purchase_tax_rate = self.sale_tax_rate or False
@api.onchange('chart_template_id')
def onchange_chart_template_id(self):
res = {}
tax_templ_obj = self.env['account.tax.template']
if self.chart_template_id:
currency_id = self.chart_template_id.currency_id and self.chart_template_id.currency_id.id or self.env.user.company_id.currency_id.id
self.complete_tax_set = self.chart_template_id.complete_tax_set
self.currency_id = currency_id
if self.chart_template_id.complete_tax_set:
                # The default tax is the one with the lowest sequence. For equal sequences we take the most recently created one, as is the case for taxes created while installing the generic chart of accounts.
chart_ids = self._get_chart_parent_ids(self.chart_template_id)
base_tax_domain = [('chart_template_id', 'in', chart_ids)]
sale_tax_domain = base_tax_domain + [('type_tax_use', '=', 'sale')]
purchase_tax_domain = base_tax_domain + [('type_tax_use', '=', 'purchase')]
sale_tax = tax_templ_obj.search(sale_tax_domain, order="sequence, id desc", limit=1)
purchase_tax = tax_templ_obj.search(purchase_tax_domain, order="sequence, id desc", limit=1)
self.sale_tax_id = sale_tax.id
self.purchase_tax_id = purchase_tax.id
res.setdefault('domain', {})
res['domain']['sale_tax_id'] = repr(sale_tax_domain)
res['domain']['purchase_tax_id'] = repr(purchase_tax_domain)
if self.chart_template_id.transfer_account_id:
self.transfer_account_id = self.chart_template_id.transfer_account_id.id
if self.chart_template_id.code_digits:
self.code_digits = self.chart_template_id.code_digits
if self.chart_template_id.bank_account_code_prefix:
self.bank_account_code_prefix = self.chart_template_id.bank_account_code_prefix
if self.chart_template_id.cash_account_code_prefix:
self.cash_account_code_prefix = self.chart_template_id.cash_account_code_prefix
return res
@api.model
def default_get(self, fields):
context = self._context or {}
res = super(WizardMultiChartsAccounts, self).default_get(fields)
tax_templ_obj = self.env['account.tax.template']
account_chart_template = self.env['account.chart.template']
if 'bank_account_ids' in fields:
res.update({'bank_account_ids': [{'acc_name': _('Cash'), 'account_type': 'cash'}, {'acc_name': _('Bank'), 'account_type': 'bank'}]})
if 'company_id' in fields:
res.update({'company_id': self.env.user.company_id.id})
if 'currency_id' in fields:
company_id = res.get('company_id') or False
if company_id:
company = self.env['res.company'].browse(company_id)
currency_id = company.on_change_country(company.country_id.id)['value']['currency_id']
res.update({'currency_id': currency_id})
chart_templates = account_chart_template.search([('visible', '=', True)])
if chart_templates:
            # In order to default to the most recently created chart template, take the max of the ids.
chart_id = max(chart_templates.ids)
if context.get("default_charts"):
model_data = self.env['ir.model.data'].search_read([('model', '=', 'account.chart.template'), ('module', '=', context.get("default_charts"))], ['res_id'])
if model_data:
chart_id = model_data[0]['res_id']
chart = account_chart_template.browse(chart_id)
chart_hierarchy_ids = self._get_chart_parent_ids(chart)
if 'chart_template_id' in fields:
res.update({'only_one_chart_template': len(chart_templates) == 1,
'chart_template_id': chart_id})
if 'sale_tax_id' in fields:
sale_tax = tax_templ_obj.search([('chart_template_id', 'in', chart_hierarchy_ids),
('type_tax_use', '=', 'sale')], limit=1, order='sequence')
res.update({'sale_tax_id': sale_tax and sale_tax.id or False})
if 'purchase_tax_id' in fields:
purchase_tax = tax_templ_obj.search([('chart_template_id', 'in', chart_hierarchy_ids),
('type_tax_use', '=', 'purchase')], limit=1, order='sequence')
res.update({'purchase_tax_id': purchase_tax and purchase_tax.id or False})
res.update({
'purchase_tax_rate': 15.0,
'sale_tax_rate': 15.0,
})
return res
@api.model
def fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
context = self._context or {}
res = super(WizardMultiChartsAccounts, self).fields_view_get(view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=False)
cmp_select = []
CompanyObj = self.env['res.company']
companies = CompanyObj.search([])
        # Display in the company selection widget only the companies that haven't been configured yet (ignoring the demo chart of accounts).
self._cr.execute("SELECT company_id FROM account_account WHERE deprecated = 'f' AND name != 'Chart For Automated Tests' AND name NOT LIKE '%(test)'")
configured_cmp = [r[0] for r in self._cr.fetchall()]
unconfigured_cmp = list(set(companies.ids) - set(configured_cmp))
for field in res['fields']:
if field == 'company_id':
res['fields'][field]['domain'] = [('id', 'in', unconfigured_cmp)]
res['fields'][field]['selection'] = [('', '')]
if unconfigured_cmp:
cmp_select = [(line.id, line.name) for line in CompanyObj.browse(unconfigured_cmp)]
res['fields'][field]['selection'] = cmp_select
return res
@api.one
def _create_tax_templates_from_rates(self, company_id):
'''
This function checks if the chosen chart template is configured as containing a full set of taxes, and if
        it's not the case, it creates the account.tax templates according to the provided sale/purchase rates.
Then it saves the new tax templates as default taxes to use for this chart template.
        :param company_id: id of the company for which the wizard is running
:return: True
'''
obj_tax_temp = self.env['account.tax.template']
all_parents = self._get_chart_parent_ids(self.chart_template_id)
# create tax templates from purchase_tax_rate and sale_tax_rate fields
if not self.chart_template_id.complete_tax_set:
value = self.sale_tax_rate
ref_taxs = obj_tax_temp.search([('type_tax_use', '=', 'sale'), ('chart_template_id', 'in', all_parents)], order="sequence, id desc", limit=1)
ref_taxs.write({'amount': value, 'name': _('Tax %.2f%%') % value})
value = self.purchase_tax_rate
ref_taxs = obj_tax_temp.search([('type_tax_use', '=', 'purchase'), ('chart_template_id', 'in', all_parents)], order="sequence, id desc", limit=1)
ref_taxs.write({'amount': value, 'name': _('Purchase Tax %.2f%%') % value})
return True
@api.multi
def execute(self):
'''
This function is called at the confirmation of the wizard to generate the COA from the templates. It will read
all the provided information to create the accounts, the banks, the journals, the taxes, the
        accounting properties... for the chosen company.
'''
if not self.env.user._is_admin():
raise AccessError(_("Only administrators can change the settings"))
ir_values_obj = self.env['ir.values']
company = self.company_id
self.company_id.write({'currency_id': self.currency_id.id,
'accounts_code_digits': self.code_digits,
'anglo_saxon_accounting': self.use_anglo_saxon,
'bank_account_code_prefix': self.bank_account_code_prefix,
'cash_account_code_prefix': self.cash_account_code_prefix,
'chart_template_id': self.chart_template_id.id})
#set the coa currency to active
self.currency_id.write({'active': True})
# When we install the CoA of first company, set the currency to price types and pricelists
if company.id == 1:
for reference in ['product.list_price', 'product.standard_price', 'product.list0']:
try:
tmp2 = self.env.ref(reference).write({'currency_id': self.currency_id.id})
except ValueError:
pass
# If the floats for sale/purchase rates have been filled, create templates from them
self._create_tax_templates_from_rates(company.id)
# Install all the templates objects and generate the real objects
acc_template_ref, taxes_ref = self.chart_template_id._install_template(company, code_digits=self.code_digits, transfer_account_id=self.transfer_account_id)
# write values of default taxes for product as super user
if self.sale_tax_id and taxes_ref:
ir_values_obj.sudo().set_default('product.template', "taxes_id", [taxes_ref[self.sale_tax_id.id]], for_all_users=True, company_id=company.id)
if self.purchase_tax_id and taxes_ref:
ir_values_obj.sudo().set_default('product.template', "supplier_taxes_id", [taxes_ref[self.purchase_tax_id.id]], for_all_users=True, company_id=company.id)
# Create Bank journals
self._create_bank_journals_from_o2m(company, acc_template_ref)
# Create the current year earning account (outside of the CoA)
self.env['account.account'].create({
'code': '9999',
'name': _('Undistributed Profits/Losses'),
'user_type_id': self.env.ref("account.data_unaffected_earnings").id,
'company_id': company.id,})
return {}
@api.multi
def _create_bank_journals_from_o2m(self, company, acc_template_ref):
'''
        This function creates bank journals and their corresponding accounts for each line encoded in the field bank_account_ids of the
wizard (which is currently only used to create a default bank and cash journal when the CoA is installed).
:param company: the company for which the wizard is running.
:param acc_template_ref: the dictionary containing the mapping between the ids of account templates and the ids
of the accounts that have been generated from them.
'''
self.ensure_one()
# Create the journals that will trigger the account.account creation
for acc in self.bank_account_ids:
self.env['account.journal'].create({
'name': acc.acc_name,
'type': acc.account_type,
'company_id': company.id,
'currency_id': acc.currency_id.id,
})
class AccountBankAccountsWizard(models.TransientModel):
_name = 'account.bank.accounts.wizard'
acc_name = fields.Char(string='Account Name.', required=True)
bank_account_id = fields.Many2one('wizard.multi.charts.accounts', string='Bank Account', required=True, ondelete='cascade')
currency_id = fields.Many2one('res.currency', string='Account Currency',
help="Forces all moves for this account to have this secondary currency.")
account_type = fields.Selection([('cash', 'Cash'), ('bank', 'Bank')])
| gpl-3.0 | -9,035,051,368,661,658,000 | 56.554391 | 193 | 0.633488 | false |
saaros/pghoard | pghoard/webserver.py | 1 | 18310 | """
pghoard - internal http server for serving backup objects
Copyright (c) 2016 Ohmu Ltd
See LICENSE for details
"""
from collections import deque
from concurrent.futures import ThreadPoolExecutor
from contextlib import contextmanager
from http.server import HTTPServer, BaseHTTPRequestHandler
from pghoard import wal
from pghoard.common import json_encode
from pghoard.rohmu.compat import suppress
from pghoard.rohmu.errors import Error, FileNotFoundFromStorageError
from pghoard.version import __version__
from queue import Empty, Queue
from socketserver import ThreadingMixIn
from threading import Thread
import logging
import os
import tempfile
import time
class PoolMixIn(ThreadingMixIn):
def process_request(self, request, client_address):
self.pool.submit(self.process_request_thread, request, client_address)
class OwnHTTPServer(PoolMixIn, HTTPServer):
"""httpserver with 10 thread pool"""
pool = ThreadPoolExecutor(max_workers=10)
requested_basebackup_sites = None
class HttpResponse(Exception):
def __init__(self, msg=None, headers=None, status=500):
self.error = not (status >= 200 and status <= 299)
self.headers = headers or {}
self.msg = msg
self.status = status
if self.error:
super().__init__("{} {}: {}".format(self.__class__.__name__, status, msg))
else:
super().__init__("{} {}".format(self.__class__.__name__, status))
class WebServer(Thread):
def __init__(self, config, requested_basebackup_sites, compression_queue, transfer_queue):
super().__init__()
self.log = logging.getLogger("WebServer")
self.config = config
self.requested_basebackup_sites = requested_basebackup_sites
self.compression_queue = compression_queue
self.transfer_queue = transfer_queue
self.address = self.config["http_address"]
self.port = self.config["http_port"]
self.server = None
self._running = False
self.log.debug("WebServer initialized with address: %r port: %r", self.address, self.port)
def run(self):
# We bind the port only when we start running
self._running = True
self.server = OwnHTTPServer((self.address, self.port), RequestHandler)
self.server.config = self.config # pylint: disable=attribute-defined-outside-init
self.server.log = self.log # pylint: disable=attribute-defined-outside-init
self.server.requested_basebackup_sites = self.requested_basebackup_sites
self.server.compression_queue = self.compression_queue # pylint: disable=attribute-defined-outside-init
self.server.transfer_queue = self.transfer_queue # pylint: disable=attribute-defined-outside-init
# Bounded negative cache for failed prefetch operations - we don't want to try prefetching files that
# aren't there. This isn't used for explicit download requests as it's possible that a file appears
# later on in the object store.
self.server.prefetch_404 = deque(maxlen=32) # pylint: disable=attribute-defined-outside-init
self.server.serve_forever()
def close(self):
self.log.debug("Closing WebServer")
if self.server:
self.server.shutdown()
self.log.debug("Closed WebServer")
self._running = False
@property
def running(self):
return self._running
@running.setter
def running(self, value):
if self._running == value:
return
if value:
self.run()
else:
self.close()
class RequestHandler(BaseHTTPRequestHandler):
disable_nagle_algorithm = True
server_version = "pghoard/" + __version__
@contextmanager
def _response_handler(self, method):
self.server.log.debug("Request: %s %r", method, self.path)
path = self.path.lstrip("/").split("/")
resp = None
try:
yield path
except HttpResponse as ex:
resp = ex
except Exception as ex: # pylint: disable=broad-except
msg = "server failure: {0.__class__.__name__}: {0}".format(ex)
self.server.log.exception(msg)
resp = HttpResponse(msg, status=503)
else:
resp = HttpResponse("no response generated", status=500)
if resp.error:
self.server.log.warning(str(resp))
resp.headers.setdefault("content-type", "text/plain")
else:
self.server.log.debug(str(resp))
resp.headers.setdefault("content-type", "application/octet-stream")
if isinstance(resp.msg, dict):
bmsg = json_encode(resp.msg, compact=False, binary=True)
resp.headers["content-type"] = "application/json"
elif resp.msg:
bmsg = resp.msg.encode("utf-8")
else:
bmsg = b""
resp.headers["content-length"] = str(len(bmsg))
self.send_response(resp.status)
for k, v in resp.headers.items():
self.send_header(k, v)
self.end_headers()
self.wfile.write(bmsg)
def _parse_request(self, path):
if len(path) < 2:
raise HttpResponse("Invalid path {!r}".format(path), status=400)
site = path[0]
if site not in self.server.config["backup_sites"]:
raise HttpResponse("Site: {!r} not found for path {!r}".format(site, path), status=404)
obtype = path[1]
if obtype == "basebackup":
return site, obtype, None
if obtype in ("archive", "timeline", "xlog"):
if len(path) != 3:
raise HttpResponse("Invalid {!r} request, only single file retrieval is supported for now"
.format(obtype), status=400)
            # Allow postgresql's archive_command and restore_command to just feed in files without providing
            # their types, which they could not otherwise do without a wrapper script adding the type.
if obtype == "archive":
if wal.XLOG_RE.match(path[2]):
obtype = "xlog"
elif wal.TIMELINE_RE.match(path[2]):
obtype = "timeline"
elif path[2] == "basebackup":
obtype = "basebackup"
else:
raise HttpResponse("Unrecognized file {!r} for archiving".format(path[2]), status=400)
return site, obtype, path[2]
raise HttpResponse("Invalid path {!r}".format(path), status=400)
def _verify_wal(self, filetype, filename, path):
if filetype != "xlog":
return
try:
wal.verify_wal(wal_name=filename, filepath=path)
except ValueError as ex:
raise HttpResponse(str(ex), status=412)
def _save_and_verify_restored_file(self, filetype, filename, tmp_target_path, target_path):
self._verify_wal(filetype, filename, tmp_target_path)
try:
os.rename(tmp_target_path, target_path)
except OSError as ex:
fmt = "Unable to write final file to requested location {path!r}: {ex.__class__.__name__}: {ex}"
raise HttpResponse(fmt.format(path=target_path, ex=ex), status=409)
def _transfer_agent_op(self, site, filename, filetype, method, *, retries=2, target_path=None):
start_time = time.time()
tmp_target_path = None
if method == "DOWNLOAD":
# NOTE: we request download on a temporary download path so we can atomically overwrite the file if /
# when we successfully receive it.
try:
fd, tmp_target_path = tempfile.mkstemp(prefix="{}.".format(target_path), suffix=".pghoard.tmp")
os.close(fd)
except OSError as ex:
raise HttpResponse("Unable to create temporary file for {0!r}: {1.__class__.__name__}: {1}"
.format(target_path, ex), status=400)
self.server.log.debug("Requesting site: %r, filename: %r, filetype: %r, target_path: %r",
site, filename, filetype, target_path)
callback_queue = Queue()
self.server.transfer_queue.put({
"callback_queue": callback_queue,
"filetype": filetype,
"local_path": filename,
"site": site,
"target_path": tmp_target_path,
"type": method,
})
try:
try:
response = callback_queue.get(timeout=30.0)
self.server.log.debug("Handled a %s request for: %r %r, took: %.3fs",
method, site, target_path, time.time() - start_time)
except Empty:
self.server.log.exception("Timeout on a %s request for: %r %r, took: %.3fs",
method, site, target_path, time.time() - start_time)
raise HttpResponse("TIMEOUT", status=500)
if not response["success"]:
if isinstance(response.get("exception"), FileNotFoundFromStorageError):
raise HttpResponse("{0.__class__.__name__}: {0}".format(response["exception"]), status=404)
raise HttpResponse(status=500)
except HttpResponse as ex:
if tmp_target_path:
with suppress(Exception):
os.unlink(tmp_target_path)
if ex.status == 500 and retries:
self.server.log.warning("Transfer operation failed, retrying (%r retries left)", retries)
return self._transfer_agent_op(site, filename, filetype, method,
retries=retries - 1, target_path=target_path)
raise
if tmp_target_path:
self._save_and_verify_restored_file(filetype, filename, tmp_target_path, target_path)
return response
@contextmanager
def _prefetch(self, site, filetype, names):
if not names:
return
start_time = time.monotonic()
callback_queue = Queue()
site_config = self.server.config["backup_sites"][site]
xlog_dir = site_config["pg_xlog_directory"]
downloads = {}
for obname in names:
if obname in self.server.prefetch_404:
continue # previously failed to prefetch this file, don't try again
prefetch_target_path = os.path.join(xlog_dir, "{}.pghoard.prefetch".format(obname))
if os.path.exists(prefetch_target_path):
continue # already fetched this file
try:
fd, tmp_target_path = tempfile.mkstemp(prefix="{}/{}.".format(xlog_dir, obname), suffix=".pghoard.tmp")
os.close(fd)
except OSError as ex:
self.server.log.error("Unable to create temporary file to prefetch %r: %s: %s",
obname, ex.__class__.__name__, ex)
continue
self.server.log.debug("Prefetching site: %r, filename: %r, filetype: %r, tmp_target_path: %r",
site, obname, filetype, tmp_target_path)
downloads[obname] = tmp_target_path
self.server.transfer_queue.put({
"callback_queue": callback_queue,
"filetype": filetype,
"local_path": obname,
"opaque": obname,
"site": site,
"target_path": tmp_target_path,
"type": "DOWNLOAD",
})
        # Yield so the caller can do its own work while the prefetch downloads run in the background
try:
yield
finally:
# process results (timeout is 30 seconds after start but at least 5 seconds)
timeout_at = max(start_time + 30, time.monotonic() + 5)
while downloads:
time_left = timeout_at - time.monotonic()
try:
response = callback_queue.get(timeout=time_left)
except Empty:
break # timeout
obname = response["opaque"]
tmp_target_path = downloads.pop(response["opaque"])
if response["success"]:
prefetch_target_path = os.path.join(xlog_dir, "{}.pghoard.prefetch".format(obname))
os.rename(tmp_target_path, prefetch_target_path)
self.server.log.debug("Prefetched %r %r to %r, took: %.3fs",
site, obname, prefetch_target_path, time.monotonic() - start_time)
else:
ex = response.get("exception", Error)
if isinstance(ex, FileNotFoundFromStorageError):
# don't try prefetching this file again
self.server.prefetch_404.append(obname)
self.server.log.debug("Prefetching %r %r failed (%s), took: %.3fs",
site, obname, ex.__class__.__name__, time.monotonic() - start_time)
with suppress(Exception):
os.unlink(tmp_target_path)
# everything else timed out
while downloads:
obname, tmp_target_path = downloads.popitem()
self.server.log.debug("Prefetching %r %r timed out, took: %.3fs",
site, obname, time.monotonic() - start_time)
with suppress(Exception):
os.unlink(tmp_target_path)
def get_wal_or_timeline_file(self, site, filename, filetype):
target_path = self.headers.get("x-pghoard-target-path")
if not target_path:
raise HttpResponse("x-pghoard-target-path header missing from download", status=400)
# See if we have already prefetched the file
site_config = self.server.config["backup_sites"][site]
xlog_dir = site_config["pg_xlog_directory"]
prefetch_target_path = os.path.join(xlog_dir, "{}.pghoard.prefetch".format(filename))
if os.path.exists(prefetch_target_path):
self._save_and_verify_restored_file(filetype, filename, prefetch_target_path, target_path)
raise HttpResponse(status=201)
prefetch_n = self.server.config["restore_prefetch"]
prefetch = []
if filetype == "timeline":
tli_num = int(filename.replace(".history", ""), 16)
for _ in range(prefetch_n):
tli_num += 1
prefetch.append("{:08X}.history".format(tli_num))
elif filetype == "xlog":
xlog_num = int(filename, 16)
for _ in range(prefetch_n):
if xlog_num & 0xFF == 0xFF:
xlog_num += 0xFFFFFF00
xlog_num += 1
prefetch.append("{:024X}".format(xlog_num))
with self._prefetch(site, filetype, prefetch):
self._transfer_agent_op(site, filename, filetype, "DOWNLOAD", target_path=target_path)
raise HttpResponse(status=201)
def list_basebackups(self, site):
response = self._transfer_agent_op(site, "", "basebackup", "LIST")
raise HttpResponse({"basebackups": response["items"]}, status=200)
def handle_archival_request(self, site, filename, filetype):
if filetype == "basebackup":
# Request a basebackup to be made for site
self.server.log.debug("Requesting a new basebackup for site: %r to be made", site)
self.server.requested_basebackup_sites.add(site)
raise HttpResponse(status=201)
start_time = time.time()
site_config = self.server.config["backup_sites"][site]
xlog_dir = site_config["pg_xlog_directory"]
xlog_path = os.path.join(xlog_dir, filename)
self.server.log.debug("Got request to archive: %r %r %r, %r", site, filetype,
filename, xlog_path)
if not os.path.exists(xlog_path):
self.server.log.debug("xlog_path: %r did not exist, cannot archive, returning 404", xlog_path)
raise HttpResponse("N/A", status=404)
self._verify_wal(filetype, filename, xlog_path)
callback_queue = Queue()
if not self.server.config["backup_sites"][site]["object_storage"]:
compress_to_memory = False
else:
compress_to_memory = True
compression_event = {
"type": "CLOSE_WRITE",
"callback_queue": callback_queue,
"compress_to_memory": compress_to_memory,
"delete_file_after_compression": False,
"full_path": xlog_path,
"site": site,
}
self.server.compression_queue.put(compression_event)
try:
response = callback_queue.get(timeout=30)
self.server.log.debug("Handled an archival request for: %r %r, took: %.3fs",
site, xlog_path, time.time() - start_time)
except Empty:
self.server.log.exception("Problem in getting a response in time, returning 404, took: %.2fs",
time.time() - start_time)
raise HttpResponse("TIMEOUT", status=500)
if not response["success"]:
raise HttpResponse(status=500)
raise HttpResponse(status=201)
def do_PUT(self):
with self._response_handler("PUT") as path:
site, obtype, obname = self._parse_request(path)
assert obtype in ("basebackup", "xlog", "timeline")
self.handle_archival_request(site, obname, obtype)
def do_HEAD(self):
with self._response_handler("HEAD") as path:
site, obtype, obname = self._parse_request(path)
if self.headers.get("x-pghoard-target-path"):
raise HttpResponse("x-pghoard-target-path header is only valid for downloads", status=400)
self._transfer_agent_op(site, obname, obtype, "METADATA")
raise HttpResponse(status=200)
def do_GET(self):
with self._response_handler("GET") as path:
site, obtype, obname = self._parse_request(path)
if obtype == "basebackup":
self.list_basebackups(site)
else:
self.get_wal_or_timeline_file(site, obname, obtype)
| apache-2.0 | -1,124,583,806,674,144,100 | 42.595238 | 119 | 0.576461 | false |
truthcoin/truthcoin-cpp | contrib/linearize/linearize-hashes.py | 1 | 3041 | #!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class TruthcoinRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def execute(self, obj):
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read()
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = TruthcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
print(resp_obj['result'])
height += num_blocks
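# Example CONFIG-FILE contents (illustrative values; key=value lines, '#' starts a comment):
#   rpcuser=someuser
#   rpcpassword=somepassword
#   host=127.0.0.1
#   port=8332
#   min_height=0
#   max_height=313000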
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8332
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print("Missing username and/or password in cfg file", file=stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
| mit | 8,576,335,789,278,602,000 | 25.911504 | 90 | 0.664255 | false |
theJollySin/python_for_scientists | classes/14_netcdf/extract_cell_from_3d_ioapi.py | 1 | 1291 |
import sys
from netCDF4 import Dataset
def main():
print(extract_cell_from_3d_ioapi(sys.argv[1], int(sys.argv[2]), int(sys.argv[3]), layer=0, var=None))
def extract_cell_from_3d_ioapi(file_path, row, col, layer=0, var=None):
'''Extract a single grid cell from a GRIDDED IOAPI NetCDF file.
If you don't provide a layer, we'll assume you want the ground layer.
If you don't provide a variable name, we'll assume you want all of them.
This will return a dictionary of each variable's values across the time dimension.
'''
# opening file as read-only
root = Dataset(file_path, 'r', format='NETCDF3_CLASSIC')
# find the variable names (remove TFLAG)
    keys = list(root.variables.keys())
keys.remove('TFLAG')
if var is not None:
# if variable name is provided, and exists in the file
if var not in keys:
            raise Exception('The variable ' + str(var) + ' does not exist.')
else:
return {var: root.variables[var][:, layer, row, col]}
else:
# if variable name is not provided, return a dictionary of all variables
results = {}
for key in keys:
results[key] = root.variables[key][:, layer, row, col]
return results
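# Example call (illustrative file name and variable name):
#   extract_cell_from_3d_ioapi('CCTM_ACONC_2016001.ncf', 10, 20, var='O3')
# returns {'O3': <array of that cell's values across the time dimension>}.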
if __name__ == "__main__":
main()
| gpl-3.0 | -4,119,864,125,831,360,000 | 32.102564 | 105 | 0.633617 | false |
daodewang/qingcloud-sdk-python | qingcloud/iaas/router_static.py | 1 | 8191 | # =========================================================================
# Copyright 2012-present Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
import json
from qingcloud.iaas.errors import InvalidRouterStatic
class RouterStaticFactory(object):
TYPE_PORT_FORWARDING = 1
TYPE_VPN = 2
TYPE_TUNNEL = 4
TYPE_FILTERING = 5
PPTP_DEFAULT_CONNS = 100
@classmethod
def create(cls, static_type, router_static_id='', **kw):
""" Create router static.
"""
if static_type not in STATIC_MAPPER:
raise InvalidRouterStatic('invalid static type[%s]' % static_type)
clazz = STATIC_MAPPER[static_type]
kw = clazz.extract(kw)
inst = clazz(**kw)
inst.router_static_id = router_static_id
return inst
@classmethod
def create_from_string(cls, string):
""" Create router static from json formatted string.
"""
data = json.loads(string)
if isinstance(data, dict):
return cls.create(**data)
if isinstance(data, list):
return [cls.create(**item) for item in data]
class _RouterStatic(object):
""" _RouterStatic is used to define static rule in router.
"""
router_static_id = None
static_type = None
def __repr__(self):
return '<%s>%s' % (self.__class__.__name__, self.to_json())
@staticmethod
def extract(kw):
raise NotImplementedError
def extra_props(self):
raise NotImplementedError
def to_json(self):
props = {
'router_static_id': self.router_static_id,
'static_type': self.static_type,
}
props.update(self.extra_props())
return props
class _StaticForPortForwarding(_RouterStatic):
static_type = RouterStaticFactory.TYPE_PORT_FORWARDING
def __init__(self, src_port, dst_ip, dst_port, protocol='tcp',
router_static_name='', **kw):
super(_StaticForPortForwarding, self).__init__()
self.router_static_name = router_static_name
self.src_port = src_port
self.dst_ip = dst_ip
self.dst_port = dst_port
self.protocol = protocol
@staticmethod
def extract(kw):
if 'val1' in kw:
kw['src_port'] = kw.pop('val1')
if 'val2' in kw:
kw['dst_ip'] = kw.pop('val2')
if 'val3' in kw:
kw['dst_port'] = kw.pop('val3')
if 'val4' in kw:
kw['protocol'] = kw.pop('val4')
return kw
def extra_props(self):
return {
'router_static_name': self.router_static_name,
'val1': self.src_port,
'val2': self.dst_ip,
'val3': self.dst_port,
'val4': self.protocol,
}
class _StaticForVPN(_RouterStatic):
class OpenVPN(object):
def __init__(self, ip_network, serv_port='1194', serv_protocol='udp',
**kw):
self.serv_port = serv_port
self.serv_protocol = serv_protocol
self.ip_network = ip_network
def extra_props(self):
return {
'val1': 'openvpn',
'val2': self.serv_port,
'val3': self.serv_protocol,
'val4': self.ip_network,
}
class PPTP(object):
def __init__(self, usr, pwd, ip_network,
max_conn_cnt=RouterStaticFactory.PPTP_DEFAULT_CONNS, **kw):
self.usr = usr
self.pwd = pwd
self.max_conn_cnt = max_conn_cnt
self.ip_network = ip_network
def extra_props(self):
return {
'val1': 'pptp',
'val2': '%s:%s' % (self.usr, self.pwd),
'val3': self.max_conn_cnt,
'val4': self.ip_network,
}
static_type = RouterStaticFactory.TYPE_VPN
def __init__(self, vpn_type='', **kw):
super(_StaticForVPN, self).__init__()
vpn_type = vpn_type or kw.get('val1')
if vpn_type == 'openvpn':
self.inst = _StaticForVPN.OpenVPN(**kw)
elif vpn_type == 'pptp':
self.inst = _StaticForVPN.PPTP(**kw)
else:
raise InvalidRouterStatic('unsupported vpn type[%s]' % vpn_type)
@staticmethod
def extract(kw):
vpn_type = kw.get('val1')
if vpn_type == 'openvpn':
if 'val2' in kw:
kw['serv_port'] = kw.pop('val2')
if 'val3' in kw:
kw['serv_protocol'] = kw.pop('val3')
if 'val4' in kw:
kw['ip_network'] = kw.pop('val4')
elif vpn_type == 'pptp':
if 'entry_set' in kw:
entry_set = kw['entry_set']
kw['usr'] = entry_set[0]['val1']
kw['pwd'] = ''
if 'val3' in kw:
kw['max_conn_cnt'] = kw.pop('val3')
if 'val4' in kw:
kw['ip_network'] = kw.pop('val4')
return kw
def extra_props(self):
return self.inst.extra_props()
class _StaticForTunnel(_RouterStatic):
static_type = RouterStaticFactory.TYPE_TUNNEL
def __init__(self, vxnet_id, tunnel_entries, **kw):
"""
@param tunnel_entries: [(tunnel_type, ip_network, key), ...]
"""
super(_StaticForTunnel, self).__init__()
self.vxnet_id = vxnet_id
self.tunnel_entries = tunnel_entries
@staticmethod
def extract(kw):
if 'val1' in kw:
kw['tunnel_entries'] = [tuple(entry.split('|')) for entry in kw.pop('val1').split(';')]
return kw
def extra_props(self):
return {
'vxnet_id': self.vxnet_id,
'val1': ';'.join('%s|%s|%s' % entry for entry in self.tunnel_entries),
}
class _StaticForFiltering(_RouterStatic):
static_type = RouterStaticFactory.TYPE_FILTERING
def __init__(self, router_static_name='', src_ip='', src_port='',
dst_ip='', dst_port='', priority='1', action='', **kw):
super(_StaticForFiltering, self).__init__()
self.router_static_name = router_static_name
self.src_ip = src_ip
self.src_port = src_port
self.dst_ip = dst_ip
self.dst_port = dst_port
self.priority = priority
self.action = action
@staticmethod
def extract(kw):
if 'val1' in kw:
kw['src_ip'] = kw.pop('val1')
if 'val2' in kw:
kw['src_port'] = kw.pop('val2')
if 'val3' in kw:
kw['dst_ip'] = kw.pop('val3')
if 'val4' in kw:
kw['dst_port'] = kw.pop('val4')
if 'val5' in kw:
kw['priority'] = kw.pop('val5')
if 'val6' in kw:
kw['action'] = kw.pop('val6')
return kw
def extra_props(self):
return {
'router_static_name': self.router_static_name,
'val1': self.src_ip,
'val2': self.src_port,
'val3': self.dst_ip,
'val4': self.dst_port,
'val5': self.priority,
'val6': self.action,
}
STATIC_MAPPER = {
RouterStaticFactory.TYPE_PORT_FORWARDING: _StaticForPortForwarding,
RouterStaticFactory.TYPE_VPN: _StaticForVPN,
RouterStaticFactory.TYPE_TUNNEL: _StaticForTunnel,
RouterStaticFactory.TYPE_FILTERING: _StaticForFiltering,
}
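# Usage sketch (illustrative values): the factory hides the val1..val4 wire format, e.g.
#   RouterStaticFactory.create(RouterStaticFactory.TYPE_PORT_FORWARDING,
#                              src_port='80', dst_ip='192.168.1.2', dst_port='8080')
# returns a _StaticForPortForwarding whose to_json() yields the val1..val4 dict expected by the API.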
| apache-2.0 | 8,290,995,983,507,668,000 | 30.503846 | 99 | 0.521182 | false |
matplotlib/freetypy | docstrings/truetype.py | 1 | 5852 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Michael Droettboom All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be interpreted
# as representing official policies, either expressed or implied, of
# the FreeBSD Project.
from __future__ import print_function, unicode_literals, absolute_import
TT_PLATFORM = """
Platform identifier codes.
- `APPLE_UNICODE`: Used by Apple to indicate a Unicode character map
and/or name entry. See `TT_APPLE_ID` for corresponding ‘encoding_id’
values. Note that name entries in this format are coded as
big-endian UCS-2 character codes only.
- `MACINTOSH`: Used by Apple to indicate a MacOS-specific charmap
and/or name entry. See `TT_MAC_ID` for corresponding ‘encoding_id’
values. Note that most TrueType fonts contain an Apple roman charmap
to be usable on MacOS systems (even if they contain a Microsoft
charmap as well).
- `ISO`: This value was used to specify ISO/IEC 10646 charmaps. It is
however now deprecated. See `TT_ISO_ID` for a list of
corresponding ‘encoding_id’ values.
- `MICROSOFT`: Used by Microsoft to indicate Windows-specific
charmaps. See `TT_MS_ID` for a list of corresponding ‘encoding_id’
values. Note that most fonts contain a Unicode charmap using
(`TT_PLATFORM.MICROSOFT`, `TT_MS_ID.UNICODE_CS`).
- `CUSTOM`: Used to indicate application-specific charmaps.
- `ADOBE`: This value isn't part of any font format specification, but
is used by FreeType to report Adobe-specific charmaps in an
`CharMap` object. See `TT_ADOBE_ID`.
"""
TT_APPLE_ID = """
Apple-specific encoding values.
- `DEFAULT`: Unicode version 1.0.
- `UNICODE_1_1`: Unicode 1.1; specifies Hangul characters starting at
U+34xx.
- `ISO_10646`: Deprecated (identical to preceding).
- `UNICODE_2_0`: Unicode 2.0 and beyond (UTF-16 BMP only).
- `UNICODE_32`: Unicode 3.1 and beyond, using UTF-32.
- `VARIANT_SELECTOR`: From Adobe, not Apple. Not a normal
cmap. Specifies variations on a real cmap.
"""
TT_ADOBE_ID = """
Adobe-specific encoding values.
- `STANDARD`: Adobe standard encoding.
- `EXPERT`: Adobe expert encoding.
- `CUSTOM`: Adobe custom encoding.
- `LATIN_1`: Adobe Latin 1 encoding.
"""
TT_ISO_ID = """
Standard ISO encodings.
- `ISO_7BIT_ASCII`: ASCII.
- `ISO_10646`: ISO/10646.
- `ISO_8859_1`: Also known as Latin-1.
"""
TT_MAC_ID = """
Macintosh-specific encoding values.
- `ROMAN`
- `JAPANESE`
- `TRADITIONAL_CHINESE`
- `KOREAN`
- `ARABIC`
- `HEBREW`
- `GREEK`
- `RUSSIAN`
- `RSYMBOL`
- `DEVANAGARI`
- `GURMUKHI`
- `GUJARATI`
- `ORIYA`
- `BENGALI`
- `TAMIL`
- `TELUGU`
- `KANNADA`
- `MALAYALAM`
- `SINHALESE`
- `BURMESE`
- `KHMER`
- `THAI`
- `LAOTIAN`
- `GEORGIAN`
- `ARMENIAN`
- `MALDIVIAN`
- `SIMPLIFIED_CHINESE`
- `TIBETAN`
- `MONGOLIAN`
- `GEEZ`
- `SLAVIC`
- `VIETNAMESE`
- `SINDHI`
- `UNINTERP`
"""
TT_MAC_LANGID = """
Language identifier.
Used in the name records of the TTF "name" table if the "platform"
identifier code is `TT_PLATFORM.MACINTOSH`.
"""
TT_MS_ID = """
Microsoft-specific encoding values.
- `SYMBOL_CS`: Corresponds to Microsoft symbol encoding. See
`ENCODING.MS_SYMBOL`.
- `UNICODE_CS`: Corresponds to a Microsoft WGL4 charmap, matching
Unicode. See `ENCODING.UNICODE`.
- `SJIS`: Corresponds to SJIS Japanese encoding. See `ENCODING.SJIS`.
- `GB2312`: Corresponds to Simplified Chinese as used in Mainland
China. See `ENCODING.GB2312`.
- `BIG_5`: Corresponds to Traditional Chinese as used in Taiwan and
Hong Kong. See `ENCODING.BIG5`.
- `WANSUNG`: Corresponds to Korean Wansung encoding. See
`ENCODING.WANSUNG`.
- `JOHAB`: Corresponds to Johab encoding. See `ENCODING.JOHAB`.
- `UCS_4`: Corresponds to UCS-4 or UTF-32 charmaps. This has been
added to the OpenType specification version 1.4 (mid-2001.)
"""
TT_MS_LANGID = """
Language identifier.
Used in in the name records of the TTF "name" table if the "platform"
identifier code is `TT_PLATFORM.MICROSOFT`.
"""
TT_NAME_ID = """
The type of value stored in a `SfntName` record.
- `COPYRIGHT`
- `FONT_FAMILY`
- `FONT_SUBFAMILY`
- `UNIQUE_ID`
- `FULL_NAME`
- `VERSION_STRING`
- `PS_NAME`
- `TRADEMARK`
The following values are from the OpenType spec:
- `MANUFACTURER`
- `DESIGNER`
- `DESCRIPTION`
- `VENDOR_URL`
- `DESIGNER_URL`
- `LICENSE`
- `LICENSE_URL`
- `PREFERRED_FAMILY`
- `PREFERRED_SUBFAMILY`
- `MAC_FULL_NAME`
- `SAMPLE_TEXT`
This is new in OpenType 1.3:
- `CID_FINDFONT_NAME`
This is new in OpenType 1.5:
- `WWS_FAMILY`
- `WWS_SUBFAMILY`
"""
| bsd-2-clause | -7,203,823,751,697,514,000 | 25.527273 | 72 | 0.72207 | false |
foursquare/pants | contrib/errorprone/src/python/pants/contrib/errorprone/tasks/errorprone.py | 1 | 7607 | # coding=utf-8
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import re
from builtins import str
from pants.backend.jvm import argfile
from pants.backend.jvm.subsystems.shader import Shader
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.base.exceptions import TaskError
from pants.base.revision import Revision
from pants.base.workunit import WorkUnitLabel
from pants.java.jar.jar_dependency import JarDependency
from pants.util.dirutil import safe_mkdir
from pants.util.memo import memoized_property
from pants.util.strutil import safe_shlex_split
class ErrorProne(NailgunTask):
"""Check Java code for Error Prone violations. See http://errorprone.info/ for more details."""
_ERRORPRONE_MAIN = 'com.google.errorprone.ErrorProneCompiler'
_JAVA_SOURCE_EXTENSION = '.java'
@classmethod
def register_options(cls, register):
super(ErrorProne, cls).register_options(register)
register('--skip', type=bool, help='Skip Error Prone.')
register('--transitive', default=False, type=bool,
help='Run Error Prone against transitive dependencies of targets '
'specified on the command line.')
register('--command-line-options', type=list, default=[], fingerprint=True,
help='Command line options passed to Error Prone')
register('--exclude-patterns', type=list, default=[], fingerprint=True,
help='Patterns for targets to be excluded from analysis.')
cls.register_jvm_tool(register,
'errorprone',
classpath=[
JarDependency(org='com.google.errorprone',
name='error_prone_core',
rev='2.3.1'),
],
main=cls._ERRORPRONE_MAIN,
custom_rules=[
Shader.exclude_package('com.google.errorprone', recursive=True)
]
)
# The javac version should be kept in sync with the version used by errorprone above.
cls.register_jvm_tool(register,
'errorprone-javac',
classpath=[
JarDependency(org='com.google.errorprone',
name='javac',
rev='9+181-r4173-1'),
])
@classmethod
def prepare(cls, options, round_manager):
super(ErrorProne, cls).prepare(options, round_manager)
round_manager.require_data('runtime_classpath')
@memoized_property
def _exclude_patterns(self):
return [re.compile(x) for x in set(self.get_options().exclude_patterns or [])]
def _is_errorprone_target(self, target):
if not target.has_sources(self._JAVA_SOURCE_EXTENSION):
self.context.log.debug('Skipping [{}] because it has no {} sources'.format(target.address.spec, self._JAVA_SOURCE_EXTENSION))
return False
if target.is_synthetic:
self.context.log.debug('Skipping [{}] because it is a synthetic target'.format(target.address.spec))
return False
for pattern in self._exclude_patterns:
if pattern.search(target.address.spec):
self.context.log.debug(
"Skipping [{}] because it matches exclude pattern '{}'".format(target.address.spec, pattern.pattern))
return False
return True
@property
def cache_target_dirs(self):
return True
def execute(self):
if self.get_options().skip:
return
if self.get_options().transitive:
targets = self.context.targets(self._is_errorprone_target)
else:
targets = [t for t in self.context.target_roots if self._is_errorprone_target(t)]
targets = list(set(targets))
target_count = 0
errorprone_failed = False
with self.invalidated(targets, invalidate_dependents=True) as invalidation_check:
total_targets = len(invalidation_check.invalid_vts)
for vt in invalidation_check.invalid_vts:
target_count += 1
self.context.log.info('[{}/{}] {}'.format(
str(target_count).rjust(len(str(total_targets))),
total_targets,
vt.target.address.spec))
result = self.errorprone(vt.target)
if result != 0:
errorprone_failed = True
if self.get_options().fail_fast:
break
else:
vt.update()
if errorprone_failed:
raise TaskError('ErrorProne checks failed')
def calculate_sources(self, target):
return {source for source in target.sources_relative_to_buildroot()
if source.endswith(self._JAVA_SOURCE_EXTENSION)}
def errorprone(self, target):
runtime_classpaths = self.context.products.get_data('runtime_classpath')
runtime_classpath = [jar for conf, jar in runtime_classpaths.get_for_targets(target.closure(bfs=True))]
output_dir = os.path.join(self.workdir, target.id)
safe_mkdir(output_dir)
runtime_classpath.append(output_dir)
# Try to run errorprone with the same java version as the target
# The minimum JDK for errorprone is JDK 1.8
min_jdk_version = max(target.platform.target_level, Revision.lenient('1.8'))
if min_jdk_version.components[0] == 1:
max_jdk_version = Revision(min_jdk_version.components[0], min_jdk_version.components[1], '9999')
else:
max_jdk_version = Revision(min_jdk_version.components[0], '9999')
self.set_distribution(minimum_version=min_jdk_version, maximum_version=max_jdk_version, jdk=True)
jvm_options = self.get_options().jvm_options[:]
if self.dist.version < Revision.lenient('9'):
# For Java 8 we need to add the errorprone javac jar to the bootclasspath to
# avoid the "java.lang.NoSuchFieldError: ANNOTATION_PROCESSOR_MODULE_PATH" error
# See https://github.com/google/error-prone/issues/653 for more information
jvm_options.extend(['-Xbootclasspath/p:{}'.format(self.tool_classpath('errorprone-javac')[0])])
args = [
'-d', output_dir,
]
# Errorprone does not recognize source or target 10 yet
if target.platform.source_level < Revision.lenient('10'):
args.extend(['-source', str(target.platform.source_level)])
if target.platform.target_level < Revision.lenient('10'):
args.extend(['-target', str(target.platform.target_level)])
errorprone_classpath_file = os.path.join(self.workdir, '{}.classpath'.format(os.path.basename(output_dir)))
with open(errorprone_classpath_file, 'w') as f:
f.write('-classpath ')
f.write(':'.join(runtime_classpath))
args.append('@{}'.format(errorprone_classpath_file))
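    # For reference (illustrative, not the exact invocation): the composed call resembles
    #   java [-Xbootclasspath/p:<errorprone-javac.jar>] com.google.errorprone.ErrorProneCompiler \
    #     -d <workdir>/<target-id> -source 1.8 -target 1.8 @<target-id>.classpath <sources...>
    # with the actual -classpath entries supplied via the argfile written above.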
for opt in self.get_options().command_line_options:
args.extend(safe_shlex_split(opt))
with argfile.safe_args(self.calculate_sources(target), self.get_options()) as batched_sources:
args.extend(batched_sources)
result = self.runjava(classpath=self.tool_classpath('errorprone'),
main=self._ERRORPRONE_MAIN,
jvm_options=jvm_options,
args=args,
workunit_name='errorprone',
workunit_labels=[WorkUnitLabel.LINT])
self.context.log.debug('java {main} ... exited with result ({result})'.format(
main=self._ERRORPRONE_MAIN, result=result))
return result
| apache-2.0 | 2,359,390,993,532,608,000 | 40.342391 | 131 | 0.640331 | false |
nephthys/Nouweo | nouweo/wsgi.py | 1 | 1419 | """
WSGI config for nouweo project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "nouweo.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nouweo.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| agpl-3.0 | 214,798,505,212,677,300 | 43.34375 | 79 | 0.792812 | false |
ace02000/pyload | module/plugins/accounts/Keep2ShareCc.py | 1 | 2362 | # -*- coding: utf-8 -*-
import re
import time
from module.plugins.internal.Account import Account
from module.plugins.internal.Plugin import set_cookie
class Keep2ShareCc(Account):
__name__ = "Keep2ShareCc"
__type__ = "account"
__version__ = "0.10"
__status__ = "testing"
__description__ = """Keep2Share.cc account plugin"""
__license__ = "GPLv3"
__authors__ = [("aeronaut", "[email protected]"),
("Walter Purcaro", "[email protected]")]
VALID_UNTIL_PATTERN = r'Premium expires:\s*<b>(.+?)<'
TRAFFIC_LEFT_PATTERN = r'Available traffic \(today\):\s*<b><a href="/user/statistic.html">(.+?)<'
LOGIN_FAIL_PATTERN = r'Please fix the following input errors'
def grab_info(self, user, password, data):
validuntil = None
trafficleft = -1
premium = False
html = self.load("http://keep2share.cc/site/profile.html")
m = re.search(self.VALID_UNTIL_PATTERN, html)
if m:
expiredate = m.group(1).strip()
self.log_debug("Expire date: " + expiredate)
if expiredate == "LifeTime":
premium = True
validuntil = -1
else:
try:
validuntil = time.mktime(time.strptime(expiredate, "%Y.%m.%d"))
except Exception, e:
self.log_error(e)
else:
premium = True if validuntil > time.mktime(time.gmtime()) else False
m = re.search(self.TRAFFIC_LEFT_PATTERN, html)
if m:
try:
trafficleft = self.parse_traffic(m.group(1))
except Exception, e:
self.log_error(e)
return {'validuntil': validuntil, 'trafficleft': trafficleft, 'premium': premium}
def signin(self, user, password, data):
set_cookie(self.req.cj, "keep2share.cc", "lang", "en")
html = self.load("https://keep2share.cc/login.html",
post={'LoginForm[username]' : user,
'LoginForm[password]' : password,
'LoginForm[rememberMe]': 1,
'yt0' : ""})
if re.search(self.LOGIN_FAIL_PATTERN, html):
self.fail_login()
| gpl-3.0 | -2,316,719,965,670,955,000 | 30.918919 | 101 | 0.516511 | false |
namgivu/shared-model-FlaskSqlAlchemy-vs-SQLAlchemy | python-app/model/user.py | 1 | 1292 | from base_model import BaseModel
import sqlalchemy as db
class User(BaseModel):
#table mapping
__tablename__ = "users"
##region column mapping
id = db.Column(db.Integer, primary_key=True)
user_name = db.Column(db.Text)
primary_email_id = db.Column(db.Integer, db.ForeignKey('user_emails.id') )
#Use model class instead of physical table name for db.ForeignKey() ref. http://stackoverflow.com/a/41633052/248616
from model.address import Address
billing_address_id = db.Column(db.Integer, db.ForeignKey(Address.__table__.c['id'] ))
shipping_address_id = db.Column(db.Integer, db.ForeignKey(Address.__table__.c['id'] ))
##endregion column mapping
##region relationship obj
emails = db.relationship('UserEmail',
primaryjoin='User.id==UserEmail.user_id',
back_populates='owner')
primaryEmail = db.relationship('UserEmail',
primaryjoin='User.primary_email_id==UserEmail.id')
billingAddress = db.relationship('Address',
primaryjoin='User.billing_address_id==Address.id')
shippingAddress = db.relationship('Address',
primaryjoin='User.shipping_address_id==Address.id')
##endregion relationship obj
| gpl-3.0 | 6,215,019,284,809,545,000 | 37 | 117 | 0.647059 | false |
Disiok/poetry-seq2seq | utils.py | 1 | 1442 | #! /usr/bin/env python
# -*- coding:utf-8 -*-
import os
# apply_one_hot below calls to_categorical; this import is assumed to come from
# Keras (it is not present in the original file) and may need adjusting to the
# project's actual dependency.
from keras.utils import to_categorical
VOCAB_SIZE = 6000
SEP_TOKEN = 0
PAD_TOKEN = 5999
DATA_RAW_DIR = 'data/raw'
DATA_PROCESSED_DIR = 'data/processed'
DATA_SAMPLES_DIR = 'data/samples'
MODEL_DIR = 'model'
LOG_DIR = 'log'
if not os.path.exists(DATA_PROCESSED_DIR):
os.mkdir(DATA_PROCESSED_DIR)
if not os.path.exists(MODEL_DIR):
os.mkdir(MODEL_DIR)
def embed_w2v(embedding, data_set):
embedded = [map(lambda x: embedding[x], sample) for sample in data_set]
return embedded
def apply_one_hot(data_set):
applied = [map(lambda x: to_categorical(x, num_classes=VOCAB_SIZE)[0], sample) for sample in data_set]
return applied
def apply_sparse(data_set):
applied = [map(lambda x: [x], sample) for sample in data_set]
return applied
def pad_to(lst, length, value):
for i in range(len(lst), length):
lst.append(value)
return lst
def uprint(x):
print repr(x).decode('unicode-escape'),
def uprintln(x):
print repr(x).decode('unicode-escape')
def is_CN_char(ch):
return ch >= u'\u4e00' and ch <= u'\u9fa5'
def split_sentences(line):
sentences = []
i = 0
for j in range(len(line)+1):
if j == len(line) or line[j] in [u',', u'。', u'!', u'?', u'、']:
if i < j:
sentence = u''.join(filter(is_CN_char, line[i:j]))
sentences.append(sentence)
i = j+1
return sentences
| mit | -4,215,054,157,706,704,400 | 20.058824 | 106 | 0.609637 | false |
rohitwaghchaure/erpnext_develop | erpnext/regional/united_arab_emirates/setup.py | 1 | 2919 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, os, json
from frappe.custom.doctype.custom_field.custom_field import create_custom_fields
from erpnext.setup.setup_wizard.operations.taxes_setup import create_sales_tax
def setup(company=None, patch=True):
make_custom_fields()
add_print_formats()
if company:
create_sales_tax(company)
def make_custom_fields():
invoice_fields = [
dict(fieldname='vat_section', label='VAT Details', fieldtype='Section Break',
insert_after='select_print_heading', print_hide=1, collapsible=1),
dict(fieldname='permit_no', label='Permit Number',
fieldtype='Data', insert_after='vat_section', print_hide=1),
dict(fieldname='reverse_charge_applicable', label='Reverse Charge Applicable',
fieldtype='Select', insert_after='permit_no', print_hide=1,
options='Y\nN', default='N')
]
purchase_invoice_fields = [
dict(fieldname='company_trn', label='Company TRN',
fieldtype='Read Only', insert_after='shipping_address',
options='company.tax_id', print_hide=1),
dict(fieldname='supplier_name_in_arabic', label='Supplier Name in Arabic',
fieldtype='Read Only', insert_after='supplier_name',
options='supplier.supplier_name_in_arabic', print_hide=1)
]
sales_invoice_fields = [
dict(fieldname='company_trn', label='Company TRN',
fieldtype='Read Only', insert_after='company_address',
options='company.tax_id', print_hide=1),
dict(fieldname='customer_name_in_arabic', label='Customer Name in Arabic',
fieldtype='Read Only', insert_after='customer_name',
options='customer.customer_name_in_arabic', print_hide=1),
]
tax_code_field = dict(fieldname='tax_code', label='Tax Code',
fieldtype='Read Only', options='item_code.tax_code', insert_after='description',
allow_on_submit=1, print_hide=1)
custom_fields = {
'Item': [
dict(fieldname='tax_code', label='Tax Code',
fieldtype='Data', insert_after='item_group'),
],
'Customer': [
dict(fieldname='customer_name_in_arabic', label='Customer Name in Arabic',
fieldtype='Data', insert_after='customer_name'),
],
'Supplier': [
dict(fieldname='supplier_name_in_arabic', label='Supplier Name in Arabic',
fieldtype='Data', insert_after='supplier_name'),
],
'Purchase Invoice': purchase_invoice_fields + invoice_fields,
'Sales Invoice': sales_invoice_fields + invoice_fields,
'Sales Invoice Item': [tax_code_field],
'Purchase Invoice Item': [tax_code_field]
}
create_custom_fields(custom_fields)
def add_print_formats():
frappe.reload_doc("regional", "print_format", "detailed_tax_invoice")
frappe.reload_doc("regional", "print_format", "simplified_tax_invoice")
frappe.db.sql(""" update `tabPrint Format` set disabled = 0 where
name in('Simplified Tax Invoice', 'Detailed Tax Invoice') """)
| gpl-3.0 | 1,549,981,976,826,451,200 | 37.407895 | 82 | 0.711545 | false |
googleinterns/ddsp-docker | mvp/trainer/helper_functions.py | 1 | 2589 | """Helper functions for interacting with Magenta DDSP internals.
"""
import json
import os
from absl import logging
from ddsp.training import train_util
import tensorflow.compat.v2 as tf
from google.cloud import storage
def get_strategy(tpu='', gpus=None):
"""Chooses a distribution strategy.
  AI Platform automatically sets the TF_CONFIG environment variable based
  on the provided config file. If training is run on multiple VMs, a different
  strategy needs to be chosen than when it is run on only one VM. This function
  determines the strategy based on the information in the TF_CONFIG variable.
Args:
tpu:
Argument for DDSP library function call.
Address of the TPU. No TPU if left blank.
gpus:
Argument for DDSP library function call.
List of GPU addresses for synchronous training.
Returns:
A distribution strategy.
"""
if 'TF_CONFIG' in os.environ:
tf_config_str = os.environ.get('TF_CONFIG')
    logging.info('TF_CONFIG: %s', tf_config_str)
tf_config_dict = json.loads(tf_config_str)
    # Exactly one chief worker is always specified in the cluster section of
    # TF_CONFIG. If any other workers are specified as well, the MultiWorker
    # strategy needs to be chosen.
if len(tf_config_dict["cluster"]) > 1:
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
logging.info('Cluster spec: %s', strategy.cluster_resolver.cluster_spec())
else:
strategy = train_util.get_strategy(tpu=tpu, gpus=gpus)
else:
strategy = train_util.get_strategy(tpu=tpu, gpus=gpus)
return strategy
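# Illustrative sketch (not part of the original module): a hypothetical TF_CONFIG
# value that AI Platform might set for a two-VM cluster. With more than one entry
# in the "cluster" section, get_strategy() picks MultiWorkerMirroredStrategy.
#   os.environ['TF_CONFIG'] = json.dumps({
#       'cluster': {'chief': ['10.0.0.1:2222'], 'worker': ['10.0.0.2:2222']},
#       'task': {'type': 'chief', 'index': 0},
#   })
#   strategy = get_strategy()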
def copy_config_file_from_gstorage(gstorage_path, container_path):
"""Downloads configuration path from the bucket to the container.
Args:
gstorage_path:
Path to the file inside the bucket that needs to be downloaded.
Format: gs://bucket-name/path/to/file.txt
container_path:
Path inside the container where downloaded file should be stored.
"""
  # str.strip('gs:/') would also trim leading/trailing 'g', 's', ':' and '/'
  # characters belonging to the path itself, so drop only the literal scheme prefix.
  if gstorage_path.startswith('gs://'):
    gstorage_path = gstorage_path[len('gs://'):]
  bucket_name = gstorage_path.split('/')[0]
  blob_name = os.path.relpath(gstorage_path, bucket_name)
  logging.info('Bucket: %s, blob: %s', bucket_name, blob_name)
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(blob_name)
blob.download_to_filename(container_path)
logging.info('Downloaded config file inside the container. Current location: %s', container_path)
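# Example call (hypothetical bucket and paths, for illustration only):
#   copy_config_file_from_gstorage('gs://my-ddsp-bucket/configs/train.gin',
#                                  '/root/train.gin')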
| apache-2.0 | -4,077,344,281,650,267,000 | 35.464789 | 101 | 0.677095 | false |
w1ll1am23/home-assistant | homeassistant/components/smartthings/sensor.py | 1 | 12696 | """Support for sensors through the SmartThings cloud API."""
from __future__ import annotations
from collections import namedtuple
from typing import Sequence
from pysmartthings import Attribute, Capability
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import (
AREA_SQUARE_METERS,
CONCENTRATION_PARTS_PER_MILLION,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_TIMESTAMP,
ENERGY_KILO_WATT_HOUR,
LIGHT_LUX,
MASS_KILOGRAMS,
PERCENTAGE,
POWER_WATT,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
VOLT,
VOLUME_CUBIC_METERS,
)
from . import SmartThingsEntity
from .const import DATA_BROKERS, DOMAIN
Map = namedtuple("map", "attribute name default_unit device_class")
CAPABILITY_TO_SENSORS = {
Capability.activity_lighting_mode: [
Map(Attribute.lighting_mode, "Activity Lighting Mode", None, None)
],
Capability.air_conditioner_mode: [
Map(Attribute.air_conditioner_mode, "Air Conditioner Mode", None, None)
],
Capability.air_quality_sensor: [
Map(Attribute.air_quality, "Air Quality", "CAQI", None)
],
Capability.alarm: [Map(Attribute.alarm, "Alarm", None, None)],
Capability.audio_volume: [Map(Attribute.volume, "Volume", PERCENTAGE, None)],
Capability.battery: [
Map(Attribute.battery, "Battery", PERCENTAGE, DEVICE_CLASS_BATTERY)
],
Capability.body_mass_index_measurement: [
Map(
Attribute.bmi_measurement,
"Body Mass Index",
f"{MASS_KILOGRAMS}/{AREA_SQUARE_METERS}",
None,
)
],
Capability.body_weight_measurement: [
Map(Attribute.body_weight_measurement, "Body Weight", MASS_KILOGRAMS, None)
],
Capability.carbon_dioxide_measurement: [
Map(
Attribute.carbon_dioxide,
"Carbon Dioxide Measurement",
CONCENTRATION_PARTS_PER_MILLION,
None,
)
],
Capability.carbon_monoxide_detector: [
Map(Attribute.carbon_monoxide, "Carbon Monoxide Detector", None, None)
],
Capability.carbon_monoxide_measurement: [
Map(
Attribute.carbon_monoxide_level,
"Carbon Monoxide Measurement",
CONCENTRATION_PARTS_PER_MILLION,
None,
)
],
Capability.dishwasher_operating_state: [
Map(Attribute.machine_state, "Dishwasher Machine State", None, None),
Map(Attribute.dishwasher_job_state, "Dishwasher Job State", None, None),
Map(
Attribute.completion_time,
"Dishwasher Completion Time",
None,
DEVICE_CLASS_TIMESTAMP,
),
],
Capability.dryer_mode: [Map(Attribute.dryer_mode, "Dryer Mode", None, None)],
Capability.dryer_operating_state: [
Map(Attribute.machine_state, "Dryer Machine State", None, None),
Map(Attribute.dryer_job_state, "Dryer Job State", None, None),
Map(
Attribute.completion_time,
"Dryer Completion Time",
None,
DEVICE_CLASS_TIMESTAMP,
),
],
Capability.dust_sensor: [
Map(Attribute.fine_dust_level, "Fine Dust Level", None, None),
Map(Attribute.dust_level, "Dust Level", None, None),
],
Capability.energy_meter: [
Map(Attribute.energy, "Energy Meter", ENERGY_KILO_WATT_HOUR, None)
],
Capability.equivalent_carbon_dioxide_measurement: [
Map(
Attribute.equivalent_carbon_dioxide_measurement,
"Equivalent Carbon Dioxide Measurement",
CONCENTRATION_PARTS_PER_MILLION,
None,
)
],
Capability.formaldehyde_measurement: [
Map(
Attribute.formaldehyde_level,
"Formaldehyde Measurement",
CONCENTRATION_PARTS_PER_MILLION,
None,
)
],
Capability.gas_meter: [
Map(Attribute.gas_meter, "Gas Meter", ENERGY_KILO_WATT_HOUR, None),
Map(Attribute.gas_meter_calorific, "Gas Meter Calorific", None, None),
Map(Attribute.gas_meter_time, "Gas Meter Time", None, DEVICE_CLASS_TIMESTAMP),
Map(Attribute.gas_meter_volume, "Gas Meter Volume", VOLUME_CUBIC_METERS, None),
],
Capability.illuminance_measurement: [
Map(Attribute.illuminance, "Illuminance", LIGHT_LUX, DEVICE_CLASS_ILLUMINANCE)
],
Capability.infrared_level: [
Map(Attribute.infrared_level, "Infrared Level", PERCENTAGE, None)
],
Capability.media_input_source: [
Map(Attribute.input_source, "Media Input Source", None, None)
],
Capability.media_playback_repeat: [
Map(Attribute.playback_repeat_mode, "Media Playback Repeat", None, None)
],
Capability.media_playback_shuffle: [
Map(Attribute.playback_shuffle, "Media Playback Shuffle", None, None)
],
Capability.media_playback: [
Map(Attribute.playback_status, "Media Playback Status", None, None)
],
Capability.odor_sensor: [Map(Attribute.odor_level, "Odor Sensor", None, None)],
Capability.oven_mode: [Map(Attribute.oven_mode, "Oven Mode", None, None)],
Capability.oven_operating_state: [
Map(Attribute.machine_state, "Oven Machine State", None, None),
Map(Attribute.oven_job_state, "Oven Job State", None, None),
Map(Attribute.completion_time, "Oven Completion Time", None, None),
],
Capability.oven_setpoint: [
Map(Attribute.oven_setpoint, "Oven Set Point", None, None)
],
Capability.power_meter: [Map(Attribute.power, "Power Meter", POWER_WATT, None)],
Capability.power_source: [Map(Attribute.power_source, "Power Source", None, None)],
Capability.refrigeration_setpoint: [
Map(
Attribute.refrigeration_setpoint,
"Refrigeration Setpoint",
None,
DEVICE_CLASS_TEMPERATURE,
)
],
Capability.relative_humidity_measurement: [
Map(
Attribute.humidity,
"Relative Humidity Measurement",
PERCENTAGE,
DEVICE_CLASS_HUMIDITY,
)
],
Capability.robot_cleaner_cleaning_mode: [
Map(
Attribute.robot_cleaner_cleaning_mode,
"Robot Cleaner Cleaning Mode",
None,
None,
)
],
Capability.robot_cleaner_movement: [
Map(Attribute.robot_cleaner_movement, "Robot Cleaner Movement", None, None)
],
Capability.robot_cleaner_turbo_mode: [
Map(Attribute.robot_cleaner_turbo_mode, "Robot Cleaner Turbo Mode", None, None)
],
Capability.signal_strength: [
Map(Attribute.lqi, "LQI Signal Strength", None, None),
Map(Attribute.rssi, "RSSI Signal Strength", None, None),
],
Capability.smoke_detector: [Map(Attribute.smoke, "Smoke Detector", None, None)],
Capability.temperature_measurement: [
Map(
Attribute.temperature,
"Temperature Measurement",
None,
DEVICE_CLASS_TEMPERATURE,
)
],
Capability.thermostat_cooling_setpoint: [
Map(
Attribute.cooling_setpoint,
"Thermostat Cooling Setpoint",
None,
DEVICE_CLASS_TEMPERATURE,
)
],
Capability.thermostat_fan_mode: [
Map(Attribute.thermostat_fan_mode, "Thermostat Fan Mode", None, None)
],
Capability.thermostat_heating_setpoint: [
Map(
Attribute.heating_setpoint,
"Thermostat Heating Setpoint",
None,
DEVICE_CLASS_TEMPERATURE,
)
],
Capability.thermostat_mode: [
Map(Attribute.thermostat_mode, "Thermostat Mode", None, None)
],
Capability.thermostat_operating_state: [
Map(
Attribute.thermostat_operating_state,
"Thermostat Operating State",
None,
None,
)
],
Capability.thermostat_setpoint: [
Map(
Attribute.thermostat_setpoint,
"Thermostat Setpoint",
None,
DEVICE_CLASS_TEMPERATURE,
)
],
Capability.three_axis: [],
Capability.tv_channel: [
Map(Attribute.tv_channel, "Tv Channel", None, None),
Map(Attribute.tv_channel_name, "Tv Channel Name", None, None),
],
Capability.tvoc_measurement: [
Map(
Attribute.tvoc_level,
"Tvoc Measurement",
CONCENTRATION_PARTS_PER_MILLION,
None,
)
],
Capability.ultraviolet_index: [
Map(Attribute.ultraviolet_index, "Ultraviolet Index", None, None)
],
Capability.voltage_measurement: [
Map(Attribute.voltage, "Voltage Measurement", VOLT, None)
],
Capability.washer_mode: [Map(Attribute.washer_mode, "Washer Mode", None, None)],
Capability.washer_operating_state: [
Map(Attribute.machine_state, "Washer Machine State", None, None),
Map(Attribute.washer_job_state, "Washer Job State", None, None),
Map(
Attribute.completion_time,
"Washer Completion Time",
None,
DEVICE_CLASS_TIMESTAMP,
),
],
}
UNITS = {"C": TEMP_CELSIUS, "F": TEMP_FAHRENHEIT}
THREE_AXIS_NAMES = ["X Coordinate", "Y Coordinate", "Z Coordinate"]
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Add binary sensors for a config entry."""
broker = hass.data[DOMAIN][DATA_BROKERS][config_entry.entry_id]
sensors = []
for device in broker.devices.values():
for capability in broker.get_assigned(device.device_id, "sensor"):
if capability == Capability.three_axis:
sensors.extend(
[
SmartThingsThreeAxisSensor(device, index)
for index in range(len(THREE_AXIS_NAMES))
]
)
else:
maps = CAPABILITY_TO_SENSORS[capability]
sensors.extend(
[
SmartThingsSensor(
device, m.attribute, m.name, m.default_unit, m.device_class
)
for m in maps
]
)
async_add_entities(sensors)
def get_capabilities(capabilities: Sequence[str]) -> Sequence[str] | None:
"""Return all capabilities supported if minimum required are present."""
return [
capability for capability in CAPABILITY_TO_SENSORS if capability in capabilities
]
class SmartThingsSensor(SmartThingsEntity, SensorEntity):
"""Define a SmartThings Sensor."""
def __init__(
self, device, attribute: str, name: str, default_unit: str, device_class: str
):
"""Init the class."""
super().__init__(device)
self._attribute = attribute
self._name = name
self._device_class = device_class
self._default_unit = default_unit
@property
def name(self) -> str:
"""Return the name of the binary sensor."""
return f"{self._device.label} {self._name}"
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return f"{self._device.device_id}.{self._attribute}"
@property
def state(self):
"""Return the state of the sensor."""
return self._device.status.attributes[self._attribute].value
@property
def device_class(self):
"""Return the device class of the sensor."""
return self._device_class
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
unit = self._device.status.attributes[self._attribute].unit
return UNITS.get(unit, unit) if unit else self._default_unit
class SmartThingsThreeAxisSensor(SmartThingsEntity, SensorEntity):
"""Define a SmartThings Three Axis Sensor."""
def __init__(self, device, index):
"""Init the class."""
super().__init__(device)
self._index = index
@property
def name(self) -> str:
"""Return the name of the binary sensor."""
return f"{self._device.label} {THREE_AXIS_NAMES[self._index]}"
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return f"{self._device.device_id}.{THREE_AXIS_NAMES[self._index]}"
@property
def state(self):
"""Return the state of the sensor."""
three_axis = self._device.status.attributes[Attribute.three_axis].value
try:
return three_axis[self._index]
except (TypeError, IndexError):
return None
| apache-2.0 | -5,184,662,723,028,571,000 | 32.856 | 88 | 0.604285 | false |
LucienD/ww | tests/test_s.py | 1 | 2614 | # coding: utf-8
from __future__ import (
unicode_literals, division, print_function, absolute_import
)
import re
import pytest
from ww import s, g, f
def test_lshift():
res = s >> """
This is a long text
And it's not indented
"""
assert isinstance(res, s)
    assert res == "This is a long text\nAnd it's not indented"
def test_split():
gen = s('test').split(',')
assert isinstance(gen, g)
assert gen.list() == ['test']
assert s('test,test').split(',').list() == ['test', 'test']
assert s('a,b,c').split(',', maxsplit=1).list() == ['a', 'b,c']
assert s('a,b,c').split('b,').list() == ['a,', 'c']
assert s('a,b;c/d').split(',', ';', '/').list() == ['a', 'b', 'c', 'd']
assert s(r'a1b33c-d').split('\d+').list() == ['a', 'b', 'c-d']
assert s(r'a1b33c-d').split('\d+', '-').list() == ['a', 'b', 'c', 'd']
assert s(r'cAt').split('a', flags='i').list() == ['c', 't']
assert s(r'cAt').split('a', flags=re.I).list() == ['c', 't']
def test_replace():
st = s('test').replace(',', '')
assert isinstance(st, s)
assert st == 'test'
assert s('test,test').replace(',', ';') == 'test;test'
assert s('a,b,c').replace(',', ';', maxreplace=1) == 'a;b,c'
assert s('a,b,c').replace(',b,', ';') == 'a;c'
assert s('a,b;c/d').replace((',', ';', '/'), (',', ',', ',')) == 'a,b,c,d'
assert s('a,b;c/d').replace((',', ';', '/'), ',') == 'a,b,c,d'
assert s(r'a1b33c-d').replace('\d+', ',') == 'a,b,c-d'
assert s(r'a1b33c-d').replace(('\d+', '-'), ',') == 'a,b,c,d'
assert s(r'cAt').replace('a', 'b', flags='i') == 'cbt'
assert s(r'cAt').replace('a', 'b', flags=re.I) == 'cbt'
def test_join():
assert s(';').join('abc') == "a;b;c"
assert s(';').join(range(3)) == "0;1;2"
assert s(';').join(range(3), template="{:.1f}") == "0.0;1.0;2.0"
assert s(';').join(range(3), formatter=lambda s, t: "a") == "a;a;a"
def test_from_bytes():
assert isinstance(s.from_bytes(b'abc', 'ascii'), s)
assert s.from_bytes(b'abc', 'ascii') == 'abc'
assert s.from_bytes('é'.encode('utf8'), 'utf8') == 'é'
with pytest.raises(UnicodeDecodeError):
s.from_bytes('é'.encode('cp850'), 'ascii')
with pytest.raises(ValueError):
s.from_bytes('é'.encode('cp850'))
def test_format():
foo = 1
bar = [1]
string = s('{foo} {bar[0]:.1f}')
assert isinstance(string.format(foo=foo, bar=bar), s)
assert string.format(foo=foo, bar=bar) == "1 1.0"
assert f(string) == "1 1.0"
assert isinstance(f(string), s)
assert f('{foo} {bar[0]:.1f}') == "1 1.0"
| mit | 1,834,878,893,787,160,300 | 24.339806 | 78 | 0.502299 | false |
pixelpicosean/my-godot-2.1 | platform/android/detect.py | 1 | 9664 | import os
import sys
import string
import platform
def is_active():
return True
def get_name():
return "Android"
def can_build():
import os
if (not os.environ.has_key("ANDROID_NDK_ROOT")):
return False
return True
def get_opts():
return [
('ANDROID_NDK_ROOT', 'the path to Android NDK',
os.environ.get("ANDROID_NDK_ROOT", 0)),
('ndk_platform', 'compile for platform: (android-<api> , example: android-14)', "android-14"),
('android_arch', 'select compiler architecture: (armv7/armv6/x86)', "armv7"),
('android_neon', 'enable neon (armv7 only)', "yes"),
('android_stl', 'enable STL support in android port (for modules)', "no")
]
def get_flags():
return [
('tools', 'no'),
]
def create(env):
tools = env['TOOLS']
if "mingw" in tools:
tools.remove('mingw')
if "applelink" in tools:
tools.remove("applelink")
env.Tool('gcc')
return env.Clone(tools=tools)
def configure(env):
# Workaround for MinGW. See:
# http://www.scons.org/wiki/LongCmdLinesOnWin32
import os
if (os.name == "nt"):
import subprocess
def mySubProcess(cmdline, env):
# print "SPAWNED : " + cmdline
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
proc = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, startupinfo=startupinfo, shell=False, env=env)
data, err = proc.communicate()
rv = proc.wait()
if rv:
print "====="
print err
print "====="
return rv
def mySpawn(sh, escape, cmd, args, env):
newargs = ' '.join(args[1:])
cmdline = cmd + " " + newargs
rv = 0
if len(cmdline) > 32000 and cmd.endswith("ar"):
cmdline = cmd + " " + args[1] + " " + args[2] + " "
for i in range(3, len(args)):
rv = mySubProcess(cmdline + args[i], env)
if rv:
break
else:
rv = mySubProcess(cmdline, env)
return rv
env['SPAWN'] = mySpawn
ndk_platform = env['ndk_platform']
if env['android_arch'] not in ['armv7', 'armv6', 'x86']:
env['android_arch'] = 'armv7'
if env['android_arch'] == 'x86':
env["x86_libtheora_opt_gcc"] = True
if env['PLATFORM'] == 'win32':
env.Tool('gcc')
env['SHLIBSUFFIX'] = '.so'
neon_text = ""
if env["android_arch"] == "armv7" and env['android_neon'] == 'yes':
neon_text = " (with neon)"
print("Godot Android!!!!! (" + env['android_arch'] + ")" + neon_text)
env.Append(CPPPATH=['#platform/android'])
if env['android_arch'] == 'x86':
env.extra_suffix = ".x86" + env.extra_suffix
target_subpath = "x86-4.9"
abi_subpath = "i686-linux-android"
arch_subpath = "x86"
elif env['android_arch'] == 'armv6':
env.extra_suffix = ".armv6" + env.extra_suffix
target_subpath = "arm-linux-androideabi-4.9"
abi_subpath = "arm-linux-androideabi"
arch_subpath = "armeabi"
elif env["android_arch"] == "armv7":
target_subpath = "arm-linux-androideabi-4.9"
abi_subpath = "arm-linux-androideabi"
arch_subpath = "armeabi-v7a"
if env['android_neon'] == 'yes':
env.extra_suffix = ".armv7.neon" + env.extra_suffix
else:
env.extra_suffix = ".armv7" + env.extra_suffix
mt_link = True
if (sys.platform.startswith("linux")):
host_subpath = "linux-x86_64"
elif (sys.platform.startswith("darwin")):
host_subpath = "darwin-x86_64"
elif (sys.platform.startswith('win')):
if (platform.machine().endswith('64')):
host_subpath = "windows-x86_64"
else:
mt_link = False
host_subpath = "windows"
compiler_path = env["ANDROID_NDK_ROOT"] + \
"/toolchains/llvm/prebuilt/" + host_subpath + "/bin"
gcc_toolchain_path = env["ANDROID_NDK_ROOT"] + \
"/toolchains/" + target_subpath + "/prebuilt/" + host_subpath
tools_path = gcc_toolchain_path + "/" + abi_subpath + "/bin"
# For Clang to find NDK tools in preference of those system-wide
env.PrependENVPath('PATH', tools_path)
env['CC'] = compiler_path + '/clang'
env['CXX'] = compiler_path + '/clang++'
env['AR'] = tools_path + "/ar"
env['RANLIB'] = tools_path + "/ranlib"
env['AS'] = tools_path + "/as"
if env['android_arch'] == 'x86':
env['ARCH'] = 'arch-x86'
else:
env['ARCH'] = 'arch-arm'
sysroot = env["ANDROID_NDK_ROOT"] + \
"/platforms/" + ndk_platform + "/" + env['ARCH']
common_opts = ['-fno-integrated-as', '-gcc-toolchain', gcc_toolchain_path]
env.Append(CPPFLAGS=["-isystem", sysroot + "/usr/include"])
env.Append(CPPFLAGS=string.split(
'-fpic -ffunction-sections -funwind-tables -fstack-protector-strong -fvisibility=hidden -fno-strict-aliasing'))
env.Append(CPPFLAGS=string.split('-DANDROID -DNO_STATVFS -DGLES2_ENABLED'))
env['neon_enabled'] = False
if env['android_arch'] == 'x86':
can_vectorize = True
target_opts = ['-target', 'i686-none-linux-android']
# The NDK adds this if targeting API < 21, so we can drop it when Godot targets it at least
env.Append(CPPFLAGS=['-mstackrealign'])
elif env["android_arch"] == "armv6":
can_vectorize = False
target_opts = ['-target', 'armv6-none-linux-androideabi']
env.Append(CPPFLAGS=string.split(
'-D__ARM_ARCH_6__ -march=armv6 -mfpu=vfp -mfloat-abi=softfp'))
elif env["android_arch"] == "armv7":
can_vectorize = True
target_opts = ['-target', 'armv7-none-linux-androideabi']
env.Append(CPPFLAGS=string.split(
'-D__ARM_ARCH_7__ -D__ARM_ARCH_7A__ -march=armv7-a -mfloat-abi=softfp'))
if env['android_neon'] == 'yes':
env['neon_enabled'] = True
env.Append(CPPFLAGS=['-mfpu=neon', '-D__ARM_NEON__'])
else:
env.Append(CPPFLAGS=['-mfpu=vfpv3-d16'])
env.Append(CPPFLAGS=target_opts)
env.Append(CPPFLAGS=common_opts)
env.Append(LIBS=['OpenSLES'])
env.Append(LIBS=['EGL', 'OpenSLES', 'android'])
env.Append(LIBS=['log', 'GLESv1_CM', 'GLESv2', 'z'])
if (sys.platform.startswith("darwin")):
env['SHLIBSUFFIX'] = '.so'
env['LINKFLAGS'] = ['-shared', '--sysroot=' +
sysroot, '-Wl,--warn-shared-textrel']
env.Append(LINKFLAGS=string.split(
'-Wl,--fix-cortex-a8'))
env.Append(LINKFLAGS=string.split(
'-Wl,--no-undefined -Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now'))
env.Append(LINKFLAGS=string.split(
'-Wl,-soname,libgodot_android.so -Wl,--gc-sections'))
if mt_link:
env.Append(LINKFLAGS=['-Wl,--threads'])
env.Append(LINKFLAGS=target_opts)
env.Append(LINKFLAGS=common_opts)
env.Append(LIBPATH=[env["ANDROID_NDK_ROOT"] + '/toolchains/arm-linux-androideabi-4.9/prebuilt/' +
host_subpath + '/lib/gcc/' + abi_subpath + '/4.9.x'])
env.Append(LIBPATH=[env["ANDROID_NDK_ROOT"] +
'/toolchains/arm-linux-androideabi-4.9/prebuilt/' + host_subpath + '/' + abi_subpath + '/lib'])
if (env["target"].startswith("release")):
env.Append(LINKFLAGS=['-O2'])
env.Append(CPPFLAGS=['-O2', '-DNDEBUG', '-ffast-math',
'-funsafe-math-optimizations', '-fomit-frame-pointer'])
if (can_vectorize):
env.Append(CPPFLAGS=['-ftree-vectorize'])
if (env["target"] == "release_debug"):
env.Append(CPPFLAGS=['-DDEBUG_ENABLED'])
elif (env["target"] == "debug"):
env.Append(LINKFLAGS=['-O0'])
env.Append(CPPFLAGS=['-O0', '-D_DEBUG', '-UNDEBUG', '-DDEBUG_ENABLED',
'-DDEBUG_MEMORY_ENABLED', '-g', '-fno-limit-debug-info'])
env.Append(CPPFLAGS=['-DANDROID_ENABLED',
'-DUNIX_ENABLED', '-DNO_FCNTL', '-DMPC_FIXED_POINT'])
# TODO: Move that to opus module's config
if("module_opus_enabled" in env and env["module_opus_enabled"] != "no"):
if (env["android_arch"] == "armv6" or env["android_arch"] == "armv7"):
env.Append(CFLAGS=["-DOPUS_ARM_OPT"])
env.opus_fixed_point = "yes"
if (env['android_stl'] == 'yes'):
env.Append(CPPPATH=[env["ANDROID_NDK_ROOT"] +
"/sources/cxx-stl/gnu-libstdc++/4.9/include"])
env.Append(CPPPATH=[env["ANDROID_NDK_ROOT"] +
"/sources/cxx-stl/gnu-libstdc++/4.9/libs/" + arch_subpath + "/include"])
env.Append(LIBPATH=[env["ANDROID_NDK_ROOT"] +
"/sources/cxx-stl/gnu-libstdc++/4.9/libs/" + arch_subpath])
env.Append(LIBS=["gnustl_static"])
else:
env.Append(CXXFLAGS=['-fno-rtti', '-fno-exceptions', '-DNO_SAFE_CAST'])
import methods
env.Append(BUILDERS={'GLSL120': env.Builder(
action=methods.build_legacygl_headers, suffix='glsl.gen.h', src_suffix='.glsl')})
env.Append(BUILDERS={'GLSL': env.Builder(
action=methods.build_glsl_headers, suffix='glsl.gen.h', src_suffix='.glsl')})
env.Append(BUILDERS={'GLSL120GLES': env.Builder(
action=methods.build_gles2_headers, suffix='glsl.gen.h', src_suffix='.glsl')})
env.use_windows_spawn_fix()
| mit | 7,101,317,733,062,213,000 | 35.467925 | 119 | 0.561465 | false |
Endika/connector-accountedge | hr_expense_accountedge/hr_expense_accountedge.py | 1 | 8226 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import base64
from osv import orm, fields
from datetime import datetime
class hr_expense_expense(orm.Model):
_inherit = 'hr.expense.expense'
def _create_csv_report(self, cr, uid, ids, context={}):
res = {}
for id in ids:
this = self.browse(cr, uid, id)
output = this.employee_id.name
output += "\r\n"
output += "Employee\tCard ID\tDate\tVendor Invoice #\tAccount Number\tAmount\tDescription\
\tTax Code\tCurrency Code\tExchange Rate\r\n"
# Comment the previous line and uncomment the next one
# if you want to import taxes with their amount, instead of their code
# \tTax Code\tGST Amount\tPST/QST Amount\tCurrency Code\tExchange Rate\r\n"
for l in this.line_ids:
taxes = self._compute_taxes(cr, uid, l, context)
# output += u"%s\t%s\t%s\t%s\t%s\t%.2f\t%s\t%s\t%.2f\t%.2f\t%s\t%.2f\r\n" % (
output += u"%s\t%s\t%s\t%s\t%s\t%.2f\t%s\t%s\t%s\t%.2f\r\n" % (
this.employee_id.name,
this.employee_id.supplier_id_accountedge,
datetime.today().strftime("%d-%m-%Y"),
l.expense_id.id,
l.account_id.code,
taxes['amount_before_tax'],
l.name,
(l.tax_id.tax_code_accountedge or '000'),
# Comment the previous line and uncomment the next two ones
# if you want to import taxes with their amount, instead of their code
# taxes['amount_gst'],
# taxes['amount_pst'],
(l.expense_id.currency_id.name or 'CAD'),
(float(l.expense_id.currency_id.rate) or '1.0'))
byte_string = output.encode('utf-8-sig')
res[id] = base64.encodestring(byte_string)
self.write(cr, uid, ids, {'csv_file': res[id]}, context=context)
self._add_attachment(cr, uid, id, byte_string, context)
return True
def _compute_taxes(self, cr, uid, expense_line, context={}):
res = {
'amount_before_tax': expense_line.total_amount,
'amount_gst': 0.0, # Goods and Services Tax, federal
'amount_pst': 0.0 # Provincial Sales Tax
}
tax = expense_line.tax_id
if not tax.amount:
return res
        # Divide the tax by two for half-tax accounts
tax_factor = 1.0
if expense_line.account_id.tax_halftax:
tax_factor = 0.5
if tax.child_ids:
for child_tax in tax.child_ids: # TODO: the detection of the two taxes should be more reliable
if 'TPS' in child_tax.name or 'GST' in child_tax.name:
res['amount_gst'] = float(child_tax.amount) * tax_factor
else:
res['amount_pst'] = float(child_tax.amount) * tax_factor
else:
res['amount_gst'] = float(tax.amount)
res['amount_before_tax'] = expense_line.total_amount / (1 + res['amount_gst'] + res['amount_pst'])
res['amount_gst'] = res['amount_before_tax'] * res['amount_gst']
res['amount_pst'] = res['amount_before_tax'] * res['amount_pst']
return res
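    # Worked example (illustrative numbers only, not taken from the module): with
    # GST = 0.05 and QST = 0.09975 on a 114.98 expense line,
    #   amount_before_tax = 114.98 / (1 + 0.05 + 0.09975) ~= 100.00
    #   amount_gst = 100.00 * 0.05 = 5.00
    #   amount_pst = 100.00 * 0.09975 ~= 9.98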
def _add_attachment(self, cr, uid, ids, content, context={}):
file_name = 'export_'+time.strftime('%Y%m%d_%H%M%S')+'.tsv'
self.pool.get('ir.attachment').create(cr, uid, {
'name': file_name,
'datas': base64.encodestring(content),
'datas_fname': file_name,
'res_model': self._name,
'res_id': ids,
},
context=context
)
return True
def action_exported(self, cr, uid, ids, *args):
if not len(ids):
return False
# Employee must have a recordID matching his supplier account
# in Accountedge to generate an expense sheet
for id in ids:
this = self.browse(cr, uid, id)
if not this.employee_id.supplier_id_accountedge:
raise orm.except_orm(
'Accountedge Supplier ID missing',
'Please add the Accountedge supplier ID on the employee before exporting the sheet.'
)
self._create_csv_report(cr, uid, ids, {})
self.write(cr, uid, ids, {'state': 'exported'})
return True
def action_imported(self, cr, uid, ids, *args):
if not len(ids):
return False
for id in ids:
self.write(cr, uid, ids, {'state': 'imported'})
return True
def _get_cur_account_manager(self, cr, uid, ids, field_name, arg, context):
res = {}
for id in ids:
emails = ''
grp_ids = self.pool.get('res.groups').search(
cr, uid, [
('name', '=', u'Manager'),
('category_id.name', '=', u'Accounting & Finance')
]
)
usr_ids = self.pool.get('res.users').search(cr, uid, [('groups_id', '=', grp_ids[0])])
usrs = self.pool.get('res.users').browse(cr, uid, usr_ids)
for user in usrs:
if user.user_email:
emails += user.user_email
emails += ','
else:
empl_id = self.pool.get('hr.employee').search(cr, uid, [('login', '=', user.login)])[0]
empl = self.pool.get('hr.employee').browse(cr, uid, empl_id)
if empl.work_email:
emails += empl.work_email
emails += ','
emails = emails[:-1]
res[id] = emails
return res
_columns = {
'manager': fields.function(_get_cur_account_manager, string='Manager', type='char', size=128, readonly=True),
'state': fields.selection([
('draft', 'New'),
('confirm', 'Waiting Approval'),
('accepted', 'Approved'),
('exported', 'Exported'),
('imported', 'Imported'),
('cancelled', 'Refused'), ],
'State', readonly=True,
help="When the expense request is created the state is 'Draft'.\n"
"It is confirmed by the user and request is sent to admin, the state is 'Waiting Confirmation'.\n"
"If the admin accepts it, the state is 'Accepted'.\n"
"If the admin refuses it, the state is 'Refused'.\n"
"If a csv file has been generated for the expense request, the state is 'Exported'.\n"
"If the expense request has been imported in AccountEdge, the state is 'Imported'."
),
}
class hr_expense_line(orm.Model):
_inherit = 'hr.expense.line'
def _get_parent_state(self, cr, uid, ids, field_name, arg, context):
res = {}
for id in ids:
expense_line = self.pool.get('hr.expense.line').browse(cr, uid, id)
res[id] = expense_line.expense_id.state
return res
_columns = {
'state': fields.function(_get_parent_state, string='Expense State', type='char', size=128, readonly=True),
}
| agpl-3.0 | -4,562,259,662,920,168,000 | 40.336683 | 117 | 0.531972 | false |
JakeWharton/mkvdts2ac3 | test.py | 1 | 1757 | #!/usr/bin/env python
import sys
if sys.version_info < (2, 3):
raise RuntimeError('Python 2.3+ is required.')
import logging
import os
import shutil
import subprocess
import unittest
import tests
TEST_FILE_NAME = 'test.mkv'
BASE_PATH = os.path.dirname(__file__)
WORK_PATH = os.path.join(BASE_PATH, 'work')
TEST_FILE = os.path.join(BASE_PATH, TEST_FILE_NAME)
def main():
if os.path.exists(WORK_PATH):
shutil.rmtree(WORK_PATH)
os.mkdir(WORK_PATH)
if not os.path.exists(TEST_FILE):
raise ValueError('Could not locate test file.')
unittest.TextTestRunner(verbosity=2).run(unittest.TestSuite([
unittest.defaultTestLoader.loadTestsFromModule(tests)
]))
shutil.rmtree(WORK_PATH)
if __name__ == '__main__':
main()
class Base(unittest.TestCase):
def setUp(self):
self.work_path = os.path.join(WORK_PATH, self.__class__.__name__)
self.test_file = os.path.join(self.work_path, TEST_FILE_NAME)
if os.path.exists(self.work_path):
raise ValueError('Work path "%s" already exists.' % self.work_path)
os.mkdir(self.work_path)
shutil.copyfile(TEST_FILE, self.test_file)
def test_file_exists(self):
self.assertTrue(os.path.exists(self.test_file))
def test_file_valid(self):
output = subprocess.Popen(['mkvmerge', '-i', TEST_FILE_NAME], cwd=self.work_path, stdout=subprocess.PIPE).communicate()[0]
output = output.replace('\r', '').strip()
self.assertEquals(output, '''File 'test.mkv': container: Matroska\nTrack ID 1: video (V_MPEG4/ISO/AVC)\nTrack ID 2: audio (A_DTS)\nTrack ID 3: subtitles (S_TEXT/UTF8)\nTrack ID 4: subtitles (S_TEXT/UTF8)''')
def tearDown(self):
shutil.rmtree(self.work_path)
| apache-2.0 | -8,740,686,622,334,408,000 | 28.779661 | 215 | 0.659078 | false |
srcole/qwm | usa_map/map_util.py | 1 | 4006 | """
map_util.py
Visualizing US state data on a geographical colormap
"""
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap as Basemap
from matplotlib.colors import rgb2hex
from matplotlib.patches import Polygon
def usa_state_colormap(state_dict, title='', colorbar_title=''):
"""
Plot data as a function of US state onto a geographical colormap
Parameters
----------
state_dict : dict
Keys are states, and values are the feature value to be converted to color
title : str
Title of plot
colorbar_title : str
Colorbar axis label
Code adapted from:
https://stackoverflow.com/questions/39742305/how-to-use-basemap-python-to-plot-us-with-50-states
    Required shape files (st99_d00...) acquired from:
https://github.com/matplotlib/basemap/tree/master/examples
"""
# Lambert Conformal map of lower 48 states.
plt.figure(figsize=(10,8))
m = Basemap(llcrnrlon=-119,llcrnrlat=22,urcrnrlon=-64,urcrnrlat=49,
projection='lcc',lat_1=33,lat_2=45,lon_0=-95)
# draw state boundaries.
# data from U.S Census Bureau
# http://www.census.gov/geo/www/cob/st2000.html
shp_info = m.readshapefile('st99_d00','states',drawbounds=True)
    # choose a color for each state based on its value in state_dict.
    colors = {}
    statenames = []
    cmap = plt.cm.viridis # colormap used to map state values to fill colors
vmin = np.min(list(state_dict.values()))
vmax = np.max(list(state_dict.values()))
for shapedict in m.states_info:
statename = shapedict['NAME']
# skip DC and Puerto Rico.
if statename not in ['District of Columbia','Puerto Rico']:
pop = state_dict[statename]
            # calling the colormap with a value between 0 and 1 returns an
            # rgba value; the state's value is normalized linearly between
            # vmin and vmax, and the alpha channel is dropped.
colors[statename] = cmap((pop-vmin)/(vmax-vmin))[:3]
statenames.append(statename)
# cycle through state names, color each one.
ax = plt.gca() # get current axes instance
for nshape,seg in enumerate(m.states):
# skip DC and Puerto Rico.
if statenames[nshape] not in ['Puerto Rico', 'District of Columbia']:
# Offset Alaska and Hawaii to the lower-left corner.
if statenames[nshape] == 'Alaska':
                # Alaska is too big. Scale it down to 30% first, then translate it.
seg = list(map(alaska_transform, seg))
if statenames[nshape] == 'Hawaii':
seg = list(map(hawaii_transform, seg))
color = rgb2hex(colors[statenames[nshape]])
poly = Polygon(seg,facecolor=color,edgecolor=color)
ax.add_patch(poly)
plt.title(title, size=15)
# Make colorbar
# Make a figure and axes with dimensions as desired.
fig = plt.figure(figsize=(8.5, 1))
ax1 = fig.add_axes([0.05, 0.4, 0.9, 0.15])
# Set the colormap and norm to correspond to the data for which
# the colorbar will be used.
cmap = mpl.cm.viridis
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
# ColorbarBase derives from ScalarMappable and puts a colorbar
# in a specified axes, so it has everything needed for a
# standalone colorbar. There are many more kwargs, but the
# following gives a basic continuous colorbar with ticks
# and labels.
cb1 = mpl.colorbar.ColorbarBase(ax1, cmap=cmap,
norm=norm,
orientation='horizontal')
cb1.set_label(colorbar_title, size=15)
return ax
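# Usage sketch (illustrative values, not part of the original module):
#   state_values = {'California': 39.5, 'Texas': 29.0, 'New York': 19.5, ...}
#   usa_state_colormap(state_values, title='Population (millions)',
#                      colorbar_title='millions of people')
#   plt.show()
# Note that state_dict is expected to contain an entry for every state drawn by
# the shapefile (all states except DC and Puerto Rico are looked up).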
def alaska_transform(xy):
"""Transform Alaska's geographical placement so fits on US map"""
x, y = xy
return (0.3*x + 1000000, 0.3*y-1100000)
def hawaii_transform(xy):
"""Transform Hawaii's geographical placement so fits on US map"""
x, y = xy
return (x + 5250000, y-1400000)
| mit | -7,728,630,059,513,854,000 | 36.439252 | 100 | 0.638293 | false |
protwis/protwis | signprot/migrations/0008_auto_20200829_1739.py | 1 | 3041 | # Generated by Django 3.0.3 on 2020-08-29 15:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('common', '0003_citation_page_name'),
('structure', '0028_auto_20200829_1704'),
('protein', '0009_auto_20200511_1818'),
('signprot', '0007_auto_20190711_1811'),
]
operations = [
migrations.RemoveField(
model_name='signprotstructure',
name='PDB_code',
),
migrations.AddField(
model_name='signprotstructure',
name='pdb_code',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='common.WebLink'),
preserve_default=False,
),
migrations.AddField(
model_name='signprotstructure',
name='publication',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='common.Publication'),
),
migrations.AddField(
model_name='signprotstructure',
name='publication_date',
field=models.DateField(default=None),
preserve_default=False,
),
migrations.AddField(
model_name='signprotstructure',
name='stabilizing_agents',
field=models.ManyToManyField(to='structure.StructureStabilizingAgent'),
),
migrations.AddField(
model_name='signprotstructure',
name='structure_type',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='structure.StructureType'),
preserve_default=False,
),
migrations.CreateModel(
name='SignprotStructureExtraProteins',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('display_name', models.CharField(max_length=20)),
('note', models.CharField(max_length=50, null=True)),
('chain', models.CharField(max_length=1)),
('category', models.CharField(max_length=20)),
('wt_coverage', models.IntegerField(null=True)),
('protein_conformation', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='protein.ProteinConformation')),
('structure', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='signprot.SignprotStructure')),
('wt_protein', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='protein.Protein')),
],
options={
'db_table': 'signprot_extra_proteins',
},
),
migrations.AddField(
model_name='signprotstructure',
name='extra_proteins',
field=models.ManyToManyField(related_name='extra_proteins', to='signprot.SignprotStructureExtraProteins'),
),
]
| apache-2.0 | 4,604,085,215,076,593,000 | 41.830986 | 150 | 0.599474 | false |
Fizzadar/pyinfra | pyinfra/api/connectors/winrm.py | 1 | 7851 | from __future__ import print_function, unicode_literals
import base64
import ntpath
import click
from pyinfra import logger
from pyinfra.api import Config
from pyinfra.api.exceptions import ConnectError, PyinfraError
from pyinfra.api.util import get_file_io, memoize, sha1_hash
from .pyinfrawinrmsession import PyinfraWinrmSession
from .util import make_win_command
def _raise_connect_error(host, message, data):
message = '{0} ({1})'.format(message, data)
raise ConnectError(message)
@memoize
def show_warning():
logger.warning('The @winrm connector is alpha!')
def _make_winrm_kwargs(state, host):
kwargs = {
}
for key, value in (
('username', host.data.winrm_user),
('password', host.data.winrm_password),
('winrm_port', int(host.data.winrm_port or 0)),
('winrm_transport', host.data.winrm_transport or 'plaintext'),
('winrm_read_timeout_sec', host.data.winrm_read_timeout_sec or 30),
('winrm_operation_timeout_sec', host.data.winrm_operation_timeout_sec or 20),
):
if value:
kwargs[key] = value
# FUTURE: add more auth
# pywinrm supports: basic, certificate, ntlm, kerberos, plaintext, ssl, credssp
# see https://github.com/diyan/pywinrm/blob/master/winrm/__init__.py#L12
return kwargs
def make_names_data(hostname):
show_warning()
yield '@winrm/{0}'.format(hostname), {'winrm_hostname': hostname}, []
def connect(state, host):
'''
Connect to a single host. Returns the winrm Session if successful.
'''
kwargs = _make_winrm_kwargs(state, host)
logger.debug('Connecting to: %s (%s)', host.name, kwargs)
# Hostname can be provided via winrm config (alias), data, or the hosts name
hostname = kwargs.pop(
'hostname',
host.data.winrm_hostname or host.name,
)
try:
# Create new session
host_and_port = '{}:{}'.format(hostname, host.data.winrm_port)
logger.debug('host_and_port: %s', host_and_port)
session = PyinfraWinrmSession(
host_and_port,
auth=(
kwargs['username'],
kwargs['password'],
),
transport=kwargs['winrm_transport'],
read_timeout_sec=kwargs['winrm_read_timeout_sec'],
operation_timeout_sec=kwargs['winrm_operation_timeout_sec'],
)
return session
# TODO: add exceptions here
except Exception as e:
auth_kwargs = {}
for key, value in kwargs.items():
if key in ('username', 'password'):
auth_kwargs[key] = value
auth_args = ', '.join(
'{0}={1}'.format(key, value)
for key, value in auth_kwargs.items()
)
logger.debug('%s', e)
_raise_connect_error(host, 'Authentication error', auth_args)
def run_shell_command(
state, host, command,
env=None,
success_exit_codes=None,
print_output=False,
print_input=False,
return_combined_output=False,
shell_executable=Config.SHELL,
**ignored_command_kwargs
):
'''
Execute a command on the specified host.
Args:
state (``pyinfra.api.State`` obj): state object for this command
        host (``pyinfra.api.Host`` obj): the target host
command (string): actual command to execute
success_exit_codes (list): all values in the list that will return success
print_output (boolean): print the output
        print_input (boolean): print the input
return_combined_output (boolean): combine the stdout and stderr lists
shell_executable (string): shell to use - 'cmd'=cmd, 'ps'=powershell(default)
env (dict): environment variables to set
Returns:
tuple: (exit_code, stdout, stderr)
stdout and stderr are both lists of strings from each buffer.
'''
command = make_win_command(command)
logger.debug('Running command on %s: %s', host.name, command)
if print_input:
click.echo('{0}>>> {1}'.format(host.print_prefix, command), err=True)
# get rid of leading/trailing quote
tmp_command = command.strip("'")
if print_output:
click.echo(
'{0}>>> {1}'.format(host.print_prefix, command),
err=True,
)
if not shell_executable:
shell_executable = 'ps'
logger.debug('shell_executable:%s', shell_executable)
# we use our own subclassed session that allows for env setting from open_shell.
if shell_executable in ['cmd']:
response = host.connection.run_cmd(tmp_command, env=env)
else:
response = host.connection.run_ps(tmp_command, env=env)
return_code = response.status_code
logger.debug('response:%s', response)
std_out_str = response.std_out.decode('utf-8')
std_err_str = response.std_err.decode('utf-8')
# split on '\r\n' (windows newlines)
std_out = std_out_str.split('\r\n')
std_err = std_err_str.split('\r\n')
logger.debug('std_out:%s', std_out)
logger.debug('std_err:%s', std_err)
if print_output:
click.echo(
'{0}>>> {1}'.format(host.print_prefix, '\n'.join(std_out)),
err=True,
)
if success_exit_codes:
status = return_code in success_exit_codes
else:
status = return_code == 0
logger.debug('Command exit status: %s', status)
if return_combined_output:
std_out = [('stdout', line) for line in std_out]
std_err = [('stderr', line) for line in std_err]
return status, std_out + std_err
return status, std_out, std_err
def get_file(
state, host, remote_filename, filename_or_io,
**command_kwargs
):
raise PyinfraError('Not implemented')
def _put_file(state, host, filename_or_io, remote_location, chunk_size=2048):
# this should work fine on smallish files, but there will be perf issues
# on larger files both due to the full read, the base64 encoding, and
# the latency when sending chunks
with get_file_io(filename_or_io) as file_io:
data = file_io.read()
for i in range(0, len(data), chunk_size):
chunk = data[i:i + chunk_size]
ps = (
'$data = [System.Convert]::FromBase64String("{0}"); '
'{1} -Value $data -Encoding byte -Path "{2}"'
).format(
base64.b64encode(chunk).decode('utf-8'),
'Set-Content' if i == 0 else 'Add-Content',
remote_location)
status, _stdout, stderr = run_shell_command(state, host, ps)
if status is False:
logger.error('File upload error: {0}'.format('\n'.join(stderr)))
return False
return True
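# Rough sizing note (illustrative arithmetic, not from the original source): with
# the default chunk_size of 2048 bytes, each WinRM round trip carries ~2.7 KB of
# base64 text, so a 1 MB file needs ~512 calls - hence the comment above about
# latency on larger files.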
def put_file(
state, host, filename_or_io, remote_filename,
print_output=False, print_input=False,
**command_kwargs
):
'''
Upload file by chunking and sending base64 encoded via winrm
'''
# Always use temp file here in case of failure
temp_file = ntpath.join(
host.fact.windows_temp_dir(),
'pyinfra-{0}'.format(sha1_hash(remote_filename)),
)
if not _put_file(state, host, filename_or_io, temp_file):
return False
# Execute run_shell_command w/sudo and/or su_user
command = 'Move-Item -Path {0} -Destination {1} -Force'.format(temp_file, remote_filename)
status, _, stderr = run_shell_command(
state, host, command,
print_output=print_output,
print_input=print_input,
**command_kwargs
)
if status is False:
logger.error('File upload error: {0}'.format('\n'.join(stderr)))
return False
if print_output:
click.echo(
'{0}file uploaded: {1}'.format(host.print_prefix, remote_filename),
err=True,
)
return True
EXECUTION_CONNECTOR = True
| mit | -1,413,492,209,220,742,400 | 28.851711 | 94 | 0.61075 | false |
silmeth/Simple-Library-Manager | server/slm_db_interface/views.py | 1 | 10693 | from django.shortcuts import render
from django.http import HttpResponse
from django.db import transaction, connection
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth import authenticate, login
from slm_db_interface.models import Book, Author, Publisher, Borrower, SLMUser
from bisect import bisect_left # to do binary search on sorted lists
import re
import json
def create_json_from_books(books, additional_dic=None):
books_list = []
for i, book in enumerate(books):
book_dict = {
'title': book.title,
'author': book.author.name,
'author_id': book.author.id,
'isbn10': book.isbn10,
'isbn13': book.isbn13,
'publisher': book.publisher.name,
'publisher_id': book.publisher.id,
'pub_date': book.published_year,
'book_id': book.id
}
if additional_dic is not None: # add additional non-standard fields
for key in additional_dic[i]:
book_dict[key] = additional_dic[i][key]
books_list.append(book_dict)
return json.JSONEncoder(indent=2, ensure_ascii=False).encode(books_list)
def create_book_from_json(json_obj):
book = None
book_author = None
book_publisher = None
with transaction.atomic():
json_decoded = json.JSONDecoder().decode(json_obj)
if 'author' in json_decoded and json_decoded['author'] is None:
return None
elif 'author_new' in json_decoded and json_decoded['author_new']:
book_author = Author(name=json_decoded['author_name'])
book_author.save()
elif 'author_id' in json_decoded:
book_author = Author.objects.get(id=json_decoded['author_id'])
if 'publisher' in json_decoded and json_decoded['publisher'] is None:
return None
elif 'publisher_new' in json_decoded and json_decoded['publisher_new']:
book_publisher = Publisher(name=json_decoded['publisher_name'])
book_publisher.save()
elif 'publisher_id' in json_decoded:
book_publisher = Publisher.objects.get(id=json_decoded['publisher_id'])
if 'title' not in json_decoded:
return None
book = Book(title=json_decoded['title'], author=book_author, publisher=book_publisher,
borrower=None, borrow_date=None, return_date=None)
if 'isbn10' in json_decoded:
book.isbn10 = json_decoded['isbn10']
if 'isbn13' in json_decoded:
book.isbn13 = json_decoded['isbn13']
if 'pub_date' in json_decoded:
book.published_year = json_decoded['pub_date']
book.save()
return book
def create_3grams(s):
assert(type(s) is str)
list_3grams = []
for pos in range(len(s)-2):
list_3grams.append(s[pos:pos+3])
list_3grams.sort()
return list_3grams
def compare_3grams(first, second): # Jaccard's similarity
assert(type(first) is list and type(second) is list)
intersect = 0
len1 = len(first)
len2 = len(second)
for val in first: # find number of elements in the intersection of two lists of 3-grams
pos = bisect_left(second, val, 0, len2)
if pos != len2 and second[pos] == val:
intersect += 1
return float(intersect)/(len1+len2-intersect)
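# Worked example (illustrative, not part of the original views): 'harry potter'
# yields 10 sorted 3-grams and 'hary potter' yields 9; they share 8, so the
# Jaccard similarity is 8 / (10 + 9 - 8) ~= 0.73, well above the 0.21 threshold
# used by search() and search_authors() below.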
def get_books_by_isbn(request, isbn): # no login required ATM, may change
sisbn = str(isbn)
results = None
if len(sisbn) == 10:
results = Book.objects.filter(isbn10=sisbn)
elif len(sisbn) == 13:
results = Book.objects.filter(isbn13=sisbn)
return HttpResponse(content=create_json_from_books(results), content_type='application/json; charset=utf-8')
def search(request, attr, attr_val): # no login required ATM, may change
regexp_whitespace = re.compile('\s+')
regexp_punctuation = re.compile('[^\w\s]+')
attr_val = regexp_whitespace.sub(' ', attr_val.lower())
attr_val = regexp_punctuation.sub('', attr_val)
query_3grams = create_3grams(attr_val)
results = []
similarities = []
for book in Book.objects.all():
if attr == 'title':
book_attr_val = book.title.lower()
elif attr == 'author':
book_attr_val = book.author.name.lower()
else:
return HttpResponse(content='cannot search by this attribute', status=404)
book_attr_val = regexp_whitespace.sub(' ', book_attr_val)
book_attr_val = regexp_punctuation.sub('', book_attr_val)
book_3grams = create_3grams(book_attr_val)
similarity = compare_3grams(query_3grams, book_3grams)
if similarity > 0.21:
pos = bisect_left(similarities, similarity, 0, len(similarities))
results.insert(pos, book)
similarities.insert(pos, similarity)
sim_dic_list = []
for sim in similarities:
sim_dic_list.append({'similarity': sim})
return HttpResponse(content=create_json_from_books(results[::-1], sim_dic_list[::-1]),
content_type='application/json; charset=utf-8')
def search_authors(request, name):
if request.user.is_authenticated():
regexp_whitespace = re.compile('\s+')
regexp_punctuation = re.compile('[^\w\s]+')
name = regexp_whitespace.sub(' ', name.lower())
name = regexp_punctuation.sub('', name)
query_3grams = create_3grams(name)
results = []
similarities = []
for author in Author.objects.all():
result = author.name.lower()
result = regexp_whitespace.sub(' ', result)
result = regexp_punctuation.sub('', result)
result_3grams = create_3grams(result)
similarity = compare_3grams(query_3grams, result_3grams)
if similarity > 0.21:
pos = bisect_left(similarities, similarity, 0, len(similarities))
results.insert(pos, author)
similarities.insert(pos, similarity)
results = results[::-1]
similarities = similarities[::-1]
json_results_list = []
for i, res in enumerate(results):
json_results_list.append({'name': res.name, 'author_id': res.id, 'similarity': similarities[i]})
json_results = json.JSONEncoder(indent=2, ensure_ascii=False).encode(json_results_list)
return HttpResponse(content=json_results,
content_type='application/json; charset=utf-8')
else:
return HttpResponse(content='error: not authenticated', content_type='text/plain') # TODO change to error dict
def search_publishers(request, name):
if request.user.is_authenticated():
regexp_whitespace = re.compile('\s+')
regexp_punctuation = re.compile('[^\w\s]+')
name = regexp_whitespace.sub(' ', name.lower())
name = regexp_punctuation.sub('', name)
query_3grams = create_3grams(name)
results = []
similarities = []
for publisher in Publisher.objects.all():
result = publisher.name.lower()
result = regexp_whitespace.sub(' ', result)
result = regexp_punctuation.sub('', result)
result_3grams = create_3grams(result)
similarity = compare_3grams(query_3grams, result_3grams)
# if similarity > 0.21: # listing all publishers makes more sense
pos = bisect_left(similarities, similarity, 0, len(similarities))
results.insert(pos, publisher)
similarities.insert(pos, similarity)
results = results[::-1]
similarities = similarities[::-1]
json_results_list = []
for i, res in enumerate(results):
json_results_list.append({'name': res.name, 'publisher_id': res.id, 'similarity': similarities[i]})
json_results = json.JSONEncoder(indent=2, ensure_ascii=False).encode(json_results_list)
return HttpResponse(content=json_results,
content_type='application/json; charset=utf-8')
else:
return HttpResponse(content='error: not authenticated', content_type='text/plain')
@csrf_exempt
def add_book(request):
if request.user.is_authenticated():
if request.user.slm_user.can_manage_books:
# book data comes in json through a POST request
if request.method == 'POST':
try:
print(request.body.decode('utf8'))
book = create_book_from_json(request.body.decode('utf8'))
return HttpResponse(content=create_json_from_books([book]),
content_type='application/json; charset=utf-8')
except ValueError as err: # TODO change to error dict
return HttpResponse(
content='error: request not a valid json\n' + str(err),
content_type='text/plain'
)
else:
return HttpResponse(content='error: something went wrong', content_type='text/plain')
else:
return HttpResponse(content='error: lack of manage book permission')
else:
return HttpResponse(content='error: not authenticated', content_type='text/plain')
@csrf_exempt
def log_user_in(request):
if request.method == 'POST':
try:
credentials = json.JSONDecoder().decode(request.body.decode('utf8'))
user = authenticate(username=credentials['username'], password=credentials['password'])
if user is not None:
if user.is_active:
login(request, user)
resp_json = {'logged_in': True,
'username': str(user)}
if user.slm_user.can_manage_books:
resp_json['can_manage_books'] = True
if user.slm_user.can_lend:
resp_json['can_lend'] = True
if user.slm_user.can_borrow:
resp_json['can_borrow'] = True
resp = json.JSONEncoder(indent=2, ensure_ascii=False).encode(resp_json)
return HttpResponse(content=resp, content_type='application/json; charset=utf-8')
else: # TODO change to error dict
return HttpResponse(content='error: user inactive', content_type='text/plain')
else:
return HttpResponse(content='error: wrong credentials', content_type='text/plain')
except ValueError:
return HttpResponse(content='error: request not a valid json', content_type='text/plain')
| lgpl-3.0 | 4,883,945,105,540,650,000 | 38.899254 | 119 | 0.600673 | false |
shanot/imp | modules/core/test/test_transform_particles.py | 2 | 1094 | from __future__ import print_function
import IMP
import IMP.core
import IMP.algebra
import IMP.test
class Tests(IMP.test.TestCase):
"""Test particle transformations"""
def test_transformation(self):
"""Test the TransformationFunction class"""
imp_model = IMP.Model()
particles = IMP.core.create_xyzr_particles(imp_model, 4, 1.0)
coords = [x.get_coordinates() for x in particles]
r = IMP.algebra.get_rotation_from_fixed_xyz(0.2, 0.8, -0.4)
t = IMP.algebra.Transformation3D(
r, IMP.algebra.Vector3D(20.0, -12.4, 18.6))
print("create transform")
tf = IMP.core.Transform(t)
tf.set_was_used(True)
for p in particles:
print("applying to " + str(p))
r = tf.apply_index(imp_model, p)
for i in range(0, len(particles)):
v = particles[i].get_coordinates()
self.assertAlmostEqual(
(v - t.get_transformed(coords[i])).get_magnitude(),
0,
delta=0.01)
if __name__ == '__main__':
IMP.test.main()
| gpl-3.0 | 4,263,365,101,596,354,600 | 30.257143 | 69 | 0.575868 | false |
davidjrichardson/uwcs-zarya | newsletter/tasks.py | 1 | 2020 | import gc
from celery.decorators import task
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
from django.urls import reverse
from blog.models import Sponsor
from .models import Subscription, Mail
def mail_newsletter(recipients, mail):
email_context = {
'title': mail.subject,
'message': mail.text,
'base_url': settings.EMAIL_ABS_URL,
'sponsors': Sponsor.objects.all(),
}
email_html = render_to_string('newsletter/email_newsletter.html', email_context)
email_plaintext = render_to_string('newsletter/email_newsletter.txt', email_context)
to = [x.email for x in recipients]
# Create a map of emails to unsub tokens for the email merge
unsub_tokens = {recipient.email: {
'unsub_url': '{hostname}{path}'.format(hostname=settings.EMAIL_ABS_URL,
path=reverse('unsub_with_id', kwargs={
'token': recipient.unsubscribe_token
}))} for recipient in recipients}
sender = '{name} <{email}>'.format(name=mail.sender_name, email=mail.sender_email)
email = EmailMultiAlternatives(mail.subject, email_plaintext, sender, to)
email.attach_alternative(email_html, 'text/html')
email.merge_data = unsub_tokens
email.merge_global_data = {
'subject': mail.subject
}
email.template_id = '615bcf44-fdfd-4632-8403-38987eb9074b'
email.send()
# Create a function called "chunks" with two arguments, l and n:
def chunks(l, n):
# For item i in a range that is a length of l,
for i in range(0, len(l), n):
# Create an index range for l of n items:
yield l[i:i+n]
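# For illustration: chunks([1, 2, 3, 4, 5], 2) yields [1, 2], [3, 4] and [5],
# so send_newsletter below mails a 250-address list as three batches of at most 100.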
@task(name='send_newsletter')
def send_newsletter(mail_id):
subscriptions = chunks(Subscription.objects.all(), 100)
for chunk in subscriptions:
mail_newsletter(chunk, Mail.objects.get(id=mail_id))
| mit | -830,727,445,206,895,400 | 37.113208 | 88 | 0.64703 | false |
biblepay/biblepay | test/functional/llmq-simplepose.py | 1 | 5396 | #!/usr/bin/env python3
# Copyright (c) 2015-2020 The Däsh Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import time
from test_framework.test_framework import BiblePayTestFramework
from test_framework.util import *
'''
llmq-simplepose.py
Checks simple PoSe system based on LLMQ commitments
'''
class LLMQSimplePoSeTest(BiblePayTestFramework):
def set_test_params(self):
self.set_biblepay_test_params(6, 5, fast_dip3_enforcement=True)
self.set_biblepay_llmq_test_params(5, 3)
def run_test(self):
self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
self.wait_for_sporks_same()
# check if mining quorums with all nodes being online succeeds without punishment/banning
self.test_no_banning()
# Now lets isolate MNs one by one and verify that punishment/banning happens
def isolate_mn(mn):
mn.node.setnetworkactive(False)
wait_until(lambda: mn.node.getconnectioncount() == 0)
self.test_banning(isolate_mn, True)
self.repair_masternodes(False)
self.nodes[0].spork("SPORK_21_QUORUM_ALL_CONNECTED", 0)
self.wait_for_sporks_same()
self.reset_probe_timeouts()
# Make sure no banning happens with spork21 enabled
self.test_no_banning(expected_connections=4)
# Lets restart masternodes with closed ports and verify that they get banned even though they are connected to other MNs (via outbound connections)
def close_mn_port(mn):
self.stop_node(mn.node.index)
self.start_masternode(mn, ["-listen=0"])
connect_nodes(mn.node, 0)
# Make sure the to-be-banned node is still connected well via outbound connections
for mn2 in self.mninfo:
if mn2 is not mn:
connect_nodes(mn.node, mn2.node.index)
self.reset_probe_timeouts()
self.test_banning(close_mn_port, False)
self.repair_masternodes(True)
self.reset_probe_timeouts()
def force_old_mn_proto(mn):
self.stop_node(mn.node.index)
self.start_masternode(mn, ["-pushversion=70216"])
connect_nodes(mn.node, 0)
self.reset_probe_timeouts()
self.test_banning(force_old_mn_proto, False)
def test_no_banning(self, expected_connections=1):
for i in range(3):
self.mine_quorum(expected_connections=expected_connections)
for mn in self.mninfo:
assert(not self.check_punished(mn) and not self.check_banned(mn))
def test_banning(self, invalidate_proc, expect_contribution_to_fail):
online_mninfos = self.mninfo.copy()
for i in range(2):
mn = online_mninfos[len(online_mninfos) - 1]
online_mninfos.remove(mn)
invalidate_proc(mn)
t = time.time()
while (not self.check_punished(mn) or not self.check_banned(mn)) and (time.time() - t) < 120:
expected_contributors = len(online_mninfos) + 1
if expect_contribution_to_fail:
expected_contributors -= 1
# Make sure we do fresh probes
self.bump_mocktime(60 * 60)
self.mine_quorum(expected_connections=1, expected_members=len(online_mninfos), expected_contributions=expected_contributors, expected_complaints=expected_contributors-1, expected_commitments=expected_contributors, mninfos=online_mninfos)
assert(self.check_punished(mn) and self.check_banned(mn))
def repair_masternodes(self, restart):
# Repair all nodes
for mn in self.mninfo:
if self.check_banned(mn) or self.check_punished(mn):
addr = self.nodes[0].getnewaddress()
self.nodes[0].sendtoaddress(addr, 0.1)
self.nodes[0].protx('update_service', mn.proTxHash, '127.0.0.1:%d' % p2p_port(mn.node.index), mn.keyOperator, "", addr)
self.nodes[0].generate(1)
assert(not self.check_banned(mn))
if restart:
self.stop_node(mn.node.index)
self.start_masternode(mn)
else:
mn.node.setnetworkactive(True)
connect_nodes(mn.node, 0)
self.sync_all()
# Isolate and re-connect all MNs (otherwise there might be open connections with no MNAUTH for MNs which were banned before)
for mn in self.mninfo:
mn.node.setnetworkactive(False)
wait_until(lambda: mn.node.getconnectioncount() == 0)
mn.node.setnetworkactive(True)
force_finish_mnsync(mn.node)
connect_nodes(mn.node, 0)
def reset_probe_timeouts(self):
# Make sure all masternodes will reconnect/re-probe
self.bump_mocktime(60 * 60 + 1)
self.sync_all()
def check_punished(self, mn):
info = self.nodes[0].protx('info', mn.proTxHash)
if info['state']['PoSePenalty'] > 0:
return True
return False
def check_banned(self, mn):
info = self.nodes[0].protx('info', mn.proTxHash)
if info['state']['PoSeBanHeight'] != -1:
return True
return False
if __name__ == '__main__':
LLMQSimplePoSeTest().main()
| mit | -2,549,316,573,065,459,700 | 38.379562 | 253 | 0.619833 | false |
nasfarley88/pyhabitica | tests/test_task.py | 1 | 1430 | # This is a user supplied file with user credentials
import config
import habiticaapi
import unittest2
import logger
import copy
class TestTask(unittest2.TestCase):
def setUp(self):
"""Setup character."""
self.character = habiticaapi.Character(
config.USERID,
config.APIKEY
)
# TODO set this up to create a task for testing on
self.task = self.character.get_all_tasks()[0]
self.habit = self.character.get_specific_tasks(
type__contains = "habit"
)[0]
# This *must* be an independent copy otherwise when it is changed, and
# I push it back, it will be the changed character
self.character_to_restore = copy.deepcopy(self.character)
def tearDown(self):
"""Tear down character."""
self.character_to_restore.push()
def test_task___init___return_None(self):
assert self.task
def test_pull_return_None(self):
task = self.task.pull()
def test_push_return_None(self):
task = self.task.push()
def test__task_direction_return_None(self):
task_up = self.habit._task_direction("up")
task_down = self.habit._task_direction("down")
def test_up_return_None(self):
task = self.habit.up()
def test_down_return_None(self):
task = self.habit.down()
# TODO set up a test for delete once I have a working create
| cc0-1.0 | -2,569,176,016,536,914,000 | 28.183673 | 78 | 0.625175 | false |
ianmiell/shutit-distro | screen/screen.py | 1 | 1148 | """ShutIt module. See http://shutit.tk
"""
from shutit_module import ShutItModule
class screen(ShutItModule):
def build(self, shutit):
shutit.send('mkdir -p /tmp/build/screen')
shutit.send('cd /tmp/build/screen')
shutit.send('wget -qO- http://ftp.gnu.org/gnu/screen/screen-4.2.1.tar.gz | tar -zxf -')
shutit.send('cd screen*')
shutit.send('./configure --prefix=/usr --infodir=/usr/share/info --mandir=/usr/share/man --with-socket-dir=/run/screen --with-pty-group=5 --with-sys-screenrc=/etc/screenrc')
shutit.send('sed -i -e "s%/usr/local/etc/screenrc%/etc/screenrc%" {etc,doc}/*')
shutit.send('make')
shutit.send('make install')
shutit.send('install -m 644 etc/etcscreenrc /etc/screenrc')
return True
#def get_config(self, shutit):
# shutit.get_config(self.module_id,'item','default')
# return True
def finalize(self, shutit):
shutit.send('rm -rf /tmp/build/screen')
return True
#def remove(self, shutit):
# return True
#def test(self, shutit):
# return True
def module():
return screen(
'shutit.tk.sd.screen.screen', 158844782.0237,
description='',
maintainer='',
depends=['shutit.tk.setup']
)
| gpl-2.0 | 4,438,800,849,325,293,000 | 26.333333 | 175 | 0.682927 | false |
PROGRAM-IX/vectorwars | vw_game_engine.py | 1 | 10587 | import pygame
from pygame.locals import *
from pystroke.hud import *
from pystroke.game_engine import GameEngine
from pystroke.vector2 import Vector2
from pystroke.vex import Vex
from pystroke.input_engine import InputEngine
from pystroke.event_engine import EventEngine
from pystroke.draw_engine import DrawEngine
from vw_beh_engine import VWBehaviourEngine
from enemy import gen
from bullet import BulletD, BulletP
from player import Player
from random import randint
class VWGameEngine(GameEngine):
def __init__(self, screen, event_e):
GameEngine.__init__(self, screen, event_e)
self.beh_e = VWBehaviourEngine()
self.FPS = 60
self.player = Player(400, 300, pygame.Color(0, 255, 0),
[Vector2(0, -5), Vector2(-15, -20),
Vector2(-10, 10), Vector2(0, 20), Vector2(10, 10),
Vector2(15, -20), Vector2(0, -5)],
1)
self.combo_ticks = self.FPS*3
self.combo_timer = 0
self.combo_num = 0
self.enemies = []
self.bullets = []
self.score = 0
self.high_score = 0
self.rep_interval = self.FPS * 10 / 3
#self.rep_interval = self.FPS/10
self.rep_count = 1
self.shoot_interval = self.FPS/10
self.shoot_count = 0
self.player_speed = 5
def spawn(self, num):
for i in xrange(num):
x = randint(100, 700)
y = randint(100, 500)
self.enemies.append(gen(x, y))
def reset_game(self):
del self.enemies
self.enemies = []
del self.bullets
self.bullets = []
self.shoot_count = 0
self.combo_timer = 0
self.combo_num = 0
combo = self._hud.get("Combo")
combo.visible = False
if combo is not None:
combo.text = "combo "+str(self.combo_num)
def set_end_screen(self, visible):
self._hud.get("GameOver1").visible = visible
self._hud.get("GameOver2").visible = visible
self._hud.get("GameOver3").visible = visible
def populate(self):
self.spawn(4)
def game_over(self):
self.set_end_screen(True)
self.reset_game()
self.reset_score()
def combo_tick(self):
if self.combo_timer > 0:
self.combo_timer -= 1
else:
self.combo_num = 0
combo = self._hud.get("Combo")
combo.visible = False
if combo is not None:
combo.text = "combo "+str(self.combo_num)
#print self.combo_num, self.combo_timer
def update(self):
p_move_x = 0 # How much the player will move (H)
p_move_y = 0 # How much the player will move (V)
self.event_e.update()
if self.event_e.input.keys[K_ESCAPE] == True:
return 1
elif self.event_e.input.keys[K_q] == True:
return 0
if self.event_e.input.keys[K_SPACE] == True:
self.score_inc(5)
if self.event_e.input.keys[K_c] == True:
self.set_end_screen(False)
self.reset_game()
self.populate()
if self.event_e.input.keys[K_DOWN] == True:
# Fire down
self.player_shoot_dir(0)
elif self.event_e.input.keys[K_UP] == True:
# Fire up
self.player_shoot_dir(2)
elif self.event_e.input.keys[K_LEFT] == True:
# Fire left
self.player_shoot_dir(3)
elif self.event_e.input.keys[K_RIGHT] == True:
# Fire right
self.player_shoot_dir(1)
elif self.event_e.input.mouse_buttons[1] == True:
# Fire towards the mouse cursor
self.player_shoot_point(Vector2(self.event_e.input.mouse_pos[0],
self.event_e.input.mouse_pos[1]))
else:
self.shoot_count = 0
if self.event_e.input.keys[K_w] == True:
# Move up
p_move_y -= self.player_speed
elif self.event_e.input.keys[K_s] == True:
# Move down
p_move_y += self.player_speed
if self.event_e.input.keys[K_a] == True:
# Move left
p_move_x -= self.player_speed
elif self.event_e.input.keys[K_d] == True:
# Move right
p_move_x += self.player_speed
self.player.rotate_to_face_point(Vector2(
self.event_e.input.mouse_pos[0],
self.event_e.input.mouse_pos[1]))
self.beh_e.update(self.enemies, self.player, self.screen)
self.player.move_abs(p_move_x, p_move_y, self.screen)
self.bullet_update()
if len(self.enemies) > 1:
self.rep()
elif len(self.enemies) == 0 and self.score > 0:
self.game_over()
#else:
#self.spawn(4)
self.collide()
self.combo_tick()
self.clock.tick(self.FPS)
return 2
def score_inc(self, pts):
self.combo_timer = self.combo_ticks
self.combo_num += 1
if self.combo_num > 1:
pts = pts * self.combo_num
print "COMBO " + str(self.combo_num)
combo = self._hud.get("Combo")
combo.visible = True
if combo is not None:
combo.text = "combo "+str(self.combo_num)
self.score += 50*pts
sc = self._hud.get("Score")
if sc is not None:
sc.text = "score "+str(self.score)
go = self._hud.get("GameOver2")
if go is not None:
go.text = "score "+str(self.score)
if self.score > self.high_score:
self.high_score = self.score
hsc = self._hud.get("HighScore")
if hsc is not None:
hsc.text = "high score "+str(self.high_score)
def reset_score(self):
print "SCORE RESET FROM", self.score
self.score = 0
sc = self._hud.get("Score")
if(sc is not None):
sc.text = "score "+str(self.score)
def collide(self):
dead_enemies = []
dead_bullets = []
for e in self.enemies:
if e.lifetime >= 30:
for b in self.bullets:
if e.point_inside(Vector2(b.x, b.y)):
#print "COLLIDE2"
self.score_inc(len(e.points))
if e not in dead_enemies:
dead_enemies.append(e)
if b not in dead_bullets:
dead_bullets.append(b)
for e in dead_enemies:
#print self.player.distance_to(Vector2(e.x, e.y))
self.enemies.remove(e)
for b in dead_bullets:
self.bullets.remove(b)
for p in self.player.points:
for e in self.enemies:
if e.lifetime >= 30:
if e.point_inside(p+Vector2(self.player.x, self.player.y)):
self.game_over()
def draw(self):
self.draw_e.begin_draw(pygame.Color(0,0,0))
self.draw_e.draw(self.enemies)
self.draw_e.draw(self.bullets)
self.draw_e.draw([self.player])
self.draw_e.draw([self._hud])
self.draw_e.end_draw()
def run(self):
self._hud.add(HUDPolygon("Box1", pygame.Color(255, 255, 255),
((50, 50), (750, 50),
(750, 550), (50, 550), 2)))
self._hud.add(HUDText("Score", pygame.Color(255, 255, 255),
"score "+str(self.score), (15, 20), 1, 2))
self._hud.add(HUDText("HighScore", pygame.Color(255, 255, 255),
"high score "+str(self.high_score), (15, 575),
1, 2))
self._hud.add(HUDText("GameOver1", pygame.Color(255, 0, 255),
"game over", (100, 200),
5, 2, False))
self._hud.add(HUDText("GameOver2", pygame.Color(255, 0, 255),
"score "+str(self.score),
(200, 300),
2, 2, False))
self._hud.add(HUDText("GameOver3", pygame.Color(255, 0, 255),
"c to restart",
(200, 360),
2, 2, False))
self._hud.add(HUDText("Combo", pygame.Color(255, 255, 255),
"combo "+str(self.combo_num),
(650, 575),
1, 2, True))
self.spawn(4)
while True:
r = self.update()
if r == 0 or r == 1:
return r
self.draw()
def rep(self):
if(self.rep_count % self.rep_interval == 0 and len(self.enemies)>1):
p1 = randint(0, len(self.enemies)-1)
p2 = p1
while (p1 == p2):
p2 = randint(0, len(self.enemies)-1)
if self.enemies[p1].x < self.enemies[p2].x:
x = randint(self.enemies[p1].x, self.enemies[p2].x)
else:
x = randint(self.enemies[p2].x, self.enemies[p1].x)
if self.enemies[p1].y < self.enemies[p2].y:
y = randint(self.enemies[p1].y, self.enemies[p2].y)
else:
y = randint(self.enemies[p2].y, self.enemies[p1].y)
self.enemies.append(
self.enemies[p1].reproduce(self.enemies[p2], x, y))
elif len(self.enemies) < 2:
self.spawn(2)
self.rep_count += 1
#print self.rep_count
def bullet_update(self):
for b in self.bullets:
if b.x > 800 or b.x < 0 or b.y > 600 or b.y < 0:
self.bullets.remove(b)
b.move()
def player_shoot_dir(self, direction):
if self.shoot_count % self.shoot_interval == 0:
b = BulletD(self.player.x, self.player.y, direction)
self.bullets.append(b)
self.shoot_count += 1
def player_shoot_point(self, point):
if self.shoot_count % self.shoot_interval == 0:
b = BulletP(self.player.x, self.player.y, point)
self.bullets.append(b)
self.shoot_count += 1 | mit | 8,833,229,971,831,664,000 | 34.891525 | 79 | 0.489657 | false |
liqd/a4-meinberlin | meinberlin/apps/polls/migrations/0006_copy_poll_data_to_a4_polls.py | 1 | 2716 | # Generated by Django 2.2.24 on 2021-06-18 12:18
from django.db import migrations
def copy_data(apps, schema_editor):
Item = apps.get_model('a4modules', 'Item')
MBPoll = apps.get_model('meinberlin_polls', 'MBPoll')
Poll = apps.get_model('a4polls', 'Poll')
MBQuestion = apps.get_model('meinberlin_polls', 'MBQuestion')
Question = apps.get_model('a4polls', 'Question')
MBChoice = apps.get_model('meinberlin_polls', 'MBChoice')
Choice = apps.get_model('a4polls', 'Choice')
MBVote = apps.get_model('meinberlin_polls', 'MBVote')
Vote = apps.get_model('a4polls', 'Vote')
for mb_poll in MBPoll.objects.all():
item = Item.objects.get(id=mb_poll.item_ptr_id)
poll = Poll(item_ptr_id=mb_poll.item_ptr_id)
poll.__dict__.update(item.__dict__)
poll.save()
mb_questions = MBQuestion.objects.filter(poll=mb_poll)
for mb_question in mb_questions:
question = Question.objects.create(
label = mb_question.label,
weight = mb_question.weight,
multiple_choice = mb_question.multiple_choice,
poll = poll)
mb_choices = MBChoice.objects.filter(question=mb_question)
for mb_choice in mb_choices:
choice = Choice.objects.create(
label = mb_choice.label,
question = question)
mb_votes = MBVote.objects.filter(choice=mb_choice)
for mb_vote in mb_votes:
Vote.objects.create(
created = mb_vote.created,
modified = mb_vote.modified,
creator = mb_vote.creator,
choice = choice)
Comment = apps.get_model('a4comments', 'Comment')
ContentType = apps.get_model('contenttypes', 'ContentType')
mb_poll_content_type = ContentType.objects.get_for_model(MBPoll)
poll_content_type = ContentType.objects.get_for_model(Poll)
comments = Comment.objects.filter(content_type_id=mb_poll_content_type.id)
for comment in comments:
comment.content_type = poll_content_type
comment.save()
Phase = apps.get_model('a4phases', 'Phase')
phases = Phase.objects.filter(type='meinberlin_polls:voting')
for phase in phases:
phase.type='a4polls:voting'
phase.save()
class Migration(migrations.Migration):
dependencies = [
('meinberlin_polls', '0005_rename_mb_poll_models'),
('a4polls', '0001_initial'),
('a4comments', '0007_comment_is_moderator_marked'),
('a4phases', '0007_order_phases_also_by_id')
]
operations = [
migrations.RunPython(copy_data)
]
| agpl-3.0 | -491,148,125,555,496,700 | 38.362319 | 78 | 0.60162 | false |
ulrikpedersen/toggl-gnome-applet | toggl.py | 1 | 5391 | #!/usr/bin/env python
import logging
from datetime import datetime
logging.basicConfig(level=logging.WARNING)
import os
import urllib2, base64, json
import dateutil.parser
def from_ISO8601( str_iso8601 ):
return dateutil.parser.parse(str_iso8601)
def to_ISO8601( timestamp ):
return timestamp.isoformat()
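# Round-trip example: to_ISO8601(datetime(2016, 1, 2, 3, 4, 5)) returns
# '2016-01-02T03:04:05', and from_ISO8601('2016-01-02T03:04:05+00:00')
# parses back into a timezone-aware datetime via dateutil.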
def convert_time_strings(toggl_dicts):
timestamp_fields = ['at',
'created_at',
'start',
'stop']
result = []
for tdict in toggl_dicts:
d = tdict
for tsf in timestamp_fields:
if tdict.has_key(tsf):
d[tsf] = from_ISO8601(tdict[tsf])
result.append(d)
return result
class Toggl:
def __init__(self, api_token=None):
self.log = logging.getLogger("Toggl")
self.log.setLevel(logging.DEBUG)
self.toggl_domain = "www.toggl.com"
self.toggl_api = "https://%s/api/v8/" % self.toggl_domain
self.report_api = "https://%s/reports/api/v2" % self.toggl_domain
self._api_token = api_token
# Search for an Toggl API token in a list of files
# No validation of the collected token
# TODO: encryption of tokenfiles could be nice
tokenfiles = [os.path.expanduser(f) for f in ['.toggltoken', '~/.toggltoken', '~/.togglapplet/.toggltoken']]
for tf in tokenfiles:
if os.path.exists( tf ):
try:
f = open(tf)
self._api_token = f.read().strip()
f.close()
except:
self.log.exception("Could not read token from " + tf)
self._api_token = None
if self._api_token: break
def send_request( self, api_call_url ):
''' Send a request or command to Toggl, retrieve and parse the json response.
returns a list of dictionary objects.
Throws an exception if the http response is not OK (200) or if no JSON can be decoded from the response.
'''
request = urllib2.Request( api_call_url )
self.log.debug("http request url = \'%s\'", request.get_full_url())
# username:password
# Use base64.standard_b64encode instead of replace...
user_pass = base64.encodestring('%s:%s' % (self._api_token, 'api_token')).replace('\n', '')
request.add_header("Authorization", "Basic %s" % user_pass)
opener = urllib2.build_opener(
urllib2.HTTPHandler(),
urllib2.HTTPSHandler(),
urllib2.ProxyHandler({'https': 'http://wwwcache.rl.ac.uk:8080'}))
urllib2.install_opener(opener)
result = urllib2.urlopen(request, timeout = 3.0) # with no data, this is a http GET.
self.log.debug("http request result: code=%s url=\'%s\'", result.getcode(), result.geturl())
js = json.load(result)
#self.log.debug("JSON raw result: %s" % json.dumps(js,sort_keys=True, indent=4, separators=(',', ': ')))
return js
def get_workspaces(self):
self.log.debug("get_workspaces()")
js = self.send_request(self.toggl_api + "workspaces")
js = convert_time_strings(js)
return js
def get_default_workspace(self):
self.log.debug("get_default_workspace()")
wid = self.get_user()['default_wid']
js = self.send_request(self.toggl_api + "workspaces/%s"%str(wid))
js = convert_time_strings([js['data']])
return js[0]
def get_default_workspace_id(self):
self.log.debug("get_default_workspace_id()")
ws = self.get_default_workspace()
self.log.debug(ws)
return ws['id']
def get_projects(self, wid=None):
self.log.debug("get_projects(wid=%s)"%str(wid))
if wid:
js = self.send_request(self.toggl_api + "workspaces/%s/projects"%str(wid))
else:
js = []
for w in self.get_workspaces():
js += self.send_request(self.toggl_api + "workspaces/%s/projects"%str(w['id']))
js = convert_time_strings(js)
return js
def get_current_entry(self):
'''get the currently active time entry'''
self.log.debug("get_current_entry()")
js = self.send_request(self.toggl_api + "time_entries/current")
self.log.debug( js )
js = convert_time_strings(js['data'])
return js
def get_range_entries(self, start_end=None):
'''Get a list of entries in a range (max 1000 entries).
If no start-end range is defined, the default is to return all entries
from the last 9 days.
start_end: tuple with start and end date'''
self.log.debug("get_range_entries()")
query = "time_entries"
if start_end:
start, end = start_end
            if isinstance(start, datetime):
                start = to_ISO8601(start)
            if isinstance(end, datetime):
                end = to_ISO8601(end)
query += "?start_date=%s&end_date=%s"%(start, end)
js = self.send_request(self.toggl_api + query)
js = convert_time_strings(js)
return js
def get_user(self):
self.log.debug("get_user()")
js = self.send_request(self.toggl_api + "me")
return js['data']
| unlicense | 6,207,641,037,173,916,000 | 37.241135 | 116 | 0.565758 | false |
JulyKikuAkita/PythonPrac | cs15211/BaseballGame.py | 1 | 3441 | __source__ = 'https://leetcode.com/problems/baseball-game/description/'
# Time: O(n)
# Space: O(n)
#
# Description: Leetcode # 682. Baseball Game
#
# You're now a baseball game point recorder.
#
# Given a list of strings, each string can be one of the 4 following types:
#
# Integer (one round's score): Directly represents the number of points you get in this round.
# "+" (one round's score): Represents that the points you get in this round are the sum of the last two valid round's points.
# "D" (one round's score): Represents that the points you get in this round are the doubled data of the last valid round's points.
# "C" (an operation, which isn't a round's score): Represents the last valid round's points you get were invalid and should be removed.
# Each round's operation is permanent and could have an impact on the round before and the round after.
#
# You need to return the sum of the points you could get in all the rounds.
#
# Example 1:
# Input: ["5","2","C","D","+"]
# Output: 30
# Explanation:
# Round 1: You could get 5 points. The sum is: 5.
# Round 2: You could get 2 points. The sum is: 7.
# Operation 1: The round 2's data was invalid. The sum is: 5.
# Round 3: You could get 10 points (the round 2's data has been removed). The sum is: 15.
# Round 4: You could get 5 + 10 = 15 points. The sum is: 30.
# Example 2:
# Input: ["5","-2","4","C","D","9","+","+"]
# Output: 27
# Explanation:
# Round 1: You could get 5 points. The sum is: 5.
# Round 2: You could get -2 points. The sum is: 3.
# Round 3: You could get 4 points. The sum is: 7.
# Operation 1: The round 3's data is invalid. The sum is: 3.
# Round 4: You could get -4 points (the round 3's data has been removed). The sum is: -1.
# Round 5: You could get 9 points. The sum is: 8.
# Round 6: You could get -4 + 9 = 5 points. The sum is 13.
# Round 7: You could get 9 + 5 = 14 points. The sum is 27.
# Note:
# The size of the input list will be between 1 and 1000.
# Every integer represented in the list will be between -30000 and 30000.
import unittest
class Solution(object):
def calPoints(self, ops):
# Time: O(n)
# Space: O(n)
history = []
for op in ops:
if op == 'C':
history.pop()
elif op == 'D':
history.append(history[-1] * 2)
elif op == '+':
history.append(history[-1] + history[-2])
else:
history.append(int(op))
return sum(history)
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
#Thought: https://leetcode.com/problems/baseball-game/solution/
# 7ms 43.98%
class Solution {
public int calPoints(String[] ops) {
int sum = 0;
LinkedList<Integer> list = new LinkedList<>();
for (String op : ops) {
if (op.equals("C")) {
sum -= list.removeLast();
}
else if (op.equals("D")) {
list.add(list.peekLast() * 2);
sum += list.peekLast();
}
else if (op.equals("+")) {
list.add(list.peekLast() + list.get(list.size() - 2));
sum += list.peekLast();
}
else {
list.add(Integer.parseInt(op));
sum += list.peekLast();
}
}
return sum;
}
}
''' | apache-2.0 | 3,322,218,805,070,624,000 | 34.854167 | 135 | 0.588782 | false |
fancycode/dnsadmin | scripts/dnsadmin.py | 1 | 4877 | #
# Copyright (C) 2016 Joachim Bauch <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import cookielib
import functools
import json
import sys
import urllib2
class parse_response(object):
"""Decorator that parses returned data and checks contents for success."""
def __init__(self, action):
self.action = action
def __call__(self, f):
@functools.wraps(f)
def do_parse_response(*args, **kw):
try:
data = f(*args, **kw)
except urllib2.HTTPError, e:
print >> sys.stderr, '%s failed: %s (%s)' \
% (self.action, e.reason, e.code)
print >> sys.stderr, 'Server response: %s' % (e.read().strip())
return None
if data is None:
return None
elif not isinstance(data, basestring):
data = data.read()
try:
decoded = json.loads(data)
except Exception, e:
print >> sys.stderr, 'Server didn\'t return valid JSON: %s' \
% (e)
print >> sys.stderr, 'Server response: %r' % (data)
return None
if not isinstance(decoded, dict):
print >> sys.stderr, 'Server didn\'t return a map'
print >> sys.stderr, 'Server response: %r' % (data)
return None
if decoded.get('status') != 'ok':
print >> sys.stderr, 'Server didn\'t return a success status'
print >> sys.stderr, 'Server response: %r' % (decoded)
return None
return decoded['result']
return do_parse_response
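# parse_response assumes the server wraps every reply in an envelope of the
# form {"status": "ok", "result": ...}; e.g. a hypothetical successful call
# returning {"status": "ok", "result": {"domains": []}} hands only the
# "result" value back to the decorated method's caller.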
class MethodAwareRequest(urllib2.Request):
"""Request that supports setting a custom HTTP method."""
def __init__(self, *args, **kw):
self.method = kw.pop('method', None)
urllib2.Request.__init__(self, *args, **kw)
def get_method(self):
if self.method is not None:
return self.method
return urllib2.Request.get_method(self)
class DnsAdminClient(object):
"""Client implementation for the DNS admin service."""
API_VERSION = 'v1'
def __init__(self, base_url):
# Remove any trailing slashes from base url.
if base_url[-1:] == '/':
base_url = base_url[:-1]
self.base_url = base_url + "/api/" + self.API_VERSION
self.cj = cookielib.CookieJar()
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
def _perform_request(self, url, data=None, method=None):
"""Send GET/POST request to the server with correct headers."""
if data is not None:
data = json.dumps(data)
headers = {
'Content-Type': 'application/json',
}
req = MethodAwareRequest(url, data, headers, method=method)
else:
req = MethodAwareRequest(url, method=method)
return self.opener.open(req)
@parse_response('Login')
def login(self, username, password):
"""Authenticate user with the service."""
data = {
'username': username,
'password': password,
}
return self._perform_request(self.base_url + '/user/login', data)
@parse_response('Change password')
def changePassword(self, new_password):
"""Change password of logged in user."""
data = {
'password': new_password,
}
return self._perform_request(self.base_url + '/user/change-password', data)
@parse_response('List')
def listDomains(self):
"""Return list of registered domains."""
return self._perform_request(self.base_url + '/domain/list')
@parse_response('Register')
def registerSlave(self, domain, master):
"""Register slave domain."""
data = {
'master': master,
}
return self._perform_request(self.base_url + '/slave/' + domain,
data=data, method='PUT')
@parse_response('Unregister')
def unregisterSlave(self, domain):
"""Unregister slave domain."""
return self._perform_request(self.base_url + '/slave/' + domain,
method='DELETE')
| agpl-3.0 | 8,245,141,423,036,300,000 | 33.835714 | 83 | 0.58294 | false |
pinterest/mysql_utils | mysql_backup_csv.py | 1 | 33689 | #!/usr/bin/env python
import argparse
import datetime
import json
import logging
import multiprocessing
import os
import subprocess
import threading
import time
import traceback
import uuid
import boto
import _mysql_exceptions
import psutil
import safe_uploader
import mysql_backup_status
from lib import backup
from lib import environment_specific
from lib import host_utils
from lib import mysql_lib
ACTIVE = 'active'
CSV_BACKUP_LOCK_TABLE_NAME = 'backup_locks'
CSV_BACKUP_LOCK_TABLE = """CREATE TABLE IF NOT EXISTS {db}.{tbl} (
`lock_identifier` varchar(36) NOT NULL,
`lock_active` enum('active') DEFAULT 'active',
`created_at` datetime NOT NULL,
`expires` datetime DEFAULT NULL,
`released` datetime DEFAULT NULL,
`table_name` varchar(255) NOT NULL,
`partition_number` INT UNSIGNED NOT NULL DEFAULT 0,
`hostname` varchar(90) NOT NULL DEFAULT '',
`port` int(11) NOT NULL DEFAULT '0',
PRIMARY KEY (`lock_identifier`),
UNIQUE KEY `lock_active` (`table_name`,`partition_number`,`lock_active`),
INDEX `backup_location` (`hostname`, `port`),
INDEX `expires` (`expires`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1"""
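# The UNIQUE KEY on (table_name, partition_number, lock_active) is what turns
# this table into a lock: lock_active is either 'active' or NULL, and because
# NULLs never collide in a unique index, at most one 'active' row can exist
# per table/partition while released rows are kept around as history.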
MAX_THREAD_ERROR = 5
LOCKS_HELD_TIME = '5 MINUTE'
# How long locks are held and updated
LOCK_EXTEND_FREQUENCY = 10
# LOCK_EXTEND_FREQUENCY in seconds
PATH_PITR_DATA = 'pitr/{replica_set}/{db_name}/{table}/{date}'
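# For example (illustrative values), PATH_PITR_DATA.format(replica_set='db00001',
# db_name='mydb', table='users', date='2016-01-01') gives
# 'pitr/db00001/mydb/users/2016-01-01'.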
SUCCESS_ENTRY = 'YAY_IT_WORKED'
log = logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--db',
default=None,
help='DB to export, default is all databases.')
parser.add_argument('--force_table',
default=None,
help='Table to export, default is all tables.')
parser.add_argument('--force_reupload',
default=False,
action='store_true',
help='Ignore existing uploads, reupload everyting')
parser.add_argument('--loglevel',
default='INFO',
help='Change logging verbosity',
choices=set(['INFO', 'DEBUG']))
parser.add_argument('--dev_bucket',
default=False,
action='store_true',
help='Use the dev bucket, useful for testing')
args = parser.parse_args()
logging.basicConfig(level=getattr(logging, args.loglevel.upper(), None))
# Nope, don't even start.
if os.path.isfile(backup.CSV_BACKUP_SKIP_FILE):
log.info('Found {}. Skipping CSV backup '
'run.'.format(backup.CSV_BACKUP_SKIP_FILE))
return
    # If we ever want to run multi instance, this will need to be updated
backup_obj = mysql_backup_csv(host_utils.HostAddr(host_utils.HOSTNAME),
args.db, args.force_table,
args.force_reupload, args.dev_bucket)
backup_obj.backup_instance()
class mysql_backup_csv:
def __init__(self, instance,
db=None, force_table=None,
force_reupload=False, dev_bucket=False):
""" Init function for backup, takes all args
Args:
        instance - A HostAddr object of the instance to be backed up
        db - (optional) backup only specified db
        force_table - (optional) backup only specified table
force_reupload - (optional) force reupload of backup
"""
self.instance = instance
self.session_id = None
self.timestamp = datetime.datetime.utcnow()
# datestamp is for s3 files which are by convention -1 day
self.datestamp = (self.timestamp -
datetime.timedelta(days=1)).strftime("%Y-%m-%d")
self.tables_to_backup = multiprocessing.Queue()
self.tables_to_retry = multiprocessing.Queue()
if db:
table_list = ['{}.{}'.format(db, x) for x in mysql_lib.get_tables(instance, db, True)]
else:
table_list = mysql_lib.get_all_tables_by_instance(instance)
for t in backup.filter_tables_to_csv_backup(instance, table_list):
self.tables_to_backup.put(t)
self.dev_bucket = dev_bucket
self.force_table = force_table
self.force_reupload = force_reupload
self.table_count = 0
self.upload_bucket = environment_specific.S3_CSV_BUCKET_DEV \
if dev_bucket else environment_specific.S3_CSV_BUCKET
def backup_instance(self):
""" Back up a replica instance to s3 in csv """
log.info('Backup for instance {i} started at {t}'
''.format(t=str(self.timestamp),
i=self.instance))
log.info('Checking heartbeat to make sure replication is not too '
'lagged.')
self.check_replication_for_backup()
log.info('Taking host backup lock')
host_lock = host_utils.bind_lock_socket(backup.CSV_BACKUP_LOCK_SOCKET)
log.info('Setting up export directory structure')
self.setup_and_get_tmp_path()
log.info('Will temporarily dump inside of {path}'
''.format(path=self.dump_base_path))
log.info('Releasing any invalid shard backup locks')
self.ensure_backup_locks_sanity()
log.info('Deleting old expired locks')
self.purge_old_expired_locks()
log.info('Stopping replication SQL thread to get a snapshot')
mysql_lib.stop_replication(self.instance,
mysql_lib.REPLICATION_THREAD_SQL)
# starting a consistent snapshot here and retrieving the thread ID
conn = mysql_lib.connect_mysql(self.instance,
backup.USER_ROLE_MYSQLDUMP)
mysql_lib.start_consistent_snapshot(conn, read_only=True)
cursor = conn.cursor()
cursor.execute('SET SESSION wait_timeout=28800')
cursor.execute("SELECT VARIABLE_VALUE AS conn_id FROM "
"INFORMATION_SCHEMA.SESSION_VARIABLES "
"WHERE VARIABLE_NAME='pseudo_thread_id'")
self.session_id = cursor.fetchone()['conn_id']
workers = []
for _ in range(multiprocessing.cpu_count() / 2):
proc = multiprocessing.Process(target=self.mysql_backup_csv_tables)
proc.daemon = True
proc.start()
workers.append(proc)
# throw in a sleep to make sure all threads have started dumps
time.sleep(2)
log.info('Restarting replication')
mysql_lib.start_replication(self.instance,
mysql_lib.REPLICATION_THREAD_SQL)
for worker in workers:
worker.join()
if not (self.tables_to_backup.empty() and self.tables_to_retry.empty()):
raise Exception('All worker processes have completed, but '
'work remains in the queue')
log.info('CSV backup is complete, will run a check')
self.release_expired_locks()
mysql_backup_status.verify_csv_instance_backup(
self.instance,
self.datestamp,
self.dev_bucket)
host_utils.release_lock_socket(host_lock)
def mysql_backup_csv_tables(self):
""" Worker for backing up a queue of tables """
proc_id = multiprocessing.current_process().name
conn = mysql_lib.connect_mysql(self.instance,
backup.USER_ROLE_MYSQLDUMP)
mysql_lib.start_consistent_snapshot(conn, read_only=True,
session_id=self.session_id)
pitr_data = mysql_lib.get_pitr_data(self.instance)
err_count = 0
while not (self.tables_to_backup.empty() and self.tables_to_retry.empty()):
table_tuple = self.tables_to_retry.get() if not self.tables_to_retry.empty() \
else self.tables_to_backup.get()
try:
# if this is a partitioned table, and it is already
# being backed up on some other host, we do not want to attempt
# to back it up here.
#
if table_tuple[1] and self.partition_lock_exists(table_tuple):
log.debug('Partitioned table {} is already being '
'backed up elsewhere, so we cannot do it '
'here.'.format(table_tuple[0]))
else:
self.mysql_backup_csv_table_wrapper(table_tuple, conn, pitr_data)
self.table_count = self.table_count + 1
if (self.table_count % 50) == 0:
self.release_expired_locks()
except:
self.tables_to_retry.put(table_tuple)
log.error('{proc_id}: Could not dump {tbl}, partition {p} - '
'error: {e}'.format(tbl=table_tuple[0], p=table_tuple[2],
e=traceback.format_exc(),
proc_id=proc_id))
err_count = err_count + 1
if err_count > MAX_THREAD_ERROR:
log.error('{}: Error count in thread > MAX_THREAD_ERROR. '
'Aborting :('.format(proc_id))
return
def mysql_backup_csv_table_wrapper(self, table_tuple, conn, pitr_data):
""" Back up a single table or partition
Args:
table_tuple - A tuple containing the fully-qualified table name,
the partition name, and the partition number
            conn - a connection to the mysql instance
pitr_data - data describing the position of the db data in replication
"""
proc_id = multiprocessing.current_process().name
if not self.force_reupload and self.already_backed_up(table_tuple):
log.info('{proc_id}: {tbl} partition {p} is already backed up, '
'skipping'.format(proc_id=proc_id,
tbl=table_tuple[0],
p=table_tuple[2]))
return
# attempt to take lock by writing a lock to the master
tmp_dir_db = None
lock_identifier = None
extend_lock_thread = None
try:
self.release_expired_locks()
lock_identifier = self.take_backup_lock(table_tuple)
extend_lock_stop_event = threading.Event()
extend_lock_thread = threading.Thread(
target=self.extend_backup_lock,
args=(lock_identifier, extend_lock_stop_event))
extend_lock_thread.daemon = True
extend_lock_thread.start()
if not lock_identifier:
return
log.info('{proc_id}: {tbl} table, partition {p} backup start'
''.format(tbl=table_tuple[0], p=table_tuple[2],
proc_id=proc_id))
tmp_dir_db = os.path.join(self.dump_base_path, table_tuple[0].split('.')[0])
if not os.path.exists(tmp_dir_db):
os.makedirs(tmp_dir_db)
host_utils.change_owner(tmp_dir_db, 'mysql', 'mysql')
self.upload_pitr_data(*table_tuple[0].split('.'), pitr_data=pitr_data)
self.mysql_backup_one_partition(table_tuple, tmp_dir_db, conn)
log.info('{proc_id}: {tbl} table, partition {p} backup complete'
''.format(tbl=table_tuple[0], p=table_tuple[2],
proc_id=proc_id))
finally:
if extend_lock_thread:
extend_lock_stop_event.set()
log.debug('{proc_id}: {tbl} table, partition {p} waiting for '
'lock expiry thread to end'.format(tbl=table_tuple[0],
p=table_tuple[2],
proc_id=proc_id))
extend_lock_thread.join()
if lock_identifier:
log.debug('{proc_id}: {tbl} table, partition {p} releasing lock'
''.format(tbl=table_tuple[0], p=table_tuple[2],
proc_id=proc_id))
self.release_table_backup_lock(lock_identifier)
def mysql_backup_one_partition(self, table_tuple, tmp_dir_db, conn):
""" Back up a single partition of a single table
Args:
            table_tuple - the tuple (fully-qualified table name, partition
                          name, partition number) to be backed up
tmp_dir_db - temporary storage used for all tables in the db
            conn - a connection to the mysql instance
"""
proc_id = multiprocessing.current_process().name
(_, data_path, _) = backup.get_csv_backup_paths(self.instance,
*table_tuple[0].split('.'),
date=self.datestamp,
partition_number=table_tuple[2])
log.debug('{proc_id}: {tbl} partition {p} dump to {path} started'
''.format(proc_id=proc_id,
tbl=table_tuple[0],
p=table_tuple[2],
path=data_path))
self.upload_schema(*table_tuple[0].split('.'), tmp_dir_db=tmp_dir_db)
fifo = os.path.join(tmp_dir_db,
'{tbl}{part}'.format(tbl=table_tuple[0].split('.')[1],
part=table_tuple[2]))
procs = dict()
try:
# giant try so we can try to clean things up in case of errors
self.create_fifo(fifo)
# Start creating processes
procs['cat'] = subprocess.Popen(['cat', fifo],
stdout=subprocess.PIPE)
procs['nullescape'] = subprocess.Popen(['nullescape'],
stdin=procs['cat'].stdout,
stdout=subprocess.PIPE)
procs['lzop'] = subprocess.Popen(['lzop'],
stdin=procs['nullescape'].stdout,
stdout=subprocess.PIPE)
# Start dump query
return_value = set()
query_thread = threading.Thread(target=self.run_dump_query,
args=(table_tuple, fifo, conn,
procs['cat'], return_value))
query_thread.daemon = True
query_thread.start()
# And run the upload
safe_uploader.safe_upload(precursor_procs=procs,
stdin=procs['lzop'].stdout,
bucket=self.upload_bucket,
key=data_path,
check_func=self.check_dump_success,
check_arg=return_value)
os.remove(fifo)
log.debug('{proc_id}: {tbl} partition {p} clean up complete'
''.format(proc_id=proc_id,
tbl=table_tuple[0],
p=table_tuple[2]))
except:
log.debug('{}: in exception handling for failed table '
'upload'.format(proc_id))
if os.path.exists(fifo):
self.cleanup_fifo(fifo)
raise
def create_fifo(self, fifo):
""" Create a fifo to be used for dumping a mysql table
Args:
fifo - The path to the fifo
"""
if os.path.exists(fifo):
self.cleanup_fifo(fifo)
log.debug('{proc_id}: creating fifo {fifo}'
''.format(proc_id=multiprocessing.current_process().name,
fifo=fifo))
os.mkfifo(fifo)
# Could not get os.mkfifo(fifo, 0777) to work due to umask
host_utils.change_owner(fifo, 'mysql', 'mysql')
def cleanup_fifo(self, fifo):
""" Safely cleanup a fifo that is an unknown state
Args:
fifo - The path to the fifo
"""
log.debug('{proc_id}: Cleanup of {fifo} started'
''.format(proc_id=multiprocessing.current_process().name,
fifo=fifo))
cat_proc = subprocess.Popen('timeout 5 cat {} >/dev/null'.format(fifo),
shell=True)
cat_proc.wait()
os.remove(fifo)
log.debug('{proc_id}: Cleanup of {fifo} complete'
''.format(proc_id=multiprocessing.current_process().name,
fifo=fifo))
def run_dump_query(self, table_tuple, fifo, conn, cat_proc, return_value):
""" Run a SELECT INTO OUTFILE into a fifo
Args:
table_tuple - A tuple of (table_name, partition_name, partition_number)
            fifo - The fifo to dump the db.table data into
conn - The connection to MySQL
cat_proc - The process reading from the fifo
            return_value - A set used to report the return status. This is
                           a semi-ugly hack, required because threads cannot
                           return data but can modify mutable objects
                           (like a set).
"""
log.debug('{proc_id}: {tbl} partition {p} dump started'
''.format(proc_id=multiprocessing.current_process().name,
tbl=table_tuple[0],
p=table_tuple[2]))
extra = '' if not table_tuple[1] else " PARTITION ({})".format(table_tuple[1])
(db, tbl) = table_tuple[0].split('.')
sql = ("SELECT * "
"INTO OUTFILE '{fifo}' "
"FROM {db}.`{tbl}` {extra} "
"").format(fifo=fifo,
db=db,
tbl=tbl,
extra=extra)
cursor = conn.cursor()
try:
cursor.execute(sql)
except Exception as detail:
# if we have not output any data, then the cat proc will never
# receive an EOF, so we will be stuck
if psutil.pid_exists(cat_proc.pid):
cat_proc.kill()
log.error('{proc_id}: dump query encountered an error: {er}'
''.format(
er=detail,
proc_id=multiprocessing.current_process().name))
log.debug('{proc_id}: {tbl} partition {p} dump complete'
''.format(proc_id=multiprocessing.current_process().name,
tbl=table_tuple[0], p=table_tuple[2]))
return_value.add(SUCCESS_ENTRY)
def check_dump_success(self, return_value):
""" Check to see if a dump query succeeded
Args:
return_value - A set which if it includes SUCCESS_ENTRY shows that
the query succeeded
"""
if SUCCESS_ENTRY not in return_value:
raise Exception('{}: dump failed'
''.format(multiprocessing.current_process().name))
def upload_pitr_data(self, db, tbl, pitr_data):
""" Upload a file of PITR data to s3 for each table
Args:
db - the db that was backed up.
tbl - the table that was backed up.
pitr_data - a dict of various data that might be helpful for running a
PITR
"""
zk = host_utils.MysqlZookeeper()
replica_set = zk.get_replica_set_from_instance(self.instance)
s3_path = PATH_PITR_DATA.format(replica_set=replica_set,
date=self.datestamp,
db_name=db, table=tbl)
log.debug('{proc_id}: {db}.{tbl} Uploading pitr data to {s3_path}'
''.format(s3_path=s3_path,
proc_id=multiprocessing.current_process().name,
db=db, tbl=tbl))
boto_conn = boto.connect_s3()
bucket = boto_conn.get_bucket(self.upload_bucket, validate=False)
key = bucket.new_key(s3_path)
key.set_contents_from_string(json.dumps(pitr_data))
def upload_schema(self, db, table, tmp_dir_db):
""" Upload the schema of a table to s3
Args:
db - the db to be backed up
table - the table to be backed up
tmp_dir_db - temporary storage used for all tables in the db
"""
(schema_path, _, _) = backup.get_csv_backup_paths(
self.instance, db, table, self.datestamp)
create_stm = mysql_lib.show_create_table(self.instance, db, table)
log.debug('{proc_id}: Uploading schema to {schema_path}'
''.format(schema_path=schema_path,
proc_id=multiprocessing.current_process().name))
boto_conn = boto.connect_s3()
bucket = boto_conn.get_bucket(self.upload_bucket, validate=False)
key = bucket.new_key(schema_path)
key.set_contents_from_string(create_stm)
def partition_lock_exists(self, table_tuple):
""" Find out if there is already a lock on one partition of a
partitioned table from a host other than us. If so, we
            cannot back that table up here.
Args:
table_tuple - the tuple of table information.
Returns:
True if there is such a lock, False if not.
"""
zk = host_utils.MysqlZookeeper()
replica_set = zk.get_replica_set_from_instance(self.instance)
master = zk.get_mysql_instance_from_replica_set(
replica_set,
host_utils.REPLICA_ROLE_MASTER)
master_conn = mysql_lib.connect_mysql(master, role='dbascript')
cursor = master_conn.cursor()
params = {'table_name': table_tuple[0],
'hostname': self.instance.hostname,
'port': self.instance.port,
'active': ACTIVE}
sql = ("SELECT COUNT(*) AS cnt FROM {db}.{tbl} WHERE "
"lock_active = %(active)s AND "
"table_name = %(table_name)s AND "
"hostname <> %(hostname)s AND "
"port = %(port)s").format(db=mysql_lib.METADATA_DB,
tbl=CSV_BACKUP_LOCK_TABLE_NAME)
cursor.execute(sql, params)
row = int(cursor.fetchone()['cnt'])
return (row > 0)
def take_backup_lock(self, table_tuple):
""" Write a lock row on to the master
Args:
table_tuple - the tuple containing info about the table/partition
to be backed up.
Returns:
a uuid lock identifier
"""
zk = host_utils.MysqlZookeeper()
replica_set = zk.get_replica_set_from_instance(self.instance)
master = zk.get_mysql_instance_from_replica_set(
replica_set,
host_utils.REPLICA_ROLE_MASTER)
master_conn = mysql_lib.connect_mysql(master, role='dbascript')
cursor = master_conn.cursor()
lock_identifier = str(uuid.uuid4())
log.debug('Taking backup lock: {replica_set} {tbl} partition {p}'
''.format(replica_set=replica_set,
tbl=table_tuple[0], p=table_tuple[2]))
params = {'lock': lock_identifier,
'table_name': table_tuple[0],
'partition_number': table_tuple[2],
'hostname': self.instance.hostname,
'port': self.instance.port,
'active': ACTIVE}
sql = ("INSERT INTO {db}.{tbl} "
"SET "
"lock_identifier = %(lock)s, "
"lock_active = %(active)s, "
"created_at = NOW(), "
"expires = NOW() + INTERVAL {locks_held_time}, "
"released = NULL, "
"table_name = %(table_name)s, "
"partition_number = %(partition_number)s, "
"hostname = %(hostname)s, "
"port = %(port)s"
"").format(db=mysql_lib.METADATA_DB,
tbl=CSV_BACKUP_LOCK_TABLE_NAME,
locks_held_time=LOCKS_HELD_TIME)
cursor = master_conn.cursor()
try:
cursor.execute(sql, params)
master_conn.commit()
except _mysql_exceptions.IntegrityError:
lock_identifier = None
sql = ("SELECT hostname, port, expires "
"FROM {db}.{tbl} "
"WHERE "
" lock_active = %(active)s AND "
" table_name = %(table_name)s AND "
" partition_number = %(partition_number)s"
"").format(db=mysql_lib.METADATA_DB,
tbl=CSV_BACKUP_LOCK_TABLE_NAME)
cursor.execute(sql,
{'table_name': table_tuple[0],
'partition_number': table_tuple[2],
'active': ACTIVE})
ret = cursor.fetchone()
log.debug('Table {tbl} (partition {p}) is being backed '
'up on {hostname}:{port}, '
'lock will expire at {expires}.'
''.format(tbl=table_tuple[0],
p=table_tuple[2],
hostname=ret['hostname'],
port=ret['port'],
expires=str(ret['expires'])))
log.debug(cursor._executed)
return lock_identifier
def extend_backup_lock(self, lock_identifier, extend_lock_stop_event):
""" Extend a backup lock. This is to be used by a thread
Args:
            lock_identifier - Corresponds to a lock identifier row in the
CSV_BACKUP_LOCK_TABLE_NAME.
extend_lock_stop_event - An event that will be used to inform this
thread to stop extending the lock
"""
        # Assumption is that this is called right after creating the lock
last_update = time.time()
while(not extend_lock_stop_event.is_set()):
if (time.time() - last_update) > LOCK_EXTEND_FREQUENCY:
zk = host_utils.MysqlZookeeper()
replica_set = zk.get_replica_set_from_instance(self.instance)
master = zk.get_mysql_instance_from_replica_set(replica_set,
host_utils.REPLICA_ROLE_MASTER)
master_conn = mysql_lib.connect_mysql(master, role='dbascript')
cursor = master_conn.cursor()
params = {'lock_identifier': lock_identifier}
sql = ('UPDATE {db}.{tbl} '
'SET expires = NOW() + INTERVAL {locks_held_time} '
'WHERE lock_identifier = %(lock_identifier)s'
'').format(db=mysql_lib.METADATA_DB,
tbl=CSV_BACKUP_LOCK_TABLE_NAME,
locks_held_time=LOCKS_HELD_TIME)
cursor.execute(sql, params)
master_conn.commit()
log.debug(cursor._executed)
last_update = time.time()
extend_lock_stop_event.wait(.5)
def release_table_backup_lock(self, lock_identifier):
""" Release a backup lock created by take_backup_lock
Args:
lock_identifier - a uuid to identify a lock row
"""
zk = host_utils.MysqlZookeeper()
replica_set = zk.get_replica_set_from_instance(self.instance)
master = zk.get_mysql_instance_from_replica_set(replica_set,
host_utils.REPLICA_ROLE_MASTER)
master_conn = mysql_lib.connect_mysql(master, role='dbascript')
cursor = master_conn.cursor()
params = {'lock_identifier': lock_identifier}
sql = ('UPDATE {db}.{tbl} '
'SET lock_active = NULL, released = NOW() '
'WHERE lock_identifier = %(lock_identifier)s AND '
' lock_active is NOT NULL'
'').format(db=mysql_lib.METADATA_DB,
tbl=CSV_BACKUP_LOCK_TABLE_NAME)
cursor.execute(sql, params)
master_conn.commit()
log.debug(cursor._executed)
def ensure_backup_locks_sanity(self):
""" Release any backup locks that aren't sane. This means locks
created by the same host as the caller. The instance level lock
should allow this assumption to be correct.
"""
zk = host_utils.MysqlZookeeper()
replica_set = zk.get_replica_set_from_instance(self.instance)
master = zk.get_mysql_instance_from_replica_set(replica_set,
host_utils.REPLICA_ROLE_MASTER)
master_conn = mysql_lib.connect_mysql(master, role='dbascript')
cursor = master_conn.cursor()
if not mysql_lib.does_table_exist(master, mysql_lib.METADATA_DB,
CSV_BACKUP_LOCK_TABLE_NAME):
log.debug('Creating missing metadata table')
cursor.execute(CSV_BACKUP_LOCK_TABLE.format(
db=mysql_lib.METADATA_DB,
tbl=CSV_BACKUP_LOCK_TABLE_NAME))
params = {'hostname': self.instance.hostname,
'port': self.instance.port}
sql = ('UPDATE {db}.{tbl} '
'SET lock_active = NULL, released = NOW() '
'WHERE hostname = %(hostname)s AND '
' port = %(port)s'
'').format(db=mysql_lib.METADATA_DB,
tbl=CSV_BACKUP_LOCK_TABLE_NAME)
cursor.execute(sql, params)
master_conn.commit()
def release_expired_locks(self):
""" Release any expired locks """
zk = host_utils.MysqlZookeeper()
replica_set = zk.get_replica_set_from_instance(self.instance)
master = zk.get_mysql_instance_from_replica_set(replica_set,
host_utils.REPLICA_ROLE_MASTER)
master_conn = mysql_lib.connect_mysql(master, role='dbascript')
cursor = master_conn.cursor()
sql = ('UPDATE {db}.{tbl} '
'SET lock_active = NULL, released = NOW() '
'WHERE expires < NOW() AND lock_active IS NOT NULL'
'').format(db=mysql_lib.METADATA_DB,
tbl=CSV_BACKUP_LOCK_TABLE_NAME)
cursor.execute(sql)
master_conn.commit()
log.debug(cursor._executed)
def purge_old_expired_locks(self):
""" Delete any locks older than 2 days """
zk = host_utils.MysqlZookeeper()
replica_set = zk.get_replica_set_from_instance(self.instance)
master = zk.get_mysql_instance_from_replica_set(replica_set,
host_utils.REPLICA_ROLE_MASTER)
master_conn = mysql_lib.connect_mysql(master, role='dbascript')
cursor = master_conn.cursor()
sql = ('DELETE FROM {db}.{tbl} '
'WHERE expires < NOW() - INTERVAL 2 DAY'
'').format(db=mysql_lib.METADATA_DB,
tbl=CSV_BACKUP_LOCK_TABLE_NAME)
cursor.execute(sql)
master_conn.commit()
log.debug(cursor._executed)
def already_backed_up(self, table_tuple):
""" Check to see if a particular partition has already been uploaded
to s3
Args:
table_tuple - (table, partition name, part number)
Returns:
bool - True if the partition has already been backed up,
False otherwise
"""
boto_conn = boto.connect_s3()
bucket = boto_conn.get_bucket(self.upload_bucket, validate=False)
(_, data_path, _) = backup.get_csv_backup_paths(self.instance,
*table_tuple[0].split('.'),
date=self.datestamp,
partition_number=table_tuple[2])
if not bucket.get_key(data_path):
return False
return True
def check_replication_for_backup(self):
""" Confirm that replication is caught up enough to run """
while True:
heartbeat = mysql_lib.get_heartbeat(self.instance)
if heartbeat.date() < self.timestamp.date():
log.warning('Replication is too lagged ({}) to run daily '
'backup, sleeping'.format(heartbeat))
time.sleep(10)
elif heartbeat.date() > self.timestamp.date():
raise Exception('Replication is later than expected day')
else:
log.info('Replication is ok ({}) to run daily '
'backup'.format(heartbeat))
return
def setup_and_get_tmp_path(self):
""" Figure out where to temporarily store csv backups,
and clean it up
"""
tmp_dir_root = os.path.join(host_utils.find_root_volume(),
'csv_export',
str(self.instance.port))
if not os.path.exists(tmp_dir_root):
os.makedirs(tmp_dir_root)
host_utils.change_owner(tmp_dir_root, 'mysql', 'mysql')
self.dump_base_path = tmp_dir_root
if __name__ == "__main__":
environment_specific.initialize_logger()
main()
| gpl-2.0 | -410,670,963,058,634,600 | 42.808843 | 98 | 0.531123 | false |
docusign/docusign-python-client | docusign_esign/models/workflow.py | 1 | 5166 | # coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Workflow(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'current_workflow_step_id': 'str',
'workflow_status': 'str',
'workflow_steps': 'list[WorkflowStep]'
}
attribute_map = {
'current_workflow_step_id': 'currentWorkflowStepId',
'workflow_status': 'workflowStatus',
'workflow_steps': 'workflowSteps'
}
def __init__(self, current_workflow_step_id=None, workflow_status=None, workflow_steps=None): # noqa: E501
"""Workflow - a model defined in Swagger""" # noqa: E501
self._current_workflow_step_id = None
self._workflow_status = None
self._workflow_steps = None
self.discriminator = None
if current_workflow_step_id is not None:
self.current_workflow_step_id = current_workflow_step_id
if workflow_status is not None:
self.workflow_status = workflow_status
if workflow_steps is not None:
self.workflow_steps = workflow_steps
@property
def current_workflow_step_id(self):
"""Gets the current_workflow_step_id of this Workflow. # noqa: E501
# noqa: E501
:return: The current_workflow_step_id of this Workflow. # noqa: E501
:rtype: str
"""
return self._current_workflow_step_id
@current_workflow_step_id.setter
def current_workflow_step_id(self, current_workflow_step_id):
"""Sets the current_workflow_step_id of this Workflow.
# noqa: E501
:param current_workflow_step_id: The current_workflow_step_id of this Workflow. # noqa: E501
:type: str
"""
self._current_workflow_step_id = current_workflow_step_id
@property
def workflow_status(self):
"""Gets the workflow_status of this Workflow. # noqa: E501
# noqa: E501
:return: The workflow_status of this Workflow. # noqa: E501
:rtype: str
"""
return self._workflow_status
@workflow_status.setter
def workflow_status(self, workflow_status):
"""Sets the workflow_status of this Workflow.
# noqa: E501
:param workflow_status: The workflow_status of this Workflow. # noqa: E501
:type: str
"""
self._workflow_status = workflow_status
@property
def workflow_steps(self):
"""Gets the workflow_steps of this Workflow. # noqa: E501
# noqa: E501
:return: The workflow_steps of this Workflow. # noqa: E501
:rtype: list[WorkflowStep]
"""
return self._workflow_steps
@workflow_steps.setter
def workflow_steps(self, workflow_steps):
"""Sets the workflow_steps of this Workflow.
# noqa: E501
:param workflow_steps: The workflow_steps of this Workflow. # noqa: E501
:type: list[WorkflowStep]
"""
self._workflow_steps = workflow_steps
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Workflow, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Workflow):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| mit | 5,507,776,582,154,696,000 | 28.861272 | 140 | 0.577429 | false |
nttcom/eclcli | eclcli/orchestration/heatclient/common/http.py | 1 | 13729 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import hashlib
import logging
import os
import socket
from oslo_serialization import jsonutils
from oslo_utils import encodeutils, importutils
import requests
import six
from six.moves.urllib import parse
from eclcli.orchestration.heatclient.common import utils
from eclcli.orchestration.heatclient import exc
from eclcli.orchestration.heatclient.openstack.common._i18n import _
from eclcli.orchestration.heatclient.openstack.common._i18n import _LW
from keystoneauth1 import adapter
LOG = logging.getLogger(__name__)
USER_AGENT = 'python-heatclient'
CHUNKSIZE = 1024 * 64 # 64kB
SENSITIVE_HEADERS = ('X-Auth-Token',)
osprofiler_web = importutils.try_import("osprofiler.web")
def get_system_ca_file():
"""Return path to system default CA file."""
# Standard CA file locations for Debian/Ubuntu, RedHat/Fedora,
# Suse, FreeBSD/OpenBSD, MacOSX, and the bundled ca
ca_path = ['/etc/ssl/certs/ca-certificates.crt',
'/etc/pki/tls/certs/ca-bundle.crt',
'/etc/ssl/ca-bundle.pem',
'/etc/ssl/cert.pem',
'/System/Library/OpenSSL/certs/cacert.pem',
requests.certs.where()]
for ca in ca_path:
LOG.debug("Looking for ca file %s", ca)
if os.path.exists(ca):
LOG.debug("Using ca file %s", ca)
return ca
LOG.warning(_LW("System ca file could not be found."))
class HTTPClient(object):
def __init__(self, endpoint, **kwargs):
self.endpoint = endpoint
self.auth_url = kwargs.get('auth_url')
self.auth_token = kwargs.get('token')
self.username = kwargs.get('username')
self.password = kwargs.get('password')
self.region_name = kwargs.get('region_name')
self.include_pass = kwargs.get('include_pass')
self.endpoint_url = endpoint
self.cert_file = kwargs.get('cert_file')
self.key_file = kwargs.get('key_file')
self.timeout = kwargs.get('timeout')
self.ssl_connection_params = {
'ca_file': kwargs.get('ca_file'),
'cert_file': kwargs.get('cert_file'),
'key_file': kwargs.get('key_file'),
'insecure': kwargs.get('insecure'),
}
self.verify_cert = None
if parse.urlparse(endpoint).scheme == "https":
if kwargs.get('insecure'):
self.verify_cert = False
else:
self.verify_cert = kwargs.get('ca_file', get_system_ca_file())
# FIXME(shardy): We need this for compatibility with the oslo apiclient
# we should move to inheriting this class from the oslo HTTPClient
self.last_request_id = None
def safe_header(self, name, value):
if name in SENSITIVE_HEADERS:
# because in python3 byte string handling is ... ug
v = value.encode('utf-8')
h = hashlib.sha1(v)
d = h.hexdigest()
return encodeutils.safe_decode(name), "{SHA1}%s" % d
else:
return (encodeutils.safe_decode(name),
encodeutils.safe_decode(value))
def log_curl_request(self, method, url, kwargs):
curl = ['curl -g -i -X %s' % method]
for (key, value) in kwargs['headers'].items():
header = '-H \'%s: %s\'' % self.safe_header(key, value)
curl.append(header)
conn_params_fmt = [
('key_file', '--key %s'),
('cert_file', '--cert %s'),
('ca_file', '--cacert %s'),
]
for (key, fmt) in conn_params_fmt:
value = self.ssl_connection_params.get(key)
if value:
curl.append(fmt % value)
if self.ssl_connection_params.get('insecure'):
curl.append('-k')
if 'data' in kwargs:
curl.append('-d \'%s\'' % kwargs['data'])
curl.append('%s%s' % (self.endpoint, url))
LOG.debug(' '.join(curl))
@staticmethod
def log_http_response(resp):
status = (resp.raw.version / 10.0, resp.status_code, resp.reason)
dump = ['\nHTTP/%.1f %s %s' % status]
dump.extend(['%s: %s' % (k, v) for k, v in resp.headers.items()])
dump.append('')
if resp.content:
content = resp.content
if isinstance(content, six.binary_type):
content = content.decode()
dump.extend([content, ''])
LOG.debug('\n'.join(dump))
def _http_request(self, url, method, **kwargs):
"""Send an http request with the specified characteristics.
Wrapper around requests.request to handle tasks such as
setting headers and error handling.
"""
# Copy the kwargs so we can reuse the original in case of redirects
kwargs['headers'] = copy.deepcopy(kwargs.get('headers', {}))
kwargs['headers'].setdefault('User-Agent', USER_AGENT)
if self.auth_token:
kwargs['headers'].setdefault('X-Auth-Token', self.auth_token)
else:
kwargs['headers'].update(self.credentials_headers())
if self.auth_url:
kwargs['headers'].setdefault('X-Auth-Url', self.auth_url)
if self.region_name:
kwargs['headers'].setdefault('X-Region-Name', self.region_name)
if self.include_pass and 'X-Auth-Key' not in kwargs['headers']:
kwargs['headers'].update(self.credentials_headers())
if osprofiler_web:
kwargs['headers'].update(osprofiler_web.get_trace_id_headers())
self.log_curl_request(method, url, kwargs)
if self.cert_file and self.key_file:
kwargs['cert'] = (self.cert_file, self.key_file)
if self.verify_cert is not None:
kwargs['verify'] = self.verify_cert
if self.timeout is not None:
kwargs['timeout'] = float(self.timeout)
# Allow caller to specify not to follow redirects, in which case we
# just return the redirect response. Useful for using stacks:lookup.
redirect = kwargs.pop('redirect', True)
# Since requests does not follow the RFC when doing redirection to sent
# back the same method on a redirect we are simply bypassing it. For
# example if we do a DELETE/POST/PUT on a URL and we get a 302 RFC says
# that we should follow that URL with the same method as before,
# requests doesn't follow that and send a GET instead for the method.
# Hopefully this could be fixed as they say in a comment in a future
# point version i.e.: 3.x
# See issue: https://github.com/kennethreitz/requests/issues/1704
allow_redirects = False
try:
resp = requests.request(
method,
self.endpoint_url + url,
allow_redirects=allow_redirects,
**kwargs)
except socket.gaierror as e:
message = (_("Error finding address for %(url)s: %(e)s") %
{'url': self.endpoint_url + url, 'e': e})
raise exc.InvalidEndpoint(message=message)
except (socket.error, socket.timeout) as e:
endpoint = self.endpoint
message = (_("Error communicating with %(endpoint)s %(e)s") %
{'endpoint': endpoint, 'e': e})
raise exc.CommunicationError(message=message)
self.log_http_response(resp)
if not ('X-Auth-Key' in kwargs['headers']) and (
resp.status_code == 401 or
(resp.status_code == 500 and "(HTTP 401)" in resp.content)):
raise exc.HTTPUnauthorized(_("Authentication failed: %s")
% resp.content)
elif 400 <= resp.status_code < 600:
raise exc.from_response(resp)
elif resp.status_code in (301, 302, 305):
# Redirected. Reissue the request to the new location,
# unless caller specified redirect=False
if redirect:
location = resp.headers.get('location')
path = self.strip_endpoint(location)
resp = self._http_request(path, method, **kwargs)
elif resp.status_code == 300:
raise exc.from_response(resp)
return resp
def strip_endpoint(self, location):
if location is None:
message = _("Location not returned with 302")
raise exc.InvalidEndpoint(message=message)
elif location.lower().startswith(self.endpoint.lower()):
return location[len(self.endpoint):]
else:
message = _("Prohibited endpoint redirect %s") % location
raise exc.InvalidEndpoint(message=message)
def credentials_headers(self):
creds = {}
# NOTE(dhu): (shardy) When deferred_auth_method=password, Heat
# encrypts and stores username/password. For Keystone v3, the
# intent is to use trusts since SHARDY is working towards
# deferred_auth_method=trusts as the default.
        # TODO(dhu): Make Keystone v3 work in Heat standalone mode. Maybe
# require X-Auth-User-Domain.
if self.username:
creds['X-Auth-User'] = self.username
if self.password:
creds['X-Auth-Key'] = self.password
return creds
def json_request(self, method, url, **kwargs):
kwargs.setdefault('headers', {})
kwargs['headers'].setdefault('Content-Type', 'application/json')
kwargs['headers'].setdefault('Accept', 'application/json')
if 'data' in kwargs:
kwargs['data'] = jsonutils.dumps(kwargs['data'])
resp = self._http_request(url, method, **kwargs)
body = utils.get_response_body(resp)
return resp, body
def raw_request(self, method, url, **kwargs):
kwargs.setdefault('headers', {})
kwargs['headers'].setdefault('Content-Type',
'application/octet-stream')
return self._http_request(url, method, **kwargs)
def client_request(self, method, url, **kwargs):
resp, body = self.json_request(method, url, **kwargs)
return resp
def head(self, url, **kwargs):
return self.client_request("HEAD", url, **kwargs)
def get(self, url, **kwargs):
return self.client_request("GET", url, **kwargs)
def post(self, url, **kwargs):
return self.client_request("POST", url, **kwargs)
def put(self, url, **kwargs):
return self.client_request("PUT", url, **kwargs)
def delete(self, url, **kwargs):
return self.raw_request("DELETE", url, **kwargs)
def patch(self, url, **kwargs):
return self.client_request("PATCH", url, **kwargs)
class SessionClient(adapter.LegacyJsonAdapter):
"""HTTP client based on Keystone client session."""
def __init__(self, *args, **kwargs):
self.username = kwargs.pop('username', None)
self.password = kwargs.pop('password', None)
super(SessionClient, self).__init__(*args, **kwargs)
def request(self, url, method, **kwargs):
redirect = kwargs.get('redirect')
kwargs.setdefault('user_agent', USER_AGENT)
try:
kwargs.setdefault('json', kwargs.pop('data'))
except KeyError:
pass
resp, body = super(SessionClient, self).request(
url, method,
raise_exc=False,
**kwargs)
if 400 <= resp.status_code < 600:
raise exc.from_response(resp)
elif resp.status_code in (301, 302, 305):
if redirect:
location = resp.headers.get('location')
path = self.strip_endpoint(location)
resp = self.request(path, method, **kwargs)
elif resp.status_code == 300:
raise exc.from_response(resp)
return resp
def credentials_headers(self):
return {}
def strip_endpoint(self, location):
if location is None:
message = _("Location not returned with 302")
raise exc.InvalidEndpoint(message=message)
if (self.endpoint_override is not None and
location.lower().startswith(self.endpoint_override.lower())):
return location[len(self.endpoint_override):]
else:
return location
def _construct_http_client(endpoint=None, username=None, password=None,
include_pass=None, endpoint_type=None,
auth_url=None, **kwargs):
session = kwargs.pop('session', None)
auth = kwargs.pop('auth', None)
if session:
kwargs['endpoint_override'] = endpoint
if username:
kwargs.update({'username': username})
if password:
kwargs.update({'password': password})
return SessionClient(session, auth=auth, **kwargs)
else:
return HTTPClient(endpoint=endpoint, username=username,
password=password, include_pass=include_pass,
endpoint_type=endpoint_type, auth_url=auth_url,
**kwargs)
| apache-2.0 | -2,518,008,748,685,360,600 | 37.564607 | 79 | 0.592541 | false |
uclouvain/osis | base/tests/templatetags/test_education_group_pdf.py | 1 | 5264 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.test import SimpleTestCase
from django.utils.translation import gettext_lazy as _
from mock import patch
from base.models.enums.education_group_types import TrainingType
from base.templatetags import education_group_pdf
from base.templatetags.education_group_pdf import format_complete_title_label
from program_management.tests.ddd.factories.link import LinkFactory
from program_management.tests.ddd.factories.node import NodeLearningUnitYearFactory, NodeGroupYearFactory
class TestGetVerboseLink(SimpleTestCase):
def test_when_child_link_is_group(self):
link = LinkFactory(
child=NodeGroupYearFactory(
credits=5,
group_title_fr='Offer title',
group_title_en='Offer title',
node_type=TrainingType.BACHELOR
),
relative_credits=6,
)
expected_result = "Offer title (6 {})".format(_('credits'))
self.assertEqual(expected_result, education_group_pdf.get_verbose_link(link))
@patch('base.templatetags.education_group_pdf.get_verbose_title_ue', return_value="Title learning unit")
@patch('base.templatetags.education_group_pdf.get_volume_total_verbose', return_value="15 + 20")
def test_when_child_link_is_learning_unit(self, *mocks):
link = LinkFactory(
child=NodeLearningUnitYearFactory(
code='LDROI1001',
credits=4,
),
relative_credits=6,
)
expected_result = "LDROI1001 Title learning unit [15 + 20] (6 {})".format(_('credits'))
self.assertEqual(expected_result, education_group_pdf.get_verbose_link(link))
class TestFormatTitleLabel(SimpleTestCase):
def test_format_complete_title_label_with_standard_version(self):
node = NodeGroupYearFactory(
offer_title_fr="Offer title fr",
offer_title_en="Offer title en",
group_title_fr="Group title fr",
group_title_en="Group title en",
offer_partial_title_fr="Offer partial title fr",
offer_partial_title_en="Offer partial title en",
version_title_fr=None,
version_title_en=None
)
expected_result = "Group title fr "
complete_title_label = format_complete_title_label(node, node.group_title_en, node.group_title_fr)
self.assertEqual(complete_title_label, expected_result)
def test_format_complete_title_label_with_specific_version_and_title(self):
node = NodeGroupYearFactory(
offer_title_fr="Offer title fr",
offer_title_en="Offer title en",
group_title_fr="Group title fr",
group_title_en="Group title en",
offer_partial_title_fr="Offer partial title fr",
offer_partial_title_en="Offer partial title en",
version_title_fr="Version title fr",
version_title_en="Version title en",
version_name="VERSION"
)
expected_result = "Group title fr - Version title fr [VERSION]"
complete_title_label = format_complete_title_label(node, node.group_title_en, node.group_title_fr)
self.assertEqual(complete_title_label, expected_result)
    def test_format_complete_title_label_with_specific_version_and_no_title(self):
node = NodeGroupYearFactory(
offer_title_fr="Offer title fr",
offer_title_en="Offer title en",
group_title_fr="Group title fr",
group_title_en="Group title en",
offer_partial_title_fr="Offer partial title fr",
offer_partial_title_en="Offer partial title en",
version_title_fr=None,
version_title_en=None,
version_name="VERSION"
)
expected_result = "Group title fr [VERSION]"
complete_title_label = format_complete_title_label(node, node.group_title_en, node.group_title_fr)
self.assertEqual(complete_title_label, expected_result)
| agpl-3.0 | -3,077,404,031,487,803,000 | 44.765217 | 108 | 0.646399 | false |
ramineni/my_congress | congress_dashboard/policies/views.py | 1 | 5367 | # Copyright 2014 VMware.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.template.defaultfilters import dictsort
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import tables
from congress_dashboard.api import congress
import congress_dashboard.datasources.utils as ds_utils
from congress_dashboard.policies import forms as policies_forms
from congress_dashboard.policies.rules import tables as rules_tables
from congress_dashboard.policies import tables as policies_tables
LOG = logging.getLogger(__name__)
class IndexView(tables.DataTableView):
"""List policies."""
table_class = policies_tables.PoliciesTable
template_name = 'admin/policies/index.html'
def get_data(self):
try:
policies = congress.policies_list(self.request)
except Exception as e:
msg = _('Unable to get policies list: %s') % str(e)
LOG.error(msg)
messages.error(self.request, msg)
return []
return policies
class CreateView(forms.ModalFormView):
form_class = policies_forms.CreatePolicy
template_name = 'admin/policies/create.html'
success_url = reverse_lazy('horizon:admin:policies:index')
class DetailView(tables.DataTableView):
"""List details about and rules in a policy."""
table_class = rules_tables.PolicyRulesTable
template_name = 'admin/policies/detail.html'
def get_data(self):
policy_name = self.kwargs['policy_name']
try:
policy_rules = congress.policy_rules_list(self.request,
policy_name)
except Exception as e:
msg_args = {'policy_name': policy_name, 'error': str(e)}
msg = _('Unable to get rules in policy "%(policy_name)s": '
'%(error)s') % msg_args
LOG.error(msg)
messages.error(self.request, msg)
redirect = reverse('horizon:admin:policies:index')
raise exceptions.Http302(redirect)
for r in policy_rules:
r.set_id_as_name_if_empty()
return policy_rules
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
policy_name = kwargs['policy_name']
try:
policy = congress.policy_get(self.request, policy_name)
except Exception as e:
msg_args = {'policy_name': policy_name, 'error': str(e)}
msg = _('Unable to get policy "%(policy_name)s": '
'%(error)s') % msg_args
LOG.error(msg)
messages.error(self.request, msg)
redirect = reverse('horizon:admin:policies:index')
raise exceptions.Http302(redirect)
context['policy'] = policy
# Alphabetize and convert list of data source tables and columns into
# JSON formatted string consumable by JavaScript. Do this here instead
# of in the Create Rule form so that the tables and columns lists
# appear in the HTML document before the JavaScript that uses them.
all_tables = ds_utils.get_datasource_tables(self.request)
sorted_datasources = dictsort(all_tables, 'datasource')
tables = []
for ds in sorted_datasources:
datasource_tables = ds['tables']
datasource_tables.sort()
for table in ds['tables']:
tables.append('%s%s%s' % (ds['datasource'],
congress.TABLE_SEPARATOR, table))
context['tables'] = json.dumps(tables)
datasource_columns = ds_utils.get_datasource_columns(self.request)
sorted_datasources = dictsort(datasource_columns, 'datasource')
columns = []
for ds in sorted_datasources:
sorted_tables = dictsort(ds['tables'], 'table')
for tbl in sorted_tables:
# Ignore service-derived tables, which are already included.
if congress.TABLE_SEPARATOR in tbl['table']:
continue
table_columns = tbl['columns']
if table_columns:
table_columns.sort()
else:
# Placeholder name for column when the table has none.
table_columns = ['_']
for column in table_columns:
columns.append('%s%s%s %s' % (ds['datasource'],
congress.TABLE_SEPARATOR,
tbl['table'], column))
context['columns'] = json.dumps(columns)
return context
| apache-2.0 | -726,373,775,188,115,800 | 39.353383 | 78 | 0.618968 | false |
JohnPeel/Sarabi | misc.py | 1 | 2767 |
import os
import shutil
import stat
import yaml
from strings import *
ignored_paths = ['.git']
class EPackageNotFound(Exception):
pass
class EAmbiguousAtom(Exception):
def __init__(self, valid_packages):
self.valid_packages = valid_packages
message = PACKAGE_TOO_AMBIGUOUS % len(valid_packages)
message += '\n\n'
for package in valid_packages:
message += '\n '.join(package.info())
message += '\n'
message += '\n'
super(EAmbiguousAtom, self).__init__(message)
def get_default_config(program):
(path, executable) = os.path.split(program)
return os.path.abspath(os.path.join(path, os.path.splitext(executable)[0] + '.yml'))
def get_repo(repo):
if (repo[:7] == 'github:'):
return 'https://github.com/%s.git' % repo[7:]
if (repo[:4] == 'git:'):
return repo[4:]
return repo
def parse_package_atom(package):
remote = None
if ('::' in package):
(package, remote) = package.split('::', 2)
catagory = None
if ('/' in package):
(catagory, package) = package.split('/', 2)
version = None
if ('-' in package):
(package, version) = package.split('-', 2)
return {
'catagory': catagory,
'name': package,
'version': version,
'remote': remote
}
def listdir(dir):
return [os.path.relpath(os.path.join(dp, f), dir) for dp, dn, fn in os.walk(dir) for f in fn]
def copytree(src, dst, symlinks = False, ignore = None):
if not os.path.exists(dst):
os.makedirs(dst)
shutil.copystat(src, dst)
lst = os.listdir(src)
if ignore:
excl = ignore(src, lst)
lst = [x for x in lst if x not in excl]
for item in lst:
s = os.path.join(src, item)
d = os.path.join(dst, item)
if symlinks and os.path.islink(s):
if os.path.lexists(d):
os.remove(d)
os.symlink(os.readlink(s), d)
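            # os.lchmod is only available on some platforms (e.g. BSD/macOS);
            # ignore failures where symlink permissions cannot be copied.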
try:
st = os.lstat(s)
mode = stat.S_IMODE(st.st_mode)
os.lchmod(d, mode)
except:
pass
elif os.path.isdir(s):
copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
class ConfDict(dict):
def __init__(self, filename = None, **kwds):
super(ConfDict, self).__init__(**kwds)
self.set_defaults()
if filename and os.path.exists(filename):
self.load(filename)
def __del__(self):
if hasattr(self, 'filename') and self.filename:
self.save()
def set_defaults(self):
pass
def load(self, filename):
if (os.path.exists(filename)):
self.filename = filename
with open(filename, 'r') as file:
                self.update(yaml.safe_load(file))
def save(self, filename = None):
if (not filename):
filename = self.filename
with open(filename, 'w') as file:
yaml.dump(dict(self), file, default_flow_style=False)
| gpl-3.0 | 873,124,693,430,433,500 | 23.927928 | 95 | 0.606433 | false |
ianmiell/shutit-distro | postgresql/postgresql.py | 1 | 1264 | """ShutIt module. See http://shutit.tk
"""
#http://www.linuxfromscratch.org/blfs/view/svn/postlfs/mitkrb.html configs and context and configuration
from shutit_module import ShutItModule
class postgresql(ShutItModule):
def build(self, shutit):
shutit.send('mkdir -p /tmp/build/postgresql')
shutit.send('cd /tmp/build/postgresql')
shutit.send('wget -qO- http://ftp.postgresql.org/pub/source/v9.4.0/postgresql-9.4.0.tar.bz2 | bunzip2 -c | tar -xf -')
shutit.send('cd postgres*')
shutit.send('''sed -i '/DEFAULT_PGSOCKET_DIR/s@/tmp@/run/postgresql@' src/include/pg_config_manual.h''')
shutit.send('./configure --prefix=/usr --enable-thread-safety --docdir=/usr/share/doc/postgresql-9.4.0')
shutit.send('make')
shutit.send('make install')
shutit.send('make install-docs')
# TODO: server http://www.linuxfromscratch.org/blfs/view/svn/server/postgresql.html
return True
def finalize(self, shutit):
shutit.send('rm -rf /tmp/build/postgresql')
return True
def module():
return postgresql(
'shutit.tk.sd.postgresql.postgresql', 158844782.0255,
description='',
maintainer='',
depends=['shutit.tk.sd.tcl.tcl','shutit.tk.sd.open_ldap.open_ldap','shutit.tk.sd.linux_pam.linux_pam','shutit.tk.sd.mit_kerberos_v5.mit_kerberos_v5']
)
| gpl-2.0 | 7,476,906,327,428,438,000 | 37.30303 | 151 | 0.719146 | false |
cprn/samegame | model.py | 1 | 1390 | import random
data = []
NUM_COLOURS = 5
GRID_SIZE = 20
def init():
for y in range(GRID_SIZE):
r = []
for x in range(GRID_SIZE):
r.append(random.randint(0, NUM_COLOURS - 1))
data.append(r)
def get_block_colour(x, y):
if x in range(get_width()) and y in range(get_height()):
return data[x][y]
return None
def get_siblings(x, y, siblings=None):
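    # Recursively collect the coordinates of every block connected to (x, y)
    # through orthogonal neighbours of the same colour (flood fill).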
colour = get_block_colour(x, y)
if colour is None:
return []
if siblings is None:
siblings = [(x, y)]
for neighbour in [(x-1, y), (x, y-1), (x+1, y), (x, y+1)]:
if neighbour in siblings:
continue
if get_block_colour(*neighbour) == colour:
siblings.append(neighbour)
siblings = get_siblings(*neighbour, siblings=siblings)
return siblings
def get_height():
if len(data):
return len(data[0])
return 0
def get_width():
return len(data)
def remove_block(x, y):
for block in get_siblings(x, y):
data[block[0]][block[1]] = None
def run_gravity():
for x in reversed(range(get_width())):
if data[x] == [None] * get_height():
data.pop(x)
continue
for y in reversed(range(get_height())):
if get_block_colour(x, y) is None and y != 0:
data[x][y] = data[x][y-1]
data[x][y-1] = None
| unlicense | 6,598,916,812,739,268,000 | 22.166667 | 66 | 0.539568 | false |
silverapp/silver | silver/api/views/transaction_views.py | 1 | 4548 | # Copyright (c) 2015 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from uuid import UUID
from django_filters.rest_framework import DjangoFilterBackend
from django_fsm import TransitionNotAllowed
from django.http import Http404
from rest_framework import permissions, status
from rest_framework.generics import ListCreateAPIView, get_object_or_404, RetrieveUpdateAPIView
from rest_framework.response import Response
from rest_framework.views import APIView
from silver.api.filters import TransactionFilter
from silver.api.serializers.transaction_serializers import TransactionSerializer
from silver.models import PaymentMethod, Transaction
class TransactionList(ListCreateAPIView):
permission_classes = (permissions.IsAuthenticated,)
serializer_class = TransactionSerializer
filter_backends = (DjangoFilterBackend,)
filterset_class = TransactionFilter
def get_queryset(self):
customer_pk = self.kwargs.get('customer_pk', None)
payment_method_id = self.kwargs.get('payment_method_id')
if payment_method_id:
payment_method = get_object_or_404(PaymentMethod,
id=payment_method_id,
customer__pk=customer_pk)
return Transaction.objects.filter(
payment_method=payment_method
)
else:
return Transaction.objects.filter(
payment_method__customer__pk=customer_pk
)
def perform_create(self, serializer):
payment_method_id = self.kwargs.get('payment_method_id')
if payment_method_id:
payment_method = get_object_or_404(PaymentMethod,
id=payment_method_id)
serializer.save(payment_method=payment_method)
else:
serializer.save()
class TransactionDetail(RetrieveUpdateAPIView):
permission_classes = (permissions.AllowAny,)
serializer_class = TransactionSerializer
http_method_names = ('get', 'patch', 'head', 'options')
def get_object(self):
transaction_uuid = self.kwargs.get('transaction_uuid', None)
try:
uuid = UUID(transaction_uuid, version=4)
except ValueError:
raise Http404
return get_object_or_404(Transaction, uuid=uuid)
class TransactionAction(APIView):
permission_classes = (permissions.IsAuthenticated,)
allowed_actions = ('cancel', )
def post(self, request, *args, **kwargs):
transaction = self.get_object(**kwargs)
requested_action = kwargs.get('requested_action')
if requested_action not in self.allowed_actions:
            error_message = "{} is not an allowed action".format(requested_action)
return Response({"errors": error_message},
status=status.HTTP_400_BAD_REQUEST)
action_to_execute = getattr(transaction, requested_action, None)
if not action_to_execute:
raise Http404
try:
errors = action_to_execute()
transaction.save()
except TransitionNotAllowed:
errors = "Can't execute action because the transaction is in an " \
"incorrect state: {}".format(transaction.state)
if errors:
return Response({"errors": errors},
status=status.HTTP_400_BAD_REQUEST)
transaction_serialized = TransactionSerializer(transaction,
context={'request': request})
return Response(transaction_serialized.data,
status=status.HTTP_200_OK)
def get_object(self, **kwargs):
transaction_uuid = kwargs.get('transaction_uuid')
customer_pk = kwargs.get('customer_pk')
return get_object_or_404(
Transaction.objects.all(),
uuid=transaction_uuid,
payment_method__customer__pk=customer_pk
)
| apache-2.0 | 2,897,298,705,134,729,000 | 35.97561 | 95 | 0.645339 | false |
maojrs/riemann_book | exact_solvers/burgers_demos.py | 2 | 10422 | """
Additional functions and demos for Burgers' equation.
"""
import sys, os
from clawpack import pyclaw
from clawpack import riemann
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import animation
from IPython.display import HTML
import numpy as np
from utils import riemann_tools
from . import burgers
def multivalued_solution(t,fig=0):
"""Plots bump-into-wave figure at different times for interactive figure."""
if fig==0:
fig = plt.figure()
x = np.arange(-11.0,11.0,0.1)
y = np.exp(-x*x/10)
x2 = 1.0*x
x2 = x2 + t*y
plt.plot(x, y, '--k', label = "Initial Condition")
plt.plot(x2, y, '-k', label = r"Solution at time $t$")
plt.xlim([-10,10])
plt.legend(loc = 'upper left')
plt.title('t = %.2f' % t)
if t != 0:
numarrows = 7
arrowIndexList = np.linspace(len(x)/3,2*len(x)/3,numarrows, dtype = int)
for i in arrowIndexList:
plt.arrow(x[i], y[i], np.abs(t*y[i]-0.4), 0, head_width=0.02, head_length=0.4, fc='k', ec='k')
if fig==0: plt.show()
def shock():
"""Returns plot function for a shock solution."""
q_l, q_r = 5.0, 1.0
states, speeds, reval, wave_type = burgers.exact_riemann_solution(q_l ,q_r)
plot_function = riemann_tools.make_plot_function(states, speeds, reval, wave_type,
layout='horizontal',
variable_names=['q'],
plot_chars=[burgers.speed])
return plot_function
def shock_location(xshock=7.75,fig=0):
"""Plots equal-area shock figure for different shock positions for interactive figure."""
if fig==0:
fig = plt.figure()
t=10
x = np.arange(-11.0,11.0,0.05)
y = np.exp(-x*x/10)
x = x + t*y
x2 = 1.0*x
y2 = 1.0*y
region = -1
for i in range(len(x)):
if (x2[i] >= xshock and region == -1):
region = 0
maxy = 1.0*y[i-1]
if (x2[i] >= xshock and region == 0):
x2[i] = 1.0*xshock
y2[i] = 1.0*maxy
if (x2[i] < xshock and region == 0):
region = 1
maxy = 1.0*y[i-1]
if (x2[i] <= xshock and region == 1):
x2[i] = 1.0*xshock
y2[i] = 1.0*maxy
if (x2[i] > xshock and region == 1):
region = 2
plt.plot(x, y, '-k', lw = 2, label = "Multivalued solution")
plt.plot(x2, y2, '--r', lw = 2, label = "Shock solution")
if (xshock == 7.75):
plt.annotate(r"$A_1$", xy=(2, 0), xytext=(8.5,0.83), fontsize=15)
plt.annotate(r"$A_2$", xy=(2, 0), xytext=(6.5,0.15), fontsize=15)
plt.annotate(r"Equal Areas", xy=(2, 0), xytext=(-3,0.62), fontsize=15)
plt.annotate(r"$A_1=A_2$", xy=(2, 0), xytext=(-2.5,0.5), fontsize=15)
plt.xlim([-7.5,11])
plt.legend(loc = 'upper left')
if fig==0: plt.show()
def rarefaction_figure(t):
"""Plots rarefaction figure at different times for interactive figure."""
numarrows = 6
x = [-5., 0.0]
y = [0.2, 0.2]
for i in range(numarrows):
x.append(0.0)
y.append(y[0] + (i+1)*(1.0-y[0])/(numarrows+1))
x.extend([0.0,10.0])
y.extend([1.0,1.0])
x2 = 1.0*np.array(x)
x2[1:-1] = x2[1:-1] + t*np.array(y[1:-1])
plt.plot(x, y, '--k', label = "Initial Condition")
plt.plot(x2, y, '-k', label = r"Solution at time $t$")
plt.xlim([-5,10])
plt.ylim([0.0,1.2])
plt.legend(loc = 'upper left')
plt.title('t = %.2f' % t)
if t != 0:
for i in range(numarrows):
plt.arrow(x[2+i], y[2+i], np.abs(t*y[2+i]-0.4), 0, head_width=0.02, head_length=0.4, fc='k', ec='k')
plt.annotate(r"$q_r t$", xy=(2, 1), xytext=(t/2-0.2, 1.05), fontsize=12)
if t > 2:
plt.annotate(r"$q_\ell t$", xy=(2, 0), xytext=(t/8-0.4, 0.12), fontsize=12)
plt.arrow(t/2-0.3, 1.07, -t/2+0.8, 0, head_width=0.02, head_length=0.4, fc='k', ec='k')
plt.arrow(t/2+0.7, 1.07, t*y[-1] - t/2 - 1, 0, head_width=0.02, head_length=0.4, fc='k', ec='k')
def rarefaction():
"""Returns plot function for a rarefaction solution."""
q_l, q_r = 2.0, 4.0
states, speeds, reval, wave_type = burgers.exact_riemann_solution(q_l ,q_r)
plot_function = riemann_tools.make_plot_function(states, speeds, reval, wave_type,
layout='horizontal',
variable_names=['q'],
plot_chars=[burgers.speed])
return plot_function
def unphysical():
"""Returns plot function for an unphysical solution."""
q_l, q_r = 1.0, 5.0
states, speeds, reval, wave_type = burgers.unphysical_riemann_solution(q_l ,q_r)
plot_function = riemann_tools.make_plot_function(states, speeds, reval, wave_type,
layout='horizontal',
variable_names=['q'],
plot_chars=[burgers.speed])
return plot_function
def bump_animation(numframes):
"""Plots animation of solution with bump initial condition,
using pyclaw (calls bump_pyclaw)."""
x, frames = bump_pyclaw(numframes)
fig = plt.figure()
ax = plt.axes(xlim=(-1, 1), ylim=(-0.2, 1.2))
line, = ax.plot([], [], '-k', lw=2)
def fplot(frame_number):
frame = frames[frame_number]
pressure = frame.q[0,:]
line.set_data(x,pressure)
return line,
anim = animation.FuncAnimation(fig, fplot, frames=len(frames), interval=30)
plt.close('all')
#return HTML(anim.to_jshtml())
return anim.to_jshtml()
def bump_pyclaw(numframes):
"""Returns pyclaw solution of bump initial condition."""
# Set pyclaw for burgers equation 1D
claw = pyclaw.Controller()
claw.tfinal = 1.5 # Set final time
claw.keep_copy = True # Keep solution data in memory for plotting
claw.output_format = None # Don't write solution data to file
claw.num_output_times = numframes # Number of output frames
claw.solver = pyclaw.ClawSolver1D(riemann.burgers_1D) # Choose burgers 1D Riemann solver
claw.solver.all_bcs = pyclaw.BC.periodic # Choose periodic BCs
claw.verbosity = False # Don't print pyclaw output
domain = pyclaw.Domain( (-1.,), (1.,), (500,)) # Choose domain and mesh resolution
claw.solution = pyclaw.Solution(claw.solver.num_eqn,domain)
# Set initial condition
x=domain.grid.x.centers
claw.solution.q[0,:] = np.exp(-10 * (x)**2)
claw.solver.dt_initial = 1.e99
# Run pyclaw
status = claw.run()
return x, claw.frames
def triplestate_animation(ql, qm, qr, numframes):
"""Plots animation of solution with triple-state initial condition, using pyclaw (calls
triplestate_pyclaw). Also plots characteristic structure by plotting contour plots of the
solution in the x-t plane """
# Get solution for animation and set plot
x, frames = triplestate_pyclaw(ql, qm, qr, numframes)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9,4))
ax1.set_xlim(-3, 3)
ax1.set_ylim(-3, 5)
ax2.set_xlim(-3, 3)
ax2.set_ylim(0, 2)
ax1.set_title('Solution q(x)')
ax1.set_xlabel('$x$')
ax1.set_ylabel('$q$')
ax2.set_title('Characteristics')
ax2.set_xlabel('$x$')
ax2.set_ylabel('$t$')
matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
line1, = ax1.plot([], [], '-k', lw=2)
# Contour plot of high-res solution to show characteristic structure in xt-plane
meshpts = 2400
numframes2 = 600
x2, frames2 = triplestate_pyclaw(ql, qm, qr, numframes2)
characs = np.zeros([numframes2,meshpts])
xx = np.linspace(-12,12,meshpts)
tt = np.linspace(0,2,numframes2)
for j in range(numframes2):
characs[j] = frames2[j].q[0]
X,T = np.meshgrid(xx,tt)
ax2.contour(X, T, characs, levels=np.linspace(ql, ql+0.11 ,20), linewidths=0.5, colors='k')
ax2.contour(X, T, characs, levels=np.linspace(qm+0.11, qm+0.13 ,7), linewidths=0.5, colors='k')
ax2.contour(X, T, characs, levels=np.linspace(qr+0.13, qr+0.2 ,15), linewidths=0.5, colors='k')
ax2.contour(X, T, characs, 12, linewidths=0.5, colors='k')
#ax2.contour(X, T, characs, 38, colors='k')
# Add animated time line to xt-plane
line2, = ax2.plot(x, 0*x , '--k')
line = [line1, line2]
# Update data function for animation
def fplot(frame_number):
frame = frames[frame_number]
pressure = frame.q[0,:]
line[0].set_data(x,pressure)
line[1].set_data(x,0*x+frame.t)
return line
anim = animation.FuncAnimation(fig, fplot, frames=len(frames), interval=30, blit=False)
plt.close('all')
#return HTML(anim.to_jshtml())
return anim.to_jshtml()
def triplestate_pyclaw(ql, qm, qr, numframes):
"""Returns pyclaw solution of triple-state initial condition."""
# Set pyclaw for burgers equation 1D
meshpts = 2400 #600
claw = pyclaw.Controller()
claw.tfinal = 2.0 # Set final time
claw.keep_copy = True # Keep solution data in memory for plotting
claw.output_format = None # Don't write solution data to file
claw.num_output_times = numframes # Number of output frames
claw.solver = pyclaw.ClawSolver1D(riemann.burgers_1D) # Choose burgers 1D Riemann solver
claw.solver.all_bcs = pyclaw.BC.extrap # Choose periodic BCs
claw.verbosity = False # Don't print pyclaw output
domain = pyclaw.Domain( (-12.,), (12.,), (meshpts,)) # Choose domain and mesh resolution
claw.solution = pyclaw.Solution(claw.solver.num_eqn,domain)
# Set initial condition
x=domain.grid.x.centers
q0 = 0.0*x
xtick1 = 900 + int(meshpts/12)
xtick2 = xtick1 + int(meshpts/12)
for i in range(xtick1):
q0[i] = ql + i*0.0001
#q0[0:xtick1] = ql
for i in np.arange(xtick1, xtick2):
q0[i] = qm + i*0.0001
#q0[xtick1:xtick2] = qm
for i in np.arange(xtick2, meshpts):
q0[i] = qr + i*0.0001
#q0[xtick2:meshpts] = qr
claw.solution.q[0,:] = q0
claw.solver.dt_initial = 1.e99
# Run pyclaw
status = claw.run()
return x, claw.frames
| bsd-3-clause | 5,794,931,405,617,469,000 | 38.477273 | 112 | 0.572059 | false |
unicef/un-partner-portal | backend/unpp_api/settings/base.py | 1 | 10700 | from __future__ import absolute_import
import os
import sys
####
# Change per project
####
from django.urls import reverse_lazy
from django.utils.text import slugify
PROJECT_NAME = 'unpp_api'
# project root and add "apps" to the path
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.join(PROJECT_ROOT, 'apps/'))
# domains/hosts etc.
DOMAIN_NAME = os.getenv('DJANGO_ALLOWED_HOST', 'localhost')
WWW_ROOT = 'http://%s/' % DOMAIN_NAME
ALLOWED_HOSTS = [DOMAIN_NAME]
FRONTEND_HOST = os.getenv('UNPP_FRONTEND_HOST', DOMAIN_NAME)
####
# Other settings
####
ADMINS = (
('Alerts', os.getenv('ALERTS_EMAIL') or '[email protected]'),
('Tivix', f'unicef-unpp+{slugify(DOMAIN_NAME)}@tivix.com'),
)
SANCTIONS_LIST_URL = 'https://scsanctions.un.org/resources/xml/en/consolidated.xml'
SITE_ID = 1
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en-us'
USE_I18N = True
SECRET_KEY = os.getenv('SECRET_KEY')
DEFAULT_CHARSET = 'utf-8'
ROOT_URLCONF = 'unpp_api.urls'
DATA_VOLUME = os.getenv('DATA_VOLUME', '/data')
ALLOWED_EXTENSIONS = (
    'pdf', 'doc', 'docx', 'xls', 'xlsx', 'img', 'png', 'jpg', 'jpeg', 'csv', 'zip'
)
UPLOADS_DIR_NAME = 'uploads'
MEDIA_URL = f'/api/{UPLOADS_DIR_NAME}/'
MEDIA_ROOT = os.getenv('UNPP_UPLOADS_PATH', os.path.join(DATA_VOLUME, UPLOADS_DIR_NAME))
FILE_UPLOAD_MAX_MEMORY_SIZE = 25 * 1024 * 1024 # 25mb
DATA_UPLOAD_MAX_MEMORY_SIZE = 50 * 1024 * 1024
# static resources related. See documentation at: http://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/
STATIC_URL = '/api/static/'
STATIC_ROOT = f'{DATA_VOLUME}/staticserve'
# static serving
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
)
DEBUG = True
IS_DEV = False
IS_STAGING = False
IS_PROD = False
UN_SANCTIONS_LIST_EMAIL_ALERT = '[email protected]' # TODO - change to real one
DEFAULT_FROM_EMAIL = os.getenv('DEFAULT_FROM_EMAIL', 'UNPP Stage <[email protected]>')
EMAIL_HOST = os.getenv('EMAIL_HOST')
EMAIL_PORT = os.getenv('EMAIL_PORT')
EMAIL_HOST_USER = os.getenv('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = os.getenv('EMAIL_HOST_PASSWORD')
EMAIL_USE_TLS = os.getenv('EMAIL_USE_TLS', '').lower() == 'true'
# Get the ENV setting. Needs to be set in .bashrc or similar.
ENV = os.getenv('ENV')
if not ENV:
raise Exception('Environment variable ENV is required!')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.getenv('POSTGRES_DB'),
'USER': os.getenv('POSTGRES_USER'),
'PASSWORD': os.getenv('POSTGRES_PASSWORD'),
'HOST': os.getenv('POSTGRES_HOST'),
'PORT': 5432,
}
}
POSTGRES_SSL_MODE = os.getenv('POSTGRES_SSL_MODE', 'off')
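# Require an SSL connection to Postgres when POSTGRES_SSL_MODE is 'on'.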
if POSTGRES_SSL_MODE == 'on':
DATABASES['default'].update({'OPTIONS': {"sslmode": 'require'}})
MIDDLEWARE = [
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'account.authentication.CustomSocialAuthExceptionMiddleware',
'common.middleware.ActivePartnerMiddleware',
'common.middleware.ActiveAgencyOfficeMiddleware',
'common.middleware.ClientTimezoneMiddleware',
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.static',
'django.template.context_processors.request',
],
},
},
]
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.humanize',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.sitemaps',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'django_filters',
'imagekit',
'django_countries',
'mail_templated',
'social_django',
'sequences.apps.SequencesConfig',
'django_nose',
'background_task',
'common',
'account',
'agency',
'partner',
'project',
'review',
'storages',
'notification',
'sanctionslist',
'management',
'reports',
'externals',
]
# auth / django-registration params
AUTH_USER_MODEL = 'account.User'
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
'OPTIONS': {
'min_length': 12,
}
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
PASSWORD_RESET_TIMEOUT_DAYS = 31
ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 7
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'account.authentication.CustomAzureADBBCOAuth2',
]
# Django-social-auth settings
SOCIAL_AUTH_AZUREAD_B2C_OAUTH2_KEY = os.getenv('AZURE_B2C_CLIENT_ID', None)
SOCIAL_AUTH_AZUREAD_B2C_OAUTH2_SECRET = os.getenv('AZURE_B2C_CLIENT_SECRET', None)
SOCIAL_AUTH_URL_NAMESPACE = 'social'
SOCIAL_AUTH_SANITIZE_REDIRECTS = True
SOCIAL_AUTH_AZUREAD_B2C_OAUTH2_POLICY = os.getenv('AZURE_B2C_POLICY_NAME', "b2c_1A_UNICEF_PARTNERS_signup_signin")
SOCIAL_AUTH_AZUREAD_B2C_OAUTH2_PW_RESET_POLICY = os.getenv(
'AZURE_B2C_PW_RESET_POLICY_NAME', "B2C_1_PasswordResetPolicy"
)
SOCIAL_AUTH_AZUREAD_B2C_OAUTH2_TENANT_ID = os.getenv('AZURE_B2C_TENANT', 'unicefpartners.onmicrosoft.com')
SOCIAL_AUTH_AZUREAD_B2C_OAUTH2_SCOPE = [
'openid', 'email', 'profile',
]
IGNORE_DEFAULT_SCOPE = True
SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL = True
SOCIAL_AUTH_PROTECTED_USER_FIELDS = ['email']
SOCIAL_AUTH_LOGIN_REDIRECT_URL = reverse_lazy('accounts:social-logged-in')
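# Custom pipeline steps from the account app (social_details, require_email,
# create_user, user_details) handle e-mail capture and user creation for
# Azure AD B2C logins alongside the stock social_core steps.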
SOCIAL_AUTH_PIPELINE = (
'account.authentication.social_details',
'social_core.pipeline.social_auth.social_uid',
'social_core.pipeline.social_auth.auth_allowed',
'social_core.pipeline.social_auth.social_user',
'social_core.pipeline.user.get_username',
'account.authentication.require_email',
'social_core.pipeline.social_auth.associate_by_email',
'account.authentication.create_user',
'social_core.pipeline.social_auth.associate_user',
'social_core.pipeline.social_auth.load_extra_data',
'account.authentication.user_details',
)
SOCIAL_AUTH_AZUREAD_B2C_OAUTH2_USER_FIELDS = [
'email', 'fullname'
]
TEST_RUNNER = os.getenv('DJANGO_TEST_RUNNER', 'django.test.runner.DiscoverRunner')
NOSE_ARGS = ['--with-timer', '--nocapture', '--nologcapture']
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'TEST_REQUEST_DEFAULT_FORMAT': 'json',
}
REST_AUTH_SERIALIZERS = {
'LOGIN_SERIALIZER': 'account.serializers.CustomLoginSerializer',
'USER_DETAILS_SERIALIZER': 'account.serializers.SimpleAccountSerializer',
'PASSWORD_RESET_SERIALIZER': 'account.serializers.CustomPasswordResetSerializer',
}
# helper function to extend all the common lists
def extend_list_avoid_repeats(list_to_extend, extend_with):
"""Extends the first list with the elements in the second one, making sure its elements are not already there in the
original list."""
list_to_extend.extend(filter(lambda x: not list_to_extend.count(x), extend_with))
LOG_LEVEL = 'DEBUG' if DEBUG and 'test' not in sys.argv else 'INFO'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s line %(lineno)d: %(message)s'
},
'verbose': {
'format': '[%(asctime)s][%(levelname)s][%(name)s] %(filename)s.%(funcName)s:%(lineno)d %(message)s',
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'default': {
'level': LOG_LEVEL,
'class': 'logging.StreamHandler',
'formatter': 'standard',
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
'include_html': True,
}
},
'loggers': {
'': {
'handlers': ['default'],
'level': 'INFO',
'propagate': True
},
'console': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': True
},
'django.request': {
'handlers': ['mail_admins', 'default'],
'level': 'ERROR',
'propagate': False,
},
'django.security.DisallowedHost': {
# Skip "SuspiciousOperation: Invalid HTTP_HOST" e-mails.
'handlers': ['default'],
'propagate': False,
},
}
}
UNHCR_API_HOST = os.getenv('UNHCR_API_HOST')
UNHCR_API_USERNAME = os.getenv('UNHCR_API_USERNAME')
UNHCR_API_PASSWORD = os.getenv('UNHCR_API_PASSWORD')
UNICEF_PARTNER_DETAILS_URL = os.getenv('UNICEF_PARTNER_DETAILS_URL')
UNICEF_API_USERNAME = os.getenv('UNICEF_API_USERNAME')
UNICEF_API_PASSWORD = os.getenv('UNICEF_API_PASSWORD')
WFP_API_HOST = os.getenv('WFP_API_HOST')
WFP_API_TOKEN = os.getenv('WFP_API_TOKEN')
GIT_VERSION = os.getenv('GIT_VERSION', 'UNKNOWN')
REDIS_INSTANCE = os.getenv('REDIS_INSTANCE')
if REDIS_INSTANCE:
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': f'redis://{REDIS_INSTANCE}/1',
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
},
'TIMEOUT': 3600
}
}
DJANGO_REDIS_IGNORE_EXCEPTIONS = not DEBUG
else:
CACHES = {
'default': {
'BACKEND': 'common.cache_backends.DummyRedisCache',
'LOCATION': 'unpp'
}
}
SESSION_COOKIE_HTTPONLY = True
SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies"
| apache-2.0 | 250,126,248,760,582,600 | 30.378299 | 120 | 0.647477 | false |
hendrycks/robustness | old/Icons-50/models/wrn.py | 1 | 3908 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.droprate = dropRate
self.equalInOut = (in_planes == out_planes)
self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
padding=0, bias=False) or None
def forward(self, x):
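        # Pre-activation residual block: batch norm and ReLU come before each
        # convolution; when input/output widths differ, a 1x1 convolution
        # (convShortcut) carries the skip connection instead of the identity.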
if not self.equalInOut:
x = self.relu1(self.bn1(x))
else:
out = self.relu1(self.bn1(x))
if self.equalInOut:
out = self.relu2(self.bn2(self.conv1(out)))
else:
out = self.relu2(self.bn2(self.conv1(x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, training=self.training)
out = self.conv2(out)
if not self.equalInOut:
return torch.add(self.convShortcut(x), out)
else:
return torch.add(x, out)
class NetworkBlock(nn.Module):
def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
layers = []
for i in range(nb_layers):
            layers.append(block(in_planes if i == 0 else out_planes, out_planes,
                                stride if i == 0 else 1, dropRate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class WideResNet(nn.Module):
def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
super(WideResNet, self).__init__()
nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
assert ((depth - 4) % 6 == 0)
n = (depth - 4) // 6
block = BasicBlock
# 1st conv before any network block
self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
padding=1, bias=False)
# 1st block
self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
# 2nd block
self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
# 3rd block
self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
# global average pooling and classifier
self.bn1 = nn.BatchNorm2d(nChannels[3])
self.relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(nChannels[3], num_classes)
self.nChannels = nChannels[3]
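        # He-style initialization for conv weights; BN scales start at 1, biases at 0.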
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
out = self.conv1(x)
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.nChannels)
return self.fc(out)
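# --- Usage sketch (not part of the original module) ---
# Minimal smoke test, assuming 32x32 RGB inputs (implied by the 8x8 average pool
# above) and, for example, the 50 classes of Icons-50.
if __name__ == '__main__':
    net = WideResNet(depth=40, num_classes=50, widen_factor=2, dropRate=0.3)
    dummy = torch.randn(4, 3, 32, 32)
    logits = net(dummy)
    print(logits.shape)  # expected: torch.Size([4, 50])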
| apache-2.0 | -3,741,773,476,238,839,300 | 39.708333 | 116 | 0.56781 | false |
Connexions/cnx-authoring | cnxauthoring/tests/test_functional.py | 1 | 173550 | # -*- coding: utf-8 -*-
# ###
# Copyright (c) 2013, Rice University
# This software is subject to the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
# ###
"""Functional tests of API."""
import datetime
import json
import os
import sys
import re
import unittest
from copy import deepcopy
try:
from unittest import mock # python 3
except ImportError:
import mock # python 2
try:
import urllib.request as urllib2 # renamed in python3
except ImportError:
import urllib2 # noqa python2
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
import cnxepub
import psycopg2
import pytz
import requests
from webtest import Upload
from wsgi_intercept import requests_intercept
from .intercept import (install_intercept, uninstall_intercept,
publishing_settings)
from .testing import integration_test_settings, get_data
from ..models import DEFAULT_LICENSE, TZINFO
USER_PROFILE = {
u'username': u'user1',
u'id': 1,
u'first_name': u'User',
u'last_name': u'One',
}
SUBMITTER = {
u'id': u'user1',
u'firstname': u'User',
u'surname': u'One',
u'fullname': u'User One',
u'type': u'cnx-id',
}
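# Role records returned by the API carry acceptance metadata on top of the plain
# user info; build that variant once for reuse in assertions.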
SUBMITTER_WITH_ACCEPTANCE = SUBMITTER.copy()
SUBMITTER_WITH_ACCEPTANCE[u'hasAccepted'] = True
SUBMITTER_WITH_ACCEPTANCE[u'requester'] = SUBMITTER['id']
class BaseFunctionalTestCase(unittest.TestCase):
accounts_request_return = ''
maxDiff = None
@classmethod
def setUpClass(cls):
cls.settings = settings = integration_test_settings()
# only run once for all the tests
# Install the intercept for archive and publishing.
install_intercept()
requests_intercept.install()
# make sure storage is set correctly in cnxauthoring.views by reloading
# cnxauthoring.views
if 'cnxauthoring.views' in sys.modules:
del sys.modules['cnxauthoring.views']
from .. import main
app = main({}, **settings)
from webtest import TestApp
cls.testapp = TestApp(app)
# Allow user1 to publish without moderation
with psycopg2.connect(
publishing_settings()['db-connection-string']) as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("""\
INSERT INTO users
(username, first_name, last_name, is_moderated)
VALUES ('user1', 'User', 'One', true)""")
@classmethod
def tearDownClass(cls):
from ..storage import storage
if hasattr(storage, 'conn'):
storage.conn.close()
# Uninstall the intercept for archive and publishing.
requests_intercept.uninstall()
uninstall_intercept()
def setUp(self):
# All tests start with a login.
self.login()
self.addCleanup(self.logout)
def login(self, username='user1', password='password', login_url='/login',
headers=None):
headers = headers or {}
response = self.testapp.get(login_url, headers=headers, status=302)
response = self.testapp.post(response.headers['Location'], {
'username': username,
'password': password,
})
return self.testapp.get(response.headers['Location'])
def logout(self):
self.testapp.get('/logout', status=302)
def assert_cors_headers(self, response, cache_message_special_case=None):
self.assertEqual(response.headers['Access-Control-Allow-Credentials'],
'true')
self.assertEqual(response.headers['Access-Control-Allow-Origin'],
'http://localhost:8000')
self.assertEqual(response.headers['Access-Control-Allow-Headers'],
'Origin, Content-Type')
self.assertEqual(response.headers['Access-Control-Allow-Methods'],
'GET, OPTIONS, PUT, POST')
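        # Expected Cache-Control values keyed by response status; unknown statuses
        # fall back to a sentinel so the assertion below fails loudly.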
cache_header_dictionary = {
'201 Created':
['max-age=0, must-revalidate, no-cache, no-store, public'],
'200 OK':
['max-age=0, must-revalidate, no-cache, no-store, public'],
'400 Bad Request': [],
'302 Found': [],
'401 Unauthorized': [],
'403 Forbidden': [],
'404 Not Found': [],
}
try:
expected_cache_header = cache_header_dictionary[response.status]
except KeyError:
expected_cache_header = "NO EXPECTED CACHE HEADER"
actual_cache_header = response.headers.getall('Cache-Control')
if cache_message_special_case:
self.assertEqual(actual_cache_header, cache_message_special_case)
else:
self.assertEqual(actual_cache_header, expected_cache_header)
class FunctionalTests(BaseFunctionalTestCase):
def test_options(self):
self.testapp.options('/', status=404)
self.testapp.options('/some-random.html', status=404)
urls = ['/*', '/login', '/logout', '/callback',
'/contents/[email protected]', '/resources/hash',
'/contents', '/resources', '/users/search',
'/users/profile', '/users/contents', '/users/contents/search']
for url in urls:
response = self.testapp.options(url, status=200)
self.assert_cors_headers(
response, cache_message_special_case=['public'])
self.assertEqual(response.headers['Content-Length'], '0')
def test_get_content_401(self):
self.logout()
response = self.testapp.get('/contents/[email protected]',
status=401)
self.assert_cors_headers(response)
def test_get_content_404(self):
response = self.testapp.get('/contents/[email protected]',
status=404)
self.assert_cors_headers(response)
def test_get_content_403(self):
response = self.testapp.post_json(
'/users/contents',
{'title': 'My New Document'}, status=201)
content = response.json
with mock.patch('cnxauthoring.models.Document.__acl__') as acl:
acl.return_value = ()
response = self.testapp.get(
'/contents/{}@draft.json'
.format(content['id']), status=403)
self.assertTrue('You do not have permission to view'
in response.body.decode('utf-8'))
response = self.testapp.post_json('/users/contents', {
'title': 'My New Binder',
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [],
},
}, status=201)
content = response.json
with mock.patch('cnxauthoring.models.Binder.__acl__') as acl:
acl.return_value = ()
response = self.testapp.get(
'/contents/{}@draft.json'
.format(content['id']), status=403)
self.assertTrue('You do not have permission to view'
in response.body.decode('utf-8'))
def test_get_content_for_document(self):
now = datetime.datetime.now(TZINFO)
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = now
response = self.testapp.post_json(
'/users/contents', {
'title': 'My New Document',
'created': u'2014-03-13T15:21:15-05:00',
'revised': u'2014-03-13T15:21:15-05:00',
}, status=201)
put_result = response.json
response = self.testapp.get('/contents/{}@draft.json'.format(
put_result['id']), status=200)
get_result = response.json
submitter_w_assign_date = SUBMITTER_WITH_ACCEPTANCE.copy()
submitter_w_assign_date['assignmentDate'] = now.astimezone(
TZINFO).isoformat()
self.assertEqual(get_result, {
u'id': get_result['id'],
u'title': u'My New Document',
u'containedIn': [],
u'content': u'',
u'created': get_result['created'],
u'derivedFrom': None,
u'derivedFromTitle': None,
u'derivedFromUri': None,
u'license': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0',
},
u'originalLicense': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0',
},
u'revised': get_result['revised'],
u'mediaType': u'application/vnd.org.cnx.module',
u'language': u'en',
u'submitter': SUBMITTER,
u'authors': [submitter_w_assign_date],
u'permissions': [u'edit', u'publish', u'view'],
u'publishers': [submitter_w_assign_date],
u'abstract': u'',
u'version': u'draft',
u'subjects': [],
u'isPublishable': False,
u'publishBlockers': [u'no_content'],
u'keywords': [],
u'state': u'Draft',
u'publication': None,
u'licensors': [submitter_w_assign_date],
u'copyrightHolders': [submitter_w_assign_date],
u'translators': [],
u'editors': [],
u'illustrators': [],
u'printStyle': None,
})
self.assertEqual(put_result, get_result)
self.assert_cors_headers(response)
def test_post_content_401(self):
self.logout()
response = self.testapp.post('/users/contents', status=401)
self.assert_cors_headers(response)
def test_post_content_403(self):
with mock.patch('cnxauthoring.models.Document.__acl__') as acl:
acl.return_value = ()
response = self.testapp.post_json(
'/users/contents',
{'title': u'My document タイトル'}, status=403)
self.assert_cors_headers(response)
with mock.patch('cnxauthoring.models.Binder.__acl__') as acl:
acl.return_value = ()
response = self.testapp.post_json('/users/contents', {
'title': u'My book タイトル',
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [],
},
}, status=403)
self.assert_cors_headers(response)
def test_post_content_invalid_json(self):
response = self.testapp.post(
'/users/contents', 'invalid json', status=400)
self.assertTrue('Invalid JSON' in response.body.decode('utf-8'))
self.assert_cors_headers(response)
def test_post_content_empty(self):
response = self.testapp.post_json(
'/users/contents', {}, status=400)
self.assertEqual(response.json, {
u'title': u'Required',
})
self.assert_cors_headers(response)
def test_post_content_empty_binder(self):
response = self.testapp.post_json('/users/contents', {
'mediaType': 'application/vnd.org.cnx.collection',
}, status=400)
self.assertEqual(response.json, {
u'title': u'Required',
u'tree': u'Required',
})
self.assert_cors_headers(response)
def test_post_content_unknown_media_type(self):
response = self.testapp.post_json('/users/contents', {
'mediaType': 'unknown-media-type',
}, status=400)
self.assertEqual(response.json, {
u'media_type': u'"unknown-media-type" is not one of '
u'application/vnd.org.cnx.module, '
u'application/vnd.org.cnx.collection',
u'title': u'Required',
})
self.assert_cors_headers(response)
def test_post_content_minimal(self):
response = self.testapp.post_json(
'/users/contents',
{'title': u'My document タイトル'}, status=201)
result = response.json
self.assertEqual(result['title'], u'My document タイトル')
self.assertEqual(result['language'], u'en')
self.assert_cors_headers(response)
response = self.testapp.get('/contents/{}@draft.json'.format(
result['id']), status=200)
self.assert_cors_headers(response)
def test_post_content_document_printStyle(self):
response = self.testapp.post_json(
'/users/contents',
{
'title': u'My document タイトル',
'printStyle': u'pdf print style string'
}, status=201)
result = response.json
self.assertEqual(result['title'], u'My document タイトル')
self.assertEqual(result['language'], u'en')
self.assertEqual(result['printStyle'], 'pdf print style string')
self.assert_cors_headers(response)
response = self.testapp.get('/contents/{}@draft.json'.format(
result['id']), status=200)
self.assert_cors_headers(response)
def test_post_content_minimal_binder(self):
response = self.testapp.post_json('/users/contents', {
'title': u'My book タイトル',
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [],
},
}, status=201)
result = response.json
self.assertEqual(result['title'], u'My book タイトル')
self.assertEqual(result['language'], u'en')
self.assertEqual(result['tree'], {
u'contents': [],
u'id': '{}@draft'.format(result['id']),
u'title': result['title'],
u'isPublishable': False,
u'publishBlockers': [u'no_content'],
})
self.assert_cors_headers(response)
response = self.testapp.get(
'/contents/{}@draft.json'.format(result['id']), status=200)
result = response.json
self.assertEqual(result['title'], u'My book タイトル')
self.assertEqual(result['language'], u'en')
self.assertEqual(result['tree'], {
u'contents': [],
u'id': '{}@draft'.format(result['id']),
u'isPublishable': False,
u'publishBlockers': [u'no_content'],
u'title': result['title'],
})
self.assert_cors_headers(response)
def test_post_content_minimal_binder_with_printStyle(self):
response = self.testapp.post_json('/users/contents', {
'title': u'My book タイトル',
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [],
},
'printStyle': "*PDF print style*"
}, status=201)
result = response.json
self.assertEqual(result['title'], u'My book タイトル')
self.assertEqual(result['language'], u'en')
self.assertEqual(result['tree'], {
u'contents': [],
u'id': '{}@draft'.format(result['id']),
u'title': result['title'],
u'isPublishable': False,
u'publishBlockers': [u'no_content'],
})
self.assertEqual(result['printStyle'], '*PDF print style*')
self.assert_cors_headers(response)
response = self.testapp.get(
'/contents/{}@draft.json'.format(result['id']), status=200)
result = response.json
self.assertEqual(result['title'], u'My book タイトル')
self.assertEqual(result['language'], u'en')
self.assertEqual(result['tree'], {
u'contents': [],
u'id': '{}@draft'.format(result['id']),
u'isPublishable': False,
u'publishBlockers': [u'no_content'],
u'title': result['title'],
})
self.assertEqual(result['printStyle'], '*PDF print style*')
self.assert_cors_headers(response)
def test_post_content_binder_document_not_found(self):
response = self.testapp.post_json('/users/contents', {
'title': 'Book',
'abstract': 'Book abstract',
'language': 'de',
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [
{
'id': 'page@draft',
'title': 'Page one',
},
],
},
}, status=400)
self.assert_cors_headers(response)
self.assertTrue('Document Not Found: page@draft' in
response.body.decode('utf-8'))
def test_post_content_multiple(self):
post_data = [
{'title': u'My document タイトル 1'},
{'title': u'My document タイトル 2'},
]
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
result = response.json
self.assertEqual(len(result), 2)
self.assertEqual(result[0]['title'], u'My document タイトル 1')
self.assertEqual(result[1]['title'], u'My document タイトル 2')
self.assert_cors_headers(response)
response = self.testapp.get('/contents/{}@draft.json'.format(
result[0]['id']), status=200)
self.assert_cors_headers(response)
response = self.testapp.get('/contents/{}@draft.json'.format(
result[1]['id']), status=200)
self.assert_cors_headers(response)
def test_post_content_derived_from_not_found(self):
post_data = {'derivedFrom': u'notfound@1'}
response = self.testapp.post_json(
'/users/contents', post_data, status=400)
self.assertTrue(b'Derive failed' in response.body)
self.assert_cors_headers(response)
def test_post_content_derived_from_no_version(self):
post_data = {
'derivedFrom': u'91cb5f28-2b8a-4324-9373-dac1d617bc24',
}
now = datetime.datetime.now(TZINFO)
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = now
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
result = response.json
content = result.pop('content')
self.assertTrue(content.startswith('<html'))
self.assertTrue(u'Lav en madplan for den kommende uge' in content)
self.assertNotIn('2011-10-05', result.pop('created'))
self.assertNotIn('2011-10-12', result.pop('revised'))
submitter_w_assign_date = SUBMITTER_WITH_ACCEPTANCE.copy()
submitter_w_assign_date['assignmentDate'] = now.astimezone(
TZINFO).isoformat()
self.assertEqual(result, {
u'submitter': SUBMITTER,
u'authors': [submitter_w_assign_date],
u'permissions': [u'edit', u'publish', u'view'],
u'publishers': [submitter_w_assign_date],
u'id': result['id'],
u'derivedFrom': '{}@1'.format(post_data['derivedFrom']),
u'derivedFromTitle': u'Indkøb',
u'derivedFromUri': u'http://cnx.org/contents/{}@1'.format(
post_data['derivedFrom']),
u'title': u'Copy of Indkøb',
u'abstract': u'<div xmlns="http://www.w3.org/1999/xhtml">foo</div>',
u'language': u'da',
u'mediaType': u'application/vnd.org.cnx.module',
u'version': u'draft',
u'license': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0'},
u'originalLicense': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0',
},
u'subjects': [],
u'isPublishable': True,
u'publishBlockers': None,
u'keywords': [],
u'state': u'Draft',
u'permissions': [u'edit', u'publish', u'view'],
u'publication': None,
u'containedIn': [],
u'editors': [],
u'translators': [],
u'licensors': [submitter_w_assign_date],
u'copyrightHolders': [submitter_w_assign_date],
u'illustrators': [],
u'printStyle': None,
})
self.assert_cors_headers(response)
response = self.testapp.get('/contents/{}@draft.json'.format(
result['id']), status=200)
result = response.json
content = result.pop('content')
self.assertTrue(u'Lav en madplan for den kommende uge' in content)
self.assertTrue(content.startswith('<html'))
self.assertTrue(result.pop('created') is not None)
self.assertTrue(result.pop('revised') is not None)
self.assertEqual(result, {
u'submitter': SUBMITTER,
u'authors': [submitter_w_assign_date],
u'permissions': [u'edit', u'publish', u'view'],
u'publishers': [submitter_w_assign_date],
u'id': result['id'],
u'derivedFrom': '{}@1'.format(post_data['derivedFrom']),
u'derivedFromTitle': u'Indkøb',
u'derivedFromUri': u'http://cnx.org/contents/{}@1'.format(
post_data['derivedFrom']),
u'title': u'Copy of Indkøb',
u'abstract': u'<div xmlns="http://www.w3.org/1999/xhtml">foo</div>',
u'language': u'da',
u'mediaType': u'application/vnd.org.cnx.module',
u'version': u'draft',
u'license': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0'},
u'originalLicense': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0',
},
u'subjects': [],
u'isPublishable': True,
u'publishBlockers': None,
u'keywords': [],
u'state': u'Draft',
u'permissions': [u'edit', u'publish', u'view'],
u'publication': None,
u'containedIn': [],
u'editors': [],
u'translators': [],
u'licensors': [submitter_w_assign_date],
u'copyrightHolders': [submitter_w_assign_date],
u'illustrators': [],
u'printStyle': None,
})
self.assert_cors_headers(response)
# Check that resources are saved
resource_path = re.search('(/resources/[^"]*)"', content).group(1)
response = self.testapp.get(resource_path, status=200)
self.assertEqual(response.content_type, 'image/jpeg')
self.assert_cors_headers(response)
def test_post_content_derived_from(self):
post_data = {
'derivedFrom': u'91cb5f28-2b8a-4324-9373-dac1d617bc24@1',
}
# Create the derived content
now = datetime.datetime.now(TZINFO)
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = now
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
result = response.json
content = result.pop('content')
self.assertTrue(content.startswith('<html'))
self.assertTrue(u'Lav en madplan for den kommende uge' in content)
self.assertNotIn('2011-10-05', result.pop('created'))
self.assertNotIn('2011-10-12', result.pop('revised'))
submitter_w_assign_date = SUBMITTER_WITH_ACCEPTANCE.copy()
submitter_w_assign_date['assignmentDate'] = now.astimezone(
TZINFO).isoformat()
self.assertEqual(result, {
u'submitter': SUBMITTER,
u'authors': [submitter_w_assign_date],
u'permissions': [u'edit', u'publish', u'view'],
u'publishers': [submitter_w_assign_date],
u'id': result['id'],
u'derivedFrom': post_data['derivedFrom'],
u'derivedFromTitle': u'Indkøb',
u'derivedFromUri': u'http://cnx.org/contents/{}'.format(
post_data['derivedFrom']),
u'title': u'Copy of Indkøb',
u'abstract': u'<div xmlns="http://www.w3.org/1999/xhtml">foo</div>',
u'language': u'da',
u'mediaType': u'application/vnd.org.cnx.module',
u'version': u'draft',
u'license': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0'},
u'originalLicense': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0',
},
u'subjects': [],
u'isPublishable': True,
u'publishBlockers': None,
u'keywords': [],
u'state': u'Draft',
u'permissions': [u'edit', u'publish', u'view'],
u'publication': None,
u'containedIn': [],
u'editors': [],
u'translators': [],
u'licensors': [submitter_w_assign_date],
u'copyrightHolders': [submitter_w_assign_date],
u'illustrators': [],
u'printStyle': None,
})
self.assert_cors_headers(response)
response = self.testapp.get('/contents/{}@draft.json'.format(
result['id']), status=200)
result = response.json
content = result.pop('content')
self.assertTrue(u'Lav en madplan for den kommende uge' in content)
self.assertTrue(content.startswith('<html'))
self.assertTrue(result.pop('created') is not None)
self.assertTrue(result.pop('revised') is not None)
self.assertEqual(result, {
u'submitter': SUBMITTER,
u'authors': [submitter_w_assign_date],
u'permissions': [u'edit', u'publish', u'view'],
u'publishers': [submitter_w_assign_date],
u'id': result['id'],
u'derivedFrom': post_data['derivedFrom'],
u'derivedFromTitle': u'Indkøb',
u'derivedFromUri': u'http://cnx.org/contents/{}'.format(
post_data['derivedFrom']),
u'title': u'Copy of Indkøb',
u'abstract': u'<div xmlns="http://www.w3.org/1999/xhtml">foo</div>',
u'language': u'da',
u'mediaType': u'application/vnd.org.cnx.module',
u'version': u'draft',
u'license': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0'},
u'originalLicense': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0',
},
u'subjects': [],
u'isPublishable': True,
u'publishBlockers': None,
u'keywords': [],
u'state': u'Draft',
u'permissions': [u'edit', u'publish', u'view'],
u'publication': None,
u'containedIn': [],
u'editors': [],
u'translators': [],
u'licensors': [submitter_w_assign_date],
u'copyrightHolders': [submitter_w_assign_date],
u'illustrators': [],
u'printStyle': None,
})
self.assert_cors_headers(response)
# Check that resources are saved
resource_path = re.search('(/resources/[^"]*)"', content).group(1)
response = self.testapp.get(resource_path, status=200)
self.assertEqual(response.content_type, 'image/jpeg')
self.assert_cors_headers(response)
def test_post_content_derived_from_w_missing_resource(self):
post_data = {
'derivedFrom': u'a3f7c934-2a89-4baf-a9a9-a89d957586d2@1',
}
now = datetime.datetime.now(TZINFO)
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = now
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
result = response.json
content = result.pop('content')
self.assertTrue(u'missing resource' in content)
self.assertTrue(content.startswith('<html'))
self.assertFalse('2011-10-12' in result.pop('created'))
self.assertTrue(result.pop('revised') is not None)
submitter_w_assign_date = SUBMITTER_WITH_ACCEPTANCE.copy()
submitter_w_assign_date['assignmentDate'] = now.astimezone(
TZINFO).isoformat()
self.assertEqual(result, {
u'submitter': SUBMITTER,
u'authors': [submitter_w_assign_date],
u'permissions': [u'edit', u'publish', u'view'],
u'publishers': [submitter_w_assign_date],
u'id': result['id'],
u'derivedFrom': post_data['derivedFrom'],
u'derivedFromTitle': u'missing resource',
u'derivedFromUri': u'http://cnx.org/contents/{}'.format(
post_data['derivedFrom']),
u'title': u'Copy of missing resource',
u'abstract': u'',
u'language': u'en',
u'mediaType': u'application/vnd.org.cnx.module',
u'version': u'draft',
u'license': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0'},
u'originalLicense': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0',
},
u'subjects': [],
u'isPublishable': True,
u'publishBlockers': None,
u'keywords': [],
u'state': u'Draft',
u'permissions': [u'edit', u'publish', u'view'],
u'publication': None,
u'containedIn': [],
u'editors': [],
u'translators': [],
u'licensors': [submitter_w_assign_date],
u'copyrightHolders': [submitter_w_assign_date],
u'illustrators': [],
u'printStyle': None,
})
self.assert_cors_headers(response)
response = self.testapp.get('/contents/{}@draft.json'.format(
result['id']), status=200)
result = response.json
content = result.pop('content')
self.assertTrue(u'missing resource' in content)
self.assertTrue(content.startswith('<html'))
self.assertTrue(result.pop('created') is not None)
self.assertTrue(result.pop('revised') is not None)
self.assertEqual(result, {
u'submitter': SUBMITTER,
u'authors': [submitter_w_assign_date],
u'permissions': [u'edit', u'publish', u'view'],
u'publishers': [submitter_w_assign_date],
u'id': result['id'],
u'derivedFrom': post_data['derivedFrom'],
u'derivedFromTitle': u'missing resource',
u'derivedFromUri': u'http://cnx.org/contents/{}'.format(
post_data['derivedFrom']),
u'title': u'Copy of missing resource',
u'abstract': u'',
u'language': u'en',
u'mediaType': u'application/vnd.org.cnx.module',
u'version': u'draft',
u'license': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0'},
u'originalLicense': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0',
},
u'subjects': [],
u'isPublishable': True,
u'publishBlockers': None,
u'keywords': [],
u'state': u'Draft',
u'permissions': [u'edit', u'publish', u'view'],
u'publication': None,
u'containedIn': [],
u'editors': [],
u'translators': [],
u'licensors': [submitter_w_assign_date],
u'copyrightHolders': [submitter_w_assign_date],
u'illustrators': [],
u'printStyle': None,
})
self.assert_cors_headers(response)
def test_post_content_derived_from_binder(self):
post_data = {
'derivedFrom': u'[email protected]',
}
now = datetime.datetime.now(TZINFO)
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = now
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
result = response.json
self.assertTrue(result.pop('revised') is not None)
self.assertTrue(result.pop('created') is not None)
self.assertTrue(result.pop('abstract') is not None)
submitter_w_assign_date = SUBMITTER_WITH_ACCEPTANCE.copy()
        submitter_w_assign_date[u'assignmentDate'] = now.astimezone(
            TZINFO).isoformat()
expected = {
u'areContainedPublishable': False,
u'submitter': SUBMITTER,
u'authors': [submitter_w_assign_date],
u'permissions': [u'edit', u'publish', u'view'],
u'publishers': [submitter_w_assign_date],
u'id': result['id'],
u'derivedFrom': post_data['derivedFrom'],
u'derivedFromTitle': u'<span style="color:red;">Derived</span> Copy of College <i>Physics</i>',
u'derivedFromUri': u'http://cnx.org/contents/{}'.format(
post_data['derivedFrom']),
u'title': u'Copy of <span style="color:red;">Derived</span> Copy of College <i>Physics</i>',
u'content': u'',
u'language': u'en',
u'mediaType': u'application/vnd.org.cnx.collection',
u'version': u'draft',
u'license': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0'},
u'originalLicense': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0',
},
u'isPublishable': True,
u'publishBlockers': None,
u'keywords': [],
u'state': u'Draft',
u'permissions': [u'edit', u'publish', u'view'],
u'publication': None,
u'containedIn': [],
u'editors': [],
u'translators': [],
u'licensors': [submitter_w_assign_date],
u'printStyle': None,
u'copyrightHolders': [submitter_w_assign_date],
u'illustrators': [],
u'subjects': [],
u'tree': {
u'id': u'{}@draft'.format(result['id']),
u'title': u'Copy of <span style="color:red;">Derived</span> Copy of College <i>Physics</i>',
u'isPublishable': True,
u'publishBlockers': None,
u'contents': [
{u'id': u'209deb1f-1a46-4369-9e0d-18674cf58a3e@7',
u'title': u'Preface'},
{u'id': u'[email protected]',
u'title': u'Introduction: The Nature of Science and Physics',
u'contents': [
{u'id': u'f3c9ab70-a916-4d8c-9256-42953287b4e9@3',
u'title': u'Introduction to Science and the Realm of Physics, Physical Quantities, and Units'},
{u'id': u'd395b566-5fe3-4428-bcb2-19016e3aa3ce@4',
u'title': u'Physics: An Introduction'},
{u'id': u'c8bdbabc-62b1-4a5f-b291-982ab25756d7@6',
u'title': u'Physical Quantities and Units'},
{u'id': u'5152cea8-829a-4aaf-bcc5-c58a416ecb66@7',
u'title': u'Accuracy, Precision, and Significant Figures'},
{u'id': u'5838b105-41cd-4c3d-a957-3ac004a48af3@5',
u'title': u'Approximation'}]},
{u'id': u'[email protected]',
u'title': u"Further Applications of Newton's Laws: Friction, Drag, and Elasticity",
u'contents': [
{u'id': u'24a2ed13-22a6-47d6-97a3-c8aa8d54ac6d@2',
u'title': u'Introduction: Further Applications of Newton\u2019s Laws'},
{u'id': u'ea271306-f7f2-46ac-b2ec-1d80ff186a59@5',
u'title': u'Friction'},
{u'id': u'26346a42-84b9-48ad-9f6a-62303c16ad41@6',
u'title': u'Drag Forces'},
{u'id': u'56f1c5c1-4014-450d-a477-2121e276beca@8',
u'title': u'Elasticity: Stress and Strain'}]},
{u'id': u'f6024d8a-1868-44c7-ab65-45419ef54881@3',
u'title': u'Atomic Masses'},
{u'id': u'7250386b-14a7-41a2-b8bf-9e9ab872f0dc@2',
u'title': u'Selected Radioactive Isotopes'},
{u'id': u'c0a76659-c311-405f-9a99-15c71af39325@5',
u'title': u'Useful Inf\xf8rmation'},
{u'id': u'ae3e18de-638d-4738-b804-dc69cd4db3a3@4',
u'title': u'Glossary of Key Symbols and Notation'}]},
}
self.assertEqual(result, expected)
self.assert_cors_headers(response)
response = self.testapp.get(
'/contents/{}@draft.json'.format(result['id']), status=200)
result = response.json
self.assertTrue(result.pop('created') is not None)
self.assertTrue(result.pop('revised') is not None)
self.assertTrue(result.pop('abstract') is not None)
self.assertEqual(result, expected)
self.assert_cors_headers(response)
def test_post_content_revision_403(self):
self.logout()
self.login('user2')
post_data = {
'id': '91cb5f28-2b8a-4324-9373-dac1d617bc24@1',
'title': u"Turning DNA through resonance",
'abstract': u"Theories on turning DNA structures",
'language': u'en',
'content': u"Ding dong the switch is flipped.",
'subjects': [u'Science and Technology'],
'keywords': [u'DNA', u'resonance'],
}
response = self.testapp.post_json(
'/users/contents', post_data, status=403)
def test_post_content_revision_404(self):
post_data = {
'id': 'edf794be-28bc-4242-8ae2-b043e4dd32ef@1',
'title': u"Turning DNA through resonance",
'abstract': u"Theories on turning DNA structures",
'language': u'en',
'content': u"Ding dong the switch is flipped.",
'subjects': [u'Science and Technology'],
'keywords': [u'DNA', u'resonance'],
}
response = self.testapp.post_json(
'/users/contents', post_data, status=404)
def test_post_content_revision(self):
self.logout()
self.login('Rasmus1975')
post_data = {
'id': u'91cb5f28-2b8a-4324-9373-dac1d617bc24@1',
'title': u'Turning DNA through resonance',
'abstract': u'Theories on turning DNA structures',
'language': u'en',
'subjects': [u'Science and Technology'],
'keywords': [u'DNA', u'resonance'],
}
now = datetime.datetime.now(TZINFO)
formatted_now = now.astimezone(TZINFO).isoformat()
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = now
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
result = response.json
license = result.pop('license')
self.assertEqual(license['url'], DEFAULT_LICENSE.url)
original_license = result.pop('originalLicense')
self.assertEqual(original_license['url'], DEFAULT_LICENSE.url)
created = result.pop('created')
self.assertTrue(created.startswith('2011-10-05'))
revised = result.pop('revised')
self.assertEqual(revised, formatted_now)
content = result.pop('content')
self.assertTrue(u'Lav en madplan for den kommende uge' in content)
# FIXME the user info we have in archive differs from
# that here in authoring.
rasmus_user_info = {
u'firstname': u'Rasmus',
u'fullname': u'Rasmus Ruby',
u'id': u'Rasmus1975',
u'surname': u'Ruby',
u'type': u'cnx-id',
}
rasmus_role = rasmus_user_info.copy()
rasmus_role.update({
u'assignmentDate': formatted_now,
u'hasAccepted': True,
u'requester': rasmus_user_info['id'],
u'surname': None,
u'fullname': u'Rasmus de 1975',
})
self.assertEqual(result, {
u'abstract': u'Theories on turning DNA structures',
u'authors': [rasmus_role],
u'cnx-archive-uri': post_data['id'],
u'containedIn': [],
u'copyrightHolders': [rasmus_role],
u'derivedFrom': None,
u'derivedFromTitle': None,
u'derivedFromUri': None,
u'editors': [],
u'id': post_data['id'].split('@')[0],
u'illustrators': [],
u'keywords': [u'DNA', u'resonance'],
u'language': u'en',
u'licensors': [rasmus_role],
u'mediaType': u'application/vnd.org.cnx.module',
u'permissions': [u'edit', u'publish', u'view'],
u'isPublishable': True,
u'publishBlockers': None,
u'publication': None,
u'publishers': [rasmus_role],
u'state': u'Draft',
u'subjects': [u'Science and Technology'],
u'submitter': rasmus_user_info,
u'title': u'Turning DNA through resonance',
u'translators': [],
u'version': u'draft',
u'printStyle': None,
})
self.assert_cors_headers(response)
response = self.testapp.get(
'/contents/{}@draft.json'.format(result['id']), status=200)
result = response.json
content = result.pop('content')
self.assertTrue(u'Lav en madplan for den kommende uge' in content)
self.assertTrue(content.startswith('<html'))
self.assertTrue(result.pop('created') is not None)
self.assertTrue(result.pop('revised') is not None)
self.assertEqual(result, {
u'submitter': rasmus_user_info,
u'authors': [rasmus_role],
u'permissions': [u'edit', u'publish', u'view'],
u'publishers': [rasmus_role],
u'id': result['id'],
u'derivedFrom': None,
u'derivedFromTitle': None,
u'derivedFromUri': None,
u'title': u'Turning DNA through resonance',
u'abstract': u'Theories on turning DNA structures',
u'language': u'en',
u'mediaType': u'application/vnd.org.cnx.module',
u'version': u'draft',
u'license': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0',
},
u'originalLicense': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0',
},
u'subjects': [u'Science and Technology'],
u'isPublishable': True,
u'publishBlockers': None,
u'keywords': [u'DNA', u'resonance'],
u'state': u'Draft',
u'permissions': [u'edit', u'publish', u'view'],
u'publication': None,
u'cnx-archive-uri': post_data['id'],
u'containedIn': [],
u'editors': [],
u'translators': [],
u'licensors': [rasmus_role],
u'copyrightHolders': [rasmus_role],
u'illustrators': [],
u'printStyle': None,
})
self.assert_cors_headers(response)
# Check that resources are saved
resource_path = re.search('(/resources/[^"]*)"', content).group(1)
response = self.testapp.get(resource_path, status=200)
self.assertEqual(response.content_type, 'image/jpeg')
self.assert_cors_headers(response)
def test_post_content_revision_w_multiroles(self):
self.logout()
self.login('OpenStaxCollege')
post_data = {
'id': u'[email protected]',
}
now = datetime.datetime.now(TZINFO)
        formatted_now = now.astimezone(TZINFO).isoformat()
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = now
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
result = response.json
# Test the object for internal data correctness
from ..storage import storage
document = storage.get(id=result['id'])
self.assertEqual(
sorted(document.licensor_acceptance, key=lambda v: v['id']),
[{'has_accepted': True, 'id': 'OSCRiceUniversity'},
{'has_accepted': True, 'id': 'OpenStaxCollege'},
{'has_accepted': True, 'id': 'cnxcap'}])
# Test the response data
license = result.pop('license')
self.assertEqual(license['url'], DEFAULT_LICENSE.url)
original_license = result.pop('originalLicense')
self.assertEqual(original_license['url'], DEFAULT_LICENSE.url)
created = result.pop('created')
self.assertTrue(created.startswith('2013-07-31'))
revised = result.pop('revised')
self.assertEqual(revised, formatted_now)
abstract = result.pop('abstract')
self.assertTrue('two-semester college physics book' in abstract)
keywords = result.pop('keywords')
self.assertIn('drag', keywords)
# Test the tree for contents.
tree = result.pop('tree')
flattener = cnxepub.flatten_tree_to_ident_hashes(tree)
contained_ids = [id for id in flattener]
self.assertIn(u'e79ffde3-7fb4-4af3-9ec8-df648b391597@draft',
contained_ids)
self.assertIn(u'56f1c5c1-4014-450d-a477-2121e276beca@8',
contained_ids)
# FIXME the user info we have in archive differs from
# that here in authoring.
osc_user_info = {
u'firstname': u'Test',
u'fullname': u'Test User',
u'id': u'OpenStaxCollege',
u'surname': u'User',
u'type': u'cnx-id',
}
osc_role = osc_user_info.copy()
osc_role.update({
u'assignmentDate': formatted_now,
u'hasAccepted': True,
u'firstname': u'OpenStax College',
u'fullname': u'OpenStax College',
u'requester': u'OpenStaxCollege',
u'surname': None,
})
cnxcap_role = {
u'assignmentDate': formatted_now,
u'firstname': u'College',
u'fullname': u'OSC Physics Maintainer',
u'hasAccepted': True,
u'id': u'cnxcap',
u'requester': u'OpenStaxCollege',
u'surname': u'Physics',
u'type': u'cnx-id',
}
rice_role = {
u'assignmentDate': formatted_now,
u'firstname': u'Rice',
u'fullname': u'Rice University',
u'hasAccepted': True,
u'id': u'OSCRiceUniversity',
u'requester': u'OpenStaxCollege',
u'surname': u'University',
u'type': u'cnx-id',
}
expected = {
u'areContainedPublishable': False,
u'authors': [osc_role],
u'cnx-archive-uri': post_data['id'],
u'containedIn': [],
u'content': u'',
u'copyrightHolders': [rice_role],
u'derivedFrom': None,
u'derivedFromTitle': None,
u'derivedFromUri': None,
u'editors': [],
u'id': post_data['id'].split('@')[0],
u'illustrators': [],
u'isPublishable': True,
u'publishBlockers': None,
u'language': u'en',
u'licensors': [rice_role],
u'mediaType': u'application/vnd.org.cnx.collection',
u'permissions': [u'edit', u'publish', u'view'],
u'publication': None,
u'publishers': [osc_role, cnxcap_role],
u'state': u'Draft',
u'subjects': [
u'Mathematics and Statistics',
u'Science and Technology',
u'OpenStax Featured'],
u'submitter': osc_user_info,
u'title': u'College Physics',
u'translators': [],
u'version': u'draft',
u'printStyle': None,
}
self.assertEqual(result, expected)
self.assert_cors_headers(response)
response = self.testapp.get(
'/contents/{}@draft.json'.format(result['id']), status=200)
result = response.json
license = result.pop('license')
self.assertEqual(license['url'], DEFAULT_LICENSE.url)
original_license = result.pop('originalLicense')
self.assertEqual(original_license['url'], DEFAULT_LICENSE.url)
created = result.pop('created')
self.assertTrue(created.startswith('2013-07-31'))
revised = result.pop('revised')
self.assertEqual(revised, formatted_now)
abstract = result.pop('abstract')
self.assertTrue('two-semester college physics book' in abstract)
keywords = result.pop('keywords')
self.assertIn('drag', keywords)
# Test the tree for contents.
tree = result.pop('tree')
flattener = cnxepub.flatten_tree_to_ident_hashes(tree)
contained_ids = [id for id in flattener]
self.assertIn(u'e79ffde3-7fb4-4af3-9ec8-df648b391597@draft',
contained_ids)
self.assertIn(u'56f1c5c1-4014-450d-a477-2121e276beca@8',
contained_ids)
self.assertEqual(result, expected)
self.assert_cors_headers(response)
def test_post_content(self):
post_data = {
'title': u"Turning DNA through resonance",
'abstract': u"Theories on turning DNA structures",
'created': u'2014-03-13T15:21:15.677617',
'revised': u'2014-03-13T15:21:15.677617',
'license': {'url': DEFAULT_LICENSE.url},
'language': u'en',
'content': u"Ding dong the switch is flipped.",
'subjects': [u'Science and Technology'],
'keywords': [u'DNA', u'resonance'],
'editors': [SUBMITTER],
}
now = datetime.datetime.now(TZINFO)
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = now
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
result = response.json
license = result.pop('license')
self.assertEqual(license['url'], post_data['license']['url'])
original_license = result.pop('originalLicense')
self.assertEqual(original_license['url'], post_data['license']['url'])
created = result.pop('created')
self.assertTrue(created.startswith('2014-03-13T15:21:15.677617'))
revised = result.pop('revised')
self.assertTrue(revised.startswith('2014-03-13T15:21:15.677617'))
submitter_w_assign_date = SUBMITTER_WITH_ACCEPTANCE.copy()
submitter_w_assign_date['assignmentDate'] = now.astimezone(
TZINFO).isoformat()
self.assertEqual(result, {
u'submitter': SUBMITTER,
u'authors': [submitter_w_assign_date],
u'permissions': [u'edit', u'publish', u'view'],
u'publishers': [submitter_w_assign_date],
u'id': result['id'],
u'derivedFrom': None,
u'derivedFromTitle': None,
u'derivedFromUri': None,
u'title': post_data['title'],
u'abstract': post_data['abstract'],
u'language': post_data['language'],
u'containedIn': [],
u'content': post_data['content'],
u'mediaType': u'application/vnd.org.cnx.module',
u'version': u'draft',
u'subjects': post_data['subjects'],
u'isPublishable': True,
u'publishBlockers': None,
u'keywords': post_data['keywords'],
u'state': u'Draft',
u'permissions': [u'edit', u'publish', u'view'],
u'publication': None,
u'editors': [submitter_w_assign_date],
u'translators': [],
u'licensors': [submitter_w_assign_date],
u'copyrightHolders': [submitter_w_assign_date],
u'illustrators': [],
u'printStyle': None,
})
self.assert_cors_headers(response)
def test_post_content_binder(self):
now = datetime.datetime.now(TZINFO)
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = now
response = self.testapp.post_json(
'/users/contents',
{'title': 'Page one'}, status=201)
page1 = response.json
self.assert_cors_headers(response)
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = now
response = self.testapp.post_json(
'/users/contents',
{'title': 'Page two'}, status=201)
page2 = response.json
self.assert_cors_headers(response)
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = now
response = self.testapp.post_json('/users/contents', {
'title': 'Book',
'abstract': 'Book abstract',
'language': 'de',
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [
{
'id': '{}@draft'.format(page1['id']),
'title': 'Page one',
},
{
'id': 'subcol',
'title': 'New section',
'contents': [
{
'id': '{}@draft'.format(page2['id']),
'title': 'Page two',
},
],
},
],
},
}, status=201)
book = response.json
self.assert_cors_headers(response)
response = self.testapp.get(
'/contents/{}@draft.json'.format(book['id']), status=200)
result = response.json
self.assertTrue(result.pop('created') is not None)
self.assertTrue(result.pop('revised') is not None)
submitter_w_assign_date = SUBMITTER_WITH_ACCEPTANCE.copy()
submitter_w_assign_date['assignmentDate'] = now.astimezone(
TZINFO).isoformat()
self.assertEqual(result, {
u'id': book['id'],
u'title': u'Book',
u'abstract': u'Book abstract',
u'areContainedPublishable': False,
u'containedIn': [],
u'content': u'',
u'mediaType': u'application/vnd.org.cnx.collection',
u'derivedFrom': None,
u'derivedFromTitle': None,
u'derivedFromUri': None,
u'language': u'de',
u'version': u'draft',
u'submitter': SUBMITTER,
u'authors': [submitter_w_assign_date],
u'permissions': [u'edit', u'publish', u'view'],
u'publishers': [submitter_w_assign_date],
u'license': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0'},
u'originalLicense': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0',
},
u'tree': {
u'id': u'{}@draft'.format(book['id']),
u'title': u'Book',
u'isPublishable': True,
u'publishBlockers': None,
u'contents': [
{
u'id': u'{}@draft'.format(page1['id']),
u'title': u'Page one',
u'isPublishable': False,
u'publishBlockers': [u'no_content'],
},
{
u'id': u'subcol',
u'title': u'New section',
u'contents': [
{
u'id': u'{}@draft'.format(page2['id']),
u'title': u'Page two',
u'isPublishable': False,
u'publishBlockers': [u'no_content'],
},
],
},
],
},
u'subjects': [],
u'isPublishable': True,
u'publishBlockers': None,
u'keywords': [],
u'state': u'Draft',
u'permissions': [u'edit', u'publish', u'view'],
u'publication': None,
u'editors': [],
u'translators': [],
u'licensors': [submitter_w_assign_date],
u'copyrightHolders': [submitter_w_assign_date],
u'illustrators': [],
u'printStyle': None,
})
self.assert_cors_headers(response)
def test_put_content_401(self):
self.logout()
response = self.testapp.put_json(
'/contents/[email protected]', {}, status=401)
self.assert_cors_headers(response)
def test_put_content_not_found(self):
response = self.testapp.put_json(
'/contents/[email protected]',
{'title': u'Update document title'}, status=404)
self.assert_cors_headers(response)
def test_put_content_403(self):
response = self.testapp.post_json('/users/contents', {
'title': u'My document タイトル',
'abstract': u'My document abstract',
'language': u'en'}, status=201)
document = response.json
with mock.patch('cnxauthoring.models.Document.__acl__') as acl:
acl.return_value = ()
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(document['id']),
{'title': 'new title'}, status=403)
self.assertTrue('You do not have permission to edit'
in response.body.decode('utf-8'))
response = self.testapp.post_json('/users/contents', {
'title': u'My binder タイトル',
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [],
},
'language': u'en'}, status=201)
binder = response.json
with mock.patch('cnxauthoring.models.Binder.__acl__') as acl:
acl.return_value = ()
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(binder['id']),
{'title': 'new title'}, status=403)
self.assertTrue('You do not have permission to edit'
in response.body.decode('utf-8'))
def test_put_content_invalid_json(self):
response = self.testapp.post_json('/users/contents', {
'title': u'My document タイトル',
'abstract': u'My document abstract',
'language': u'en'}, status=201)
document = response.json
self.assert_cors_headers(response)
response = self.testapp.put(
'/contents/{}@draft.json'.format(document['id']),
'invalid json', content_type='application/json', status=400)
self.assertTrue('Invalid JSON' in response.body.decode('utf-8'))
self.assert_cors_headers(response)
def test_put_content_derived_from(self):
post_data = {
'derivedFrom': u'91cb5f28-2b8a-4324-9373-dac1d617bc24@1',
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
page = response.json
self.assert_cors_headers(response)
post_data = {
'content': '<html><body><p>Page content</p></body></html>',
}
now = datetime.datetime.now(TZINFO)
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = now
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']),
post_data, status=200)
result = response.json
self.assertEqual(result['content'], post_data['content'])
self.assertEqual(result['revised'], now.astimezone(TZINFO).isoformat())
self.assert_cors_headers(response)
def test_put_content_binder_document_not_found(self):
response = self.testapp.post_json('/users/contents', {
'title': u'My book タイトル',
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [],
},
}, status=201)
self.assert_cors_headers(response)
binder = response.json
update_data = {
'title': u'...',
'tree': {
'contents': [{
u'id': u'7d089006-5a95-4e24-8e04-8168b5c41aa3@draft',
u'title': u'Hygiene',
}],
},
}
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(binder['id']),
update_data, status=400)
self.assertTrue(
'Document Not Found: 7d089006-5a95-4e24-8e04-8168b5c41aa3@draft'
in response.body.decode('utf-8'))
def test_put_content_binder(self):
# Create a derived binder
post_data = {
'derivedFrom': u'[email protected]',
}
created = datetime.datetime.now(TZINFO)
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = created
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
binder = response.json
self.assert_cors_headers(response)
update_data = {
'title': u'...',
'abstract': u'...',
'tree': {
'contents': [{
u'id': u'7d089006-5a95-4e24-8e04-8168b5c41aa3@1',
u'title': u'Hygiene',
}],
},
}
revised = datetime.datetime.now(TZINFO)
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = revised
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(binder['id']),
update_data, status=200)
binder = response.json
submitter_w_assign_date = SUBMITTER_WITH_ACCEPTANCE.copy()
        submitter_w_assign_date[u'assignmentDate'] = created.astimezone(
            TZINFO).isoformat()
self.assertEqual(binder, {
u'areContainedPublishable': False,
            u'created': created.astimezone(TZINFO).isoformat(),
            u'revised': revised.astimezone(TZINFO).isoformat(),
u'submitter': SUBMITTER,
u'authors': [submitter_w_assign_date],
u'permissions': [u'edit', u'publish', u'view'],
u'publishers': [submitter_w_assign_date],
u'id': binder['id'],
u'derivedFrom': post_data['derivedFrom'],
u'derivedFromTitle': u'<span style="color:red;">Derived</span> Copy of College <i>Physics</i>',
u'derivedFromUri': u'http://cnx.org/contents/{}'.format(
post_data['derivedFrom']),
u'abstract': u'...',
u'containedIn': [],
u'content': u'',
u'language': u'en',
u'mediaType': u'application/vnd.org.cnx.collection',
u'version': u'draft',
u'license': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0'},
u'originalLicense': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0',
},
u'title': u'...',
u'tree': {
u'id': u'{}@draft'.format(binder['id']),
u'title': u'...',
u'isPublishable': True,
u'publishBlockers': None,
u'contents': [{
u'id': u'7d089006-5a95-4e24-8e04-8168b5c41aa3@1',
u'title': u'Hygiene',
}],
},
u'subjects': [],
u'keywords': [],
u'isPublishable': True,
u'publishBlockers': None,
u'state': u'Draft',
u'permissions': [u'edit', u'publish', u'view'],
u'publication': None,
u'editors': [],
u'translators': [],
u'licensors': [submitter_w_assign_date],
u'copyrightHolders': [submitter_w_assign_date],
u'illustrators': [],
u'printStyle': None,
})
self.assert_cors_headers(response)
response = self.testapp.get(
'/contents/{}@draft.json'.format(binder['id']), status=200)
binder = response.json
self.assertEqual(binder, {
u'areContainedPublishable': False,
u'created': created.astimezone(TZINFO).isoformat(),
u'revised': revised.astimezone(TZINFO).isoformat(),
u'submitter': SUBMITTER,
u'authors': [submitter_w_assign_date],
u'permissions': [u'edit', u'publish', u'view'],
u'publishers': [submitter_w_assign_date],
u'id': binder['id'],
u'derivedFrom': post_data['derivedFrom'],
u'derivedFromTitle': u'<span style="color:red;">Derived</span> Copy of College <i>Physics</i>',
u'derivedFromUri': u'http://cnx.org/contents/{}'.format(
post_data['derivedFrom']),
u'abstract': u'...',
u'containedIn': [],
u'content': u'',
u'language': u'en',
u'mediaType': u'application/vnd.org.cnx.collection',
u'version': u'draft',
u'license': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0'},
u'originalLicense': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0',
},
u'title': u'...',
u'tree': {
u'id': u'{}@draft'.format(binder['id']),
u'title': u'...',
u'isPublishable': True,
u'publishBlockers': None,
u'contents': [{
u'id': u'7d089006-5a95-4e24-8e04-8168b5c41aa3@1',
u'title': u'Hygiene',
}],
},
u'subjects': [],
u'keywords': [],
u'isPublishable': True,
u'publishBlockers': None,
u'state': u'Draft',
u'permissions': [u'edit', u'publish', u'view'],
u'publication': None,
u'editors': [],
u'translators': [],
u'licensors': [submitter_w_assign_date],
u'copyrightHolders': [submitter_w_assign_date],
u'illustrators': [],
u'printStyle': None,
})
self.assert_cors_headers(response)
def test_put_content_binder2(self):
response = self.testapp.post_json('/users/contents', {
'title': 'Empty book',
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [],
},
}, status=201)
binder = response.json
created = binder['created']
response = self.testapp.post_json(
'/users/contents', {'title': 'Empty page'}, status=201)
page = response.json
revised = datetime.datetime.now(TZINFO)
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = revised
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(binder['id']), {
'id': '{}@draft'.format(binder['id']),
'downloads': [],
'isLatest': True,
'derivedFrom': None,
'abstract': '',
'revised': '2014-05-02T12:42:09.490860-04:00',
'keywords': [],
'subjects': [],
'publication': None,
'license': {
'url': 'http://creativecommons.org/licenses/by/4.0/',
'version': '4.0',
'name': 'Creative Commons Attribution License',
'abbr': 'by'
},
'language': 'en',
'title': 'etst book',
'created': '2014-05-02T12:42:09.490738-04:00',
'tree': {
'id': '{}@draft'.format(binder['id']),
'title': 'etst book',
'contents': [
{'id': 'f309a0f9-63fb-46ca-9585-d1e1dc96a142@3',
'title':
'Introduction to Two-Dimensional Kinematics'},
{'id': 'e12329e4-8d6c-49cf-aa45-6a05b26ebcba@2',
'title':
'Introduction to One-Dimensional Kinematics'},
{'id': '{}@draft'.format(page['id']),
'title': 'test page'}
]
},
'mediaType': 'application/vnd.org.cnx.collection',
'content': '',
'state': 'Draft',
'version': 'draft',
'submitter': SUBMITTER,
'authors': [SUBMITTER_WITH_ACCEPTANCE],
'publishers': [SUBMITTER_WITH_ACCEPTANCE],
'error': False,
}, status=200)
response = self.testapp.get(
'/contents/{}@draft.json'.format(binder['id']), status=200)
result = response.json
self.assertEqual(result['created'], created)
self.assertEqual(
result['revised'], revised.astimezone(TZINFO).isoformat())
self.assertEqual(result['tree'], {
'id': '{}@draft'.format(binder['id']),
'title': 'etst book',
'isPublishable': True,
'publishBlockers': None,
'contents': [
{
'id': 'f309a0f9-63fb-46ca-9585-d1e1dc96a142@3',
'title': 'Introduction to Two-Dimensional Kinematics'
},
{
'id': 'e12329e4-8d6c-49cf-aa45-6a05b26ebcba@2',
'title': 'Introduction to One-Dimensional Kinematics'
},
{
'id': '{}@draft'.format(page['id']),
'title': 'test page',
'isPublishable': False,
'publishBlockers': ['no_content'],
}
]
})
def test_put_content(self):
created = datetime.datetime.now(TZINFO)
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = created
response = self.testapp.post_json('/users/contents', {
'title': u'My document タイトル',
'abstract': u'My document abstract',
'language': u'en'}, status=201)
document = response.json
self.assert_cors_headers(response)
update_data = {
'title': u"Turning DNA through resonance",
'abstract': u"Theories on turning DNA structures",
'content': u"Ding dong the switch is flipped.",
'keywords': ['DNA', 'resonance'],
'subjects': ['Science and Technology'],
}
revised = datetime.datetime.now(TZINFO)
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = revised
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(document['id']),
update_data, status=200)
result = response.json
self.assertEqual(result['id'], document['id'])
self.assertEqual(result['title'], update_data['title'])
self.assertEqual(result['abstract'], update_data['abstract'])
self.assertEqual(result['language'], document['language'])
self.assertEqual(result['content'], update_data['content'])
self.assertEqual(result['keywords'], update_data['keywords'])
self.assertEqual(result['subjects'], update_data['subjects'])
self.assertEqual(result['created'],
created.astimezone(TZINFO).isoformat())
self.assertEqual(result['revised'],
revised.astimezone(TZINFO).isoformat())
response = self.testapp.get(
'/contents/{}@draft.json'.format(document['id']))
result = response.json
self.assertEqual(result['id'], document['id'])
self.assertEqual(result['title'], update_data['title'])
self.assertEqual(result['abstract'], update_data['abstract'])
self.assertEqual(result['language'], document['language'])
self.assertEqual(result['content'], update_data['content'])
self.assertEqual(result['keywords'], update_data['keywords'])
self.assertEqual(result['subjects'], update_data['subjects'])
self.assertEqual(result['created'],
created.astimezone(TZINFO).isoformat())
self.assertEqual(result['revised'],
revised.astimezone(TZINFO).isoformat())
self.assert_cors_headers(response)
def test_delete_content_401(self):
self.logout()
# use a nonexistent id; the request is rejected before any content lookup
response = self.testapp.delete('/contents/1234abcde@draft',
status=401)
self.assert_cors_headers(response)
def test_delete_content_403(self):
response = self.testapp.post_json(
'/users/contents', {'title': 'My page'}, status=201)
page = response.json
self.assert_cors_headers(response)
self.logout()
self.login('you')
response = self.testapp.delete(
'/contents/{}@draft'.format(page['id']), status=403)
self.assert_cors_headers(response)
def test_delete_content(self):
response = self.testapp.post_json(
'/users/contents', {'title': 'My page'}, status=201)
page = response.json
self.assert_cors_headers(response)
# test that it's possible to get the content we just created
response = self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=200)
# delete the content
response = self.testapp.delete(
'/contents/{}@draft'.format(page['id']), status=200)
self.assert_cors_headers(response)
response = self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=404)
def test_delete_content_multiple(self):
# create two pages
response = self.testapp.post_json('/users/contents', {
'title': 'Page one',
'editors': [{'id': 'user2'}]}, status=201)
page_one = response.json
response = self.testapp.post_json('/users/contents', {
'title': 'Page two'}, status=201)
page_two = response.json
# create a book, put the two pages inside the book, plus
# one page from archive
response = self.testapp.post_json('/users/contents', {
'title': 'My book',
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [
{'id': '{}@draft'.format(page_one['id']),
'title': 'Page one'},
{'id': '{}@draft'.format(page_two['id']),
'title': 'Page two'},
{'id': '91cb5f28-2b8a-4324-9373-dac1d617bc24@1',
'title': 'Page three'}],
},
}, status=201)
book = response.json
# login as user2
self.logout()
self.login('user2')
# create another book, put only page one in it
response = self.testapp.post_json('/users/contents', {
'title': "User2's book",
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [
{'id': '{}@draft'.format(page_one['id']),
'title': 'Page one'}],
},
}, status=201)
# log back in as user1
self.logout()
self.login('user1')
# delete the book and all the pages inside it
response = self.testapp.put_json('/contents/delete', [
book['id'], page_one['id'], page_two['id'],
'91cb5f28-2b8a-4324-9373-dac1d617bc24@1',
], status=200)
# only the book and page_two should be deleted
deleted = response.json
self.assertEqual(deleted, [book['id'], page_two['id']])
self.testapp.get('/contents/{}@draft.json'.format(book['id']),
status=404)
self.testapp.get('/contents/{}@draft.json'.format(page_one['id']),
status=200)
self.testapp.get('/contents/{}@draft.json'.format(page_two['id']),
status=404)
@mock.patch('cnxauthoring.views.logger')
def test_delete_content_w_publish_error(self, logger):
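# If archive no longer has the content when a user removes themselves,
# the internal publish call fails; authoring should only log a warning
# and the remaining requests should keep working.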
# Start test similar to test for multiple users
response = self.testapp.post_json('/users/contents', {
'title': 'Multiple users test',
'editors': [{'id': 'you'}],
}, status=201)
page = response.json
self.assert_cors_headers(response)
self.testapp.get('/contents/{}@draft.json'.format(page['id']),
status=200)
self.logout()
self.login('you')
response = self.testapp.get('/users/contents', status=200)
workspace = response.json
items = [i['id'] for i in workspace['results']['items']]
self.assertIn('{}@draft'.format(page['id']), items)
self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=200)
self.testapp.put_json(
'/contents/{}@draft/acceptance'.format(page['id']),
{'license': True,
'roles': [{'role': 'editors', 'hasAccepted': True}]},
status=200)
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']),
{'title': 'Multiple users test edited by you'}, status=200)
self.logout()
self.login('user2')
self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=403)
self.logout()
self.login('user1')
response = self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=200)
self.assertEqual(response.json['title'],
'Multiple users test edited by you')
response = self.testapp.get('/users/contents', status=200)
workspace = response.json
items = [i['id'] for i in workspace['results']['items']]
self.assertIn('{}@draft'.format(page['id']), items)
self.testapp.delete('/contents/{}@draft'.format(page['id']),
status=403)
self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=200)
# Delete the contents of the page completely from archive's database
with psycopg2.connect(
publishing_settings()['db-connection-string']) as db_conn:
with db_conn.cursor() as cursor:
cursor.execute(
"""DELETE FROM document_acl \
WHERE uuid = %s""", (page['id'],))
cursor.execute(
"""DELETE FROM license_acceptances \
WHERE uuid = %s""", (page['id'],))
cursor.execute(
"""DELETE FROM modules \
WHERE uuid = %s""", (page['id'],))
cursor.execute(
"""DELETE FROM role_acceptances \
WHERE uuid = %s""", (page['id'],))
cursor.execute(
"""DELETE FROM document_controls \
WHERE uuid = %s""", (page['id'],))
# Send a delete request to authoring to remove the page from its
# database
self.testapp.delete(
'/contents/{}@draft/users/me'.format(page['id']), status=200)
# Check to see that authoring created a warning message when publishing
# failed to find the page.
self.assertEqual(logger.exception.call_count, 1)
args1, = logger.exception.call_args_list
self.assertEqual(
args1[0], ('Warning: '
'publishing error on '
'content id {} '.format(page['id']),))
# Make sure user can no longer access the page
self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=403)
# Finish the test by making sure the requests sent by other
# users are unaffected by the publishing warning.
response = self.testapp.get('/users/contents', status=200)
workspace = response.json
items = [i['id'] for i in workspace['results']['items']]
self.assertNotIn('{}@draft'.format(page['id']), items)
self.logout()
self.login('you')
response = self.testapp.get('/users/contents', status=200)
workspace = response.json
items = [i['id'] for i in workspace['results']['items']]
self.assertIn('{}@draft'.format(page['id']), items)
self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=200)
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']),
{'title': 'Multiple users test edited again by you'}, status=200)
self.logout()
self.login('user1')
response = self.testapp.get('/users/contents', status=200)
workspace = response.json
items = [i['id'] for i in workspace['results']['items']]
self.assertNotIn('{}@draft'.format(page['id']), items)
post_data = {
'id': '{}@draft'.format(page['id']),
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
response = self.testapp.get('/users/contents', status=200)
workspace = response.json
items = [i['id'] for i in workspace['results']['items']]
self.assertIn('{}@draft'.format(page['id']), items)
def test_delete_content_binder(self):
# Create a page first
response = self.testapp.post_json('/users/contents', {
'title': 'My page',
}, status=201)
page = response.json
self.assert_cors_headers(response)
# Create a book with the page inside
response = self.testapp.post_json('/users/contents', {
'title': 'My book',
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [
{
'id': '{}@draft'.format(page['id']),
'title': 'My page',
},
],
},
}, status=201)
book_one = response.json
self.assert_cors_headers(response)
# Create another book with the same page inside
response = self.testapp.post_json('/users/contents', {
'title': 'My different book',
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [
{
'id': '{}@draft'.format(page['id']),
'title': 'My page',
},
],
},
}, status=201)
book_two = response.json
self.assert_cors_headers(response)
# Assert that the page is contained in two books
response = self.testapp.get(
'/contents/{}@draft.json'.format(page['id']))
result = response.json
self.assertEqual(sorted(result['containedIn']),
sorted([book_one['id'], book_two['id']]))
# Delete book one
self.testapp.delete('/contents/{}@draft'.format(book_one['id']),
status=200)
self.testapp.get('/contents/{}@draft.json'.format(book_one['id']),
status=404)
# Assert that the page is now only contained in book two
response = self.testapp.get(
'/contents/{}@draft.json'.format(page['id']))
result = response.json
self.assertEqual(result['containedIn'], [book_two['id']])
def test_delete_content_multiple_users(self):
response = self.testapp.post_json('/users/contents', {
'title': 'Multiple users test',
'editors': [{'id': 'you'}],
}, status=201)
page = response.json
self.assert_cors_headers(response)
self.testapp.get('/contents/{}@draft.json'.format(page['id']),
status=200)
self.logout()
self.login('you')
# editor should get the content in their workspace
response = self.testapp.get('/users/contents', status=200)
workspace = response.json
items = [i['id'] for i in workspace['results']['items']]
self.assertIn('{}@draft'.format(page['id']), items)
# make sure the editor can also view the content
self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=200)
# make sure the editor can also edit the content after accepting their
# role
self.testapp.put_json(
'/contents/{}@draft/acceptance'.format(page['id']),
{'license': True,
'roles': [{'role': 'editors', 'hasAccepted': True}]},
status=200)
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']),
{'title': 'Multiple users test edited by you'}, status=200)
self.logout()
self.login('user2')
# someone not in acl should not be able to view the content
self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=403)
self.logout()
# log back in as the submitter and check that the title has been
# changed
self.login('user1')
response = self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=200)
self.assertEqual(response.json['title'],
'Multiple users test edited by you')
response = self.testapp.get('/users/contents', status=200)
workspace = response.json
items = [i['id'] for i in workspace['results']['items']]
self.assertIn('{}@draft'.format(page['id']), items)
# try to delete the content should return an error
self.testapp.delete('/contents/{}@draft'.format(page['id']),
status=403)
self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=200)
# delete user1 from the content
self.testapp.delete(
'/contents/{}@draft/users/me'.format(page['id']), status=200)
# content should not appear in user1's workspace
response = self.testapp.get('/users/contents', status=200)
workspace = response.json
items = [i['id'] for i in workspace['results']['items']]
self.assertNotIn('{}@draft'.format(page['id']), items)
self.logout()
# content should still be accessible by "you"
self.login('you')
response = self.testapp.get('/users/contents', status=200)
workspace = response.json
items = [i['id'] for i in workspace['results']['items']]
self.assertIn('{}@draft'.format(page['id']), items)
self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=200)
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']),
{'title': 'Multiple users test edited again by you'}, status=200)
self.logout()
# content should not appear in user1's workspace
self.login('user1')
response = self.testapp.get('/users/contents', status=200)
workspace = response.json
items = [i['id'] for i in workspace['results']['items']]
self.assertNotIn('{}@draft'.format(page['id']), items)
# re-add user1 to the document
post_data = {
'id': '{}@draft'.format(page['id']),
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
response = self.testapp.get('/users/contents', status=200)
workspace = response.json
items = [i['id'] for i in workspace['results']['items']]
self.assertIn('{}@draft'.format(page['id']), items)
def test_search_content_401(self):
self.logout()
response = self.testapp.get('/users/contents/search', status=401)
self.assert_cors_headers(response)
def test_search_content_no_q(self):
response = self.testapp.get('/users/contents/search', status=200)
result = response.json
self.assertEqual(result, {
'query': {'limits': []},
'results': {
'items': [],
'total': 0,
'limits': [],
}
})
self.assert_cors_headers(response)
def test_search_content_q_empty(self):
response = self.testapp.get('/users/contents/search?q=', status=200)
result = response.json
self.assertEqual(result, {
'query': {'limits': []},
'results': {
'items': [],
'total': 0,
'limits': [],
}
})
self.assert_cors_headers(response)
def test_search_unbalanced_quotes(self):
self.logout()
self.login('user2')
post_data = {'title': u'Document'}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
self.assert_cors_headers(response)
response = self.testapp.get('/users/contents/search?q="Document', status=200)
result = response.json
self.assertEqual(result['query']['limits'],
[{'tag': 'text', 'value': 'Document'}])
self.assertEqual(result['results']['total'], 1)
self.assert_cors_headers(response)
def test_search_content(self):
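# Search is scoped to the logged-in user's own drafts: terms are split
# on whitespace, quoted phrases are kept together, and other users'
# documents never show up in the results.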
post_data = {'title': u"Document"}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
self.logout()
self.login('user2')
post_data = {
'title': u"Turning DNA through resonance",
'abstract': u"Theories on turning DNA structures",
'created': u'2014-03-13T15:21:15.677617',
'revised': u'2014-03-13T15:21:15.677617',
'license': {'url': DEFAULT_LICENSE.url},
'language': u'en',
'contents': u"Ding dong the switch is flipped.",
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
result = response.json
doc_id = result['id']
self.assert_cors_headers(response)
post_data = {'title': u'New stuff'}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
result = response.json
new_doc_id = result['id']
self.assert_cors_headers(response)
# should not be able to get other user's documents
response = self.testapp.get('/users/contents/search?q=document', status=200)
result = response.json
self.assertDictEqual(result, {
'query': {
'limits': [{'tag': 'text', 'value': 'document'}]},
'results': {
'items': [],
'total': 0,
'limits': []}})
self.assert_cors_headers(response)
# should be able to search user's own documents
response = self.testapp.get('/users/contents/search?q=DNA', status=200)
result = response.json
self.assertEqual(result['results']['total'], 1)
self.assertEqual(result['results']['items'][0]['id'],
'{}@draft'.format(doc_id))
self.assert_cors_headers(response)
# should be able to search multiple terms
response = self.testapp.get('/users/contents/search?q=new+resonance', status=200)
result = response.json
self.assertEqual(result['query']['limits'], [
{'tag': 'text', 'value': 'new'},
{'tag': 'text', 'value': 'resonance'}])
self.assertEqual(result['results']['total'], 2)
self.assertEqual(sorted([i['id'] for i in result['results']['items']]),
sorted(['{}@draft'.format(doc_id),
'{}@draft'.format(new_doc_id)]))
self.assert_cors_headers(response)
# should be able to search with double quotes
response = self.testapp.get('/users/contents/search?q="through resonance"',
status=200)
result = response.json
self.assertEqual(result['query']['limits'], [
{'tag': 'text', 'value': 'through resonance'}])
self.assertEqual(result['results']['total'], 1)
self.assertEqual(result['results']['items'][0]['id'],
'{}@draft'.format(doc_id))
self.assert_cors_headers(response)
def test_get_resource_401(self):
self.logout()
response = self.testapp.get('/resources/1234abcde', status=401)
self.assert_cors_headers(response)
def test_get_resource_403(self):
with open(get_data('1x1.png'), 'rb') as data:
upload_data = data.read()
response = self.testapp.post(
'/resources',
{'file': Upload('1x1.png', upload_data, 'image/png')},
status=201)
self.assert_cors_headers(response)
redirect_url = response.headers['Location']
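# an empty ACL means the resource grants no permissions at all, so even
# the uploader gets a 403 on retrieval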
with mock.patch('cnxauthoring.models.Resource.__acl__') as acl:
acl.return_value = ()
response = self.testapp.get(redirect_url, status=403)
self.assert_cors_headers(response)
def test_get_resource_404(self):
response = self.testapp.get('/resources/1234abcde', status=404)
self.assert_cors_headers(response)
def test_get_resource_html(self):
"""Test that a html resource file will get downloaded as a binary file
to avoid people using it to steal cookies etc
See https://github.com/Connexions/cnx-authoring/issues/64
"""
upload_data = b'<html><body><h1>title</h1></body></html>'
response = self.testapp.post('/resources', {
'file': Upload('a.html', upload_data,
'text/html')}, status=201)
redirect_url = response.headers['Location']
self.assert_cors_headers(response)
response = self.testapp.get(redirect_url, status=200)
self.assertEqual(response.body, upload_data)
self.assertEqual(response.content_type, 'application/octet-stream')
self.assert_cors_headers(response)
def test_get_resource(self):
with open(get_data('1x1.png'), 'rb') as data:
upload_data = data.read()
response = self.testapp.post(
'/resources',
{'file': Upload('1x1.png', upload_data, 'image/png')},
status=201)
redirect_url = response.headers['Location']
response = self.testapp.get(redirect_url, status=200)
self.assertEqual(response.body, upload_data)
self.assertEqual(response.content_type, 'image/png')
self.assert_cors_headers(response)
# any logged-in user can retrieve any resource file
self.logout()
self.login('user3')
response = self.testapp.get(redirect_url, status=200)
self.assertEqual(response.body, upload_data)
self.assertEqual(response.content_type, 'image/png')
self.assert_cors_headers(response)
def test_post_resource_401(self):
self.logout()
response = self.testapp.post(
'/resources',
{'file': Upload('a.txt', b'hello\n', 'text/plain')},
status=401)
self.assert_cors_headers(response)
def test_post_resource_403(self):
with mock.patch('cnxauthoring.models.Resource.__acl__') as acl:
acl.return_value = ()
response = self.testapp.post(
'/resources',
{'file': Upload('a.txt', b'hello\n', 'text/plain')},
status=403)
self.assert_cors_headers(response)
def test_post_resource(self):
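# Resources are content-addressed: the Location header (and body) point
# at the SHA1 hash of the uploaded bytes, here sha1(b'hello\n').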
response = self.testapp.post(
'/resources',
{'file': Upload('a.txt', b'hello\n', 'text/plain')},
status=201)
self.assertEqual(response.content_type, 'text/plain')
self.assertEqual(response.headers['Location'],
'http://localhost/resources/'
'f572d396fae9206628714fb2ce00f72e94f2258f')
self.assertEqual(response.body,
b'/resources/'
b'f572d396fae9206628714fb2ce00f72e94f2258f')
self.assert_cors_headers(response)
def test_post_duplicate_resource(self):
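# Uploading identical bytes twice should yield the same SHA1-based
# location both times rather than creating a second resource.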
response = self.testapp.post(
'/resources',
{'file': Upload('a.txt', b'hello\n', 'text/plain')},
status=201)
self.assertEqual(response.content_type, 'text/plain')
self.assertEqual(response.headers['Location'],
'http://localhost/resources/'
'f572d396fae9206628714fb2ce00f72e94f2258f')
self.assertEqual(response.body,
b'/resources/'
b'f572d396fae9206628714fb2ce00f72e94f2258f')
response = self.testapp.post(
'/resources',
{'file': Upload('a.txt', b'hello\n', 'text/plain')},
status=201)
self.assertEqual(response.content_type, 'text/plain')
self.assertEqual(response.headers['Location'],
'http://localhost/resources/'
'f572d396fae9206628714fb2ce00f72e94f2258f')
self.assertEqual(response.body,
b'/resources/'
b'f572d396fae9206628714fb2ce00f72e94f2258f')
self.assert_cors_headers(response)
def test_post_resource_exceed_size_limit(self):
two_mb = b'x' * 2 * 1024 * 1024
response = self.testapp.post(
'/resources',
# a 2MB file, size limit for tests is 1MB
{'file': Upload('a.txt', two_mb, 'text/plain')},
status=400)
self.assertIn(b'File uploaded has exceeded limit 1MB', response.body)
def test_user_search_no_q(self):
response = self.testapp.get('/users/search')
result = response.json
self.assertEqual(result, {
u'total_count': 0,
u'users': [],
})
self.assert_cors_headers(response)
def test_user_search_q_empty(self):
response = self.testapp.get('/users/search?q=')
result = response.json
self.assertEqual(result, {
u'total_count': 0,
u'users': [],
})
self.assert_cors_headers(response)
def test_user_search(self):
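# The accounts service response is stubbed out; the view is expected to
# map its fields (username, first_name, last_name, full_name) onto
# authoring's id/firstname/surname/fullname shape.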
mock_accounts_search_results = {
u'items': [
{u'username': u'admin', u'id': 1},
{u'username': u'karenc', u'id': 6},
{u'username': u'karenchan', u'id': 4},
{u'username': u'karenchan2014',
u'first_name': u'Karen',
u'last_name': u'Chan',
u'id': 10,
u'full_name': u'Karen Chan'},
{u'username': u'user_30187', u'id': 9}
],
u'total_count': 5}
with mock.patch('openstax_accounts.stub.OpenstaxAccounts.search'
) as accounts_search:
accounts_search.return_value = mock_accounts_search_results
response = self.testapp.get('/users/search?q=admin')
args, kwargs = accounts_search.call_args
self.assertEqual(args, ('admin',))
self.assertEqual(kwargs, {
'per_page': 10, 'order_by': 'last_name,first_name'})
result = response.json
self.assertEqual(result, {
u'users': [
{
u'id': u'admin',
u'firstname': None,
u'surname': None,
u'fullname': None,
u'suffix': None,
u'title': None,
},
{
u'id': u'karenc',
u'firstname': None,
u'surname': None,
u'fullname': None,
u'suffix': None,
u'title': None,
},
{
u'id': u'karenchan',
u'firstname': None,
u'surname': None,
u'fullname': None,
u'suffix': None,
u'title': None,
},
{
u'id': u'karenchan2014',
u'firstname': u'Karen',
u'surname': u'Chan',
u'fullname': u'Karen Chan',
u'suffix': None,
u'title': None,
},
{
u'id': u'user_30187',
u'firstname': None,
u'surname': None,
u'fullname': None,
u'suffix': None,
u'title': None,
},
],
u'total_count': 5,
})
self.assert_cors_headers(response)
def test_profile_401(self):
self.logout()
response = self.testapp.get('/users/profile', status=401)
self.assert_cors_headers(response)
def test_profile(self):
response = self.testapp.get('/users/profile', status=200)
result = response.json
self.assertEqual(result, SUBMITTER)
self.assert_cors_headers(response)
def test_user_contents_401(self):
self.logout()
response = self.testapp.get('/users/contents', status=401)
self.assert_cors_headers(response)
def test_user_contents(self):
# user1 adds a document
response = self.testapp.post_json(
'/users/contents',
{'title': 'document by default user',
'editors': [{"id": "user2"}],
}, status=201)
page = response.json
# user1 adds user3 as an author, editor, licensor and publisher
# and adds user4 as a translator
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']),
{'authors': page['authors'] + [{'id': 'user3'}],
'editors': page['editors'] + [{'id': 'user3'}],
'translators': [{'id': 'user4'}],
'licensors': page['licensors'] + [{'id': 'user3'}],
'publishers': page['publishers'] + [{'id': 'user3'}]},
status=200)
page = response.json
# user1 removes user4 as a translator
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']),
{'translators': []}, status=200)
page = json.loads(response.body.decode('utf-8'))
# the document should show up in user1's workspace
response = self.testapp.get('/users/contents', status=200)
result = response.json
content_ids = [(i['id'], i['rolesToAccept'])
for i in result['results']['items']]
self.assertIn(('{}@draft'.format(page['id']), []), content_ids)
# user2 should be able to see the document user1 added
self.logout()
self.login('user2')
response = self.testapp.get('/users/contents', status=200)
result = response.json
content_ids = [(i['id'], i['rolesToAccept'], i['state'])
for i in result['results']['items']]
self.assertIn(
('{}@draft'.format(page['id']), ['editors'], 'Awaiting acceptance'
), content_ids)
self.assert_cors_headers(response)
self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=200)
# user2 rejects the role request
self.testapp.post_json(
'/contents/{}@draft/acceptance'.format(page['id']),
{'license': True,
'roles': [{'role': 'editors', 'hasAccepted': False}]},
status=200)
# user2 should see the document with state "Rejecting roles" on their
# workspace
response = self.testapp.get('/users/contents', status=200)
result = json.loads(response.body.decode('utf-8'))
content_ids = [(i['id'], i['rolesToAccept'], i['state'])
for i in result['results']['items']]
self.assertIn(
('{}@draft'.format(page['id']), [], 'Rejected roles'), content_ids)
self.assert_cors_headers(response)
# after user2 deletes the document from the workspace, they won't see
# it anymore
self.testapp.delete('/contents/{}@draft/users/me'.format(page['id']))
response = self.testapp.get('/users/contents', status=200)
result = response.json
content_ids = [i['id'] for i in result['results']['items']]
self.assertNotIn('{}@draft'.format(page['id']), content_ids)
self.assert_cors_headers(response)
# user3 should be able to see the document user1 added
self.logout()
self.login('user3')
response = self.testapp.get('/users/contents', status=200)
result = response.json
content_ids = [(i['id'], i['rolesToAccept'])
for i in result['results']['items']]
self.assertIn(('{}@draft'.format(page['id']),
['authors', 'copyright_holders', 'editors',
'publishers']), content_ids)
self.assert_cors_headers(response)
self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=200)
# user3 should not be able to edit the document before accepting their
# role
self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']), {}, status=403)
# user3 rejects the editor role
self.testapp.post_json(
'/contents/{}@draft/acceptance'.format(page['id']),
{'license': True,
'roles': [{'role': 'editors', 'hasAccepted': False}]},
status=200)
# user3 should still be able to view the content
response = self.testapp.get('/users/contents', status=200)
result = response.json
content_ids = [(i['id'], i['rolesToAccept'])
for i in result['results']['items']]
self.assertIn(('{}@draft'.format(page['id']),
['authors', 'copyright_holders', 'publishers']),
content_ids)
self.assert_cors_headers(response)
self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=200)
# user3 accepts their other roles
self.testapp.post_json(
'/contents/{}@draft/acceptance'.format(page['id']),
{'license': True,
'roles': [{'role': 'authors', 'hasAccepted': True},
{'role': 'publishers', 'hasAccepted': True},
{'role': 'licensors', 'hasAccepted': True}]},
status=200)
# user3 should be able to edit the document after accepting their
# role
self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']), {}, status=200)
# user4 should not be able to see the document user1 added
self.logout()
self.login('user4')
response = self.testapp.get('/users/contents', status=200)
result = response.json
content_ids = [i['id'] for i in result['results']['items']]
self.assertNotIn('{}@draft'.format(page['id']), content_ids)
self.assert_cors_headers(response)
# user1 adds user2 as an illustrator
self.logout()
self.login('user1')
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']),
{'illustrators': [{'id': 'user2'}]}, status=200)
page = response.json
# user2 should see the document in their workspace again
self.logout()
self.login('user2')
response = self.testapp.get('/users/contents', status=200)
result = response.json
content_ids = [(i['id'], i['rolesToAccept'], i['state'])
for i in result['results']['items']]
self.assertIn(
('{}@draft'.format(page['id']), ['illustrators'],
'Awaiting acceptance'), content_ids)
# user1 removes self from all roles
self.logout()
self.login('user1')
self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']),
{'authors': [i for i in page['authors'] if i['id'] != 'user1'],
'publishers': [i for i in page['publishers']
if i['id'] != 'user1'],
'licensors': [i for i in page['licensors']
if i['id'] != 'user1']},
status=200)
# user1 should not see the document in their workspace
response = self.testapp.get('/users/contents')
result = response.json
content_ids = [i['id'] for i in result['results']['items']]
self.assertNotIn('{}@draft'.format(page['id']), content_ids)
def test_user_contents_ordering(self):
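# Workspace items should come back ordered by revised date, newest
# first, regardless of the timezone they were posted with.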
# user4 adds a document
self.logout()
self.login('user4')
date = datetime.datetime(2014, 3, 13, 15, 21, 15, 677617)
date = pytz.timezone(os.environ['TZ']).localize(date)
posting_tzinfo = pytz.timezone('America/Whitehorse')
posting_date = date.astimezone(posting_tzinfo)
from ..utils import utf8
response = self.testapp.post_json('/users/contents', {
'title': 'document by user4',
'created': utf8(posting_date.isoformat()),
'revised': utf8(posting_date.isoformat()),
}, status=201)
page = response.json
# user4 should get back the contents just posted - full content test
response = self.testapp.get('/users/contents', status=200)
result = response.json
from ..models import TZINFO
# Localize the resulting datetime info.
expected_result_revised_date = date.astimezone(TZINFO)
self.assertEqual(result, {
u'query': {
u'limits': [],
},
u'results': {u'items': [
{u'derivedFrom': None,
u'containedIn': [],
u'id': u'{}@draft'.format(page['id']),
u'mediaType': u'application/vnd.org.cnx.module',
u'revised': utf8(expected_result_revised_date.isoformat()),
u'state': u'Draft',
u'title': u'document by user4',
u'version': u'draft',
u'rolesToAccept': [],
}],
u'limits': [],
u'total': 1}
})
self.assert_cors_headers(response)
one_week_ago = datetime.datetime.now(TZINFO) - datetime.timedelta(7)
two_weeks_ago = datetime.datetime.now(TZINFO) - datetime.timedelta(14)
mock_datetime = mock.Mock()
mock_datetime.now = mock.Mock(return_value=one_week_ago)
with mock.patch('datetime.datetime', mock_datetime):
response = self.testapp.post_json(
'/users/contents',
{'derivedFrom': '91cb5f28-2b8a-4324-9373-dac1d617bc24@1'},
status=201)
self.assert_cors_headers(response)
mock_datetime.now = mock.Mock(return_value=two_weeks_ago)
with mock.patch('datetime.datetime', mock_datetime):
response = self.testapp.post_json(
'/users/contents',
{'title': 'oldest document by user4'}, status=201)
self.assert_cors_headers(response)
response = self.testapp.post_json(
'/users/contents', {'title': 'new document by user4'}, status=201)
self.assert_cors_headers(response)
response = self.testapp.get('/users/contents', status=200)
result = response.json
self.assertEqual(result['results']['total'], 4)
self.assertTrue(result['results']['items'][0]['id'].endswith('@draft'))
self.assertTrue(result['results']['items'][1]['id'].endswith('@draft'))
self.assertTrue(result['results']['items'][2]['id'].endswith('@draft'))
self.assertTrue(result['results']['items'][3]['id'].endswith('@draft'))
titles = [i['title'] for i in result['results']['items']]
self.assertEqual(titles, [
u'new document by user4',
u'Copy of Indkøb',
u'oldest document by user4',
u'document by user4'])
derived_from = [i['derivedFrom'] for i in result['results']['items']]
self.assertEqual(derived_from, [
None, '91cb5f28-2b8a-4324-9373-dac1d617bc24@1', None, None])
self.assertEqual(response.headers['Access-Control-Allow-Credentials'],
'true')
self.assertEqual(response.headers['Access-Control-Allow-Origin'],
'http://localhost:8000')
self.assert_cors_headers(response)
def test_user_contents_hide_documents_inside_binders(self):
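# Pages that only appear inside one of the user's own books are hidden
# from the default workspace listing; they show up again when contained
# only in someone else's book or when filtering by mediaType.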
self.logout()
self.login('user5')
one_day_ago = datetime.datetime.now(tz=TZINFO) - datetime.timedelta(1)
one_week_ago = datetime.datetime.now(tz=TZINFO) - datetime.timedelta(7)
mock_datetime = mock.Mock()
mock_datetime.now = mock.Mock(return_value=one_day_ago)
with mock.patch('datetime.datetime', mock_datetime):
response = self.testapp.post_json(
'/users/contents',
{'title': 'single page document'}, status=201)
single_page = response.json
mock_datetime.now = mock.Mock(return_value=one_week_ago)
with mock.patch('datetime.datetime', mock_datetime):
response = self.testapp.post_json(
'/users/contents', {'title': 'page in a book'}, status=201)
page_in_book = response.json
response = self.testapp.post_json('/users/contents', {
'mediaType': 'application/vnd.org.cnx.collection',
'title': 'book',
'tree': {
'contents': [
{
'id': '{}@draft'.format(page_in_book['id']),
},
],
},
}, status=201)
book = response.json
# since page_in_book is in book, it should not show in the workspace
response = self.testapp.get('/users/contents', status=200)
workspace = response.json
self.assertEqual(workspace, {
u'query': {
u'limits': [],
},
u'results': {
u'items': [
{
u'containedIn': [],
u'id': u'{}@draft'.format(book['id']),
u'title': book['title'],
u'derivedFrom': None,
u'state': u'Draft',
u'version': u'draft',
u'revised': book['revised'],
u'mediaType': u'application/vnd.org.cnx.collection',
u'rolesToAccept': [],
},
{
u'containedIn': [],
u'id': u'{}@draft'.format(single_page['id']),
u'title': single_page['title'],
u'derivedFrom': None,
u'state': u'Draft',
u'version': u'draft',
u'revised': single_page['revised'],
u'mediaType': u'application/vnd.org.cnx.module',
u'rolesToAccept': [],
},
],
u'total': 2,
u'limits': [],
},
})
# remove page_in_book from book and add single_page to book
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(book['id']), {
'tree': {
'contents': [
{
'id': '{}@draft'.format(single_page['id']),
},
],
},
}, status=200)
book = response.json
# add page_in_book to a book by someone else
self.logout()
self.login('user6')
response = self.testapp.post_json('/users/contents', {
'mediaType': 'application/vnd.org.cnx.collection',
'title': 'some other book',
'tree': {
'contents': [
{
'id': '{}@draft'.format(page_in_book['id']),
},
],
},
}, status=201)
other_book = response.json
self.logout()
self.login('user5')
# workspace should now show page_in_book and book
response = self.testapp.get('/users/contents', status=200)
workspace = response.json
self.assertEqual(workspace, {
u'query': {
u'limits': [],
},
u'results': {
u'items': [
{
u'containedIn': [],
u'id': u'{}@draft'.format(book['id']),
u'title': book['title'],
u'derivedFrom': None,
u'state': u'Draft',
u'version': u'draft',
u'revised': book['revised'],
u'mediaType': u'application/vnd.org.cnx.collection',
u'rolesToAccept': [],
},
{
u'containedIn': [other_book['id']],
u'id': u'{}@draft'.format(page_in_book['id']),
u'title': page_in_book['title'],
u'derivedFrom': None,
u'state': u'Draft',
u'version': u'draft',
u'revised': page_in_book['revised'],
u'mediaType': u'application/vnd.org.cnx.module',
u'rolesToAccept': [],
},
],
u'total': 2,
u'limits': [],
},
})
# retrieve just pages, should now show all pages
response = self.testapp.get(
'/users/contents?mediaType=application/vnd.org.cnx.module',
status=200)
workspace = response.json
self.assertEqual(workspace, {
u'query': {
u'limits': [],
},
u'results': {
u'items': [
{
u'containedIn': [book['id']],
u'id': u'{}@draft'.format(single_page['id']),
u'title': single_page['title'],
u'derivedFrom': None,
u'state': u'Draft',
u'version': u'draft',
u'revised': single_page['revised'],
u'mediaType': u'application/vnd.org.cnx.module',
u'rolesToAccept': [],
},
{
u'containedIn': [other_book['id']],
u'id': u'{}@draft'.format(page_in_book['id']),
u'title': page_in_book['title'],
u'derivedFrom': None,
u'state': u'Draft',
u'version': u'draft',
u'revised': page_in_book['revised'],
u'mediaType': u'application/vnd.org.cnx.module',
u'rolesToAccept': [],
},
],
u'total': 2,
u'limits': [],
},
})
# Now filter for not:Draft - should suppress all
response = self.testapp.get('/users/contents?state=not:Draft',
status=200)
workspace = response.json
self.assertEqual(workspace, {
u'query': {
u'limits': [],
},
u'results': {
u'items': [],
u'total': 0,
u'limits': [],
},
})
def test_db_restart(self):
'''
Test to see if the database resets itself after a broken
connection
'''
import psycopg2
from ..storage import storage
self.addCleanup(setattr, storage, 'conn',
psycopg2.connect(storage.conn.dsn))
storage.conn.close()
response = self.testapp.post_json(
'/users/contents',
{'title': u'My document タイトル'},
status=503,
expect_errors=True)
self.assertEqual(response.status, '503 Service Unavailable')
response = self.testapp.post_json(
'/users/contents',
{'title': u'My document タイトル'},
status=201,
expect_errors=True)
self.assertEqual(response.status, '201 Created')
def test_service_unavailable_response(self):
'''
Test service unavailable response when a request is made during a
closed or lost database connection.
'''
import psycopg2
from ..storage import storage
self.addCleanup(setattr, storage, 'conn',
psycopg2.connect(storage.conn.dsn))
storage.conn.close()
response = self.testapp.post_json(
'/users/contents',
{'title': u'My document タイトル'},
status=503,
expect_errors=True)
self.assertEqual(response.status, '503 Service Unavailable')
storage.conn.close()
response = self.testapp.get(
'/resources/1234abcde',
status=503,
expect_errors=True)
self.assertEqual(response.status, '503 Service Unavailable')
storage.conn.close()
response = self.testapp.put_json(
'/contents/[email protected]',
{},
status=503,
expect_errors=True)
self.assertEqual(response.status, '503 Service Unavailable')
storage.conn.close()
response = self.testapp.get(
'/users/contents/search',
status=503,
expect_errors=True)
self.assertEqual(response.status, '503 Service Unavailable')
storage.conn.close()
response = self.testapp.delete(
'/contents/1234abcde@draft',
status=503,
expect_errors=True)
self.assertEqual(response.status, '503 Service Unavailable')
@mock.patch('cnxauthoring.views.logger')
def test_database_restart_failed(self, logger):
import psycopg2
from ..storage import storage
self.addCleanup(setattr, storage, 'conn',
psycopg2.connect(storage.conn.dsn))
storage.conn.close()
with mock.patch.object(storage, 'restart') as mock_restart:
mock_restart.side_effect = storage.Error
response = self.testapp.post_json(
'/users/contents',
{'title': 'Test Document'},
status=503)
self.assertEqual(mock_restart.call_count, 1)
self.assertEqual(logger.exception.call_count, 3)
args1, args2, args3 = logger.exception.call_args_list
self.assertEqual(args1[0], ('Storage failure',))
self.assertEqual(args2[0], ('Storage failed to abort',))
self.assertEqual(args3[0], ('Storage failed to restart',))
class PublicationTests(BaseFunctionalTestCase):
def test_publish_401(self):
self.logout()
response = self.testapp.post_json('/publish', {}, status=401)
self.assert_cors_headers(response)
def test_publish_403(self):
post_data = {
'title': 'Page one',
'content': '<html><body><p>Contents of Page one</p></body></html>',
'abstract': 'Learn how to etc etc',
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
page = response.json
post_data = {
'submitlog': u'Nueva versión!',
'items': [
page['id'],
],
}
with mock.patch('cnxauthoring.models.Document.__acl__') as acl:
acl.return_value = ()
response = self.testapp.post_json(
'/publish', post_data, status=403)
self.assertTrue('You do not have permission to publish'
in response.body.decode('utf-8'))
post_data = {
'title': 'Binder',
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [],
},
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
book = response.json
post_data = {
'submitlog': u'Nueva versión!',
'items': [
book['id'],
],
}
with mock.patch('cnxauthoring.models.Binder.__acl__') as acl:
acl.return_value = ()
response = self.testapp.post_json(
'/publish', post_data, status=403)
self.assertTrue('You do not have permission to publish'
in response.body.decode('utf-8'))
def test_publish_service_not_available(self):
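# The call to the publishing service is mocked to return a 404, so
# authoring should answer with a 400 that explains the failure.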
post_data = {
'title': 'Page one',
'content': '<html><body><p>Contents of Page one</p></body></html>',
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
page = response.json
post_data = {
'submitlog': 'Publishing is working!',
'items': [
page['id'],
],
}
with mock.patch('requests.post') as patched_post:
patched_post.return_value = mock.Mock(status_code=404)
response = self.testapp.post_json(
'/publish', post_data, status=400)
self.assertEqual(patched_post.call_count, 1)
self.assertTrue('Unable to publish: response status code: 404'
in response.body.decode('utf-8'))
self.assert_cors_headers(response)
def test_publish_response_not_json(self):
post_data = {
'title': 'Page one',
'content': '<html><body><p>Contents of Page one</p></body></html>',
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
page = response.json
post_data = {
'submitlog': 'Publishing is working!',
'items': [
page['id'],
],
}
with mock.patch('requests.post') as patched_post:
patched_post.return_value = mock.Mock(
status_code=200, content=b'not json')
response = self.testapp.post_json(
'/publish', post_data, status=400)
self.assertEqual(patched_post.call_count, 1)
self.assertTrue('Unable to publish: response body: not json'
in response.body.decode('utf-8'))
self.assert_cors_headers(response)
def test_publish_single_pages(self):
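# The submitter holds every required role, so publishing two standalone
# pages finishes immediately and both drafts map to version 1.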
post_data = {
'title': 'Page one',
'content': '<html><body><p>Contents of Page one</p></body></html>',
'abstract': 'Learn how to etc etc',
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
page_one = response.json
post_data = {
'title': u'Página dos',
'content': (u'<html><body><p>Contents of Página dos</p></body>'
u'</html>'),
'abstract': 'Hola!',
'language': 'es',
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
page_two = response.json
# User makes a publication of the two pages...
post_data = {
'submitlog': u'Nueva versión!',
'items': (page_one['id'], page_two['id'],),
}
response = self.testapp.post_json(
'/publish', post_data, status=200)
self.assertEqual(response.json[u'state'], u'Done/Success')
expected_mapping = {
page_one['id']: '{}@1'.format(page_one['id']),
page_two['id']: '{}@1'.format(page_two['id']),
}
self.assertEqual(response.json[u'mapping'], expected_mapping)
self.assert_cors_headers(response)
# Grab the publication id for followup assertions.
publication_id = response.json['publication']
for page in (page_one, page_two,):
url = '/contents/{}@draft.json'.format(page['id'])
response = self.testapp.get(url)
self.assertEqual(response.json['state'], 'Done/Success')
self.assertEqual(response.json['publication'],
str(publication_id))
def test_publish_derived_from_single_page(self):
# Create the derived page
post_data = {
'derivedFrom': u'91cb5f28-2b8a-4324-9373-dac1d617bc24@1',
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
page = response.json
# Publish the derived page
post_data = {
'submitlog': 'Publishing is working!',
'items': [
'{}@draft'.format(page['id']),
],
}
response = self.testapp.post_json(
'/publish', post_data, status=200)
self.assert_cors_headers(response)
publication_info = response.json
publication_id = publication_info['publication']
self.assertEqual(publication_info['state'], 'Done/Success')
self.assertEqual(publication_info['mapping'][page['id']],
'{}@1'.format(page['id']))
response = self.testapp.get(
'/contents/{}@draft.json'.format(page['id']))
result = response.json
self.assertEqual(result['state'], 'Done/Success')
self.assertEqual(result['publication'], unicode(publication_id))
def test_publish_binder(self):
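# Publish a book together with its two pages: the binder gets version
# 1.1, each page gets version 1, and every draft records the
# publication id and final state.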
response = self.testapp.post_json('/users/contents', {
'title': 'Page one',
'content': '<html><body><p>Content of page one</p></body></html>',
'abstract': 'Learn how to etc etc',
}, status=201)
page1 = response.json
self.assert_cors_headers(response)
response = self.testapp.post_json('/users/contents', {
'title': 'Page two',
'content': '<html><body><p>Content of page two</p></body></html>',
'abstract': 'gotta have one'
}, status=201)
page2 = response.json
self.assert_cors_headers(response)
response = self.testapp.post_json('/users/contents', {
'title': 'Book',
'abstract': 'Book abstract',
'language': 'de',
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [
{
'id': '{}@draft'.format(page1['id']),
'title': 'Page one',
},
{
'id': 'subcol',
'title': 'New section',
'contents': [
{
'id': '{}@draft'.format(page2['id']),
'title': 'Page two',
},
],
},
],
},
}, status=201)
self.assert_cors_headers(response)
binder = response.json
post_data = {
'submitlog': 'Publishing a book is working?',
'items': (binder['id'], page1['id'], page2['id'],),
}
response = self.testapp.post_json('/publish', post_data, status=200)
self.assertEqual(response.json[u'state'], u'Done/Success')
expected_mapping = {
binder['id']: '{}@1.1'.format(binder['id']),
page1['id']: '{}@1'.format(page1['id']),
page2['id']: '{}@1'.format(page2['id']),
}
self.assertEqual(response.json[u'mapping'], expected_mapping)
self.assert_cors_headers(response)
# Grab the publication id for followup assertions.
publication_id = response.json['publication']
for page in (binder, page1, page2,):
url = '/contents/{}@draft.json'.format(page['id'])
response = self.testapp.get(url)
self.assertEqual(response.json['state'], 'Done/Success')
self.assertEqual(response.json['publication'],
str(publication_id))
def test_publish_as_author(self):
author_id = 'cnxcap'
# Post a page.
response = self.testapp.post_json('/users/contents', {
'title': 'Page one',
'content': '<html><body><p>Content of page one</p></body></html>',
'abstract': 'Learn how to etc etc',
}, status=201)
page1 = response.json
self.assert_cors_headers(response)
# Put an author on.
page1['authors'].append({'id': author_id, 'type': 'cnx-id'})
response = self.testapp.put_json('/contents/{}@draft.json'
.format(page1['id']), page1)
page1 = response.json
self.logout()
# Login as the author to accept the role and publish.
self.login(author_id)
self.testapp.post_json('/contents/{}@draft/acceptance'
.format(page1['id']),
{'license': True,
'roles': [{'role': 'authors',
'hasAccepted': True}]})
post_data = {
'submitlog': 'Publishing a page as an author is working?',
'items': (page1['id'],),
}
response = self.testapp.post_json('/publish', post_data, status=200)
self.assertEqual(response.json[u'state'], u'Done/Success')
expected_mapping = {page1['id']: '{}@1'.format(page1['id'])}
self.assertEqual(response.json[u'mapping'], expected_mapping)
self.assert_cors_headers(response)
# Grab the publication id for followup assertions.
publication_id = response.json['publication']
url = '/contents/{}@draft.json'.format(page1['id'])
response = self.testapp.get(url)
self.assertEqual(response.json['state'], 'Done/Success')
self.assertEqual(response.json['publication'],
str(publication_id))
def test_publish_binder_w_printStyle(self):
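# Same flow as test_publish_binder, but the pages carry a printStyle
# value to make sure that metadata does not interfere with publishing.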
response = self.testapp.post_json('/users/contents', {
'title': 'Page one',
'content': '<html><body><p>Content of page one</p></body></html>',
'abstract': 'Learn how to etc etc',
'printStyle': '*PDF Print Style*',
}, status=201)
page1 = response.json
self.assert_cors_headers(response)
response = self.testapp.post_json('/users/contents', {
'title': 'Page two',
'content': '<html><body><p>Content of page two</p></body></html>',
'printStyle': '[PDF Print Style]',
'abstract': 'need one'
}, status=201)
page2 = response.json
self.assert_cors_headers(response)
page1_str = '{}@draft'.format(page1['id'])
page2_str = '{}@draft'.format(page2['id'])
response = self.testapp.post_json(
'/users/contents',
{
'title': 'Book',
'abstract': 'Book abstract',
'language': 'de',
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [
{
'id': page1_str,
'title': 'Page one',
},
{
'id': 'subcol',
'title': 'New section',
'contents': [
{
'id': page2_str,
'title': 'Page two',
},
],
},
],
},
}, status=201)
self.assert_cors_headers(response)
binder = response.json
post_data = {
'submitlog': 'Publishing a book is working?',
'items': (binder['id'], page1['id'], page2['id'],),
}
response = self.testapp.post_json('/publish', post_data, status=200)
self.assertEqual(response.json[u'state'], u'Done/Success')
expected_mapping = {
binder['id']: '{}@1.1'.format(binder['id']),
page1['id']: '{}@1'.format(page1['id']),
page2['id']: '{}@1'.format(page2['id']),
}
self.assertEqual(response.json[u'mapping'], expected_mapping)
self.assert_cors_headers(response)
# Grab the publication id for followup assertions.
publication_id = response.json['publication']
for page in (binder, page1, page2,):
url = '/contents/{}@draft.json'.format(page['id'])
response = self.testapp.get(url)
self.assertEqual(response.json['state'], 'Done/Success')
self.assertEqual(response.json['publication'],
str(publication_id))
def test_publish_derived_from_binder(self):
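# Publishing a derived copy does not finish right away; the publication
# sits in "Waiting for moderation" instead of "Done/Success".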
self.logout()
# Create a derived binder
self.login('e5a07af6-09b9-4b74-aa7a-b7510bee90b8')
post_data = {
'derivedFrom': u'[email protected]',
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
binder = response.json
self.assert_cors_headers(response)
# Publish the derived binder
post_data = {
'submitlog': 'Publishing a derived book',
'items': [
binder['id'],
],
}
response = self.testapp.post_json(
'/publish', post_data, status=200)
self.assert_cors_headers(response)
publication_info = response.json
publication_id = publication_info['publication']
self.assertEqual(publication_info['state'], 'Waiting for moderation')
self.assertEqual(publication_info['mapping'][binder['id']],
'{}@1.1'.format(binder['id']))
response = self.testapp.get(
'/contents/{}@draft.json'.format(binder['id']))
result = response.json
self.assertEqual(result['state'], 'Waiting for moderation')
self.assertEqual(result['publication'], unicode(publication_id))
def test_publish_revision_single_page(self):
id = '91cb5f28-2b8a-4324-9373-dac1d617bc24'
# If the content already exists, because of other tests, remove it.
from ..storage import storage
document = storage.get(id=id)
if document is not None:
storage.remove(document)
storage.persist()
self.logout()
# Create the revision
self.login('Rasmus1975')
post_data = {
'id': u'91cb5f28-2b8a-4324-9373-dac1d617bc24@1',
'title': u'Turning DNA through resonance',
'abstract': u'Theories on turning DNA structures',
'language': u'en',
'subjects': [u'Science and Technology'],
'keywords': [u'DNA', u'resonance'],
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
self.assert_cors_headers(response)
page = response.json
# Publish the revision
post_data = {
'submitlog': 'Publishing a revision',
'items': [
page['id'],
],
}
response = self.testapp.post_json(
'/publish', post_data, status=200)
publication_info = response.json
self.assertEqual(publication_info['state'], 'Done/Success')
self.assertEqual(publication_info['mapping'][page['id']],
'{}@2'.format(page['id']))
def test_edit_after_publish(self):
# create a new page
post_data = {
'title': 'Page one',
'content': '<html><body><p>Contents of Page one</p></body></html>',
'abstract': 'Learn how to etc etc',
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
page_one = response.json
post_data = {
'submitlog': u'Nueva versión!',
'items': [
page_one['id'],
],
}
response = self.testapp.post_json(
'/publish', post_data, expect_errors=True)
publish = response.json
self.assertEqual(publish['state'], 'Done/Success')
self.assertEqual(list(publish['mapping'].values()),
['{}@1'.format(page_one['id'])])
# authoring should have the document in the db with status
# "Done/Success"
response = self.testapp.get('/contents/{}@draft.json'.format(
page_one['id']), status=200)
body = response.json
self.assertEqual(body['state'], 'Done/Success')
# editing the content again
post_data = {
'id': '{}@1'.format(page_one['id']),
'title': 'Page one v2',
'content': '<html><body><p>Contents of Page one</p></body></html>',
'abstract': 'Learn how to etc etc',
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
page_one = response.json
self.assertEqual(page_one['state'], 'Draft')
# post with the same id should return the same draft
post_data = {
'id': '{}@1'.format(page_one['id']),
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
page_one = response.json
self.assertEqual(page_one['state'], 'Draft')
self.assertEqual(page_one['title'], 'Page one v2')
# publish the next version
post_data = {
'submitlog': u'Nueva versión!',
'items': [
page_one['id'],
],
}
response = self.testapp.post_json(
'/publish', post_data, expect_errors=True)
publish = response.json
self.assertEqual(publish['state'], 'Done/Success')
self.assertEqual(list(publish['mapping'].values()),
['{}@2'.format(page_one['id'])])
def test_delete_after_publish(self):
# create a new page
post_data = {
'title': 'Page one',
'content': '<html><body><p>Contents of Page one</p></body></html>',
'abstract': 'Learn how to etc etc',
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
page_one = response.json
post_data = {
'submitlog': u'Nueva versión!',
'items': [
page_one['id'],
],
}
response = self.testapp.post_json(
'/publish', post_data, expect_errors=True)
publish = response.json
self.assertEqual(publish['state'], 'Done/Success')
self.assertEqual(list(publish['mapping'].values()),
['{}@1'.format(page_one['id'])])
# authoring should have the document in the db with status
# "Done/Success"
response = self.testapp.get('/contents/{}@draft.json'.format(
page_one['id']), status=200)
body = response.json
self.assertEqual(body['state'], 'Done/Success')
# delete the content from authoring
response = self.testapp.delete(
'/contents/{}@1'.format(page_one['id']), status=200)
self.testapp.get('/contents/{}@1'.format(page_one['id']), status=404)
def test_publish_after_error(self):
# create a new page
post_data = {
'title': 'Page one',
'content': '<html><body><p><img src="a.png" /></p></body></html>',
'abstract': 'Learn how to etc etc',
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
page_one = response.json
post_data = {
'submitlog': u'Nueva versión!',
'items': [
page_one['id'],
],
}
response = self.testapp.post_json(
'/publish', post_data, expect_errors=True)
self.assertEqual(response.status, '400 Bad Request')
self.assertEqual(response._headers['publish_state'], 'Failed/Error')
self.assertEqual(response._headers['error_type'], 'InvalidReference')
# authoring should have the document in the db with status
# "Failed/Error"
response = self.testapp.get('/contents/{}@draft.json'.format(
page_one['id']), status=200)
body = response.json
self.assertEqual(body['state'], 'Failed/Error')
# fix up the invalid reference
post_data = {
'id': '{}'.format(page_one['id']),
'title': 'Page one v2',
'content': '<html><body><p>Contents of Page one</p></body></html>',
'abstract': 'Learn how to etc etc',
}
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(page_one['id']), post_data)
page_one = response.json
self.assertEqual(page_one['state'], 'Draft')
# publish again
post_data = {
'submitlog': u'Nueva versión!',
'items': [
page_one['id'],
],
}
response = self.testapp.post_json(
'/publish', post_data, expect_errors=True)
publish = response.json
self.assertEqual(publish['state'], 'Done/Success')
self.assertEqual(list(publish['mapping'].values()),
['{}@1'.format(page_one['id'])])
def test_publish_w_multiple_users(self):
# create a new page
post_data = {
'title': 'Page one',
'content': '<html><body><p>Contents of Page one</p></body></html>',
'abstract': 'Learn how to etc etc',
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
page = response.json
# add an editor
post_data = {
'editors': [{'id': 'user2'}],
}
self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']), post_data,
status=200)
# edit some more
post_data = {
'title': 'Page one with an editor',
}
self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']), post_data,
status=200)
post_data = {
'submitlog': u'Nueva versión!',
'items': [page['id']],
}
response = self.testapp.post_json(
'/publish', post_data, status=200)
# publication should be waiting for acceptance
publish = response.json
self.assertEqual(publish['state'], 'Waiting for acceptance')
self.assertEqual(list(publish['mapping'].values()),
['{}@1'.format(page['id'])])
# login as user2 and accept roles
self.logout()
self.login('user2')
post_data = {
'license': True,
'roles': [{'role': 'editors', 'hasAccepted': True}],
}
self.testapp.post_json(
'/contents/{}@draft/acceptance'.format(page['id']),
post_data, status=200)
# publish the content again
self.logout()
self.login('user1')
post_data = {
'submitlog': u'Nueva versión!',
'items': [page['id']],
}
response = self.testapp.post_json(
'/publish', post_data, status=200)
# publication should be waiting for acceptance
publish = response.json
self.assertEqual(publish['state'], 'Done/Success')
self.assertEqual(list(publish['mapping'].values()),
['{}@1'.format(page['id'])])
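    # Walk through the role-acceptance endpoint: automatic acceptance for the
    # requesting user, pending roles for invited users, and rejection or
    # acceptance of individual roles.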
def test_acceptance(self):
# create a new page
post_data = {
'title': 'My Page',
}
created = datetime.datetime.now(TZINFO)
formatted_created = created.astimezone(TZINFO).isoformat()
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = created
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
page = response.json
# user1 has accepted all their roles
response = self.testapp.get(
'/contents/{}@draft/acceptance'.format(page['id']))
acceptance = response.json
self.assertEqual(acceptance, {
u'license': {
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'name': u'Creative Commons Attribution License',
u'code': u'by',
u'version': u'4.0',
},
u'url': u'http://localhost/contents/{}%40draft.json'.format(
page['id']),
u'id': page['id'],
u'title': u'My Page',
u'user': u'user1',
u'roles': [{u'assignmentDate': formatted_created,
u'hasAccepted': True,
u'requester': u'user1',
u'role': u'authors'},
{u'assignmentDate': formatted_created,
u'hasAccepted': True,
u'requester': u'user1',
u'role': u'copyright_holders'},
{u'assignmentDate': formatted_created,
u'hasAccepted': True,
u'requester': u'user1',
u'role': u'publishers'}],
})
# add user2 to authors and editors, add user1 to editors, add user3 to
# translators, licensors and publishers, add user4 to translators
post_data = {
'authors': page['authors'] + [{'id': 'user2'}],
'editors': page['editors'] + [{'id': 'user1'}, {'id': 'user2'}],
'translators': page['translators'] +
[{'id': 'user3'}, {'id': 'user4'}],
'licensors': page['licensors'] + [{'id': 'user3'}],
'publishers': page['publishers'] + [{'id': 'user3'}],
}
now = datetime.datetime.now(TZINFO)
formatted_now = now.astimezone(TZINFO).isoformat()
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = now
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']), post_data,
status=200)
page = response.json
# user1 should accept the editor role automatically
response = self.testapp.get(
'/contents/{}@draft/acceptance'.format(page['id']))
acceptance = response.json
self.assertEqual(acceptance, {
u'license': {
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'name': u'Creative Commons Attribution License',
u'code': u'by',
u'version': u'4.0',
},
u'url': u'http://localhost/contents/{}%40draft.json'.format(
page['id']),
u'id': page['id'],
u'title': u'My Page',
u'user': u'user1',
u'roles': [{u'assignmentDate': formatted_created,
u'hasAccepted': True,
u'requester': u'user1',
u'role': u'authors'},
{u'assignmentDate': formatted_created,
u'hasAccepted': True,
u'requester': u'user1',
u'role': u'copyright_holders'},
{u'assignmentDate': formatted_now,
u'hasAccepted': True,
u'requester': u'user1',
u'role': u'editors'},
{u'assignmentDate': formatted_created,
u'hasAccepted': True,
u'requester': u'user1',
u'role': u'publishers'}],
})
# log in as user2
self.logout()
self.login('user2')
# user2 should have authors and editors in acceptance info
response = self.testapp.get(
'/contents/{}@draft/acceptance'.format(page['id']))
acceptance = response.json
self.assertEqual(acceptance, {
u'license': {
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'name': u'Creative Commons Attribution License',
u'code': u'by',
u'version': u'4.0',
},
u'url': u'http://localhost/contents/{}%40draft.json'.format(
page['id']),
u'id': page['id'],
u'title': u'My Page',
u'user': u'user2',
u'roles': [{u'role': u'authors',
u'assignmentDate': formatted_now,
u'requester': u'user1',
u'hasAccepted': None},
{u'role': u'editors',
u'assignmentDate': formatted_now,
u'requester': u'user1',
u'hasAccepted': None}],
})
# user2 accepts the roles
post_data = {
'license': True,
'roles': [{'role': 'editors', 'hasAccepted': True},
{'role': 'authors', 'hasAccepted': True}],
}
self.testapp.post_json(
'/contents/{}@draft/acceptance'.format(page['id']),
post_data, status=200)
# checks the acceptance info again (all roles accepted)
response = self.testapp.get(
'/contents/{}@draft/acceptance'.format(page['id']))
acceptance = response.json
self.assertEqual(acceptance, {
u'license': {
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'name': u'Creative Commons Attribution License',
u'code': u'by',
u'version': u'4.0',
},
u'url': u'http://localhost/contents/{}%40draft.json'.format(
page['id']),
u'id': page['id'],
u'title': u'My Page',
u'user': u'user2',
u'roles': [{u'role': u'authors',
u'assignmentDate': formatted_now,
u'requester': u'user1',
u'hasAccepted': True},
{u'role': u'editors',
u'assignmentDate': formatted_now,
u'requester': u'user1',
u'hasAccepted': True}],
})
# login as user3
self.logout()
self.login('user3')
# user3 should have translators, licensors and publishers in the
# acceptance info
response = self.testapp.get(
'/contents/{}@draft/acceptance'.format(page['id']))
acceptance = response.json
self.assertEqual(acceptance, {
u'license': {
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'name': u'Creative Commons Attribution License',
u'code': u'by',
u'version': u'4.0',
},
u'url': u'http://localhost/contents/{}%40draft.json'.format(
page['id']),
u'id': page['id'],
u'title': u'My Page',
u'user': u'user3',
u'roles': [{u'role': u'copyright_holders',
u'assignmentDate': formatted_now,
u'requester': u'user1',
u'hasAccepted': None},
{u'role': u'publishers',
u'assignmentDate': formatted_now,
u'requester': u'user1',
u'hasAccepted': None},
{u'role': u'translators',
u'assignmentDate': formatted_now,
u'requester': u'user1',
u'hasAccepted': None}],
})
# user3 rejects their roles
post_data = {
'license': False,
'roles': [{'role': 'translators', 'hasAccepted': False},
{'role': 'copyright_holders', 'hasAccepted': False},
{'role': 'publishers', 'hasAccepted': False}],
}
response = self.testapp.post_json(
'/contents/{}@draft/acceptance'.format(page['id']),
post_data, status=200)
# check the acceptance info for user3 again
response = self.testapp.get(
'/contents/{}@draft/acceptance'.format(page['id']))
acceptance = response.json
self.assertEqual(acceptance['roles'], [
{u'role': u'copyright_holders',
u'assignmentDate': formatted_now,
u'requester': u'user1',
u'hasAccepted': False},
{u'role': u'publishers',
u'assignmentDate': formatted_now,
u'requester': u'user1',
u'hasAccepted': False},
{u'role': u'translators',
u'assignmentDate': formatted_now,
u'requester': u'user1',
u'hasAccepted': False}])
# user3 should still be able to view the content, but not edit
self.testapp.get(
'/contents/{}@draft/acceptance'.format(page['id']))
self.testapp.get(
'/contents/{}@draft.json'.format(page['id']))
self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']), {}, status=403)
# user3 changes their mind and accepts one of their roles
post_data = {
'license': True,
'roles': [{'role': 'copyright_holders', 'hasAccepted': True}],
}
response = self.testapp.post_json(
'/contents/{}@draft/acceptance'.format(page['id']),
post_data, status=200)
# check the acceptance info for user3 again
response = self.testapp.get(
'/contents/{}@draft/acceptance'.format(page['id']))
acceptance = response.json
self.assertEqual(acceptance['roles'], [
{u'role': u'copyright_holders',
u'assignmentDate': formatted_now,
u'requester': u'user1',
u'hasAccepted': True},
{u'role': u'publishers',
u'assignmentDate': formatted_now,
u'requester': u'user1',
u'hasAccepted': False},
{u'role': u'translators',
u'assignmentDate': formatted_now,
u'requester': u'user1',
u'hasAccepted': False}])
# user3 should be able to view and edit the content
self.testapp.get(
'/contents/{}@draft/acceptance'.format(page['id']))
self.testapp.get(
'/contents/{}@draft.json'.format(page['id']))
self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']), {})
# content should be in the workspace
response = self.testapp.get('/users/contents')
workspace = response.json
content_ids = [i['id'] for i in workspace['results']['items']]
self.assertIn('{}@draft'.format(page['id']), content_ids)
# login as user4
self.logout()
self.login('user4')
# user4 should have translators in the acceptance info
response = self.testapp.get(
'/contents/{}@draft/acceptance'.format(page['id']))
acceptance = response.json
self.assertEqual(acceptance, {
u'license': {
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'name': u'Creative Commons Attribution License',
u'code': u'by',
u'version': u'4.0',
},
u'url': u'http://localhost/contents/{}%40draft.json'.format(
page['id']),
u'id': page['id'],
u'title': u'My Page',
u'user': u'user4',
u'roles': [{u'role': u'translators',
u'assignmentDate': formatted_now,
u'requester': u'user1',
u'hasAccepted': None}],
})
# user4 accepts their roles without accepting the license
post_data = {
'license': False,
'roles': [{'role': 'translators', 'hasAccepted': True}],
}
response = self.testapp.post_json(
'/contents/{}@draft/acceptance'.format(page['id']),
post_data, status=200)
        # acceptance info is reset (hasAccepted reverts to None because the
        # license was not accepted)
response = self.testapp.get(
'/contents/{}@draft/acceptance'.format(page['id']))
acceptance = response.json
self.assertEqual(acceptance, {
u'license': {
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'name': u'Creative Commons Attribution License',
u'code': u'by',
u'version': u'4.0',
},
u'url': u'http://localhost/contents/{}%40draft.json'.format(
page['id']),
u'id': page['id'],
u'title': u'My Page',
u'user': u'user4',
u'roles': [{u'role': u'translators',
u'assignmentDate': formatted_now,
u'requester': u'user1',
u'hasAccepted': None}],
})
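    # Publishing with a license override: missing or unsupported license URLs
    # are rejected, while a recognised license (by-nc-sa) is applied to the
    # published content in both publishing and archive.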
def test_publish_w_changed_license(self):
author_id = 'cnxcap'
# Post a page.
response = self.testapp.post_json('/users/contents', {
'title': 'Page one',
'content': '<html><body><p>Content of page one</p></body></html>',
'abstract': 'Learn how to etc etc',
}, status=201)
page1 = response.json
self.assert_cors_headers(response)
# Put an author on.
page1['authors'].append({'id': author_id, 'type': 'cnx-id'})
response = self.testapp.put_json('/contents/{}@draft.json'
.format(page1['id']), page1)
page1 = response.json
self.logout()
# Login as the author to accept the role and publish.
self.login(author_id)
self.testapp.post_json('/contents/{}@draft/acceptance'
.format(page1['id']),
{'license': True,
'roles': [{'role': 'authors',
'hasAccepted': True}]})
# Prepare the post data
from ..models import LICENSES
license = [l for l in LICENSES if l.code == 'by-nc-sa'][0]
post_data = {
'submitlog': 'Publishing a page as an author is working?',
'items': (page1['id'],),
'license': license.__json__(),
}
# Try to publish with a missing license url.
missing_info_post_data = deepcopy(post_data)
del missing_info_post_data['license']['url']
response = self.testapp.post_json('/publish', missing_info_post_data,
status=400)
self.assertIn("Missing license url", response.body)
# Try to publish with an invalid license.
invalid_post_data = deepcopy(post_data)
agpl_license_url = 'https://www.gnu.org/licenses/agpl-3.0'
invalid_post_data['license']['url'] = agpl_license_url
response = self.testapp.post_json('/publish', invalid_post_data,
status=400)
self.assertIn("Invalid license url", response.body)
# Publish under license by-nc-sa.
response = self.testapp.post_json('/publish', post_data, status=200)
self.assertEqual(response.json[u'state'], u'Done/Success')
expected_mapping = {page1['id']: '{}@1'.format(page1['id'])}
self.assertEqual(response.json[u'mapping'], expected_mapping)
self.assert_cors_headers(response)
# Grab the publication id for followup assertions.
publication_id = response.json['publication']
url = '/contents/{}@draft.json'.format(page1['id'])
response = self.testapp.get(url)
self.assertEqual(response.json['state'], 'Done/Success')
self.assertEqual(response.json['publication'],
str(publication_id))
self.assertEqual(response.json['license']['url'], license.url)
# Check publishing for the correct license and acceptance.
publishing_host = integration_test_settings()['publishing.url']
url = '/contents/{}/licensors'.format(page1['id'])
url = urljoin(publishing_host, url)
response = requests.get(url)
self.assertEqual(response.json()['license_url'], license.url)
self.assertEqual(
[l['has_accepted'] for l in response.json()['licensors']],
[True, True]
)
# Check archive for the correct license
archive_host = integration_test_settings()['archive.url']
url = '/contents/{}@1.json'.format(page1['id'])
url = urljoin(archive_host, url)
response = requests.get(url)
self.assertEqual(response.json()['license']['url'], license.url)
| agpl-3.0 | 2,964,078,937,698,059,000 | 39.794775 | 121 | 0.510653 | false |
napalm-automation/napalm-yang | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/summary_lsa/types_of_service/type_of_service/state/__init__.py | 1 | 20811 | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs to be improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/summary-lsa/types-of-service/type-of-service/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Per-TOS parameters for the LSA
"""
__slots__ = ("_path_helper", "_extmethods", "__tos", "__metric")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__tos = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="tos",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
self.__metric = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-ospf-types:ospf-metric",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"summary-lsa",
"types-of-service",
"type-of-service",
"state",
]
def _get_tos(self):
"""
Getter method for tos, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/summary_lsa/types_of_service/type_of_service/state/tos (uint8)
YANG Description: OSPF encoding of the type of service referred to by this
LSA. Encoding for OSPF TOS are described in RFC2328.
"""
return self.__tos
def _set_tos(self, v, load=False):
"""
Setter method for tos, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/summary_lsa/types_of_service/type_of_service/state/tos (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_tos is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tos() directly.
YANG Description: OSPF encoding of the type of service referred to by this
LSA. Encoding for OSPF TOS are described in RFC2328.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="tos",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """tos must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="tos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__tos = t
if hasattr(self, "_set"):
self._set()
def _unset_tos(self):
self.__tos = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="tos",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
def _get_metric(self):
"""
Getter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/summary_lsa/types_of_service/type_of_service/state/metric (oc-ospf-types:ospf-metric)
YANG Description: The metric value to be used for the TOS specified. This value
represents the cost of use of the link for the specific type
of service.
"""
return self.__metric
def _set_metric(self, v, load=False):
"""
Setter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/summary_lsa/types_of_service/type_of_service/state/metric (oc-ospf-types:ospf-metric)
If this variable is read-only (config: false) in the
source YANG file, then _set_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_metric() directly.
YANG Description: The metric value to be used for the TOS specified. This value
represents the cost of use of the link for the specific type
of service.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-ospf-types:ospf-metric",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """metric must be of a type compatible with oc-ospf-types:ospf-metric""",
"defined-type": "oc-ospf-types:ospf-metric",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-ospf-types:ospf-metric', is_config=False)""",
}
)
self.__metric = t
if hasattr(self, "_set"):
self._set()
def _unset_metric(self):
self.__metric = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-ospf-types:ospf-metric",
is_config=False,
)
tos = __builtin__.property(_get_tos)
metric = __builtin__.property(_get_metric)
_pyangbind_elements = OrderedDict([("tos", tos), ("metric", metric)])
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/summary-lsa/types-of-service/type-of-service/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Per-TOS parameters for the LSA
"""
__slots__ = ("_path_helper", "_extmethods", "__tos", "__metric")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__tos = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="tos",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
self.__metric = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-ospf-types:ospf-metric",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"summary-lsa",
"types-of-service",
"type-of-service",
"state",
]
def _get_tos(self):
"""
Getter method for tos, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/summary_lsa/types_of_service/type_of_service/state/tos (uint8)
YANG Description: OSPF encoding of the type of service referred to by this
LSA. Encoding for OSPF TOS are described in RFC2328.
"""
return self.__tos
def _set_tos(self, v, load=False):
"""
Setter method for tos, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/summary_lsa/types_of_service/type_of_service/state/tos (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_tos is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tos() directly.
YANG Description: OSPF encoding of the type of service referred to by this
LSA. Encoding for OSPF TOS are described in RFC2328.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="tos",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """tos must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="tos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__tos = t
if hasattr(self, "_set"):
self._set()
def _unset_tos(self):
self.__tos = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="tos",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
def _get_metric(self):
"""
Getter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/summary_lsa/types_of_service/type_of_service/state/metric (oc-ospf-types:ospf-metric)
YANG Description: The metric value to be used for the TOS specified. This value
represents the cost of use of the link for the specific type
of service.
"""
return self.__metric
def _set_metric(self, v, load=False):
"""
Setter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/summary_lsa/types_of_service/type_of_service/state/metric (oc-ospf-types:ospf-metric)
If this variable is read-only (config: false) in the
source YANG file, then _set_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_metric() directly.
YANG Description: The metric value to be used for the TOS specified. This value
represents the cost of use of the link for the specific type
of service.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-ospf-types:ospf-metric",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """metric must be of a type compatible with oc-ospf-types:ospf-metric""",
"defined-type": "oc-ospf-types:ospf-metric",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-ospf-types:ospf-metric', is_config=False)""",
}
)
self.__metric = t
if hasattr(self, "_set"):
self._set()
def _unset_metric(self):
self.__metric = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-ospf-types:ospf-metric",
is_config=False,
)
tos = __builtin__.property(_get_tos)
metric = __builtin__.property(_get_metric)
_pyangbind_elements = OrderedDict([("tos", tos), ("metric", metric)])
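# A minimal, hypothetical usage sketch for the generated bindings (not part of
# the upstream module): backends populate the read-only leaves through the
# private setters, while consumers read them via the public properties.
#
#   s = state()
#   s._set_tos(32, load=True)
#   s._set_metric(10, load=True)
#   assert (s.tos, s.metric) == (32, 10)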
| apache-2.0 | -3,628,145,791,548,564,000 | 40.291667 | 440 | 0.575273 | false |
praekelt/jmbo-foundry | foundry/migrations/0037_auto__add_field_country_country_code__add_unique_country_slug.py | 1 | 27780 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
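    # Adds the nullable, unique, indexed two-character country_code column to
    # Country and enforces uniqueness on Country.slug.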
def forwards(self, orm):
# Adding field 'Country.country_code'
db.add_column('foundry_country', 'country_code',
self.gf('django.db.models.fields.CharField')(max_length=2, unique=True, null=True, db_index=True),
keep_default=False)
# Adding unique constraint on 'Country', fields ['slug']
db.create_unique('foundry_country', ['slug'])
def backwards(self, orm):
# Removing unique constraint on 'Country', fields ['slug']
db.delete_unique('foundry_country', ['slug'])
# Deleting field 'Country.country_code'
db.delete_column('foundry_country', 'country_code')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'category.category': {
'Meta': {'ordering': "('title',)", 'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'category.tag': {
'Meta': {'ordering': "('title',)", 'object_name': 'Tag'},
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'comments.comment': {
'Meta': {'ordering': "('submit_date',)", 'object_name': 'Comment', 'db_table': "'django_comments'"},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_removed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'submit_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comment_comments'", 'null': 'True', 'to': "orm['auth.User']"}),
'user_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'user_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'foundry.blogpost': {
'Meta': {'ordering': "('-created',)", 'object_name': 'BlogPost', '_ormbases': ['jmbo.ModelBase']},
'content': ('ckeditor.fields.RichTextField', [], {}),
'modelbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['jmbo.ModelBase']", 'unique': 'True', 'primary_key': 'True'})
},
'foundry.chatroom': {
'Meta': {'ordering': "('-created',)", 'object_name': 'ChatRoom', '_ormbases': ['jmbo.ModelBase']},
'modelbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['jmbo.ModelBase']", 'unique': 'True', 'primary_key': 'True'})
},
'foundry.column': {
'Meta': {'object_name': 'Column'},
'designation': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'row': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Row']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.PositiveIntegerField', [], {'default': '8'})
},
'foundry.country': {
'Meta': {'ordering': "('title',)", 'object_name': 'Country'},
'country_code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'unique': 'True', 'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'minimum_age': ('django.db.models.fields.PositiveIntegerField', [], {'default': '18'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
'foundry.defaultavatar': {
'Meta': {'object_name': 'DefaultAvatar'},
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'defaultavatar_related'", 'null': 'True', 'to': "orm['photologue.PhotoEffect']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'foundry.foundrycomment': {
'Meta': {'ordering': "('submit_date',)", 'object_name': 'FoundryComment', '_ormbases': ['comments.Comment']},
'comment_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['comments.Comment']", 'unique': 'True', 'primary_key': 'True'}),
'in_reply_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.FoundryComment']", 'null': 'True', 'blank': 'True'}),
'moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'})
},
'foundry.link': {
'Meta': {'ordering': "('title', 'subtitle')", 'object_name': 'Link'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'link_target_content_type'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'target_object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'view_name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'})
},
'foundry.listing': {
'Meta': {'ordering': "('title', 'subtitle')", 'object_name': 'Listing'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['jmbo.ModelBase']", 'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'count': ('django.db.models.fields.IntegerField', [], {}),
'display_title_tiled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'items_per_page': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'pinned': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'listing_pinned'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['jmbo.ModelBase']"}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32'}),
'style': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'foundry.member': {
'Meta': {'object_name': 'Member', '_ormbases': ['auth.User']},
'about_me': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Country']", 'null': 'True', 'blank': 'True'}),
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'dob': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'member_related'", 'null': 'True', 'to': "orm['photologue.PhotoEffect']"}),
'facebook_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'receive_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'receive_sms': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'twitter_username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'foundry.menu': {
'Meta': {'ordering': "('title', 'subtitle')", 'object_name': 'Menu'},
'display_title': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32'}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'foundry.menulinkposition': {
'Meta': {'ordering': "('position',)", 'object_name': 'MenuLinkPosition'},
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'condition_expression': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Link']"}),
'menu': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Menu']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {})
},
'foundry.navbar': {
'Meta': {'ordering': "('title', 'subtitle')", 'object_name': 'Navbar'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32'}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'foundry.navbarlinkposition': {
'Meta': {'ordering': "('position',)", 'object_name': 'NavbarLinkPosition'},
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'condition_expression': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Link']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'navbar': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Navbar']"}),
'position': ('django.db.models.fields.IntegerField', [], {})
},
'foundry.notification': {
'Meta': {'object_name': 'Notification'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Link']"}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Member']"})
},
'foundry.page': {
'Meta': {'object_name': 'Page'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32'}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'foundry.pageview': {
'Meta': {'object_name': 'PageView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Page']"}),
'view_name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'foundry.row': {
'Meta': {'object_name': 'Row'},
'block_name': ('django.db.models.fields.CharField', [], {'default': "'content'", 'max_length': '32'}),
'has_left_or_right_column': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Page']"})
},
'foundry.tile': {
'Meta': {'object_name': 'Tile'},
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'column': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Column']"}),
'condition_expression': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'enable_ajax': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tile_target_content_type'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'target_object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'view_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'jmbo.modelbase': {
'Meta': {'ordering': "('-created',)", 'object_name': 'ModelBase'},
'anonymous_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'anonymous_likes': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'comments_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'comments_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modelbase_related'", 'null': 'True', 'to': "orm['photologue.PhotoEffect']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'likes_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'likes_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'primary_category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'primary_modelbase_set'", 'null': 'True', 'to': "orm['category.Category']"}),
'publish_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publishers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['publisher.Publisher']", 'null': 'True', 'blank': 'True'}),
'retract_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'unpublished'", 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'subtitle': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Tag']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'photologue.photoeffect': {
'Meta': {'object_name': 'PhotoEffect'},
'background_color': ('django.db.models.fields.CharField', [], {'default': "'#FFFFFF'", 'max_length': '7'}),
'brightness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'color': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'contrast': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'filters': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'reflection_size': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'reflection_strength': ('django.db.models.fields.FloatField', [], {'default': '0.6'}),
'sharpness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'transpose_method': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'})
},
'publisher.publisher': {
'Meta': {'object_name': 'Publisher'},
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'secretballot.vote': {
'Meta': {'unique_together': "(('token', 'content_type', 'object_id'),)", 'object_name': 'Vote'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['foundry']
| bsd-3-clause | 169,661,863,726,752,320 | 83.43769 | 208 | 0.546724 | false |
siliconsmiley/QGIS | python/plugins/processing/gui/ParametersPanel.py | 1 | 16790 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ParametersPanel.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
(C) 2013 by CS Systemes d'information (CS SI)
Email : volayaf at gmail dot com
otb at c-s dot fr (CS SI)
Contributors : Victor Olaya
Alexia Mondot (CS SI) - managing the new parameter
ParameterMultipleExternalInput
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import locale
from PyQt4 import uic
from PyQt4.QtCore import QCoreApplication, QVariant
from PyQt4.QtGui import QWidget, QLayout, QVBoxLayout, QHBoxLayout, QToolButton, QIcon, QLabel, QCheckBox, QComboBox, QLineEdit, QPlainTextEdit
from processing.core.ProcessingConfig import ProcessingConfig
from processing.gui.OutputSelectionPanel import OutputSelectionPanel
from processing.gui.InputLayerSelectorPanel import InputLayerSelectorPanel
from processing.gui.FixedTablePanel import FixedTablePanel
from processing.gui.RangePanel import RangePanel
from processing.gui.MultipleInputPanel import MultipleInputPanel
from processing.gui.NumberInputPanel import NumberInputPanel
from processing.gui.ExtentSelectionPanel import ExtentSelectionPanel
from processing.gui.FileSelectionPanel import FileSelectionPanel
from processing.gui.CrsSelectionPanel import CrsSelectionPanel
from processing.gui.GeometryPredicateSelectionPanel import \
GeometryPredicateSelectionPanel
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterTable
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterTableField
from processing.core.parameters import ParameterSelection
from processing.core.parameters import ParameterFixedTable
from processing.core.parameters import ParameterRange
from processing.core.parameters import ParameterMultipleInput
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterExtent
from processing.core.parameters import ParameterFile
from processing.core.parameters import ParameterCrs
from processing.core.parameters import ParameterString
from processing.core.parameters import ParameterGeometryPredicate
from processing.core.outputs import OutputRaster
from processing.core.outputs import OutputTable
from processing.core.outputs import OutputVector
from processing.tools import dataobjects
pluginPath = os.path.split(os.path.dirname(__file__))[0]
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'widgetParametersPanel.ui'))
class ParametersPanel(BASE, WIDGET):
NOT_SELECTED = QCoreApplication.translate('ParametersPanel', '[Not selected]')
def __init__(self, parent, alg):
super(ParametersPanel, self).__init__(None)
self.setupUi(self)
self.grpAdvanced.hide()
self.layoutMain = self.scrollAreaWidgetContents.layout()
self.layoutAdvanced = self.grpAdvanced.layout()
self.parent = parent
self.alg = alg
self.valueItems = {}
self.labels = {}
self.widgets = {}
self.checkBoxes = {}
self.dependentItems = {}
self.iterateButtons = {}
self.initWidgets()
def initWidgets(self):
#tooltips = self.alg.getParameterDescriptions()
        # If there are advanced parameters, show the corresponding groupbox
for param in self.alg.parameters:
if param.isAdvanced:
self.grpAdvanced.show()
break
# Create widgets and put them in layouts
for param in self.alg.parameters:
if param.hidden:
continue
desc = param.description
if isinstance(param, ParameterExtent):
desc += self.tr(' (xmin, xmax, ymin, ymax)')
try:
if param.optional:
desc += self.tr(' [optional]')
except:
pass
widget = self.getWidgetFromParameter(param)
self.valueItems[param.name] = widget
if isinstance(param, ParameterVector) and \
not self.alg.allowOnlyOpenedLayers:
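                # Vector inputs (unless restricted to opened layers) also get a checkable "iterate"
                # button; buttonToggled() below ensures only one layer can be marked for iteration.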
layout = QHBoxLayout()
layout.setSpacing(2)
layout.setMargin(0)
layout.addWidget(widget)
button = QToolButton()
icon = QIcon(os.path.join(pluginPath, 'images', 'iterate.png'))
button.setIcon(icon)
button.setToolTip(self.tr('Iterate over this layer'))
button.setCheckable(True)
layout.addWidget(button)
self.iterateButtons[param.name] = button
button.toggled.connect(self.buttonToggled)
widget = QWidget()
widget.setLayout(layout)
#~ if param.name in tooltips.keys():
#~ tooltip = tooltips[param.name]
#~ else:
#~ tooltip = param.description
#~ widget.setToolTip(tooltip)
if isinstance(param, ParameterBoolean):
widget.setText(desc)
if param.isAdvanced:
self.layoutAdvanced.addWidget(widget)
else:
self.layoutMain.insertWidget(
self.layoutMain.count() - 2, widget)
else:
label = QLabel(desc)
#label.setToolTip(tooltip)
self.labels[param.name] = label
if param.isAdvanced:
self.layoutAdvanced.addWidget(label)
self.layoutAdvanced.addWidget(widget)
else:
self.layoutMain.insertWidget(
self.layoutMain.count() - 2, label)
self.layoutMain.insertWidget(
self.layoutMain.count() - 2, widget)
self.widgets[param.name] = widget
for output in self.alg.outputs:
if output.hidden:
continue
label = QLabel(output.description)
widget = OutputSelectionPanel(output, self.alg)
self.layoutMain.insertWidget(self.layoutMain.count() - 1, label)
self.layoutMain.insertWidget(self.layoutMain.count() - 1, widget)
if isinstance(output, (OutputRaster, OutputVector, OutputTable)):
check = QCheckBox()
check.setText(self.tr('Open output file after running algorithm'))
check.setChecked(True)
self.layoutMain.insertWidget(self.layoutMain.count() - 1, check)
self.checkBoxes[output.name] = check
self.valueItems[output.name] = widget
def buttonToggled(self, value):
if value:
sender = self.sender()
for button in self.iterateButtons.values():
if button is not sender:
button.setChecked(False)
def getExtendedLayerName(self, layer):
authid = layer.crs().authid()
if ProcessingConfig.getSetting(ProcessingConfig.SHOW_CRS_DEF) \
and authid is not None:
return u'{} [{}]'.format(layer.name(), authid)
else:
return layer.name()
def getWidgetFromParameter(self, param):
# TODO Create Parameter widget class that holds the logic
# for creating a widget that belongs to the parameter.
if isinstance(param, ParameterRaster):
layers = dataobjects.getRasterLayers()
items = []
if param.optional:
items.append((self.NOT_SELECTED, None))
for layer in layers:
items.append((self.getExtendedLayerName(layer), layer))
item = InputLayerSelectorPanel(items, param)
elif isinstance(param, ParameterVector):
if self.somethingDependsOnThisParameter(param) or self.alg.allowOnlyOpenedLayers:
item = QComboBox()
layers = dataobjects.getVectorLayers(param.shapetype)
layers.sort(key=lambda lay: lay.name())
if param.optional:
item.addItem(self.NOT_SELECTED, None)
for layer in layers:
item.addItem(self.getExtendedLayerName(layer), layer)
item.currentIndexChanged.connect(self.updateDependentFields)
item.name = param.name
else:
layers = dataobjects.getVectorLayers(param.shapetype)
items = []
if param.optional:
items.append((self.NOT_SELECTED, None))
for layer in layers:
items.append((self.getExtendedLayerName(layer), layer))
# if already set, put first in list
for i,(name,layer) in enumerate(items):
if layer and layer.source() == param.value:
items.insert(0, items.pop(i))
item = InputLayerSelectorPanel(items, param)
elif isinstance(param, ParameterTable):
if self.somethingDependsOnThisParameter(param):
item = QComboBox()
layers = dataobjects.getTables()
if param.optional:
item.addItem(self.NOT_SELECTED, None)
for layer in layers:
item.addItem(layer.name(), layer)
item.currentIndexChanged.connect(self.updateDependentFields)
item.name = param.name
else:
layers = dataobjects.getTables()
items = []
if param.optional:
items.append((self.NOT_SELECTED, None))
for layer in layers:
items.append((layer.name(), layer))
# if already set, put first in list
for i,(name,layer) in enumerate(items):
if layer and layer.source() == param.value:
items.insert(0, items.pop(i))
item = InputLayerSelectorPanel(items, param)
elif isinstance(param, ParameterBoolean):
item = QCheckBox()
if param.default:
item.setChecked(True)
else:
item.setChecked(False)
elif isinstance(param, ParameterTableField):
item = QComboBox()
if param.parent in self.dependentItems:
items = self.dependentItems[param.parent]
else:
items = []
self.dependentItems[param.parent] = items
items.append(param.name)
parent = self.alg.getParameterFromName(param.parent)
if isinstance(parent, ParameterVector):
layers = dataobjects.getVectorLayers(parent.shapetype)
else:
layers = dataobjects.getTables()
if len(layers) > 0:
if param.optional:
item.addItem(self.tr('[not set]'))
item.addItems(self.getFields(layers[0], param.datatype))
elif isinstance(param, ParameterSelection):
item = QComboBox()
item.addItems(param.options)
item.setCurrentIndex(param.default)
elif isinstance(param, ParameterFixedTable):
item = FixedTablePanel(param)
elif isinstance(param, ParameterRange):
item = RangePanel(param)
elif isinstance(param, ParameterFile):
item = FileSelectionPanel(param.isFolder, param.ext)
elif isinstance(param, ParameterMultipleInput):
if param.datatype == ParameterMultipleInput.TYPE_FILE:
item = MultipleInputPanel(datatype=ParameterMultipleInput.TYPE_FILE)
else:
if param.datatype == ParameterMultipleInput.TYPE_RASTER:
options = dataobjects.getRasterLayers(sorting=False)
elif param.datatype == ParameterMultipleInput.TYPE_VECTOR_ANY:
options = dataobjects.getVectorLayers(sorting=False)
else:
options = dataobjects.getVectorLayers([param.datatype], sorting=False)
opts = []
for opt in options:
opts.append(self.getExtendedLayerName(opt))
item = MultipleInputPanel(opts)
elif isinstance(param, ParameterNumber):
item = NumberInputPanel(param.default, param.min, param.max,
param.isInteger)
elif isinstance(param, ParameterExtent):
item = ExtentSelectionPanel(self.parent, self.alg, param.default)
elif isinstance(param, ParameterCrs):
item = CrsSelectionPanel(param.default)
elif isinstance(param, ParameterString):
if param.multiline:
verticalLayout = QVBoxLayout()
verticalLayout.setSizeConstraint(
QLayout.SetDefaultConstraint)
textEdit = QPlainTextEdit()
textEdit.setPlainText(param.default)
verticalLayout.addWidget(textEdit)
item = textEdit
else:
item = QLineEdit()
item.setText(unicode(param.default))
elif isinstance(param, ParameterGeometryPredicate):
item = GeometryPredicateSelectionPanel(param.enabledPredicates)
if param.left:
widget = self.valueItems[param.left]
if isinstance(widget, InputLayerSelectorPanel):
widget = widget.cmbText
widget.currentIndexChanged.connect(item.onLeftLayerChange)
item.leftLayer = widget.itemData(widget.currentIndex())
if param.right:
widget = self.valueItems[param.right]
if isinstance(widget, InputLayerSelectorPanel):
widget = widget.cmbText
widget.currentIndexChanged.connect(item.onRightLayerChange)
item.rightLayer = widget.itemData(widget.currentIndex())
item.updatePredicates()
item.setValue(param.default)
else:
item = QLineEdit()
item.setText(unicode(param.default))
return item
def updateDependentFields(self):
sender = self.sender()
if not isinstance(sender, QComboBox):
return
if sender.name not in self.dependentItems:
return
layer = sender.itemData(sender.currentIndex())
children = self.dependentItems[sender.name]
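        # Refill every dependent table-field combo box with the attribute fields of the newly selected layer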
for child in children:
widget = self.valueItems[child]
widget.clear()
if self.alg.getParameterFromName(child).optional:
widget.addItem(self.tr('[not set]'))
widget.addItems(self.getFields(layer,
self.alg.getParameterFromName(child).datatype))
def getFields(self, layer, datatype):
fieldTypes = []
if datatype == ParameterTableField.DATA_TYPE_STRING:
fieldTypes = [QVariant.String]
elif datatype == ParameterTableField.DATA_TYPE_NUMBER:
fieldTypes = [QVariant.Int, QVariant.Double, QVariant.ULongLong,
QVariant.UInt]
fieldNames = set()
for field in layer.pendingFields():
if not fieldTypes or field.type() in fieldTypes:
fieldNames.add(unicode(field.name()))
return sorted(list(fieldNames), cmp=locale.strcoll)
def somethingDependsOnThisParameter(self, parent):
for param in self.alg.parameters:
if isinstance(param, ParameterTableField):
if param.parent == parent.name:
return True
return False
| gpl-2.0 | 8,098,731,851,604,483,000 | 42.492228 | 143 | 0.58792 | false |
SKIRT/PTS | core/extract/progress.py | 1 | 16047 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.core.extract.progress Contains the ProgressTable class and the the ProgressExtractor class.
# The latter class is used for extracting simulation progress from a simulation's log files into a ProgressTable object.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import astronomical modules
from astropy.table import Table
# Import the relevant PTS classes and modules
from ..basics.log import log
# -----------------------------------------------------------------
class NoProgressData(Exception):
"""
This class ...
"""
def __init__(self, message, simulation_name=None):
"""
Thisf unction ...
:param message:
:param simulation_name:
"""
# Call the base class constructor with the parameters it needs
super(NoProgressData, self).__init__(message)
# The simulation name
self.simulation_name = simulation_name
# -----------------------------------------------------------------
class ProgressTable(Table):
"""
    This class ...
"""
@classmethod
def from_columns(cls, process_list, phase_list, seconds_list, progress_list):
"""
This function ...
:param process_list:
:param phase_list:
:param seconds_list:
:param progress_list:
:return:
"""
names = ["Process rank", "Phase", "Time", "Progress"]
data = [process_list, phase_list, seconds_list, progress_list]
# Call the constructor of the base class
table = cls(data, names=names, masked=True)
# Set the column units
table["Time"].unit = "s"
table["Progress"].unit = "%"
table.path = None
return table
# -----------------------------------------------------------------
@classmethod
def from_file(cls, path):
"""
This function ...
:param path:
:return:
"""
# Open the table
#table = cls.read(path, format="ascii.ecsv")
table = super(ProgressTable, cls).read(path, format="ascii.ecsv")
# Set the path
table.path = path
# Return the table
return table
# -----------------------------------------------------------------
@classmethod
def from_remote_file(cls, path, remote):
"""
This function ...
:param path:
:param remote:
:return:
"""
# Open the contents
contents = remote.get_text(path)
# Open the table
table = cls.read(contents, format="ascii.ecsv")
# Return the table
return table
# -----------------------------------------------------------------
def save(self):
"""
This function ...
:return:
"""
# Save to the current path
self.saveto(self.path)
# -----------------------------------------------------------------
def saveto(self, path):
"""
This function ...
:param path:
:return:
"""
# Write the table in ECSV format
self.write(path, format="ascii.ecsv")
# Set the path
self.path = path
# -----------------------------------------------------------------
def extract_progress_cwd():
"""
    This function ...
:return:
"""
from pts.core.simulation.simulation import createsimulations
# Create a SkirtSimulation object based on a log file present in the current working directory
simulation = createsimulations(single=True)
# Create a new ProgressExtractor instance
extractor = ProgressExtractor()
# Run the extractor and get the table
extractor.run(simulation)
table = extractor.table
# Return the progress table
return table
# -----------------------------------------------------------------
class ProgressExtractor(object):
"""
This class ...
"""
def __init__(self):
"""
The constructor ...
:return:
"""
# -- Attributes --
self.log_files = None
#self.staggered = None
self.table = None
# The output path
self.output_path = None
# -----------------------------------------------------------------
def run(self, simulation, output_path=None):
"""
This function ...
:return:
"""
# 1. Call the setup function
self.setup(simulation, output_path=output_path)
# 2. Perform the extraction
self.extract()
# 3. Write the results
if self.output_path is not None: self.write()
# -----------------------------------------------------------------
def setup(self, simulation, output_path=None):
"""
This function ...
:param simulation:
:param output_path:
:return:
"""
# Obtain the log files created by the simulation
self.log_files = simulation.logfiles()
# Determine whether the emission spectra calculation was performed using a staggered assignment scheme
# self.staggered = simulation.parameters().staggered()
# Set the output path
self.output_path = output_path
# -----------------------------------------------------------------
def extract(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Extracting ...")
number_of_processes = None
# Initialize lists for the columns
process_list = []
phase_list = []
seconds_list = []
progress_list = []
# Loop over the log files again and fill the column lists
for log_file in self.log_files:
# Get the total number of processes
if number_of_processes is None: number_of_processes = log_file.processes
else: assert number_of_processes == log_file.processes
# Get the process rank associated with this log file
process = log_file.process
stellar_start = None
spectra_start = None
dust_start = None
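            # Only the first emission-spectra calculation and the last ("actual") dust emission
            # phase are recorded; the flags below track which of these phases is being parsed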
first_spectra_phase = True
last_dust_phase = False
total_entries = None
entries_per_process = None
# Loop over the entries in the log file
for i in range(len(log_file.contents)):
# Get the description of the current simulation phase
phase = log_file.contents["Phase"][i]
# The current log message
message = log_file.contents["Message"][i]
# The log file entries corresponding to the stellar emission phase
if phase == "stellar":
# If this is the log message that marks the very start of the stellar emission phase, record the associated time
if "photon packages for" in message:
stellar_start = log_file.contents["Time"][i]
# Add the process rank and phase entries
process_list.append(process)
phase_list.append(phase)
# Add the seconds entry
seconds_list.append(0.0)
# Get the progress and add it to the list
progress_list.append(0.0)
# If this is one of the log messages that log stellar emission progress
elif "Launched stellar emission photon packages" in message:
# Add the seconds entry
seconds = (log_file.contents["Time"][i] - stellar_start).total_seconds()
# Get the progress and add it to the list
try: progress = float(message.split("packages: ")[1].split("%")[0])
except: continue # INVALID LINE
# Add the process rank and phase entries
process_list.append(process)
phase_list.append(phase)
# Add the seconds and progress
seconds_list.append(seconds)
progress_list.append(progress)
# The log file entries corresponding to the stellar emission phase
elif phase == "spectra" and first_spectra_phase:
# If this is the log message that marks the very start of the spectra calculation, record the associated time
# If this log message states the total number of library entries that are used, record this number
if "Library entries in use" in message:
spectra_start = log_file.contents["Time"][i]
# Get the total number of library entries in use and the number of entries per process
total_entries = int(message.split("use: ")[1].split(" out of")[0])
entries_per_process = total_entries / number_of_processes
# Add the process rank and phase entries
process_list.append(process)
phase_list.append(phase)
# Add the seconds entry
seconds_list.append(0.0)
# Get the progress and add it to the list
progress_list.append(0.0)
elif "Calculating emission for" in message:
entry = float(message.split()[-1][:-3])
# Determine the progress
#if self.staggered: fraction = entry / total_entries
#else: fraction = (entry - process * entries_per_process) / entries_per_process
fraction = entry / total_entries
# Add the process rank and phase entries
process_list.append(process)
phase_list.append(phase)
# Add the seconds entry
seconds = (log_file.contents["Time"][i] - spectra_start).total_seconds()
seconds_list.append(seconds)
# Get the progress and add it to the list
progress = float(fraction*100.0)
progress_list.append(progress)
# The log file entries corresponding to the dust emission phase
# We only want to record the progress of the 'last' dust emission phase
elif phase == "dust" and last_dust_phase:
# If this is the log message that marks the very start of the dust emission phase, record the associated time
if "photon packages for" in message:
dust_start = log_file.contents["Time"][i]
# Add the process rank and phase entries
process_list.append(process)
phase_list.append(phase)
# Add the seconds entry
seconds_list.append(0.0)
# Get the progress and add it to the list
progress_list.append(0.0)
# If this is one of the log messages that log dust emission progress
elif "Launched dust emission photon packages" in message:
# Add the seconds entry
seconds = (log_file.contents["Time"][i] - dust_start).total_seconds()
# Get the progress and add it to the list
try: progress = float(message.split("packages: ")[1].split("%")[0])
except: continue # INVALID LINE
# Add the process rank and phase entries
process_list.append(process)
phase_list.append(phase)
# Add the seconds and progress
seconds_list.append(seconds)
progress_list.append(progress)
# Record the end of the spectra calculation (the first log message of the emission phase of the self-absorption cycle)
elif phase == "dust" and first_spectra_phase:
# If this line indicates the end of the dust emission spectra calculation
if "Dust emission spectra calculated" in message:
# Add the process rank and phase entries
process_list.append(process)
phase_list.append("spectra")
# Add the seconds entry
seconds = (log_file.contents["Time"][i] - spectra_start).total_seconds()
seconds_list.append(seconds)
# Add 100% progress to the list
progress_list.append(100.0)
# Indicate that the first spectra phase has already been processed (subsequent spectra phases can be ignored)
first_spectra_phase = False
# Log messages that fall in between phases
elif phase is None:
# The current log message
message = log_file.contents["Message"][i]
# Look for messages indicating whether this dust photon shooting phase corresponds to
# one of the dust self-absorption cycles or the actual dust emission phase
if "dust self-absorption cycle" in message: last_dust_phase = False
elif "Starting the dust emission phase" in message: last_dust_phase = True
elif "Finished the stellar emission phase" in message:
# Add the process rank and phase entries
process_list.append(process)
phase_list.append("stellar")
# Add the seconds entry
seconds = (log_file.contents["Time"][i] - stellar_start).total_seconds()
seconds_list.append(seconds)
# Add 100% progress to the list
progress_list.append(100.0)
elif "Finished the dust emission phase" in message:
# Add the process rank and phase entries
process_list.append(process)
phase_list.append("dust")
# Add the seconds entry
seconds = (log_file.contents["Time"][i] - dust_start).total_seconds()
seconds_list.append(seconds)
# Add 100% progress to the list
progress_list.append(100.0)
# Create the progress table
self.table = ProgressTable.from_columns(process_list, phase_list, seconds_list, progress_list)
# -----------------------------------------------------------------
def write(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing ...")
# Write the table to file
self.table.saveto(self.output_path)
# -----------------------------------------------------------------
def clear(self):
"""
This function ...
:return:
"""
# Set the table to None
self.table = None
# -----------------------------------------------------------------
| agpl-3.0 | 3,470,374,518,671,627,000 | 31.881148 | 134 | 0.491836 | false |
felipenaselva/felipe.repository | script.module.resolveurl/lib/resolveurl/plugins/trt.py | 1 | 1776 | '''
    trt resolveurl plugin
Copyright (C) 2014 Eldorado
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from lib import helpers
from resolveurl import common
from resolveurl.resolver import ResolveUrl, ResolverError
import re
class trtResolver(ResolveUrl):
name = "trt"
domains = ["trt.pl"]
pattern = '(?://|\.)(trt\.pl)/(?:film)/([0-9a-zA-Z]+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
headers = {'Referer': web_url, 'User-Agent': common.FF_USER_AGENT}
html = self.net.http_GET(web_url, headers=headers).content
pages = re.findall('href="([^"]+)[^>]+class="mainPlayerQualityHref"[^>]+>(.*?)</a>', html)
if pages:
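            # Sort the quality links by their numeric label (e.g. "720p" -> 720), highest first,
            # so the best available quality page is requested below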
try: pages.sort(key=lambda x: int(x[1][:-1]), reverse=True)
except: pass
html = self.net.http_GET('https://www.trt.pl' + pages[0][0], headers=headers).content
sources = helpers.scrape_sources(html, scheme='https')
return helpers.pick_source(sources) + helpers.append_headers(headers)
def get_url(self, host, media_id):
return 'https://www.trt.pl/film/%s' % media_id
| gpl-2.0 | 3,321,776,893,372,562,000 | 37.608696 | 98 | 0.671734 | false |
mikesname/python-ocrlab | ocrsite/ocrlab/forms/__init__.py | 1 | 1274 | """Ocrlab forms."""
from django import forms
from django.template.defaultfilters import filesizeformat
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from ocrlab.models import Preset
# Add to your settings file
CONTENT_TYPES = ['image', 'video']
# 2.5MB - 2621440
# 5MB - 5242880
# 10MB - 10485760
# 20MB - 20971520
# 50MB - 52428800
# 100MB - 104857600
# 250MB - 262144000
# 500MB - 524288000
# Must be an int: the clean_file size check compares it against the uploaded file size in bytes.
MAX_UPLOAD_SIZE = 20971520
class SimpleOcrForm(forms.Form):
file = forms.FileField()
async = forms.BooleanField(required=False)
preset = forms.ModelChoiceField(queryset=Preset.objects.all())
def clean_file(self):
content = self.cleaned_data['file']
content_type = content.content_type.split('/')[0]
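        # Only the major MIME type (e.g. "image" from "image/png") is checked against CONTENT_TYPES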
if content_type in CONTENT_TYPES:
if content._size > MAX_UPLOAD_SIZE:
raise forms.ValidationError(_('Please keep filesize under %s. Current filesize %s') % (
filesizeformat(MAX_UPLOAD_SIZE),
filesizeformat(content._size)))
else:
raise forms.ValidationError(_('File type is not supported'))
return content
class PresetForm(forms.ModelForm):
class Meta:
model = Preset
| mit | -1,963,474,988,661,655,800 | 28.627907 | 103 | 0.658556 | false |
all-of-us/raw-data-repository | rdr_service/dao/questionnaire_response_dao.py | 1 | 57185 | import json
import logging
import os
import re
from datetime import datetime
from dateutil import parser
from hashlib import md5
import pytz
from sqlalchemy import or_
from sqlalchemy.orm import joinedload, subqueryload
from typing import Dict
from werkzeug.exceptions import BadRequest
from rdr_service.lib_fhir.fhirclient_1_0_6.models import questionnaireresponse as fhir_questionnaireresponse
from rdr_service.participant_enums import QuestionnaireResponseStatus, PARTICIPANT_COHORT_2_START_TIME,\
PARTICIPANT_COHORT_3_START_TIME
from rdr_service.app_util import get_account_origin_id, is_self_request
from rdr_service import storage
from rdr_service import clock, config
from rdr_service.code_constants import (
CABOR_SIGNATURE_QUESTION_CODE,
CONSENT_COHORT_GROUP_CODE,
CONSENT_FOR_DVEHR_MODULE,
CONSENT_FOR_GENOMICS_ROR_MODULE,
CONSENT_FOR_ELECTRONIC_HEALTH_RECORDS_MODULE,
CONSENT_FOR_STUDY_ENROLLMENT_MODULE,
CONSENT_PERMISSION_YES_CODE,
DVEHRSHARING_CONSENT_CODE_NOT_SURE,
DVEHRSHARING_CONSENT_CODE_YES,
DVEHR_SHARING_QUESTION_CODE,
EHR_CONSENT_QUESTION_CODE,
EHR_CONSENT_EXPIRED_QUESTION_CODE,
GENDER_IDENTITY_QUESTION_CODE,
LANGUAGE_OF_CONSENT,
PMI_SKIP_CODE,
PPI_EXTRA_SYSTEM,
PPI_SYSTEM,
RACE_QUESTION_CODE,
CONSENT_GROR_YES_CODE,
CONSENT_GROR_NO_CODE,
CONSENT_GROR_NOT_SURE,
GROR_CONSENT_QUESTION_CODE,
CONSENT_COPE_YES_CODE,
CONSENT_COPE_NO_CODE,
CONSENT_COPE_DEFERRED_CODE,
COPE_CONSENT_QUESTION_CODE,
STREET_ADDRESS_QUESTION_CODE,
STREET_ADDRESS2_QUESTION_CODE,
EHR_CONSENT_EXPIRED_YES,
PRIMARY_CONSENT_UPDATE_QUESTION_CODE,
COHORT_1_REVIEW_CONSENT_YES_CODE,
COPE_VACCINE_MINUTE_1_MODULE_CODE)
from rdr_service.dao.base_dao import BaseDao
from rdr_service.dao.code_dao import CodeDao
from rdr_service.dao.participant_dao import ParticipantDao
from rdr_service.dao.participant_summary_dao import (
ParticipantGenderAnswersDao,
ParticipantRaceAnswersDao,
ParticipantSummaryDao,
)
from rdr_service.dao.questionnaire_dao import QuestionnaireHistoryDao, QuestionnaireQuestionDao
from rdr_service.field_mappings import FieldType, QUESTIONNAIRE_MODULE_CODE_TO_FIELD, QUESTION_CODE_TO_FIELD
from rdr_service.model.code import Code, CodeType
from rdr_service.model.questionnaire import QuestionnaireHistory, QuestionnaireQuestion
from rdr_service.model.questionnaire_response import QuestionnaireResponse, QuestionnaireResponseAnswer,\
QuestionnaireResponseExtension
from rdr_service.model.survey import Survey, SurveyQuestion, SurveyQuestionOption, SurveyQuestionType
from rdr_service.participant_enums import (
QuestionnaireDefinitionStatus,
QuestionnaireStatus,
TEST_LOGIN_PHONE_NUMBER_PREFIX,
get_gender_identity,
get_race,
ParticipantCohort,
ConsentExpireStatus)
_QUESTIONNAIRE_PREFIX = "Questionnaire/"
_QUESTIONNAIRE_HISTORY_SEGMENT = "/_history/"
_QUESTIONNAIRE_REFERENCE_FORMAT = _QUESTIONNAIRE_PREFIX + "{}" + _QUESTIONNAIRE_HISTORY_SEGMENT + "{}"
_SIGNED_CONSENT_EXTENSION = "http://terminology.pmi-ops.org/StructureDefinition/consent-form-signed-pdf"
_LANGUAGE_EXTENSION = "http://hl7.org/fhir/StructureDefinition/iso21090-ST-language"
_CATI_EXTENSION = "http://all-of-us.org/fhir/forms/non-participant-author"
def count_completed_baseline_ppi_modules(participant_summary):
baseline_ppi_module_fields = config.getSettingList(config.BASELINE_PPI_QUESTIONNAIRE_FIELDS, [])
return sum(
1
for field in baseline_ppi_module_fields
if getattr(participant_summary, field) == QuestionnaireStatus.SUBMITTED
)
def count_completed_ppi_modules(participant_summary):
ppi_module_fields = config.getSettingList(config.PPI_QUESTIONNAIRE_FIELDS, [])
return sum(
1 for field in ppi_module_fields if getattr(participant_summary, field, None) == QuestionnaireStatus.SUBMITTED
)
def get_first_completed_baseline_time(participant_summary):
baseline_authored = getattr(participant_summary, 'baselineQuestionnairesFirstCompleteAuthored')
if baseline_authored:
return baseline_authored
baseline_ppi_module_fields = config.getSettingList(config.BASELINE_PPI_QUESTIONNAIRE_FIELDS, [])
baseline_time = datetime(1000, 1, 1)
for field in baseline_ppi_module_fields:
field_value = getattr(participant_summary, field + "Authored")
if not field_value:
return None
else:
if field_value > baseline_time:
baseline_time = field_value
return baseline_time
class ResponseValidator:
def __init__(self, questionnaire_history: QuestionnaireHistory, session):
self.session = session
self._questionnaire_question_map = self._build_question_id_map(questionnaire_history)
self.survey = self._get_survey_for_questionnaire_history(questionnaire_history)
if self.survey is not None:
self._code_to_question_map = self._build_code_to_question_map()
if self.survey.redcapProjectId is not None:
logging.info('Validating imported survey')
# Get the skip code id
self.skip_code_id = self.session.query(Code.codeId).filter(Code.value == PMI_SKIP_CODE).scalar()
if self.skip_code_id is None:
logging.error('Unable to load PMI_SKIP code')
def _get_survey_for_questionnaire_history(self, questionnaire_history: QuestionnaireHistory):
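        # Find the survey definition that was active when this questionnaire version was created:
        # imported before the questionnaire's creation time and not yet replaced at that time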
survey_query = self.session.query(Survey).filter(
Survey.codeId.in_([concept.codeId for concept in questionnaire_history.concepts]),
Survey.importTime < questionnaire_history.created,
or_(
Survey.replacedTime.is_(None),
Survey.replacedTime > questionnaire_history.created
)
).options(
joinedload(Survey.questions).joinedload(SurveyQuestion.options).joinedload(SurveyQuestionOption.code)
)
num_surveys_found = survey_query.count()
if num_surveys_found == 0:
logging.warning(
f'No survey definition found for questionnaire id "{questionnaire_history.questionnaireId}" '
f'version "{questionnaire_history.version}"'
)
elif num_surveys_found > 1:
logging.warning(
f'Multiple survey definitions found for questionnaire id "{questionnaire_history.questionnaireId}" '
f'version "{questionnaire_history.version}"'
)
return survey_query.first()
def _build_code_to_question_map(self) -> Dict[int, SurveyQuestion]:
return {survey_question.code.codeId: survey_question for survey_question in self.survey.questions}
@classmethod
def _build_question_id_map(cls, questionnaire_history: QuestionnaireHistory) -> Dict[int, QuestionnaireQuestion]:
return {question.questionnaireQuestionId: question for question in questionnaire_history.questions}
@classmethod
def _validate_min_max(cls, answer, min_str, max_str, parser_function, question_code):
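        # Range validation is advisory only: out-of-range answers and unparseable validation
        # strings are logged rather than causing the response to be rejected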
try:
if min_str:
min_parsed = parser_function(min_str)
if answer < min_parsed:
logging.warning(
f'Given answer "{answer}" is less than expected min "{min_str}" for question {question_code}'
)
if max_str:
max_parsed = parser_function(max_str)
if answer > max_parsed:
logging.warning(
f'Given answer "{answer}" is greater than expected max "{max_str}" for question {question_code}'
)
except (parser.ParserError, ValueError):
logging.error(f'Unable to parse validation string for question {question_code}', exc_info=True)
def _check_answer_has_expected_data_type(self, answer: QuestionnaireResponseAnswer,
question_definition: SurveyQuestion,
questionnaire_question: QuestionnaireQuestion):
question_code_value = questionnaire_question.code.value
if answer.valueCodeId == self.skip_code_id:
            # Any question can be answered with a skip, so there isn't anything to check in that case
return
if question_definition.questionType in (SurveyQuestionType.UNKNOWN,
SurveyQuestionType.DROPDOWN,
SurveyQuestionType.RADIO,
SurveyQuestionType.CHECKBOX):
number_of_selectable_options = len(question_definition.options)
if number_of_selectable_options == 0 and answer.valueCodeId is not None:
logging.warning(
f'Answer for {question_code_value} gives a value code id when no options are defined'
)
elif number_of_selectable_options > 0:
if answer.valueCodeId is None:
logging.warning(
f'Answer for {question_code_value} gives no value code id when the question has options defined'
)
elif answer.valueCodeId not in [option.codeId for option in question_definition.options]:
logging.warning(f'Code ID {answer.valueCodeId} is an invalid answer to {question_code_value}')
elif question_definition.questionType in (SurveyQuestionType.TEXT, SurveyQuestionType.NOTES):
if question_definition.validation is None and answer.valueString is None:
logging.warning(f'No valueString answer given for text-based question {question_code_value}')
elif question_definition.validation is not None and question_definition.validation != '':
if question_definition.validation.startswith('date'):
if answer.valueDate is None:
logging.warning(f'No valueDate answer given for date-based question {question_code_value}')
else:
self._validate_min_max(
answer.valueDate,
question_definition.validation_min,
question_definition.validation_max,
lambda validation_str: parser.parse(validation_str).date(),
question_code_value
)
elif question_definition.validation == 'integer':
if answer.valueInteger is None:
logging.warning(
f'No valueInteger answer given for integer-based question {question_code_value}'
)
else:
self._validate_min_max(
answer.valueInteger,
question_definition.validation_min,
question_definition.validation_max,
int,
question_code_value
)
else:
logging.warning(
f'Unrecognized validation string "{question_definition.validation}" '
f'for question {question_code_value}'
)
else:
            # There aren't a lot of surveys in REDCap right now, so it's unclear how
            # some of the other question types would be answered
logging.warning(f'No validation check implemented for answer to {question_code_value} '
f'with question type {question_definition.questionType}')
def check_response(self, response: QuestionnaireResponse):
if self.survey is None:
return None
question_codes_answered = set()
for answer in response.answers:
questionnaire_question = self._questionnaire_question_map.get(answer.questionId)
if questionnaire_question is None:
# This is less validation, and more getting the object that should ideally already be linked
logging.error(f'Unable to find question {answer.questionId} in questionnaire history')
else:
survey_question = self._code_to_question_map.get(questionnaire_question.codeId)
if not survey_question:
logging.error(f'Question code used by the answer to question {answer.questionId} does not match a '
f'code found on the survey definition')
else:
self._check_answer_has_expected_data_type(answer, survey_question, questionnaire_question)
if survey_question.codeId in question_codes_answered:
logging.error(f'Too many answers given for {survey_question.code.value}')
elif survey_question.questionType != SurveyQuestionType.CHECKBOX:
if not (
survey_question.questionType == SurveyQuestionType.UNKNOWN and len(survey_question.options)
): # UNKNOWN question types could be for a Checkbox, so multiple answers should be allowed
question_codes_answered.add(survey_question.codeId)
class QuestionnaireResponseDao(BaseDao):
def __init__(self):
super(QuestionnaireResponseDao, self).__init__(QuestionnaireResponse)
def get_id(self, obj):
return obj.questionnaireResponseId
def get_with_session(self, session, obj_id, **kwargs):
result = super(QuestionnaireResponseDao, self).get_with_session(session, obj_id, **kwargs)
if result:
ParticipantDao().validate_participant_reference(session, result)
return result
def get_with_children(self, questionnaire_response_id):
with self.session() as session:
query = session.query(QuestionnaireResponse).options(subqueryload(QuestionnaireResponse.answers))
result = query.get(questionnaire_response_id)
if result:
ParticipantDao().validate_participant_reference(session, result)
return result
def _validate_model(self, session, obj): # pylint: disable=unused-argument
if not obj.questionnaireId:
raise BadRequest("QuestionnaireResponse.questionnaireId is required.")
if not obj.questionnaireVersion:
raise BadRequest("QuestionnaireResponse.questionnaireVersion is required.")
if not obj.answers:
logging.error("QuestionnaireResponse model has no answers. This is harmless but probably an error.")
def _validate_link_ids_from_resource_json_group(self, resource, link_ids):
"""
Look for question sections and validate the linkid in each answer. If there is a response
answer link id that does not exist in the questionnaire, then log a message. In
the future this may be changed to raising an exception.
This is a recursive function because answer groups can be nested.
:param resource: A group section of the response json.
:param link_ids: List of link ids to validate against.
"""
# note: resource can be either a dict or a list.
# if this is a dict and 'group' is found, always call ourselves.
if "group" in resource:
self._validate_link_ids_from_resource_json_group(resource["group"], link_ids)
if "question" not in resource and isinstance(resource, list):
for item in resource:
self._validate_link_ids_from_resource_json_group(item, link_ids)
# once we have a question section, iterate through list of answers.
if "question" in resource:
for section in resource["question"]:
link_id = section.get('linkId', None)
# Do not log warning or raise exception when link id is 'ignoreThis' for unit tests.
if (
link_id is not None
and link_id.lower() != "ignorethis"
and link_id not in link_ids
):
# The link_ids list being checked is a list of questions that have been answered,
# the list doesn't include valid link_ids that don't have answers
if "answer" in section:
logging.error(f'Questionnaire response contains invalid link ID "{link_id}"')
@staticmethod
def _imply_street_address_2_from_street_address_1(code_ids):
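        # Treat an answer to street address line 1 as also covering line 2, so that any previously
        # stored line-2 answer gets superseded when a new address omits it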
code_dao = CodeDao()
street_address_1_code = code_dao.get_code(PPI_SYSTEM, STREET_ADDRESS_QUESTION_CODE)
if street_address_1_code and street_address_1_code.codeId in code_ids:
street_address_2_code = code_dao.get_code(PPI_SYSTEM, STREET_ADDRESS2_QUESTION_CODE)
if street_address_2_code and street_address_2_code.codeId not in code_ids:
code_ids.append(street_address_2_code.codeId)
def insert_with_session(self, session, questionnaire_response):
# Look for a questionnaire that matches any of the questionnaire history records.
questionnaire_history = QuestionnaireHistoryDao().get_with_children_with_session(
session, [questionnaire_response.questionnaireId, questionnaire_response.questionnaireSemanticVersion]
)
if not questionnaire_history:
raise BadRequest(
f"Questionnaire with ID {questionnaire_response.questionnaireId}, \
semantic version {questionnaire_response.questionnaireSemanticVersion} is not found"
)
try:
answer_validator = ResponseValidator(questionnaire_history, session)
answer_validator.check_response(questionnaire_response)
except (AttributeError, ValueError, TypeError, LookupError):
logging.error('Code error encountered when validating the response', exc_info=True)
questionnaire_response.created = clock.CLOCK.now()
if not questionnaire_response.authored:
questionnaire_response.authored = questionnaire_response.created
# Put the ID into the resource.
resource_json = json.loads(questionnaire_response.resource)
resource_json["id"] = str(questionnaire_response.questionnaireResponseId)
questionnaire_response.resource = json.dumps(resource_json)
super().validate_origin(questionnaire_response)
# Gather the question ids and records that match the questions in the response
question_ids = [answer.questionId for answer in questionnaire_response.answers]
questions = QuestionnaireQuestionDao().get_all_with_session(session, question_ids)
# DA-623: raise error when response link ids do not match our question link ids.
# Gather the valid link ids for this question
link_ids = [question.linkId for question in questions]
# look through the response and verify each link id is valid for each question.
self._validate_link_ids_from_resource_json_group(resource_json, link_ids)
code_ids = [question.codeId for question in questions]
self._imply_street_address_2_from_street_address_1(code_ids)
current_answers = QuestionnaireResponseAnswerDao().get_current_answers_for_concepts(
session, questionnaire_response.participantId, code_ids
)
# IMPORTANT: update the participant summary first to grab an exclusive lock on the participant
# row. If you instead do this after the insert of the questionnaire response, MySQL will get a
# shared lock on the participant row due the foreign key, and potentially deadlock later trying
# to get the exclusive lock if another thread is updating the participant. See DA-269.
# (We need to lock both participant and participant summary because the summary row may not
# exist yet.)
if questionnaire_response.status == QuestionnaireResponseStatus.COMPLETED:
with self.session() as new_session:
self._update_participant_summary(
new_session, questionnaire_response, code_ids, questions, questionnaire_history, resource_json
)
super(QuestionnaireResponseDao, self).insert_with_session(session, questionnaire_response)
# Mark existing answers for the questions in this response given previously by this participant
# as ended.
for answer in current_answers:
answer.endTime = questionnaire_response.created
session.merge(answer)
return questionnaire_response
def _get_field_value(self, field_type, answer):
if field_type == FieldType.CODE:
return answer.valueCodeId
if field_type == FieldType.STRING:
return answer.valueString
if field_type == FieldType.DATE:
return answer.valueDate
raise BadRequest(f"Don't know how to map field of type {field_type}")
def _update_field(self, participant_summary, field_name, field_type, answer):
value = getattr(participant_summary, field_name)
new_value = self._get_field_value(field_type, answer)
if new_value is not None and value != new_value:
setattr(participant_summary, field_name, new_value)
return True
return False
@staticmethod
def _find_cope_month(questionnaire_history: QuestionnaireHistory, response_authored_date):
cope_form_id_map = config.getSettingJson(config.COPE_FORM_ID_MAP)
for form_ids_str, month_name in cope_form_id_map.items():
if questionnaire_history.externalId in form_ids_str.split(','):
return month_name
        # If the questionnaire identifier isn't in the COPE map, use the response authored date as a fallback
        logging.error('Unrecognized identifier for COPE survey response '
                      f'(questionnaire_id: "{questionnaire_history.questionnaireId}", '
                      f'version: "{questionnaire_history.version}", identifier: "{questionnaire_history.externalId}")')
if response_authored_date < datetime(2020, 6, 4):
return 'May'
elif response_authored_date < datetime(2020, 7, 1):
return 'June'
elif response_authored_date < datetime(2020, 10, 5):
return 'July'
elif response_authored_date < datetime(2020, 12, 5): # Nov scheduled to close on Dec 3rd
return 'Nov'
elif response_authored_date < datetime(2021, 2, 8): # Feb scheduled to open on Feb 9th
return 'Dec'
else:
return 'Feb'
def _update_participant_summary(
self, session, questionnaire_response, code_ids, questions, questionnaire_history, resource_json
):
"""Updates the participant summary based on questions answered and modules completed
in the questionnaire response.
If no participant summary exists already, only a response to the study enrollment consent
questionnaire can be submitted, and it must include first and last name and e-mail address.
"""
# Block on other threads modifying the participant or participant summary.
participant = ParticipantDao().get_for_update(session, questionnaire_response.participantId)
if participant is None:
raise BadRequest(f"Participant with ID {questionnaire_response.participantId} is not found.")
participant_summary = participant.participantSummary
authored = questionnaire_response.authored
# If authored is a datetime and has tzinfo, convert to utc and remove tzinfo.
# The authored timestamps in the participant summary will already be in utc, but lack tzinfo.
if authored and isinstance(authored, datetime) and authored.tzinfo:
authored = authored.astimezone(pytz.utc).replace(tzinfo=None)
code_ids.extend([concept.codeId for concept in questionnaire_history.concepts])
code_dao = CodeDao()
something_changed = False
module_changed = False
# If no participant summary exists, make sure this is the study enrollment consent.
if not participant_summary:
consent_code = code_dao.get_code(PPI_SYSTEM, CONSENT_FOR_STUDY_ENROLLMENT_MODULE)
if not consent_code:
raise BadRequest("No study enrollment consent code found; import codebook.")
if not consent_code.codeId in code_ids:
raise BadRequest(
f"Can't submit order for participant {questionnaire_response.participantId} without consent"
)
if not _validate_consent_pdfs(resource_json):
raise BadRequest(
f"Unable to find signed consent-for-enrollment file for participant"
)
participant_summary = ParticipantDao.create_summary_for_participant(participant)
something_changed = True
# Fetch the codes for all questions and concepts
codes = code_dao.get_with_ids(code_ids)
code_map = {code.codeId: code for code in codes if code.system == PPI_SYSTEM}
question_map = {question.questionnaireQuestionId: question for question in questions}
race_code_ids = []
gender_code_ids = []
ehr_consent = False
gror_consent = None
dvehr_consent = QuestionnaireStatus.SUBMITTED_NO_CONSENT
street_address_submitted = False
street_address2_submitted = False
# Set summary fields for answers that have questions with codes found in QUESTION_CODE_TO_FIELD
for answer in questionnaire_response.answers:
question = question_map.get(answer.questionId)
if question:
code = code_map.get(question.codeId)
if code:
if code.value == GENDER_IDENTITY_QUESTION_CODE:
gender_code_ids.append(answer.valueCodeId)
elif code.value == STREET_ADDRESS_QUESTION_CODE:
street_address_submitted = answer.valueString is not None
elif code.value == STREET_ADDRESS2_QUESTION_CODE:
street_address2_submitted = answer.valueString is not None
summary_field = QUESTION_CODE_TO_FIELD.get(code.value)
if summary_field:
if something_changed:
self._update_field(participant_summary, summary_field[0], summary_field[1], answer)
else:
something_changed = self._update_field(
participant_summary, summary_field[0], summary_field[1], answer
)
elif code.value == RACE_QUESTION_CODE:
race_code_ids.append(answer.valueCodeId)
elif code.value == DVEHR_SHARING_QUESTION_CODE:
code = code_dao.get(answer.valueCodeId)
if code and code.value == DVEHRSHARING_CONSENT_CODE_YES:
dvehr_consent = QuestionnaireStatus.SUBMITTED
elif code and code.value == DVEHRSHARING_CONSENT_CODE_NOT_SURE:
dvehr_consent = QuestionnaireStatus.SUBMITTED_NOT_SURE
elif code.value == EHR_CONSENT_QUESTION_CODE:
code = code_dao.get(answer.valueCodeId)
if participant_summary.ehrConsentExpireStatus == ConsentExpireStatus.EXPIRED and \
authored > participant_summary.ehrConsentExpireAuthored:
participant_summary.ehrConsentExpireStatus = ConsentExpireStatus.UNSET
participant_summary.ehrConsentExpireAuthored = None
participant_summary.ehrConsentExpireTime = None
if code and code.value == CONSENT_PERMISSION_YES_CODE:
ehr_consent = True
if participant_summary.consentForElectronicHealthRecordsFirstYesAuthored is None:
participant_summary.consentForElectronicHealthRecordsFirstYesAuthored = authored
if participant_summary.ehrConsentExpireStatus == ConsentExpireStatus.EXPIRED and \
authored < participant_summary.ehrConsentExpireAuthored:
ehr_consent = False
elif code.value == EHR_CONSENT_EXPIRED_QUESTION_CODE:
if answer.valueString and answer.valueString == EHR_CONSENT_EXPIRED_YES:
participant_summary.ehrConsentExpireStatus = ConsentExpireStatus.EXPIRED
participant_summary.ehrConsentExpireAuthored = authored
participant_summary.ehrConsentExpireTime = questionnaire_response.created
something_changed = True
elif code.value == CABOR_SIGNATURE_QUESTION_CODE:
if answer.valueUri or answer.valueString:
# TODO: validate the URI? [DA-326]
if not participant_summary.consentForCABoR:
participant_summary.consentForCABoR = True
participant_summary.consentForCABoRTime = questionnaire_response.created
participant_summary.consentForCABoRAuthored = authored
something_changed = True
elif code.value == GROR_CONSENT_QUESTION_CODE:
if code_dao.get(answer.valueCodeId).value == CONSENT_GROR_YES_CODE:
gror_consent = QuestionnaireStatus.SUBMITTED
elif code_dao.get(answer.valueCodeId).value == CONSENT_GROR_NO_CODE:
gror_consent = QuestionnaireStatus.SUBMITTED_NO_CONSENT
elif code_dao.get(answer.valueCodeId).value == CONSENT_GROR_NOT_SURE:
gror_consent = QuestionnaireStatus.SUBMITTED_NOT_SURE
elif code.value == COPE_CONSENT_QUESTION_CODE:
answer_value = code_dao.get(answer.valueCodeId).value
if answer_value == CONSENT_COPE_YES_CODE:
submission_status = QuestionnaireStatus.SUBMITTED
elif answer_value in [CONSENT_COPE_NO_CODE, CONSENT_COPE_DEFERRED_CODE]:
submission_status = QuestionnaireStatus.SUBMITTED_NO_CONSENT
else:
submission_status = QuestionnaireStatus.SUBMITTED_INVALID
month_name = self._find_cope_month(questionnaire_history, authored)
setattr(participant_summary, f'questionnaireOnCope{month_name}', submission_status)
setattr(participant_summary, f'questionnaireOnCope{month_name}Time',
questionnaire_response.created)
setattr(participant_summary, f'questionnaireOnCope{month_name}Authored', authored)
# COPE Survey changes need to update number of modules complete in summary
module_changed = True
elif code.value == PRIMARY_CONSENT_UPDATE_QUESTION_CODE:
answer_value = code_dao.get(answer.valueCodeId).value
if answer_value == COHORT_1_REVIEW_CONSENT_YES_CODE:
participant_summary.consentForStudyEnrollmentAuthored = authored
elif code.value == CONSENT_COHORT_GROUP_CODE:
try:
cohort_group = int(answer.valueString)
# Only checking that we know of the cohort group so we don't crash when
# storing in the Enum column
cohort_numbers = ParticipantCohort.numbers()
if cohort_group not in cohort_numbers:
raise ValueError
else:
participant_summary.consentCohort = answer.valueString
something_changed = True
except ValueError:
logging.error(f'Invalid value given for cohort group: received "{answer.valueString}"')
        # If the answer for line 2 of the street address was left out then it needs to be cleared on the summary.
# So when it hasn't been submitted and there is something set for streetAddress2 we want to clear it out.
summary_has_street_line_two = participant_summary.streetAddress2 is not None \
and participant_summary.streetAddress2 != ""
if street_address_submitted and not street_address2_submitted and summary_has_street_line_two:
something_changed = True
participant_summary.streetAddress2 = None
# If race was provided in the response in one or more answers, set the new value.
if race_code_ids:
race_codes = [code_dao.get(code_id) for code_id in race_code_ids]
race = get_race(race_codes)
if race != participant_summary.race:
participant_summary.race = race
something_changed = True
if gender_code_ids:
gender_codes = [code_dao.get(code_id) for code_id in gender_code_ids]
gender = get_gender_identity(gender_codes)
if gender != participant_summary.genderIdentity:
participant_summary.genderIdentity = gender
something_changed = True
dna_program_consent_update_code = config.getSettingJson(config.DNA_PROGRAM_CONSENT_UPDATE_CODE, None)
# Set summary fields to SUBMITTED for questionnaire concepts that are found in
# QUESTIONNAIRE_MODULE_CODE_TO_FIELD
for concept in questionnaire_history.concepts:
code = code_map.get(concept.codeId)
if code:
summary_field = QUESTIONNAIRE_MODULE_CODE_TO_FIELD.get(code.value)
if summary_field:
new_status = QuestionnaireStatus.SUBMITTED
if code.value == CONSENT_FOR_ELECTRONIC_HEALTH_RECORDS_MODULE and not ehr_consent:
new_status = QuestionnaireStatus.SUBMITTED_NO_CONSENT
elif code.value == CONSENT_FOR_DVEHR_MODULE:
new_status = dvehr_consent
elif code.value == CONSENT_FOR_GENOMICS_ROR_MODULE:
if gror_consent is None:
raise BadRequest(
"GROR Consent answer is required to match code {}."
.format([CONSENT_GROR_YES_CODE, CONSENT_GROR_NO_CODE, CONSENT_GROR_NOT_SURE])
)
new_status = gror_consent
elif code.value == CONSENT_FOR_STUDY_ENROLLMENT_MODULE:
participant_summary.semanticVersionForPrimaryConsent = \
questionnaire_response.questionnaireSemanticVersion
if participant_summary.consentCohort is None or \
participant_summary.consentCohort == ParticipantCohort.UNSET:
if participant_summary.participantOrigin == 'vibrent':
logging.warning(f'Missing expected consent cohort information for participant '
f'{participant_summary.participantId}')
if authored >= PARTICIPANT_COHORT_3_START_TIME:
participant_summary.consentCohort = ParticipantCohort.COHORT_3
elif PARTICIPANT_COHORT_2_START_TIME <= authored < PARTICIPANT_COHORT_3_START_TIME:
participant_summary.consentCohort = ParticipantCohort.COHORT_2
elif authored < PARTICIPANT_COHORT_2_START_TIME:
participant_summary.consentCohort = ParticipantCohort.COHORT_1
if participant_summary.consentForStudyEnrollmentFirstYesAuthored is None:
participant_summary.consentForStudyEnrollmentFirstYesAuthored = authored
# set language of consent to participant summary
for extension in resource_json.get("extension", []):
if (
extension.get("url") == _LANGUAGE_EXTENSION
and extension.get("valueCode") in LANGUAGE_OF_CONSENT
):
if participant_summary.primaryLanguage != extension.get("valueCode"):
participant_summary.primaryLanguage = extension.get("valueCode")
something_changed = True
break
elif (
extension.get("url") == _LANGUAGE_EXTENSION
and extension.get("valueCode") not in LANGUAGE_OF_CONSENT
):
logging.warning(f"consent language {extension.get('valueCode')} not recognized.")
if getattr(participant_summary, summary_field) != new_status:
setattr(participant_summary, summary_field, new_status)
setattr(participant_summary, summary_field + "Time", questionnaire_response.created)
setattr(participant_summary, summary_field + "Authored", authored)
something_changed = True
module_changed = True
elif dna_program_consent_update_code is not None and code.value == dna_program_consent_update_code:
# If we receive a questionnaire response it means they've viewed the update and we should mark
# them as submitted
participant_summary.questionnaireOnDnaProgram = QuestionnaireStatus.SUBMITTED
participant_summary.questionnaireOnDnaProgramAuthored = authored
elif code.value == COPE_VACCINE_MINUTE_1_MODULE_CODE \
and participant_summary.questionnaireOnCopeVaccineMinute1 != QuestionnaireStatus.SUBMITTED:
participant_summary.questionnaireOnCopeVaccineMinute1 = QuestionnaireStatus.SUBMITTED
participant_summary.questionnaireOnCopeVaccineMinute1Authored = authored
module_changed = True
if module_changed:
participant_summary.numCompletedBaselinePPIModules = count_completed_baseline_ppi_modules(
participant_summary
)
participant_summary.baselineQuestionnairesFirstCompleteAuthored = get_first_completed_baseline_time(
participant_summary
)
participant_summary.numCompletedPPIModules = count_completed_ppi_modules(participant_summary)
if something_changed:
first_last = (participant_summary.firstName, participant_summary.lastName)
email_phone = (participant_summary.email, participant_summary.loginPhoneNumber)
if not all(first_last):
raise BadRequest(
"First name ({:s}), and last name ({:s}) required for consenting."
.format(*["present" if part else "missing" for part in first_last])
)
if not any(email_phone):
raise BadRequest(
"Email address ({:s}), or phone number ({:s}) required for consenting."
.format(*["present" if part else "missing" for part in email_phone])
)
ParticipantSummaryDao().update_enrollment_status(participant_summary)
participant_summary.lastModified = clock.CLOCK.now()
session.merge(participant_summary)
# switch account to test account if the phone number starts with 4442
# this is a requirement from PTSC
ph = getattr(participant_summary, 'loginPhoneNumber') or \
getattr(participant_summary, 'phoneNumber') or 'None'
ph_clean = re.sub('[\(|\)|\-|\s]', '', ph)
if ph_clean.startswith(TEST_LOGIN_PHONE_NUMBER_PREFIX):
ParticipantDao().switch_to_test_account(session, participant)
# update participant gender/race answers table
if race_code_ids:
participant_race_answer_dao = ParticipantRaceAnswersDao()
participant_race_answer_dao.update_race_answers_with_session(
session, participant.participantId, race_code_ids
)
if gender_code_ids:
participant_gender_race_dao = ParticipantGenderAnswersDao()
participant_gender_race_dao.update_gender_answers_with_session(
session, participant.participantId, gender_code_ids
)
def insert(self, obj):
if obj.questionnaireResponseId:
return super(QuestionnaireResponseDao, self).insert(obj)
return self._insert_with_random_id(obj, ["questionnaireResponseId"])
def read_status(self, fhir_response: fhir_questionnaireresponse.QuestionnaireResponse):
status_map = {
'in-progress': QuestionnaireResponseStatus.IN_PROGRESS,
'completed': QuestionnaireResponseStatus.COMPLETED,
'amended': QuestionnaireResponseStatus.AMENDED,
'entered-in-error': QuestionnaireResponseStatus.ENTERED_IN_ERROR,
'stopped': QuestionnaireResponseStatus.STOPPED
}
if fhir_response.status not in status_map:
raise BadRequest(f'Unrecognized status "{fhir_response.status}"')
else:
return status_map[fhir_response.status]
@classmethod
def calculate_answer_hash(cls, response_json):
answer_list_json = response_json.get('group', '')
answer_list_str = json.dumps(answer_list_json)
return md5(answer_list_str.encode('utf-8')).hexdigest()
@classmethod
def _extension_from_fhir_object(cls, fhir_extension):
# Get the non-empty values from the FHIR extension object for the url field and
# any field with a name that starts with "value"
fhir_fields = fhir_extension.__dict__
filtered_values = {}
for name, value in fhir_fields.items():
if value is not None and (name == 'url' or name.startswith('value')):
filtered_values[name] = value
return QuestionnaireResponseExtension(**filtered_values)
@classmethod
def _parse_external_identifier(cls, fhir_qr):
external_id = None
if fhir_qr.identifier:
external_id = fhir_qr.identifier.value
if external_id and len(external_id) > QuestionnaireResponse.externalId.type.length:
logging.warning('External id was larger than expected, unable to save it to the database.')
external_id = None
return external_id
@classmethod
def extension_models_from_fhir_objects(cls, fhir_extensions):
if fhir_extensions:
try:
return [cls._extension_from_fhir_object(extension) for extension in fhir_extensions]
except TypeError:
logging.warning('Unexpected extension value', exc_info=True)
return []
else:
return []
def from_client_json(self, resource_json, participant_id=None, client_id=None):
# pylint: disable=unused-argument
# Parse the questionnaire response, but preserve the original response when persisting
fhir_qr = fhir_questionnaireresponse.QuestionnaireResponse(resource_json)
patient_id = fhir_qr.subject.reference
if patient_id != "Patient/P{}".format(participant_id):
msg = "Questionnaire response subject reference does not match participant_id {}"
raise BadRequest(msg.format(participant_id))
questionnaire = self._get_questionnaire(fhir_qr.questionnaire, resource_json)
if questionnaire.status == QuestionnaireDefinitionStatus.INVALID:
raise BadRequest(
f"Submitted questionnaire that is marked as invalid: questionnaire ID {questionnaire.questionnaireId}"
)
authored = None
if fhir_qr.authored and fhir_qr.authored.date:
authored = fhir_qr.authored.date
language = None
non_participant_author = None
if fhir_qr.extension:
for ext in fhir_qr.extension:
if "iso21090-ST-language" in ext.url:
language = ext.valueCode[:2]
if ext.url == _CATI_EXTENSION:
non_participant_author = ext.valueString
qr = QuestionnaireResponse(
questionnaireId=questionnaire.questionnaireId,
questionnaireVersion=questionnaire.version,
questionnaireSemanticVersion=questionnaire.semanticVersion,
participantId=participant_id,
nonParticipantAuthor=non_participant_author,
authored=authored,
language=language,
resource=json.dumps(resource_json),
status=self.read_status(fhir_qr),
answerHash=self.calculate_answer_hash(resource_json),
externalId=self._parse_external_identifier(fhir_qr)
)
if fhir_qr.group is not None:
# Extract a code map and answers from the questionnaire response.
code_map, answers = self._extract_codes_and_answers(fhir_qr.group, questionnaire)
if not answers:
logging.error("No answers from QuestionnaireResponse JSON. This is harmless but probably an error.")
# Get or insert codes, and retrieve their database IDs.
code_id_map = CodeDao().get_internal_id_code_map(code_map)
# Now add the child answers, using the IDs in code_id_map
self._add_answers(qr, code_id_map, answers)
qr.extensions = self.extension_models_from_fhir_objects(fhir_qr.extension)
return qr
@staticmethod
def _get_questionnaire(questionnaire, resource_json):
"""Retrieves the questionnaire referenced by this response; mutates the resource JSON to include
the version if it doesn't already.
If a questionnaire has a history element it goes into the if block here."""
# if history...
if not questionnaire.reference.startswith(_QUESTIONNAIRE_PREFIX):
raise BadRequest(f"Questionnaire reference {questionnaire.reference} is invalid")
questionnaire_reference = questionnaire.reference[len(_QUESTIONNAIRE_PREFIX):]
# If the questionnaire response specifies the version of the questionnaire it's for, use it.
if _QUESTIONNAIRE_HISTORY_SEGMENT in questionnaire_reference:
questionnaire_ref_parts = questionnaire_reference.split(_QUESTIONNAIRE_HISTORY_SEGMENT)
if len(questionnaire_ref_parts) != 2:
raise BadRequest(f"Questionnaire id {questionnaire_reference} is invalid")
try:
questionnaire_id = int(questionnaire_ref_parts[0])
semantic_version = questionnaire_ref_parts[1]
q = QuestionnaireHistoryDao().get_with_children((questionnaire_id, semantic_version))
if not q:
raise BadRequest(f"Questionnaire with id {questionnaire_id}, semantic version {semantic_version} "
f"is not found")
return q
except ValueError:
raise BadRequest(f"Questionnaire id {questionnaire_reference} is invalid")
else:
# if no questionnaire/history...
try:
questionnaire_id = int(questionnaire_reference)
from rdr_service.dao.questionnaire_dao import QuestionnaireDao
q = QuestionnaireDao().get_with_children(questionnaire_id)
if not q:
raise BadRequest(f"Questionnaire with id {questionnaire_id} is not found")
# Mutate the questionnaire reference to include the version.
questionnaire_reference = _QUESTIONNAIRE_REFERENCE_FORMAT.format(questionnaire_id, q.semanticVersion)
resource_json["questionnaire"]["reference"] = questionnaire_reference
return q
except ValueError:
raise BadRequest(f"Questionnaire id {questionnaire_reference} is invalid")
@classmethod
def _extract_codes_and_answers(cls, group, q):
"""Returns (system, code) -> (display, code type, question code id) code map
and (QuestionnaireResponseAnswer, (system, code)) answer pairs.
"""
code_map = {}
answers = []
link_id_to_question = {}
if q.questions:
link_id_to_question = {question.linkId: question for question in q.questions}
cls._populate_codes_and_answers(group, code_map, answers, link_id_to_question, q.questionnaireId)
return (code_map, answers)
@classmethod
def _populate_codes_and_answers(cls, group, code_map, answers, link_id_to_question, questionnaire_id):
"""Populates code_map with (system, code) -> (display, code type, question code id)
and answers with (QuestionnaireResponseAnswer, (system, code)) pairs."""
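# Walk the questions at this level of the response first, then recurse into any
# nested answer groups and sub-groups so that deeply nested answers are captured too.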
if group.question:
for question in group.question:
if question.linkId and question.answer:
qq = link_id_to_question.get(question.linkId)
if qq:
for answer in question.answer:
qr_answer = QuestionnaireResponseAnswer(questionId=qq.questionnaireQuestionId)
system_and_code = None
ignore_answer = False
if answer.valueCoding:
if not answer.valueCoding.system:
raise BadRequest(f"No system provided for valueCoding: {question.linkId}")
if not answer.valueCoding.code:
raise BadRequest(f"No code provided for valueCoding: {question.linkId}")
if answer.valueCoding.system == PPI_EXTRA_SYSTEM:
# Ignore answers from the ppi-extra system, as they aren't used for analysis.
ignore_answer = True
else:
system_and_code = (answer.valueCoding.system, answer.valueCoding.code)
if system_and_code not in code_map:
code_map[system_and_code] = (
answer.valueCoding.display,
CodeType.ANSWER,
qq.codeId,
)
if not ignore_answer:
if answer.valueDecimal is not None:
qr_answer.valueDecimal = answer.valueDecimal
if answer.valueInteger is not None:
qr_answer.valueInteger = answer.valueInteger
if answer.valueString is not None:
answer_length = len(answer.valueString)
max_length = QuestionnaireResponseAnswer.VALUE_STRING_MAXLEN
if answer_length > max_length:
raise BadRequest(
f"String value too long (len={answer_length}); "
f"must be less than {max_length}"
)
qr_answer.valueString = answer.valueString
if answer.valueDate is not None:
qr_answer.valueDate = answer.valueDate.date
if answer.valueDateTime is not None:
qr_answer.valueDateTime = answer.valueDateTime.date
if answer.valueBoolean is not None:
qr_answer.valueBoolean = answer.valueBoolean
if answer.valueUri is not None:
qr_answer.valueUri = answer.valueUri
answers.append((qr_answer, system_and_code))
if answer.group:
for sub_group in answer.group:
cls._populate_codes_and_answers(
sub_group, code_map, answers, link_id_to_question, questionnaire_id
)
if group.group:
for sub_group in group.group:
cls._populate_codes_and_answers(sub_group, code_map, answers, link_id_to_question, questionnaire_id)
@staticmethod
def _add_answers(qr, code_id_map, answers):
for answer, system_and_code in answers:
if system_and_code:
system, code = system_and_code
answer.valueCodeId = code_id_map.get(system, code)
qr.answers.append(answer)
def _validate_consent_pdfs(resource):
"""Checks for any consent-form-signed-pdf extensions and validates their PDFs in GCS."""
if resource.get("resourceType") != "QuestionnaireResponse":
raise ValueError(f'Expected QuestionnaireResponse for "resourceType" in {resource}.')
# We now look up consent bucket names by participant origin id.
p_origin = get_account_origin_id()
consent_bucket_config = config.getSettingJson(config.CONSENT_PDF_BUCKET)
# If we don't match the origin id, just return the first bucket in the dict.
try:
consent_bucket = consent_bucket_config.get(p_origin, consent_bucket_config[next(iter(consent_bucket_config))])
except AttributeError:
pass
found_pdf = False
for extension in resource.get("extension", []):
if extension["url"] != _SIGNED_CONSENT_EXTENSION:
continue
local_pdf_path = extension["valueString"]
_, ext = os.path.splitext(local_pdf_path)
if ext.lower() != ".pdf":
raise BadRequest(f"Signed PDF must end in .pdf, found {ext} (from {local_pdf_path}).")
# Treat the value as a bucket-relative path, allowing a leading slash or not.
if not local_pdf_path.startswith("/"):
local_pdf_path = "/" + local_pdf_path
_raise_if_gcloud_file_missing("/{}{}".format(consent_bucket, local_pdf_path))
found_pdf = True
if config.GAE_PROJECT == 'localhost' or is_self_request():
# Pretend we found a valid consent if we're running on a development machine
# skip checking for self request from fake participant generating
return True
else:
return found_pdf
def _raise_if_gcloud_file_missing(path):
"""Checks that a GCS file exists.
Args:
path: An absolute Google Cloud Storage path, starting with /$BUCKET/.
Raises:
BadRequest if the path does not reference a file.
"""
storage_provider = storage.get_storage_provider()
if not storage_provider.exists(path):
raise BadRequest(f"Google Cloud Storage file not found in {path}.")
class QuestionnaireResponseAnswerDao(BaseDao):
def __init__(self):
super(QuestionnaireResponseAnswerDao, self).__init__(QuestionnaireResponseAnswer)
def get_id(self, obj):
return obj.questionnaireResponseAnswerId
def get_current_answers_for_concepts(self, session, participant_id, code_ids):
"""Return any answers the participant has previously given to questions with the specified
code IDs."""
if not code_ids:
return []
return (
session.query(QuestionnaireResponseAnswer)
.join(QuestionnaireResponse)
.join(QuestionnaireQuestion)
.filter(QuestionnaireResponse.participantId == participant_id)
.filter(QuestionnaireResponseAnswer.endTime == None)
.filter(QuestionnaireQuestion.codeId.in_(code_ids))
.all()
)
| bsd-3-clause | 193,286,261,942,352,830 | 52.594189 | 120 | 0.611594 | false |
akunze3/pytrajectory | examples/ex8_ConstrainedDoublePendulum.py | 1 | 8675 | # constrained double pendulum
# import all we need for solving the problem
from pytrajectory import ControlSystem
import numpy as np
import sympy as sp
from sympy import cos, sin, Matrix
from numpy import pi
# to define a callable function that returns the vectorfield
# we first solve the motion equations of form Mx = B
def solve_motion_equations(M, B, state_vars=[], input_vars=[], parameters_values=dict()):
'''
Solves the motion equations given by the mass matrix and right hand side
to define a callable function for the vector field of the respective
control system.
Parameters
----------
M : sympy.Matrix
A sympy.Matrix containing sympy expressions and symbols that represents
the mass matrix of the control system.
B : sympy.Matrix
A sympy.Matrix containing sympy expressions and symbols that represents
the right hand side of the motion equations.
state_vars : list
A list of sympy.Symbol objects, one for each state variable.
input_vars : list
A list of sympy.Symbol objects, one for each input variable.
parameters_values : dict
A dictionary with a key:value pair for each system parameter.
Returns
-------
callable
'''
M_shape = M.shape
B_shape = B.shape
assert(M_shape[0] == B_shape[0])
# at first we create a buffer for the string that we complete and execute
# to dynamically define a function and return it
fnc_str_buffer ='''
def f(x, u):
# System variables
%s # x_str
%s # u_str
# Parameters
%s # par_str
# Sympy Common Expressions
%s # cse_str
# Vectorfield
%s # ff_str
return ff
'''
#################################
# handle system state variables #
#################################
# --> leads to x_str which shows how to unpack the state variables
x_str = ''
for var in state_vars:
x_str += '%s, '%str(var)
# finally append '= x' so that the string unpacks the state vector, e.g. 'x1, x2, ... = x'
x_str = x_str + '= x'
##########################
# handle input variables #
##########################
# --> leads to u_str which will show how to unpack the inputs of the control system
u_str = ''
for var in input_vars:
u_str += '%s, '%str(var)
# finally append '= u' so that u_str will look like:
# 'u1, u2, ... , um = u'
u_str = u_str + '= u'
############################
# handle system parameters #
############################
# --> leads to par_str
par_str = ''
for k, v in parameters_values.items():
# 'k' is the name of a system parameter such as mass or gravitational acceleration
# 'v' is its value in SI units
par_str += '%s = %s; '%(str(k), str(v))
# finally remove the trailing '; ' from par_str to avoid syntax errors
par_str = par_str[:-2]
# now solve the motion equations w.r.t. the accelerations
sol = M.solve(B)
# use SymPy's Common Subexpression Elimination
cse_list, cse_res = sp.cse(sol, symbols=sp.numbered_symbols('q'))
################################
# handle common subexpressions #
################################
# --> leads to cse_str
cse_str = ''
#cse_list = [(str(l), str(r)) for l, r in cse_list]
for cse_pair in cse_list:
cse_str += '%s = %s; '%(str(cse_pair[0]), str(cse_pair[1]))
# add result of cse
for i in xrange(M_shape[0]):
cse_str += 'q%d_dd = %s; '%(i, str(cse_res[0][i]))
cse_str = cse_str[:-2]
######################
# create vectorfield #
######################
# --> leads to ff_str
ff_str = 'ff = ['
for i in xrange(M_shape[0]):
ff_str += '%s, '%str(state_vars[2*i+1])
ff_str += 'q%s_dd, '%(i)
# remove trailing ',' and add closing brackets
ff_str = ff_str[:-2] + ']'
############################
# Create callable function #
############################
# now we can replace all placeholders in the function string buffer
fnc_str = fnc_str_buffer%(x_str, u_str, par_str, cse_str, ff_str)
# and finally execute it which will create a python function 'f'
exec(fnc_str)
# now we have defined a callable function that can be used within PyTrajectory
return f
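# The generated f(x, u) unpacks the state and input vectors, evaluates the common
# subexpressions and returns the vector field as a list that alternates each velocity
# state with the corresponding solved acceleration q<i>_dd.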
# system and input variables
state_vars = sp.symbols('x, dx, phi1, dphi1, phi2, dphi2')
input_vars = sp.symbols('F,')
x, dx, phi1, dphi1, phi2, dphi2 = state_vars
F, = input_vars
# parameters
l1 = 0.25 # 1/2 * length of the pendulum 1
l2 = 0.25 # 1/2 * length of the pendulum 2
m1 = 0.1 # mass of the pendulum 1
m2 = 0.1 # mass of the pendulum 2
m = 1.0 # mass of the car
g = 9.81 # gravitational acceleration
I1 = 4.0/3.0 * m1 * l1**2 # inertia 1
I2 = 4.0/3.0 * m2 * l2**2 # inertia 2
param_values = {'l1':l1, 'l2':l2, 'm1':m1, 'm2':m2, 'm':m, 'g':g, 'I1':I1, 'I2':I2}
# mass matrix
M = Matrix([[ m+m1+m2, (m1+2*m2)*l1*cos(phi1), m2*l2*cos(phi2)],
[(m1+2*m2)*l1*cos(phi1), I1+(m1+4*m2)*l1**2, 2*m2*l1*l2*cos(phi2-phi1)],
[ m2*l2*cos(phi2), 2*m2*l1*l2*cos(phi2-phi1), I2+m2*l2**2]])
# and right hand site
B = Matrix([[ F + (m1+2*m2)*l1*sin(phi1)*dphi1**2 + m2*l2*sin(phi2)*dphi2**2 ],
[ (m1+2*m2)*g*l1*sin(phi1) + 2*m2*l1*l2*sin(phi2-phi1)*dphi2**2 ],
[ m2*g*l2*sin(phi2) + 2*m2*l1*l2*sin(phi1-phi2)*dphi1**2 ]])
f = solve_motion_equations(M, B, state_vars, input_vars)
# then we specify all boundary conditions
a = 0.0
xa = [0.0, 0.0, pi, 0.0, pi, 0.0]
b = 4.0
xb = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
ua = [0.0]
ub = [0.0]
# here we specify the constraints for the velocity of the car
con = {0 : [-1.0, 1.0],
1 : [-2.0, 2.0]}
# now we create our Trajectory object and alter some method parameters via the keyword arguments
S = ControlSystem(f, a, b, xa, xb, ua, ub, constraints=con,
eps=2e-1, su=20, kx=2, use_chains=False,
use_std_approach=False)
# time to run the iteration
x, u = S.solve()
# the following code provides an animation of the system above
# for a more detailed explanation have a look at the 'Visualisation' section in the documentation
import sys
import matplotlib as mpl
from pytrajectory.visualisation import Animation
def draw(xt, image):
x = xt[0]
phi1 = xt[2]
phi2 = xt[4]
car_width = 0.05
car_height = 0.02
rod_length = 2.0 * 0.25
pendulum_size = 0.015
x_car = x
y_car = 0
x_pendulum1 = x_car + rod_length * sin(phi1)
y_pendulum1 = rod_length * cos(phi1)
x_pendulum2 = x_pendulum1 + rod_length * sin(phi2)
y_pendulum2 = y_pendulum1 + rod_length * cos(phi2)
# create image
pendulum1 = mpl.patches.Circle(xy=(x_pendulum1, y_pendulum1), radius=pendulum_size, color='black')
pendulum2 = mpl.patches.Circle(xy=(x_pendulum2, y_pendulum2), radius=pendulum_size, color='black')
car = mpl.patches.Rectangle((x_car-0.5*car_width, y_car-car_height), car_width, car_height,
fill=True, facecolor='grey', linewidth=2.0)
joint = mpl.patches.Circle((x_car,0), 0.005, color='black')
rod1 = mpl.lines.Line2D([x_car,x_pendulum1], [y_car,y_pendulum1],
color='black', zorder=1, linewidth=2.0)
rod2 = mpl.lines.Line2D([x_pendulum1,x_pendulum2], [y_pendulum1,y_pendulum2],
color='black', zorder=1, linewidth=2.0)
# add the patches and lines to the image
image.patches.append(pendulum1)
image.patches.append(pendulum2)
image.patches.append(car)
image.patches.append(joint)
image.lines.append(rod1)
image.lines.append(rod2)
# and return the image
return image
if not 'no-pickle' in sys.argv:
# here we save the simulation results so we don't have to run
# the iteration again in case the following fails
S.save(fname='ex8_ConstrainedDoublePendulum.pcl')
if 'plot' in sys.argv or 'animate' in sys.argv:
# create Animation object
A = Animation(drawfnc=draw, simdata=S.sim_data,
plotsys=[(0,'$x$'),(1,'$\\dot{x}$')], plotinputs=[(0,'$u$')])
xmin = np.min(S.sim_data[1][:,0])
xmax = np.max(S.sim_data[1][:,0])
A.set_limits(xlim=(xmin - 1.0, xmax + 1.0), ylim=(-1.2,1.2))
if 'plot' in sys.argv:
A.show(t=S.b)
if 'animate' in sys.argv:
A.animate()
A.save('ex8_ConstrainedDoublePendulum.gif')
| bsd-3-clause | -5,700,278,940,750,666,000 | 30.660584 | 102 | 0.566686 | false |
phageghost/pg_tools | pgtools/rnatools.py | 1 | 13893 | import os
import itertools
import numpy
import scipy
import pandas
import seaborn
import matplotlib.pyplot as plt
from pgtools import toolbox
PSEUDO_COUNT = 1
EXPRESSION_THRESHOLD = 1
FIG_EXTS = ['pdf', 'png']
PNG_DPI = 600
def load_and_clean(datafile):
exp_data = pandas.read_csv(datafile, sep='\t', index_col=0).T
exp_data.index.name = 'Sample'
# trim sample names
new_index = []
for i in range(exp_data.shape[0]):
index_item = exp_data.index[i]
if i >= 7:
if index_item.find(' reads') > -1:
index_item = index_item[:index_item.find(' reads')]
if index_item.find('/') > -1:
index_item = index_item.split('/')[-1]
new_index.append(index_item)
exp_data.index = new_index
exp_data.columns.name='Entrez mRNA ID'
return exp_data
def trim_rna_file(input_filename, output_filename='', fix_names=True, transcript_to_gene=False, sep='\t'):
"""
Given the filename of a HOMER-output RNA-seq DataFrame, generate a
new file containing a new dataframe with the gene info columns (0-6)
removed. <output_filename> defaults to input_filename with "_trimmed"
appended to the filename mantissa.
If :param:`transcript_to_gene` is True, replace the refseq Transcript ID with the gene name from the annotation
"""
path, prefix, suffix = toolbox.parse_path(input_filename)
toolbox.establish_path(path)
rna_data = pandas.read_csv(input_filename, sep=sep, index_col=0)
if transcript_to_gene:
gene_names = [anno.split('|')[0] for anno in rna_data.iloc[:,6]]
rna_data.index = gene_names
rna_data = rna_data.iloc[:,7:]
# print(rna_data.columns)
if fix_names:
rna_data.columns = [col.replace('-','_').replace('.','_') for col in rna_data.columns]
# print(rna_data.columns)
rna_data.columns = [col.strip('/').split('/')[-1].strip() for col in rna_data.columns]
# print(rna_data.columns)
rna_data.columns = [(col, col.split(' FPKM')[0])[' FPKM' in col] for col in rna_data.columns]
# print(rna_data.columns)
rna_data.columns = [(col, col.split(' TPM')[0])[' TPM' in col] for col in rna_data.columns]
# print(rna_data.columns)
rna_data.columns = [(col, col.split(' (')[0])[' total)' in col] for col in rna_data.columns]
# print(rna_data.columns)
if not output_filename:
output_filename = os.path.join(path, '{}{}{}.{}'.format(prefix, '_trimmed', ('', '_gene_name')[transcript_to_gene], suffix))
rna_data.to_csv(output_filename, sep='\t')
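# Minimal usage sketch (hypothetical file names); the default output name is derived
# from the input by appending '_trimmed' (and '_gene_name' when transcript_to_gene=True):
# trim_rna_file('raw_counts.txt', transcript_to_gene=True)
# trimmed = pandas.read_csv('raw_counts_trimmed_gene_name.txt', sep='\t', index_col=0)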
def convert_rpkm_to_tpm(rpkm_data):
"""
Given a trimmed DataFrame of RNA-seq data in RPKM (with genes on rows
and samples on columns), return a new dataframe with the RPKM values
converted to transcripts per million (TPM)
"""
return rpkm_data / rpkm_data.sum(axis=0) * 1e6
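# Usage sketch (hypothetical variable name): after tpm = convert_rpkm_to_tpm(rpkm_df),
# every sample column of tpm sums to 1e6 by construction.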
def filter_by_type(raw_data, length_threshold=200):
"""
Retain only protein-coding transcripts and ncRNA transcripts with length >= length_threshold (lncRNA)
"""
filtered_data = raw_data.loc[:, [raw_data.loc['Annotation/Divergence'][i].split('|')[-1] == 'protein-coding'
or (raw_data.loc['Annotation/Divergence'][i].split('|')[-1] == 'ncRNA'
and raw_data.loc['Length'][i] >= length_threshold) for i in range(raw_data.shape[1])]]
print('Initial transcripts: {}'.format(raw_data.shape[1]))
print('Retaining only protein-coding and ncRNA transcripts with length >= {}'.format(length_threshold))
print('\tRemoved {} transcripts'.format(raw_data.shape[1] - filtered_data.shape[1]))
print('{} transcripts remaining'.format(filtered_data.shape[1]))
return filtered_data
def filter_by_expression_magnitude(raw_data, magnitude_threshold=1):
"""
Remove any transcripts that are not expressed at or above <magnitude_threshold> in at least one sample.
"""
data_rows = raw_data.index[:]
print('Initial transcripts: {}'.format(raw_data.shape[1]))
filtered_data = raw_data.loc[:,(raw_data.loc[data_rows] >= magnitude_threshold).any(axis=0)]
print('Removed {} transcripts with magnitude < {} across all samples'.format(raw_data.shape[1] - filtered_data.shape[1], magnitude_threshold))
print('{} transcripts remaining'.format(filtered_data.shape[1]))
return filtered_data
def correlated_columns(df):
"""
Since the Pandas DataFrame.corr() method has stopped working, I create my own
"""
sample_corrs = pandas.DataFrame(numpy.zeros((df.shape[1], df.shape[1])), index=df.columns, columns=df.columns)
for col1, col2 in itertools.combinations(df.columns, 2):
pcc = scipy.stats.pearsonr(df[col1], df[col2])[0]
sample_corrs.loc[col1, col2] = pcc
sample_corrs.loc[col2, col1] = pcc
for col in df.columns:
sample_corrs.loc[col, col] = 1.0
return sample_corrs
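# Usage sketch (hypothetical DataFrame): correlated_columns(expression_df) returns a
# symmetric samples-by-samples matrix of Pearson correlation coefficients with 1.0 on
# the diagonal.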
def scatter_rna(rna_df, dataset1, dataset2, name1='', name2='', transform=None, stat_func=None, stat_func_name='', magnitude_threshold=0, threshold_type='',
cmap='', color='r', plot_size=4, marker_size=10, marker='o', units='Log_2 TPM', density_gamma=1, output_fname_prefix='',
lims=None, ticks=None, visible_ticks=True,
coloring_sets=None,
annotations=None, annotation_padding=0.2, annotation_color='k', annotation_font_size=8,
annotation_linewidth=1, show_diagonal=False, diagonal_kwargs={}, fig=None, ax=None):
"""
Generates a scatterplot of expression values between matched sequences of expression data :dataset1: and :dataset2:
:name1: label for dataset1
:name2: label for dataset2
:transform: (optional) a function to apply to every value in each dataset prior to plotting.
:stat_func: (optional) a summary statistical function which will be passed both datasets.
:stat_func_name: (optional) the name of the resulting statistic
:magnitude_threshold: (optional) only plot data above this threshold (after transformation, if any)
:threshold_type: (optional) can be 'and' or 'or'. For 'and', exclude any points which are not above the threshold in _both_ datasets.
For 'or' exclude any points below the threshold in _either_ dataset.
:cmap: (optional) the name of a built-in matplotlib colormap to use for a density-based coloring of points. If empty, just use a plain color
:color: (optional) if :cmap: is not specified, use this single color to render the points. Defaults to red.
:plot_size: (optional) the size of each figure dimension, in inches.
:marker_size: (optional) the size of each point marker, in points. Defaults to 10.
:marker: (optional) any valid matplotlib marker style to use for the point markers. Defaults to 'o' (filled circle).
:units: (optional) the name of the resulting units of expression that will be appended to each dataset name to label the axes. Defaults to 'Log_2 TPM'
:density_gamma: (optional) the density color mapping will be raised to this power. So numbers less than 1 reduce contrast and move values to the denser
end, and values greater than 1 increase contrast and move values to the sparser end.
:output_fname_prefix: (optional). If present, save a PNG and PDF having this prefix.
:lims: (optional): force the axes to have the specified range. If not specified, use the larger of the automatically-determined axis sizes.
:ticks: (optional): a sequence of locations to place ticks on both axes.
:coloring_sets: an iterable of tuples. Each tuple should consist of a color code paired with a list of genes to which the color should be applied. Not compatible with :cmap:.
:annotations: an iterable of tuples containing (gene_name, x_offset, y_offset) where x and y offsets give the coordinate shifts for the label relative to the gene location
:show_diagonal: Whether or not to draw a line across the diagonal. Default False.
:diagonal_kwargs: Keyword arguments to pass to the plot function that draws the diagonal.
:fig: (optional) matplotlib Figure object to use.
:ax: (optional) matplotlib Axes object to use.
"""
if (fig or ax) and not (fig and ax):
raise ValueError('If passing a fig or ax object, must pass both!')
if not (fig and ax):
seaborn.set_style('white')
fig, ax = plt.subplots(1, figsize=(plot_size,plot_size))
x_data = rna_df.loc[:,dataset1]
y_data = rna_df.loc[:,dataset2]
if not name1:
name1 = dataset1
if not name2:
name2 = dataset2
if transform:
x_data = transform(x_data)
y_data = transform(y_data)
if threshold_type == 'or':
# keep only genes with > threshold expression in at least one dataset
print('Keeping only transcripts with >= {} expression in at least one dataset'.format(magnitude_threshold))
kept_genes = set(x_data[x_data >= magnitude_threshold].index).union(set(y_data[y_data >= magnitude_threshold].index))
elif threshold_type == 'and':
print('Keeping only transcripts with >= {} expression in both datasets'.format(magnitude_threshold))
# keep only genes with > threshold expression in at least one dataset
kept_genes = set(x_data[x_data >= magnitude_threshold].index).intersection(set(y_data[y_data >= magnitude_threshold].index))
elif threshold_type == '':
kept_genes = rna_df.index
else:
raise ValueError('Unknown threshold type: {}'.format(threshold_type))
x_data = x_data.loc[kept_genes]
y_data = y_data.loc[kept_genes]
print('Kept {} transcripts, discarded {}.'.format(len(kept_genes), rna_df.shape[0] - len(kept_genes)))
if stat_func:
stat_result = stat_func(x_data, y_data)
if cmap:
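# Color each point by its local density, estimated with a Gaussian KDE and raised to
# density_gamma to adjust contrast; sorting by density below draws the densest points last.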
xy = numpy.vstack([x_data,y_data])
z = scipy.stats.gaussian_kde(xy)(xy)**density_gamma
idx = z.argsort()
x_data, y_data, z = x_data[idx], y_data[idx], z[idx]
ax.scatter(x=x_data,
y=y_data,
marker=marker, cmap=cmap, c=z, s=marker_size, edgecolor='')
else:
if coloring_sets:
remaining_genes = set(kept_genes)
for set_color, set_genes in coloring_sets:
remaining_genes = remaining_genes.difference(set_genes)
# plot the remaining genes
ax.scatter(x=x_data.loc[remaining_genes],
y=y_data.loc[remaining_genes],
marker=marker, c=color, s=marker_size, edgecolor='')
for set_color, set_genes in coloring_sets:
ax.scatter(x=x_data.loc[set_genes],
y=y_data.loc[set_genes],
marker=marker, c=set_color, s=marker_size, edgecolor='')
else:
ax.scatter(x=x_data,
y=y_data,
marker=marker, c=color, s=marker_size, edgecolor='')
if annotations:
for gene_name, x_offset, y_offset in annotations:
if gene_name in x_data.index and gene_name in y_data.index:
gene_x = x_data[gene_name]
gene_y = y_data[gene_name]
# Compute padding components using the Pythagorean theorem
pointer_length = numpy.sqrt(x_offset**2 + (y_offset)**2)
if pointer_length > annotation_padding * 2:
correction_factor = annotation_padding / pointer_length
padding_x = x_offset * correction_factor
padding_y = y_offset * correction_factor
else:
padding_x = 0
padding_y = 0
text_x = gene_x + x_offset
text_y = gene_y + y_offset
ax.text(x=text_x, y=text_y, s=gene_name, fontsize=annotation_font_size)
ax.plot((gene_x+padding_x, text_x - padding_x), (gene_y + padding_y, text_y-padding_y),
color=annotation_color, linewidth=annotation_linewidth)
ax.set_xlabel('{} {}'.format(name1, units))
ax.set_ylabel('{} {}'.format(name2, units))
# make axes square
if not lims:
biggest_lim = max(ax.get_ylim()[1], ax.get_xlim()[1])
lims = (0, biggest_lim)
ax.set_xlim(*lims)
ax.set_ylim(*lims)
if ticks:
ax.set_xticks(ticks)
ax.set_yticks(ticks)
plt.setp(ax.get_xticklabels(), visible=visible_ticks)
plt.setp(ax.get_yticklabels(), visible=visible_ticks)
if show_diagonal:
ax.plot(lims, lims, **diagonal_kwargs)
if stat_func:
print('{} vs {}, {}: {:>.3}'.format(name1, name2, stat_func_name, stat_func(x_data, y_data)))
ax.text(x=(ax.get_xlim()[1] - ax.get_xlim()[0]) * 0.1 + ax.get_xlim()[0],
y=(ax.get_ylim()[1] - ax.get_ylim()[0]) * 0.9 + ax.get_ylim()[0],
s='{}: {:>.3}'.format(stat_func_name, stat_result))
if output_fname_prefix:
# toolbox.establish_path(toolbox.parse_path(output_fname_prefix)[0])
# Save plot
for fig_ext in FIG_EXTS:
figure_fname = '{}.{}'.format(output_fname_prefix, fig_ext)
print('Saving figure to {} ...'.format(figure_fname))
fig.savefig(figure_fname, bbox_inches='tight', dpi=PNG_DPI)
# Save data as CSV file
data_fname = '{}_data.csv'.format(output_fname_prefix)
print('Saving raw data to {}'.format(data_fname))
pandas.DataFrame({'{} ({})'.format(name1, units):x_data, '{} ({})'.format(name2, units):y_data}, index=x_data.index).to_csv(data_fname, index=False)
| mit | 1,190,259,456,043,873,800 | 45.620805 | 180 | 0.618081 | false |
IvarsKarpics/mxcube | gui/bricks/TreeBrick.py | 1 | 53816 | # Project: MXCuBE
# https://github.com/mxcube
#
# This file is part of MXCuBE software.
#
# MXCuBE is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MXCuBE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with MXCuBE. If not, see <http://www.gnu.org/licenses/>.
# import os
import logging
# from collections import namedtuple
from gui.BaseComponents import BaseWidget
from gui.utils import queue_item, Colors, QtImport
from gui.utils.sample_changer_helper import SC_STATE_COLOR, SampleChanger
from gui.widgets.dc_tree_widget import DataCollectTree
from HardwareRepository.HardwareObjects.queue_model_enumerables import CENTRING_METHOD
from HardwareRepository import HardwareRepository as HWR
__credits__ = ["MXCuBE collaboration"]
__license__ = "LGPLv3+"
__category__ = "General"
class TreeBrick(BaseWidget):
enable_widgets = QtImport.pyqtSignal(bool)
hide_sample_tab = QtImport.pyqtSignal(bool)
hide_dc_parameters_tab = QtImport.pyqtSignal(bool)
hide_sample_centring_tab = QtImport.pyqtSignal(bool)
hide_dcg_tab = QtImport.pyqtSignal(bool)
hide_sample_changer_tab = QtImport.pyqtSignal(bool)
hide_plate_manipulator_tab = QtImport.pyqtSignal(bool)
hide_char_parameters_tab = QtImport.pyqtSignal(bool)
hide_energy_scan_tab = QtImport.pyqtSignal(bool)
hide_xrf_spectrum_tab = QtImport.pyqtSignal(bool)
hide_workflow_tab = QtImport.pyqtSignal(bool)
hide_advanced_tab = QtImport.pyqtSignal(bool)
hide_xray_imaging_tab = QtImport.pyqtSignal(bool)
populate_dc_parameter_widget = QtImport.pyqtSignal(object)
populate_dc_group_widget = QtImport.pyqtSignal(object)
populate_char_parameter_widget = QtImport.pyqtSignal(object)
populate_sample_details = QtImport.pyqtSignal(object)
populate_energy_scan_widget = QtImport.pyqtSignal(object)
populate_xrf_spectrum_widget = QtImport.pyqtSignal(object)
populate_advanced_widget = QtImport.pyqtSignal(object)
populate_xray_imaging_widget = QtImport.pyqtSignal(object)
populate_workflow_widget = QtImport.pyqtSignal(object)
selection_changed = QtImport.pyqtSignal(object)
set_directory = QtImport.pyqtSignal(str)
set_prefix = QtImport.pyqtSignal(str)
set_sample = QtImport.pyqtSignal(object)
get_tree_brick = QtImport.pyqtSignal(BaseWidget)
diffractometer_ready = QtImport.pyqtSignal(bool)
sample_mount_started = QtImport.pyqtSignal()
sample_mount_finished = QtImport.pyqtSignal()
def __init__(self, *args):
BaseWidget.__init__(self, *args)
# Hardware objects ----------------------------------------------------
self.state_machine_hwobj = None
self.redis_client_hwobj = None
# Internal variables --------------------------------------------------
self.enable_collect_conditions = {}
self.current_view = None
self.current_queue_entry = None
self.is_logged_in = False
self.lims_samples = None
self.filtered_lims_samples = None
self.compression_state = True
self.queue_autosave_action = None
self.queue_undo_action = None
self.queue_redo_action = None
self.queue_sync_action = None
# Properties ----------------------------------------------------------
self.add_property("queue", "string", "/queue")
self.add_property("queue_model", "string", "/queue-model")
self.add_property("xml_rpc_server", "string", "/xml_rpc_server")
self.add_property("redis_client", "string", "")
self.add_property("useFilterWidget", "boolean", True)
self.add_property("useSampleWidget", "boolean", True)
self.add_property("scOneName", "string", "Sample changer")
self.add_property("scTwoName", "string", "Plate")
self.add_property("usePlateNavigator", "boolean", False)
self.add_property("useHistoryView", "boolean", True)
self.add_property("useCentringMethods", "boolean", True)
self.add_property("enableQueueAutoSave", "boolean", True)
# Properties to initialize hardware objects --------------------------
self.add_property("hwobj_state_machine", "string", "")
# Signals ------------------------------------------------------------
self.define_signal("enable_widgets", ())
self.define_signal("diffractometer_ready", ())
# Hiding and showing the tabs
self.define_signal("hide_sample_tab", ())
self.define_signal("hide_dc_parameters_tab", ())
self.define_signal("hide_sample_centring_tab", ())
self.define_signal("hide_dcg_tab", ())
self.define_signal("hide_sample_changer_tab", ())
self.define_signal("hide_plate_manipulator_tab", ())
self.define_signal("hide_char_parameters_tab", ())
self.define_signal("hide_energy_scan_tab", ())
self.define_signal("hide_xrf_spectrum_tab", ())
self.define_signal("hide_workflow_tab", ())
self.define_signal("hide_advanced_tab", ())
self.define_signal("hide_xray_imaging_tab", ())
self.define_signal("populate_dc_parameter_widget", ())
self.define_signal("populate_dc_group_widget", ())
self.define_signal("populate_char_parameter_widget", ())
self.define_signal("populate_sample_details", ())
self.define_signal("populate_energy_scan_widget", ())
self.define_signal("populate_xrf_spectrum_widget", ())
self.define_signal("populate_workflow_tab", ())
self.define_signal("populate_advanced_widget", ())
self.define_signal("populate_xray_imaging_widget", ())
self.define_signal("selection_changed", ())
self.define_signal("set_directory", ())
self.define_signal("set_prefix", ())
self.define_signal("set_sample", ())
self.define_signal("get_tree_brick", ())
self.define_signal("sample_mount_started", ())
self.define_signal("sample_mount_finished", ())
# Slots ---------------------------------------------------------------
self.define_slot("logged_in", ())
self.define_slot("status_msg_changed", ())
self.define_slot("sample_load_state_changed", ())
self.define_slot("set_session", ())
self.define_slot("get_selected_samples", ())
self.define_slot("set_requested_tree_brick", ())
# Graphic elements ----------------------------------------------------
self.tools_menu = None
self.queue_sync_action = None
self.sample_changer_widget = QtImport.load_ui_file(
"sample_changer_widget_layout.ui"
)
# self.refresh_pixmap = Icons.load("Refresh2.png")
# self.sample_changer_widget.synch_button.setIcon(QtGui.QIcon(self.refresh_pixmap))
# self.sample_changer_widget.synch_button.setText("Synch ISPyB")
self.dc_tree_widget = DataCollectTree(self)
self.dc_tree_widget.selection_changed_cb = self.selection_changed_cb
self.dc_tree_widget.run_cb = self.run
# self.dc_tree_widget.clear_centred_positions_cb = \
# self.clear_centred_positions
# Layout --------------------------------------------------------------
__main_layout = QtImport.QVBoxLayout(self)
__main_layout.addWidget(self.sample_changer_widget)
__main_layout.addWidget(self.dc_tree_widget)
__main_layout.setSpacing(0)
__main_layout.setContentsMargins(0, 0, 0, 0)
# SizePolicies --------------------------------------------------------
# Qt signal/slot connections ------------------------------------------
self.sample_changer_widget.details_button.clicked.connect(
self.toggle_sample_changer_tab
)
self.sample_changer_widget.filter_cbox.activated.connect(
self.mount_mode_combo_changed
)
self.sample_changer_widget.centring_cbox.activated.connect(
self.dc_tree_widget.set_centring_method
)
self.sample_changer_widget.synch_ispyb_button.clicked.connect(
self.refresh_sample_list
)
# self.sample_changer_widget.tree_options_button.clicked.connect(\
# self.open_tree_options_dialog)
self.sample_changer_widget.filter_combo.activated.connect(
self.filter_combo_changed
)
self.sample_changer_widget.filter_ledit.textChanged.connect(
self.filter_text_changed
)
self.sample_changer_widget.sample_combo.activated.connect(
self.sample_combo_changed
)
# Other ---------------------------------------------------------------
self.enable_collect(True)
self.sample_changer_widget.synch_ispyb_button.setEnabled(False)
#self.setSizePolicy(QtImport.QSizePolicy.Maximum, QtImport.QSizePolicy.Expanding)
if HWR.beamline.sample_changer is not None:
self.connect(
HWR.beamline.sample_changer,
SampleChanger.STATE_CHANGED_EVENT,
self.sample_load_state_changed,
)
self.connect(
HWR.beamline.sample_changer,
SampleChanger.SELECTION_CHANGED_EVENT,
self.sample_selection_changed,
)
self.connect(
HWR.beamline.sample_changer,
SampleChanger.INFO_CHANGED_EVENT,
self.set_sample_pin_icon,
)
self.connect(
HWR.beamline.sample_changer,
SampleChanger.STATUS_CHANGED_EVENT,
self.sample_changer_status_changed,
)
else:
logging.getLogger("HWR").debug(
"TreeBrick: Sample changer not available."
)
if HWR.beamline.plate_manipulator is not None:
self.connect(
HWR.beamline.plate_manipulator,
SampleChanger.STATE_CHANGED_EVENT,
self.sample_load_state_changed,
)
self.connect(
HWR.beamline.plate_manipulator,
SampleChanger.INFO_CHANGED_EVENT,
self.plate_info_changed,
)
else:
logging.getLogger("GUI").debug(
"TreeBrick: plate manipulator hwobj not defined."
)
self.connect(
HWR.beamline.sample_view, "shapeCreated", self.dc_tree_widget.shape_created
)
self.connect(
HWR.beamline.sample_view,
"shapeChanged",
self.dc_tree_widget.shape_changed
)
self.connect(
HWR.beamline.sample_view, "shapeDeleted", self.dc_tree_widget.shape_deleted
)
self.connect(
HWR.beamline.sample_view,
"diffractometerReady",
self.diffractometer_ready_changed
)
self.connect(
HWR.beamline.diffractometer,
"newAutomaticCentringPoint",
self.diffractometer_automatic_centring_done,
)
self.connect(
HWR.beamline.diffractometer,
"minidiffPhaseChanged",
self.diffractometer_phase_changed,
)
self.diffractometer_phase_changed(
HWR.beamline.diffractometer.get_current_phase()
)
self.connect(
HWR.beamline.queue_manager,
"show_workflow_tab",
self.show_workflow_tab_from_model
)
self.connect(
HWR.beamline.queue_manager,
"queue_entry_execute_started",
self.queue_entry_execution_started,
)
self.connect(
HWR.beamline.queue_manager,
"queue_entry_execute_finished",
self.queue_entry_execution_finished,
)
self.connect(HWR.beamline.queue_manager, "queue_paused", self.queue_paused_handler)
self.connect(
HWR.beamline.queue_manager, "queue_execution_finished", self.queue_execution_finished
)
self.connect(HWR.beamline.queue_manager, "queue_stopped", self.queue_stop_handler)
self.connect(HWR.beamline.queue_model, "child_added", self.dc_tree_widget.add_to_view)
if hasattr(HWR.beamline, "ppu_control"):
self.connect(
HWR.beamline.ppu_control,
"ppuStatusChanged",
self.ppu_status_changed,
)
if HWR.beamline.safety_shutter is not None:
self.connect(
HWR.beamline.safety_shutter, "shutterStateChanged", self.shutter_state_changed
)
if HWR.beamline.machine_info is not None:
self.connect(
HWR.beamline.machine_info, "machineCurrentChanged", self.machine_current_changed
)
has_shutter_less = HWR.beamline.detector.has_shutterless()
if has_shutter_less:
self.dc_tree_widget.confirm_dialog.disable_dark_current_cbx()
def run(self):
"""Adds save, load and auto save menus to the menubar
Emits signals to close tabs"""
self.tools_menu = QtImport.QMenu("Queue", self)
self.tools_menu.addAction("Save", self.save_queue)
self.tools_menu.addAction("Load", self.load_queue)
self.queue_autosave_action = self.tools_menu.addAction(
"Auto save", self.queue_autosave_clicked
)
self.queue_autosave_action.setCheckable(True)
self.queue_autosave_action.setChecked(self["enableQueueAutoSave"])
self.queue_autosave_action.setEnabled(self["enableQueueAutoSave"])
self.tools_menu.addSeparator()
self.queue_undo_action = self.tools_menu.addAction(
"Undo last action", self.queue_undo_clicked
)
self.queue_undo_action.setEnabled(False)
self.queue_redo_action = self.tools_menu.addAction(
"Redo last action", self.queue_redo_clicked
)
self.queue_redo_action.setEnabled(False)
self.tools_menu.addSeparator()
self.queue_sync_action = self.tools_menu.addAction(
"Sync with ISPyB", self.queue_sync_clicked
)
self.queue_sync_action.setEnabled(False)
if BaseWidget._menubar is not None:
BaseWidget._menubar.insert_menu(self.tools_menu, 1)
self.hide_dc_parameters_tab.emit(True)
self.hide_dcg_tab.emit(True)
self.hide_sample_centring_tab.emit(False)
self.hide_char_parameters_tab.emit(True)
self.hide_sample_changer_tab.emit(True)
self.hide_plate_manipulator_tab.emit(True)
self.hide_sample_tab.emit(True)
self.hide_energy_scan_tab.emit(True)
self.hide_xrf_spectrum_tab.emit(True)
self.hide_workflow_tab.emit(True)
self.hide_advanced_tab.emit(True)
def property_changed(self, property_name, old_value, new_value):
if property_name == "useFilterWidget":
self.sample_changer_widget.filter_label.setVisible(new_value)
self.sample_changer_widget.filter_ledit.setVisible(new_value)
self.sample_changer_widget.filter_combo.setVisible(new_value)
elif property_name == "useSampleWidget":
self.sample_changer_widget.sample_label.setVisible(new_value)
self.sample_changer_widget.sample_combo.setVisible(new_value)
elif property_name == "useCentringMethods":
self.sample_changer_widget.centring_cbox.setEnabled(new_value)
self.sample_changer_widget.centring_mode_label.setEnabled(new_value)
elif property_name == "xml_rpc_server":
xml_rpc_server_hwobj = self.get_hardware_object(new_value)
if xml_rpc_server_hwobj:
self.connect(xml_rpc_server_hwobj, "add_to_queue", self.add_to_queue)
self.connect(
xml_rpc_server_hwobj,
"start_queue",
self.dc_tree_widget.collect_items,
)
self.connect(
xml_rpc_server_hwobj, "open_dialog", self.open_xmlrpc_dialog
)
elif property_name == "hwobj_state_machine":
self.state_machine_hwobj = self.get_hardware_object(
new_value, optional=True
)
elif property_name == "redis_client":
self.redis_client_hwobj = self.get_hardware_object(new_value, optional=True)
elif property_name == "scOneName":
self.sample_changer_widget.filter_cbox.setItemText(1, new_value)
elif property_name == "scTwoName":
self.sample_changer_widget.filter_cbox.setItemText(2, new_value)
elif property_name == "usePlateNavigator":
self.dc_tree_widget.plate_navigator_cbox.setVisible(new_value)
elif property_name == "useHistoryView":
# self.dc_tree_widget.history_tree_widget.setVisible(new_value)
self.dc_tree_widget.history_enable_cbox.setVisible(new_value)
else:
BaseWidget.property_changed(self, property_name, old_value, new_value)
@QtImport.pyqtSlot(int, str, str, int, str, str, bool)
def set_session(
self,
session_id,
t_prop_code=None,
prop_number=None,
prop_id=None,
start_date=None,
prop_code=None,
is_inhouse=None,
):
HWR.beamline.session.set_session_start_date(str(start_date))
@QtImport.pyqtSlot()
def set_requested_tree_brick(self):
self.get_tree_brick.emit(self)
@QtImport.pyqtSlot(bool)
def logged_in(self, logged_in):
"""
Connected to the signal loggedIn of ProposalBrick2.
The signal is emitted when a user was successfully logged in.
At first free-pin mode is created.
Then it tries to initialize two sample changers and create
two associated queue models.
"""
self.is_logged_in = logged_in
# self.enable_collect(logged_in)
# if not logged_in:
if True:
self.dc_tree_widget.sample_mount_method = 0
self.dc_tree_widget.populate_free_pin()
self.dc_tree_widget.plate_navigator_cbox.setVisible(False)
if (
HWR.beamline.sample_changer is not None
and HWR.beamline.diffractometer.use_sample_changer()
):
sc_basket_content, sc_sample_content = self.get_sc_content()
if sc_basket_content and sc_sample_content:
sc_basket_list, sc_sample_list = self.dc_tree_widget.samples_from_sc_content(
sc_basket_content, sc_sample_content
)
self.dc_tree_widget.sample_mount_method = 1
self.dc_tree_widget.populate_tree_widget(
sc_basket_list,
sc_sample_list,
self.dc_tree_widget.sample_mount_method,
)
self.sample_changer_widget.details_button.setText("Show SC-details")
if (
HWR.beamline.plate_manipulator is not None
and HWR.beamline.diffractometer.in_plate_mode()
):
if self["usePlateNavigator"]:
self.dc_tree_widget.plate_navigator_cbox.setVisible(True)
plate_row_content, plate_sample_content = self.get_plate_content()
if plate_sample_content:
plate_row_list, plate_sample_list = self.dc_tree_widget.samples_from_sc_content(
plate_row_content, plate_sample_content
)
self.dc_tree_widget.sample_mount_method = 2
self.dc_tree_widget.populate_tree_widget(
plate_row_list,
plate_sample_list,
self.dc_tree_widget.sample_mount_method,
)
self.sample_changer_widget.details_button.setText(
"Show Plate-details"
)
self.sample_changer_widget.filter_cbox.setCurrentIndex(
self.dc_tree_widget.sample_mount_method
)
self.dc_tree_widget.filter_sample_list(
self.dc_tree_widget.sample_mount_method
)
if self.dc_tree_widget.sample_mount_method > 0:
# Enable buttons related to sample changer
self.sample_changer_widget.filter_cbox.setEnabled(True)
self.sample_changer_widget.details_button.setEnabled(True)
self.dc_tree_widget.scroll_to_item()
if self.dc_tree_widget.sample_mount_method < 2 and logged_in:
self.sample_changer_widget.synch_ispyb_button.setEnabled(True)
if self.redis_client_hwobj is not None:
self.redis_client_hwobj.load_graphics()
self.load_queue()
self.dc_tree_widget.samples_initialized = True
# if not self.dc_tree_widget.samples_initialized
# self.dc_tree_widget.sample_tree_widget_selection()
# self.dc_tree_widget.set_sample_pin_icon()
# self.dc_tree_widget.scroll_to_item()
self.dc_tree_widget.update_basket_selection()
def enable_collect(self, state):
"""
Enables the collect controls.
:param state: Enable if state is True and disable if False
:type state: bool
:returns: None
"""
self.dc_tree_widget.enable_collect(state)
def queue_entry_execution_started(self, queue_entry):
self.current_queue_entry = queue_entry
self.enable_widgets.emit(False)
self.dc_tree_widget.queue_entry_execution_started(queue_entry)
# BaseWidget.set_status_info("status", "Queue started", "running")
def queue_entry_execution_finished(self, queue_entry, status):
self.current_queue_entry = None
self.dc_tree_widget.queue_entry_execution_finished(queue_entry, status)
self.enable_widgets.emit(True)
if queue_entry.get_type_str() not in ["Sample", "Basket", ""]:
BaseWidget.set_status_info(
"collect", "%s : %s" % (queue_entry.get_type_str(), status)
)
def queue_paused_handler(self, status):
self.enable_widgets.emit(True)
self.dc_tree_widget.queue_paused_handler(status)
def queue_execution_finished(self, status):
# self.enable_widgets.emit(True)
self.current_queue_entry = None
self.dc_tree_widget.queue_execution_completed(status)
def queue_stop_handler(self, status):
self.enable_widgets.emit(True)
self.dc_tree_widget.queue_stop_handler(status)
# BaseWidget.set_status_info("status", "Queue stoped")
def diffractometer_ready_changed(self, status):
self.diffractometer_ready.emit(HWR.beamline.diffractometer.is_ready())
try:
info_message = HWR.beamline.diffractometer.get_status()
except AttributeError:
info_message = None
if info_message is None and status:
info_message = "Ready"
info_status = "ready"
elif info_message is None:
info_message = "Not ready"
info_status = "running"
else:
info_status = "ready"
BaseWidget.set_status_info("diffractometer", info_message, info_status)
def diffractometer_automatic_centring_done(self, point):
if self.dc_tree_widget.centring_method == CENTRING_METHOD.LOOP:
message_box = QtImport.QMessageBox()
message_box.setIcon(QtImport.QMessageBox.Question)
message_box.setWindowTitle("Optical centring with user confirmation.")
message_box.setText("Optical centring done. How to proceed?")
message_box.addButton("Accept result", QtImport.QMessageBox.ApplyRole)
message_box.addButton("Try again", QtImport.QMessageBox.RejectRole)
if self.current_queue_entry:
message_box.addButton(
"Skip following entry", QtImport.QMessageBox.NoRole
)
result = message_box.exec_()
if result == QtImport.QMessageBox.AcceptRole:
HWR.beamline.diffractometer.automatic_centring_try_count = 0
elif result == QtImport.QMessageBox.RejectRole:
logging.getLogger("GUI").info(
"Optical centring result rejected. " + "Trying once again."
)
else:
HWR.beamline.diffractometer.automatic_centring_try_count = 0
if self.current_queue_entry:
logging.getLogger("GUI").info(
"Optical centring rejected "
+ "and the following queue entries skipped"
)
task_group_entry = self.current_queue_entry.get_container()
for child_entry in task_group_entry.get_queue_entry_list():
child_entry.set_enabled(False)
def samples_from_lims(self, samples):
barcode_samples, location_samples = self.dc_tree_widget.samples_from_lims(
samples
)
l_samples = dict()
# TODO: add test for sample changer type, here code is for Robodiff only
for location, l_sample in location_samples.items():
if l_sample.lims_location != (None, None):
basket, sample = l_sample.lims_location
cell = int(round((basket + 0.5) / 3.0))
puck = basket - 3 * (cell - 1)
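# e.g. a LIMS basket index of 5 maps to cell 2, puck 2 (three pucks per cell)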
new_location = (cell, puck, sample)
l_sample.lims_location = new_location
l_samples[new_location] = l_sample
name = l_sample.get_name()
l_sample.init_from_sc_sample([new_location])
l_sample.set_name(name)
return barcode_samples, l_samples
def refresh_sample_list(self):
"""
Retrieves sample information from ISPyB and populates the sample list
accordingly.
"""
log = logging.getLogger("user_level_log")
self.lims_samples = HWR.beamline.lims.get_samples(
HWR.beamline.session.proposal_id, HWR.beamline.session.session_id
)
basket_list = []
sample_list = []
self.filtered_lims_samples = []
sample_changer = None
self.sample_changer_widget.sample_combo.clear()
for sample in self.lims_samples:
try:
if sample.containerSampleChangerLocation:
self.filtered_lims_samples.append(sample)
item_text = "%s-%s" % (sample.proteinAcronym, sample.sampleName)
self.sample_changer_widget.sample_combo.addItem(item_text)
except BaseException:
pass
self.sample_changer_widget.sample_label.setEnabled(True)
self.sample_changer_widget.sample_combo.setEnabled(True)
self.sample_changer_widget.sample_combo.setCurrentIndex(-1)
if self.dc_tree_widget.sample_mount_method == 1:
sample_changer = HWR.beamline.sample_changer
elif self.dc_tree_widget.sample_mount_method == 2:
sample_changer = HWR.beamline.plate_manipulator
# if len(self.lims_samples) == 0:
# log.warning("No sample available in LIMS")
# self.mount_mode_combo_changed(self.sample_changer_widget.filter_cbox.currentIndex())
# return
if sample_changer is not None:
(barcode_samples, location_samples) = self.dc_tree_widget.samples_from_lims(
self.lims_samples
)
sc_basket_content, sc_sample_content = self.get_sc_content()
sc_basket_list, sc_sample_list = self.dc_tree_widget.samples_from_sc_content(
sc_basket_content, sc_sample_content
)
basket_list = sc_basket_list
# self.queue_sync_action.setEnabled(True)
for sc_sample in sc_sample_list:
# Get the sample in lims with the barcode
# sc_sample.code
lims_sample = barcode_samples.get(sc_sample.code)
# There was a sample with that barcode
if lims_sample:
if lims_sample.lims_location == sc_sample.location:
log.debug(
"Found sample in ISPyB for location %s"
% str(sc_sample.location)
)
sample_list.append(lims_sample)
else:
log.warning(
"The sample with the barcode (%s) exists" % sc_sample.code
+ " in LIMS but the location does not mat"
+ "ch. Sample changer location: %s, LIMS "
% sc_sample.location
+ "location %s" % lims_sample.lims_location
)
sample_list.append(sc_sample)
else: # No sample with that barcode, continue with location
lims_sample = location_samples.get(sc_sample.location)
if lims_sample:
if lims_sample.lims_code:
log.warning(
"The sample has a barcode in LIMS, but "
+ "the SC has no barcode information for "
+ "this sample. For location: %s"
% str(sc_sample.location)
)
sample_list.append(lims_sample)
else:
log.debug(
"Found sample in ISPyB for location %s"
% str(sc_sample.location)
)
sample_list.append(lims_sample)
else:
if lims_sample:
if lims_sample.lims_location is not None:
log.warning(
"No barcode was provided in ISPyB "
+ "which makes it impossible to verify if"
+ "the locations are correct, assuming "
+ "that the positions are correct."
)
sample_list.append(lims_sample)
else:
# log.warning("No sample in ISPyB for location %s" % \
# str(sc_sample.location))
sample_list.append(sc_sample)
self.dc_tree_widget.populate_tree_widget(
basket_list, sample_list, self.dc_tree_widget.sample_mount_method
)
self.dc_tree_widget.de_select_items()
def sample_combo_changed(self, index):
"""
Assigns lims sample to manually-mounted sample
"""
self.dc_tree_widget.filter_sample_list(0)
root_model = HWR.beamline.queue_model.get_model_root()
sample_model = root_model.get_children()[0]
sample_model.init_from_lims_object(self.filtered_lims_samples[index])
self.dc_tree_widget.sample_tree_widget.clear()
self.dc_tree_widget.populate_free_pin(sample_model)
def get_sc_content(self):
"""
Gets the 'raw' data from the sample changer.
:returns: Two lists of tuples containing the basket and sample information.
"""
sc_basket_content = []
sc_sample_content = []
for basket in HWR.beamline.sample_changer.get_basket_list():
basket_index = basket.get_index()
basket_name = basket.get_name()
sc_basket_content.append((basket_index + 1, basket, basket_name))
for sample in HWR.beamline.sample_changer.get_sample_list():
matrix = sample.get_id() or ""
basket_index = sample.get_container().get_index()
sample_index = sample.get_index()
sample_name = sample.get_name()
sc_sample_content.append(
(matrix, basket_index + 1, sample_index + 1, sample_name)
)
return sc_basket_content, sc_sample_content
def get_plate_content(self):
"""
"""
plate_row_content = []
plate_sample_content = []
for row in HWR.beamline.plate_manipulator.get_basket_list():
row_index = row.get_index()
row_name = row.get_name()
plate_row_content.append((row_index, row, row_name))
for sample in HWR.beamline.plate_manipulator.get_sample_list():
row_index = sample.get_cell().get_row_index()
sample_name = sample.get_name()
coords = sample.get_coords()
matrix = sample.get_id() or ""
plate_sample_content.append((matrix, coords[0], coords[1], sample_name))
return plate_row_content, plate_sample_content
def status_msg_changed(self, msg, color):
"""
Status message from the SampleChangerBrick.
:param msg: The message
:type msg: str
:returns: None
"""
logging.getLogger("GUI").info(msg)
def set_sample_pin_icon(self):
"""
Updates the location of the sample pin when the
matrix code information changes. The matrix code information
is updated, but not exclusively, when a sample is changed.
"""
self.dc_tree_widget.set_sample_pin_icon()
def sample_load_state_changed(self, state, *args):
"""
The state in the sample loading procedure changed.
        E.g. from Loading to Mounted.
:param state: str (Enumerable)
:returns: None
"""
s_color = SC_STATE_COLOR.get(state, "UNKNOWN")
Colors.set_widget_color(
self.sample_changer_widget.details_button, QtImport.QColor(s_color)
)
self.dc_tree_widget.scroll_to_item()
if HWR.beamline.diffractometer.in_plate_mode():
self.dc_tree_widget.plate_navigator_widget.refresh_plate_location()
def sample_selection_changed(self):
"""
Updates the selection of pucks. Method is called when the selection
of pucks in the dewar has been changed.
"""
self.dc_tree_widget.update_basket_selection()
def sample_changer_status_changed(self, state):
BaseWidget.set_status_info("sc", state)
def plate_info_changed(self):
self.set_sample_pin_icon()
self.dc_tree_widget.plate_navigator_widget.refresh_plate_location()
self.dc_tree_widget.scroll_to_item()
def show_sample_centring_tab(self):
self.sample_changer_widget.details_button.setText("Show SC-details")
self.hide_sample_centring_tab.emit(False)
def show_sample_tab(self, item):
self.sample_changer_widget.details_button.setText("Show SC-details")
self.hide_sample_tab.emit(False)
def show_dcg_tab(self, item):
self.sample_changer_widget.details_button.setText("Show SC-details")
self.hide_dcg_tab.emit(False)
self.populate_dc_group_tab(item)
def populate_dc_group_tab(self, item=None):
self.populate_dc_group_widget.emit(item)
def show_datacollection_tab(self, item):
self.sample_changer_widget.details_button.setText("Show SC-details")
self.hide_dc_parameters_tab.emit(False)
self.populate_dc_parameters_tab(item)
def populate_dc_parameters_tab(self, item=None):
self.populate_dc_parameter_widget.emit(item)
def show_char_parameters_tab(self, item):
self.sample_changer_widget.details_button.setText("Show SC-details")
self.hide_char_parameters_tab.emit(False)
def populate_char_parameters_tab(self, item):
self.populate_char_parameter_widget.emit(item)
def show_energy_scan_tab(self, item):
self.sample_changer_widget.details_button.setText("Show SC-details")
self.hide_energy_scan_tab.emit(False)
self.populate_energy_scan_tab(item)
def populate_energy_scan_tab(self, item):
self.populate_energy_scan_widget.emit(item)
def show_xrf_spectrum_tab(self, item):
self.sample_changer_widget.details_button.setText("Show SC-details")
self.hide_xrf_spectrum_tab.emit(False)
self.populate_xrf_spectrum_tab(item)
def populate_xrf_spectrum_tab(self, item):
self.populate_xrf_spectrum_widget.emit(item)
def show_advanced_tab(self, item):
self.sample_changer_widget.details_button.setText("Show SC-details")
self.hide_advanced_tab.emit(False)
self.populate_advanced_tab(item)
def populate_advanced_tab(self, item):
self.populate_advanced_widget.emit(item)
def show_workflow_tab_from_model(self):
self.show_workflow_tab(None)
def show_workflow_tab(self, item):
self.sample_changer_widget.details_button.setText("Show SC-details")
running = HWR.beamline.queue_manager.is_executing()
self.populate_workflow_tab(item, running=running)
def populate_workflow_tab(self, item, running=False):
self.populate_workflow_widget.emit((item, running))
def show_xray_imaging_tab(self, item):
self.sample_changer_widget.details_button.setText("Show SC-details")
self.hide_xray_imaging_tab.emit(False)
self.populate_xray_imaging_tab(item)
def populate_xray_imaging_tab(self, item):
self.populate_xray_imaging_widget.emit(item)
def mount_mode_combo_changed(self, index):
self.dc_tree_widget.filter_sample_list(index)
self.sample_changer_widget.details_button.setEnabled(index > 0)
self.sample_changer_widget.synch_ispyb_button.setEnabled(
index < 2 and self.is_logged_in
)
# self.sample_changer_widget.sample_label.setEnabled(False)
# self.sample_changer_widget.sample_combo.setEnabled(index == 0)
if index == 0:
self.hide_sample_changer_tab.emit(True)
self.hide_plate_manipulator_tab.emit(True)
def toggle_sample_changer_tab(self):
if self.current_view == self.sample_changer_widget:
self.current_view = None
if self.dc_tree_widget.sample_mount_method == 1:
self.hide_sample_changer_tab.emit(True)
self.sample_changer_widget.details_button.setText("Show SC-details")
else:
self.hide_plate_manipulator_tab.emit(True)
self.sample_changer_widget.details_button.setText("Show Plate-details")
self.dc_tree_widget.sample_tree_widget_selection()
else:
self.current_view = self.sample_changer_widget
self.hide_dc_parameters_tab.emit(True)
self.hide_dcg_tab.emit(True)
if self.dc_tree_widget.sample_mount_method == 1:
self.hide_sample_changer_tab.emit(False)
self.sample_changer_widget.details_button.setText("Hide SC-details")
else:
self.hide_plate_manipulator_tab.emit(False)
self.sample_changer_widget.details_button.setText("Hide Plate-details")
self.hide_sample_tab.emit(True)
def selection_changed_cb(self, items):
if len(items) == 1:
item = items[0]
if isinstance(item, queue_item.SampleQueueItem):
self.populate_sample_details.emit(item.get_model())
self.emit_set_sample(item)
self.emit_set_directory()
self.emit_set_prefix(item)
# self.populate_edna_parameter_widget(item)
elif isinstance(item, queue_item.DataCollectionQueueItem):
data_collection = item.get_model()
if data_collection.is_mesh():
self.populate_advanced_tab(item)
else:
self.populate_dc_parameters_tab(item)
elif isinstance(item, queue_item.CharacterisationQueueItem):
self.populate_char_parameters_tab(item)
elif isinstance(item, queue_item.EnergyScanQueueItem):
self.populate_energy_scan_tab(item)
elif isinstance(item, queue_item.XRFSpectrumQueueItem):
self.populate_xrf_spectrum_tab(item)
elif isinstance(item, queue_item.GenericWorkflowQueueItem):
self.populate_workflow_tab(item)
elif isinstance(item, queue_item.DataCollectionGroupQueueItem):
self.populate_dc_group_tab(item)
elif isinstance(item, queue_item.XrayCenteringQueueItem):
self.populate_advanced_tab(item)
elif isinstance(item, queue_item.XrayImagingQueueItem):
self.populate_xray_imaging_tab(item)
self.selection_changed.emit(items)
def emit_set_directory(self):
directory = str(HWR.beamline.session.get_base_image_directory())
self.set_directory.emit(directory)
def emit_set_prefix(self, item):
prefix = HWR.beamline.session.get_default_prefix(item.get_model())
self.set_prefix.emit(prefix)
def emit_set_sample(self, item):
self.set_sample.emit(item)
def get_selected_items(self):
items = self.dc_tree_widget.get_selected_items()
return items
def add_to_queue(self, task_list, parent_tree_item=None, set_on=True):
if not parent_tree_item:
parent_tree_item = self.dc_tree_widget.get_mounted_sample_item()
self.dc_tree_widget.add_to_queue(task_list, parent_tree_item, set_on)
def open_xmlrpc_dialog(self, dialog_dict):
QtImport.QMessageBox.information(
self,
"Message from beamline operator",
dialog_dict["msg"],
QtImport.QMessageBox.Ok,
)
def select_last_added_item(self):
self.dc_tree_widget.select_last_added_item()
def filter_combo_changed(self, filter_index):
"""Filters sample treewidget based on the selected filter criteria:
0 : No filter
1 : Star
2 : Sample name
3 : Protein name
4 : Basket index
5 : Executed
6 : Not executed
7 : OSC
8 : Helical
9 : Characterisation
10: Energy Scan
11: XRF spectrum
"""
self.sample_changer_widget.filter_ledit.setEnabled(filter_index in (2, 3, 4))
self.clear_filter()
if filter_index > 0:
item_iterator = QtImport.QTreeWidgetItemIterator(
self.dc_tree_widget.sample_tree_widget
)
item = item_iterator.value()
while item:
hide = False
item_model = item.get_model()
if filter_index == 1:
hide = not item.has_star()
elif filter_index == 5:
if isinstance(item, queue_item.DataCollectionQueueItem):
hide = not item_model.is_executed()
elif filter_index == 6:
if isinstance(item, queue_item.DataCollectionQueueItem):
hide = item_model.is_executed()
elif filter_index == 7:
if isinstance(item, queue_item.DataCollectionQueueItem):
hide = item_model.is_helical()
else:
hide = True
elif filter_index == 8:
if isinstance(item, queue_item.DataCollectionQueueItem):
hide = not item_model.is_helical()
else:
hide = True
elif filter_index == 9:
hide = not isinstance(item, queue_item.CharacterisationQueueItem)
elif filter_index == 10:
hide = not isinstance(item, queue_item.EnergyScanQueueItem)
elif filter_index == 11:
hide = not isinstance(item, queue_item.XRFSpectrumQueueItem)
# elif filter_index == 11:
# hide = not isinstance(item, queue_item.AdvancedQueueItem)
if type(item) not in (
queue_item.TaskQueueItem,
queue_item.SampleQueueItem,
queue_item.BasketQueueItem,
queue_item.DataCollectionGroupQueueItem,
):
item.set_hidden(hide)
item_iterator += 1
item = item_iterator.value()
self.dc_tree_widget.hide_empty_baskets()
def filter_text_changed(self, new_text):
item_iterator = QtImport.QTreeWidgetItemIterator(
self.dc_tree_widget.sample_tree_widget
)
item = item_iterator.value()
filter_index = self.sample_changer_widget.filter_combo.currentIndex()
while item:
hide = False
new_text = str(new_text)
if filter_index == 2:
if isinstance(item, queue_item.SampleQueueItem):
hide = not new_text in item.text(0)
elif filter_index == 3:
if isinstance(item, queue_item.SampleQueueItem):
hide = not new_text in item.get_model().crystals[0].protein_acronym
elif filter_index == 4:
if isinstance(item, queue_item.BasketQueueItem):
if new_text.isdigit():
# Display one basket
hide = int(new_text) != item.get_model().location[0]
else:
                        # Display several baskets. Separated with ","
                        enable_basket_list = [
                            int(val) for val in new_text.split(",")
                            if val.strip().isdigit()
                        ]
                        if len(enable_basket_list) > 1:
                            hide = (
                                item.get_model().location[0]
                                not in enable_basket_list
                            )
item.set_hidden(hide)
item_iterator += 1
item = item_iterator.value()
if filter_index != 3:
self.dc_tree_widget.hide_empty_baskets()
def clear_filter(self):
item_iterator = QtImport.QTreeWidgetItemIterator(
self.dc_tree_widget.sample_tree_widget
)
item = item_iterator.value()
while item:
item.set_hidden(False)
item_iterator += 1
item = item_iterator.value()
def diffractometer_phase_changed(self, phase):
if self.enable_collect_conditions.get("diffractometer") != (
phase != "BeamLocation"
):
self.enable_collect_conditions["diffractometer"] = phase != "BeamLocation"
if phase:
self.update_enable_collect()
def ppu_status_changed(self, in_error, status_msg):
if self.enable_collect_conditions.get("ppu") != (in_error != True):
self.enable_collect_conditions["ppu"] = in_error != True
self.update_enable_collect()
def shutter_state_changed(self, state, msg=None):
# NBNB TODO HACK.
# Necessary because shutter states can be both 'opened', 'OPEN'. (and more?)
# NBNB fixme
is_open = bool(state and state.lower().startswith('open'))
if self.enable_collect_conditions.get("shutter") != is_open:
self.enable_collect_conditions["shutter"] = is_open
self.update_enable_collect()
def machine_current_changed(self, value, in_range):
return
if self.enable_collect_conditions.get("machine_current") != in_range:
self.enable_collect_conditions["machine_current"] = in_range
self.update_enable_collect()
def update_enable_collect(self):
if self.current_queue_entry is not None:
#Do not enable/disable collect button if queue is executing
return
# Do not allow to start xray imaging from BeamLocation and DataCollection phase
self.enable_collect_conditions["imaging"] = True
for item in self.get_selected_items():
if isinstance(
item, queue_item.XrayImagingQueueItem
) and HWR.beamline.diffractometer.get_current_phase() in (
"BeamLocation",
"DataCollection",
):
self.enable_collect_conditions["imaging"] = False
enable_collect = all(
item == True for item in self.enable_collect_conditions.values()
)
if enable_collect != self.dc_tree_widget.enable_collect_condition:
if enable_collect:
logging.getLogger("GUI").info("Data collection is enabled")
else:
msg = ""
logging.getLogger("GUI").warning("Data collect is disabled")
for key, value in self.enable_collect_conditions.items():
if value == False:
if key == "diffractometer":
logging.getLogger("GUI").warning(
" - Diffractometer is in beam location phase"
)
elif key == "shutter":
logging.getLogger("GUI").warning(
" - Safety shutter is closed "
+ "(Open the safety shutter to enable collections)"
)
elif key == "ppu":
logging.getLogger("GUI").error(" - PPU is in error state")
elif key == "machine_current":
logging.getLogger("GUI").error(
" - Machine current is to low "
+ "(Wait till the machine current reaches 90 mA)"
)
elif key == "imaging":
logging.getLogger("GUI").warning(
"To start an imaging collection "
+ "diffractometer has to be in SampleCentering or in Transfer phase"
)
self.dc_tree_widget.enable_collect_condition = enable_collect
self.dc_tree_widget.toggle_collect_button_enabled()
def save_queue(self):
"""Saves queue in the file"""
if self.redis_client_hwobj is not None:
self.redis_client_hwobj.save_queue()
# else:
# self.dc_tree_widget.save_queue()
def auto_save_queue(self):
"""Saves queue in the file"""
if self.queue_autosave_action is not None:
if (
self.queue_autosave_action.isChecked()
and self.dc_tree_widget.samples_initialized
):
if self.redis_client_hwobj is not None:
self.redis_client_hwobj.save_queue()
# else:
# self.dc_tree_widget.save_queue()
def load_queue(self):
"""Loads queue from file"""
loaded_model = None
if self.redis_client_hwobj is not None:
loaded_model = self.redis_client_hwobj.load_queue()
if loaded_model is not None:
self.dc_tree_widget.sample_tree_widget.clear()
model_map = {"free-pin": 0, "ispyb": 1, "plate": 2}
self.sample_changer_widget.filter_cbox.setCurrentIndex(
model_map[loaded_model]
)
self.mount_mode_combo_changed(model_map[loaded_model])
self.select_last_added_item()
self.dc_tree_widget.scroll_to_item(self.dc_tree_widget.last_added_item)
return loaded_model
def queue_autosave_clicked(self):
"""Enable/disable queue autosave"""
pass
def queue_undo_clicked(self):
"""If queue autosave is enabled then undo last change"""
self.dc_tree_widget.undo_queue()
def queue_redo_clicked(self):
"""If queue autosave is enable then redo last changed"""
self.dc_tree_widget.redo_queue()
def queue_sync_clicked(self):
"""Add diffraction plan from ISPyB to all samples"""
self.dc_tree_widget.sample_tree_widget.selectAll()
self.dc_tree_widget.sync_diffraction_plan()
def data_path_changed(self, conflict):
"""Data path changed event. Used in state machine"""
self.dc_tree_widget.item_parameters_changed()
self.set_condition_state("data_path_valid", not conflict)
def acq_parameters_changed(self, conflict):
"""Acq parameter changed event. Used in state machine"""
self.dc_tree_widget.item_parameters_changed()
self.set_condition_state("acq_parameters_valid", len(conflict) == 0)
def set_condition_state(self, condition_name, value):
"""Sets condition to defined state"""
if self.state_machine_hwobj is not None:
self.state_machine_hwobj.condition_changed(condition_name, value)
| lgpl-3.0 | -3,314,555,617,902,337,000 | 41.44164 | 100 | 0.579697 | false |
mhspradlin/go-lite-bot | bot.py | 1 | 1582 | # Simply the class definitions for the bot and worker declarations
# Nice way to make HTTP get requests
import requests
# A nice holder for information we need between function calls
class Bot:
double_resets = {}
def __init__ (self, token):
self.token = token
handlers = {}
# Adds a single event handler
def addHandler (self, text, func):
        self.handlers[text] = func
# Sends a text message to the specified chat_id
def sendMessage (self, chat_id = None, text = None):
if (chat_id != None and text != None):
r = requests.post('https://api.telegram.org/bot' + self.token +
'/sendMessage' +
'?chat_id=' + str(chat_id) +
'&text=' + text)
while r.status_code != requests.codes.ok:
r = requests.post('https://api.telegram.org/bot' + self.token +
'/sendMessage' +
'?chat_id=' + str(chat_id) +
'&text=' + text)
# Sends as photo using multipart-formdata
# Note that photo is a file-like object (like a StringIO object)
def sendImage (self, chat_id = None, photo = None):
if (chat_id != None and photo != None):
data = { 'chat_id' : str(chat_id) }
files = { 'photo' : ('board-image.png', photo) }
requests.post('https://api.telegram.org/bot' + self.token +
'/sendPhoto', data = data, files = files)
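    # A hypothetical usage sketch (token, chat id, handler and image path are
    # placeholders, not part of this module):
    #   bot = Bot('<bot-token>')
    #   bot.addHandler('/start', start_handler)
    #   bot.sendMessage(chat_id=42, text='hello')
    #   with open('board-image.png', 'rb') as img:
    #       bot.sendImage(chat_id=42, photo=img)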
| mit | 952,455,742,594,944,600 | 38.575 | 79 | 0.513274 | false |
AlexRiina/django-money | tests/test_form.py | 1 | 2341 | # -*- coding: utf-8 -*-
"""
Created on May 7, 2011
@author: jake
"""
from decimal import Decimal
import moneyed
import pytest
from moneyed import Money
from .testapp.forms import (
MoneyForm,
MoneyFormMultipleCurrencies,
MoneyModelForm,
OptionalMoneyForm,
)
from .testapp.models import ModelWithVanillaMoneyField
pytestmark = pytest.mark.django_db
def test_save():
money = Money(Decimal('10'), moneyed.SEK)
form = MoneyModelForm({'money_0': money.amount, 'money_1': money.currency})
assert form.is_valid()
instance = form.save()
retrieved = ModelWithVanillaMoneyField.objects.get(pk=instance.pk)
assert money == retrieved.money
def test_validate():
money = Money(Decimal('10'), moneyed.SEK)
form = MoneyForm({'money_0': money.amount, 'money_1': money.currency})
assert form.is_valid()
result = form.cleaned_data['money']
assert result == money
@pytest.mark.parametrize(
'data',
(
{'money_0': 'xyz*|\\', 'money_1': moneyed.SEK},
{'money_0': 10000, 'money_1': moneyed.SEK},
{'money_0': 1, 'money_1': moneyed.SEK},
{'money_0': 10, 'money_1': moneyed.EUR}
)
)
def test_form_is_invalid(data):
assert not MoneyForm(data).is_valid()
@pytest.mark.parametrize(
'data, result',
(
({'money_0': '', 'money_1': moneyed.SEK}, []),
({'money_0': '1.23', 'money_1': moneyed.SEK}, ['money']),
)
)
def test_changed_data(data, result):
assert MoneyForm(data).changed_data == result
def test_change_currency_not_amount():
"""
If the amount is the same, but the currency changes, then we
should consider this to be a change.
"""
form = MoneyFormMultipleCurrencies(
{'money_0': Decimal(10), 'money_1': moneyed.EUR},
initial={'money': Money(Decimal(10), moneyed.SEK)}
)
assert form.changed_data == ['money']
@pytest.mark.parametrize(
'data, result',
(
({'money_1': moneyed.SEK}, True),
({'money_0': '', 'money_1': moneyed.SEK}, True),
({'money_0': 'xyz*|\\', 'money_1': moneyed.SEK}, False),
)
)
def test_optional_money_form(data, result):
"""
The currency widget means that 'money_1' will always be filled
in, but 'money_0' could be absent/empty.
"""
assert OptionalMoneyForm(data).is_valid() is result
| bsd-3-clause | -7,676,951,155,857,008,000 | 23.642105 | 79 | 0.620248 | false |
uw-it-aca/mdot-developers | mdotdevs/views.py | 1 | 3614 | from django.conf import settings
from django.template.loader import get_template
from django.template import RequestContext, Context
from django.shortcuts import render_to_response, render
from django.core.mail import send_mail, BadHeaderError
from django.http import HttpResponse, HttpResponseRedirect
import urllib
import json
from forms import ReviewForm
def home(request):
return render_to_response(
'mdotdevs/home.html',
context_instance=RequestContext(request))
def guidelines(request):
return render_to_response(
'mdotdevs/guidelines.html',
context_instance=RequestContext(request))
def process(request):
return render_to_response(
'mdotdevs/process.html',
context_instance=RequestContext(request))
def review(request):
# if this is a POST request we need to process the form data
if request.method == 'POST':
# create a form instance and populate it with data from the request:
form = ReviewForm(request.POST)
# check whether it's valid:
if form.is_valid():
campus_audience = form.cleaned_data['campus_audience']
campus_need = form.cleaned_data['campus_need']
sponsor_name = form.cleaned_data['sponsor_name']
sponsor_netid = form.cleaned_data['sponsor_netid']
sponsor_email = form.cleaned_data['sponsor_email']
dev_name = form.cleaned_data['dev_name']
dev_email = form.cleaned_data['dev_email']
support_name = form.cleaned_data['support_name']
support_email = form.cleaned_data['support_email']
support_contact = form.cleaned_data['support_contact']
ats_review = form.cleaned_data['ats_review']
ux_review = form.cleaned_data['ux_review']
brand_review = form.cleaned_data['brand_review']
app_documentation = form.cleaned_data['app_documentation']
app_code = form.cleaned_data['app_code']
anything_else = form.cleaned_data['anything_else']
email_context = Context({
'campus_audience': campus_audience,
'campus_need': campus_need,
'sponsor_name': sponsor_name,
'sponsor_netid': sponsor_netid,
'sponsor_email': sponsor_email,
'dev_name': dev_name,
'dev_email': dev_email,
'support_name': support_name,
'support_email': support_email,
'support_contact': support_contact,
'ats_review': ats_review,
'ux_review': ux_review,
'brand_review': brand_review,
'app_documentation': app_documentation,
'app_code': app_code,
'anything_else': anything_else
})
try:
send_mail(
sponsor_name,
get_template(
'mdotdevs/email_plain.html').render(email_context),
sponsor_email, ['[email protected]'],
html_message=get_template('mdotdevs/email_html.html')
.render(email_context),
            )
except BadHeaderError:
return HttpResponse('Invalid header found.')
return render_to_response(
'mdotdevs/thanks.html',
context_instance=RequestContext(request))
# if a GET (or any other method) we'll create a blank form
else:
form = ReviewForm()
return render(request, 'mdotdevs/review.html', {'form': form})
| apache-2.0 | -5,927,210,176,114,259,000 | 38.714286 | 76 | 0.589651 | false |
yayoiukai/signalserver | policies/views.py | 1 | 11360 | import os
import datetime
import errno
from django.shortcuts import render
from django.http import Http404
from django.http import HttpResponse
from django.template import loader
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.utils.encoding import smart_str
from xml.dom import minidom
from xml.etree.ElementTree import Element, SubElement, Comment
import xml.etree.ElementTree as ET
from .models import Policy, Operation, PolicyFile
from fileuploads.constants import POLICY_FILEPATH
from groups.models import Result, Row, Process
from .forms import PolicyNameForm
from .forms import PolicyForm
from .forms import PolicyFileForm
from .forms import OperationForm
def replace_letters(policy_name):
if " " in policy_name:
policy_name = policy_name.replace(' ', '_')
if "-" in policy_name:
policy_name = policy_name.replace('-', '_')
return policy_name
def get_dashboard_value(request, keyword='dashboard'):
if not keyword in request.POST:
dashboard = False
else:
dashboard = True
return dashboard
@login_required(login_url="/login/")
def index(request):
if request.method == 'POST':
policy_name = request.POST['policy_name']
policy_name = replace_letters(policy_name)
description = request.POST['description']
dashboard = get_dashboard_value(request)
display_order = request.POST['display_order']
count = Policy.objects.filter(
policy_name=policy_name).count()
if count > 0:
message = "policy name : " + policy_name + " is already taken. \
Please choose different name. Policy name needs to be unique."
return render_index(request, message)
else:
new_policy = Policy(
policy_name=policy_name,
display_order=display_order,
description=description,
dashboard=dashboard
)
new_policy.save()
return render_index(request, None)
def render_index(request, message):
form = PolicyForm() # A empty, unbound form
file_form = PolicyFileForm()
# Load documents for the list page
policies = Policy.objects.all().order_by('display_order')
new_display_order = policies.count() + 1
# Render list page with the documents and the form
return render(request, 'policies/index.html',
{'policies': policies, 'form': form, 'file_form': file_form,
'message': message, 'new_display_order': new_display_order})
def delete_policy(request, policy_id):
Policy.objects.get(id=policy_id).delete()
return HttpResponseRedirect(reverse('policies:index'))
def create_policy_xml(policy, file_name):
root = ET.Element("policy", name=policy.policy_name)
description = ET.SubElement(root, "description")
description.text = policy.description
operations = Operation.objects.filter(policy=policy)
for op in operations:
ET.SubElement(root, "rule", id=str(op.display_order),
filter_01=op.signal_name,
filter_02=op.second_signal_name, operation=op.op_name,
cutoff_number=str(op.cut_off_number),
dashboard=str(op.dashboard),
group_percentage=str(op.percentage),
file_percentage=str(op.file_percentage)
).text = op.description
xmlstr = minidom.parseString(ET.tostring(root)).toprettyxml(indent=" ")
with open(file_name, "w") as f:
f.write(xmlstr)
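# The XML written above has this shape (attribute values are illustrative
# only; names match the SubElement calls in create_policy_xml):
#   <policy name="my_policy">
#     <description>...</description>
#     <rule id="1" filter_01="signal_a" filter_02="signal_b" operation="gt"
#           cutoff_number="0" dashboard="False" group_percentage="0"
#           file_percentage="0">rule description</rule>
#   </policy>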
def get_or_create_policy_file(policy):
original_file_name = policy.policy_name + ".xml"
file_name = os.path.join(POLICY_FILEPATH, original_file_name)
if os.path.exists(file_name):
try:
os.remove(file_name)
except OSError as e:
#errno.ENOENT = no such file or directory
if e.errno != errno.ENOENT:
raise # re-raise exception if a different error occured
create_policy_xml(policy, file_name)
return file_name
def download_policy(request, policy_id):
policy = Policy.objects.get(id=policy_id)
file_name = policy.policy_name
file_path = get_or_create_policy_file(policy)
file_itself = open(file_path, 'rb')
response = HttpResponse(file_itself,
content_type='application/force-download')
response['X-Sendfile'] = file_path
response['Content-Length'] = os.stat(file_path).st_size
response['Content-Disposition'] = 'attachment; \
filename={}.xml'.format(smart_str(file_name))
return response
def create_policy_from_file(file_name):
new_file_name = os.path.join(POLICY_FILEPATH, file_name)
tree = ET.parse(new_file_name)
root = tree.getroot()
policy_name = root.attrib['name']
if Policy.objects.filter(policy_name=policy_name).count() > 0:
d = datetime.datetime.now()
policy_name = policy_name + '_uploaded_on_' + \
d.strftime("%Y_%m_%d_%H:%M")
desc = root.findall('description')[0].text
new_policy = Policy(
policy_name=policy_name,
description=desc
)
new_policy.save()
for child in root:
if child.tag == 'description':
continue
rule = child.attrib
desc = rule.get('description')
if desc is None:
desc = "No description"
new_operation = Operation(
policy=new_policy,
cut_off_number=rule.get('cutoff_number'),
signal_name=rule.get('filter_01'),
second_signal_name=rule.get('filter_02'),
op_name=rule.get('operation'),
description=desc,
percentage=rule.get('group_percentage'),
file_percentage=rule.get('file_percentage'),
dashboard=rule.get('dashboard')
)
new_operation.save()
@login_required(login_url="/login/")
def upload(request):
# Handle policy file upload
user_name = request.user.username
message = None
if request.method == 'POST':
form = PolicyFileForm(request.POST, request.FILES)
policy_file = request.FILES.get('policyfile')
if form.is_valid():
original_name = policy_file.name
extension = original_name[-4:]
if extension != ".xml":
message = "File format needs to be .xml. Your file is "
message = message + original_name + "\n"
else:
new_policy_file = PolicyFile(
policy_file=policy_file,
file_name=original_name,
)
new_policy_file.save()
create_policy_from_file(original_name)
else:
message = "something wrong with form"
return HttpResponseRedirect(reverse('policies:index'))
def delete_rule(request, op_id, policy_id):
Operation.objects.get(id=op_id).delete()
return HttpResponseRedirect(reverse('policies:show',
kwargs={'policy_id': policy_id}))
def edit_rule(policy, op_name, cutoff_num, sig_name, sig2_name,
display_order, description, percentage,
file_percentage, dashboard, id_num):
operation = Operation.objects.get(id=id_num)
operation.policy = policy
operation.cut_off_number = cutoff_num
operation.signal_name = sig_name
operation.second_signal_name = sig2_name
operation.op_name = op_name
operation.description = description
operation.percentage = percentage
operation.file_percentage = file_percentage
operation.dashboard = dashboard
operation.save()
def add_rule(policy, op_name, cutoff_num, sig_name, sig2_name,
display_order, description, percentage,
file_percentage, dashboard):
new_operation = Operation(
policy=policy,
cut_off_number=cutoff_num,
signal_name=sig_name,
second_signal_name=sig2_name,
op_name=op_name,
display_order=display_order,
description=description,
percentage=percentage,
file_percentage=file_percentage,
dashboard=dashboard
)
new_operation.save()
def update_policy(request, policy):
keyword = 'policy_dashboard'
dashboard = get_dashboard_value(request, keyword)
version = request.POST['version']
policy.dashboard = dashboard
policy.version = version
policy.save()
return policy
@login_required(login_url="/login/")
def show(request, policy_id):
policy = Policy.objects.get(id=policy_id)
if request.method == 'POST':
form = OperationForm(request.POST)
action = request.POST['action']
if action == "update_policy":
policy = update_policy(request, policy)
else:
dashboard = get_dashboard_value(request)
cutoff_num = request.POST.get('cutoff_number', 0)
sig_name = request.POST['signal_fields']
sig2_name = request.POST['second_signal_fields']
op_name = request.POST['operation_fields']
display_order = request.POST['display_order']
description = request.POST['description']
percentage = request.POST['percentage']
file_percentage = request.POST['file_percentage']
if action == 'new':
add_rule(policy, op_name, cutoff_num, sig_name, sig2_name,
display_order, description, percentage,
file_percentage, dashboard)
else:
id_num = request.POST['id_num']
edit_rule(policy, op_name, cutoff_num, sig_name, sig2_name,
display_order, description, percentage,
file_percentage, dashboard,
id_num)
policy.user_name = request.user.username
policy.save()
operation = Operation.objects.filter(
policy=policy).order_by('display_order')
length = len(operation) + 1
form = OperationForm() # A empty, unbound form
return render(request, 'policies/show.html',
{'policy': policy,
'form': form,
'operation': operation, 'length': length})
def rename(request):
if request.method == 'POST':
old_name = request.POST['old_name']
new_name = request.POST['new_name']
new_name = replace_letters(new_name)
policy = Policy.objects.get(
policy_name=old_name)
processes = Process.objects.filter(policy_name=old_name)
for process in processes:
process.policy_name = new_name
process.save()
policy.policy_name = new_name
policy.save()
return HttpResponseRedirect(reverse('policies:show',
kwargs={'policy_id': policy.id}))
def results(request, policy_id):
response = "result of policies %s."
return HttpResponse(response % policy_id)
def detail(request, operation_id):
    try:
        operation = Operation.objects.get(pk=operation_id)
except Operation.DoesNotExist:
raise Http404("Operation does not exist")
return render(request, 'policies/detail.html', {'operation': operation})
| mit | -2,504,476,941,914,115,600 | 35.178344 | 79 | 0.617077 | false |
unicefuganda/edtrac | edtrac_project/rapidsms_xforms_src/test_settings.py | 1 | 5949 | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
# encoding=utf-8
# -------------------------------------------------------------------- #
# MAIN CONFIGURATION #
# -------------------------------------------------------------------- #
# you should configure your database here before doing any real work.
# see: http://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "rapidsms.sqlite3",
}
}
# the rapidsms backend configuration is designed to resemble django's
# database configuration, as a nested dict of (name, configuration).
#
# the ENGINE option specifies the module of the backend; the most common
# backend types (for a GSM modem or an SMPP server) are bundled with
# rapidsms, but you may choose to write your own.
#
# all other options are passed to the Backend when it is instantiated,
# to configure it. see the documentation in those modules for a list of
# the valid options for each.
INSTALLED_BACKENDS = {
#"att": {
# "ENGINE": "rapidsms.backends.gsm",
# "PORT": "/dev/ttyUSB0"
#},
#"verizon": {
# "ENGINE": "rapidsms.backends.gsm,
# "PORT": "/dev/ttyUSB1"
#},
"message_tester": {
"ENGINE": "rapidsms.backends.bucket",
}
}
# to help you get started quickly, many django/rapidsms apps are enabled
# by default. you may wish to remove some and/or add your own.
INSTALLED_APPS = [
# the essentials.
"django_nose",
"djtables",
"rapidsms",
# common dependencies (which don't clutter up the ui).
"rapidsms.contrib.handlers",
"rapidsms.contrib.ajax",
# enable the django admin using a little shim app (which includes
# the required urlpatterns), and a bunch of undocumented apps that
# the AdminSite seems to explode without.
"django.contrib.sites",
"django.contrib.auth",
"django.contrib.admin",
"django.contrib.sessions",
"django.contrib.contenttypes",
"rapidsms.contrib.djangoadmin",
# the rapidsms contrib apps.
"rapidsms.contrib.default",
"rapidsms.contrib.export",
"rapidsms.contrib.httptester",
"rapidsms.contrib.locations",
"rapidsms.contrib.messagelog",
"rapidsms.contrib.messaging",
"rapidsms.contrib.registration",
"rapidsms.contrib.scheduler",
"rapidsms.contrib.echo",
"uni_form",
"rapidsms_xforms",
]
# this rapidsms-specific setting defines which views are linked by the
# tabbed navigation. when adding an app to INSTALLED_APPS, you may wish
# to add it here, also, to expose it in the rapidsms ui.
RAPIDSMS_TABS = []
#--------------------------------------------------------------------- #
# BORING CONFIGURATION #
# -------------------------------------------------------------------- #
# debug mode is turned on as default, since rapidsms is under heavy
# development at the moment, and full stack traces are very useful
# when reporting bugs. don't forget to turn this off in production.
DEBUG = TEMPLATE_DEBUG = True
# after login (which is handled by django.contrib.auth), redirect to the
# dashboard rather than 'accounts/profile' (the default).
LOGIN_REDIRECT_URL = "/"
# use django-nose to run tests. rapidsms contains lots of packages and
# modules which django does not find automatically, and importing them
# all manually is tiresome and error-prone.
TEST_RUNNER = "django_nose.NoseTestSuiteRunner"
# for some reason this setting is blank in django's global_settings.py,
# but it is needed for static assets to be linkable.
MEDIA_URL = "/static/"
# this is required for the django.contrib.sites tests to run, but also
# not included in global_settings.py, and is almost always ``1``.
# see: http://docs.djangoproject.com/en/dev/ref/contrib/sites/
SITE_ID = 1
# the default log settings are very noisy.
LOG_LEVEL = "DEBUG"
LOG_FILE = "rapidsms.log"
LOG_FORMAT = "[%(name)s]: %(message)s"
LOG_SIZE = 8192 # 8192 bits = 8 kb
LOG_BACKUPS = 256 # number of logs to keep
# these weird dependencies should be handled by their respective apps,
# but they're not, so here they are. most of them are for django admin.
TEMPLATE_CONTEXT_PROCESSORS = [
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.request",
]
# -------------------------------------------------------------------- #
# HERE BE DRAGONS! #
# these settings are pure hackery, and will go away soon #
# -------------------------------------------------------------------- #
# these apps should not be started by rapidsms in your tests, however,
# the models and bootstrap will still be available through django.
TEST_EXCLUDED_APPS = [
"django.contrib.sessions",
"django.contrib.contenttypes",
"django.contrib.auth",
"rapidsms",
"rapidsms.contrib.ajax",
"rapidsms.contrib.httptester",
]
# the default ROOT_URLCONF module, bundled with rapidsms, detects and
# maps the urls.py module of each app into a single project urlconf.
# this is handy, but too magical for the taste of some. (remove it?)
ROOT_URLCONF = "rapidsms.djangoproject.urls"
# since we might hit the database from any thread during testing, the
# in-memory sqlite database isn't sufficient. it spawns a separate
# virtual database for each thread, and syncdb is only called for the
# first. this leads to confusing "no such table" errors. We create
# a named temporary instance instead.
import os
import tempfile
import sys
if 'test' in sys.argv:
for db_name in DATABASES:
DATABASES[db_name]['TEST_NAME'] = os.path.join(
tempfile.gettempdir(),
"%s.rapidsms.test.sqlite3" % db_name)
| bsd-3-clause | 183,958,754,814,737,820 | 32.234637 | 72 | 0.635233 | false |
sixty-north/cosmic-ray | tests/resources/example_project/adam/adam_1.py | 1 | 1178 | """adam.adam_1
"""
# pylint: disable=C0111
import operator
from math import * # noqa: F401,F403
# Add mutation points for comparison operators.
def constant_number():
return 42
def constant_true():
return True
def constant_false():
return False
def bool_and():
return object() and None
def bool_or():
return object() or None
def bool_expr_with_not():
return not object()
def bool_if():
if object():
return True
raise Exception("bool_if() failed")
def if_expression():
return True if object() else None
def assert_in_func():
assert object()
return True
def unary_sub():
return -1
def unary_add():
return +1
def binary_add():
return 5 + 6
def equals(vals):
def constraint(x, y):
return operator.xor(x == y, x != y)
return all([constraint(x, y) for x in vals for y in vals])
def use_break(limit):
for x in range(limit):
break
return x
def use_continue(limit):
for x in range(limit):
continue
return x
def use_star_args(*args):
pass
def use_extended_call_syntax(x):
use_star_args(*x)
def use_star_expr(x):
a, *b = x
| mit | 6,149,737,179,712,978,000 | 11.804348 | 62 | 0.611205 | false |
bytescout/ByteScout-SDK-SourceCode | PDF.co Web API/Add Text And Images To PDF/Python/Add Image by finding target coordinates/AddImageByFindingTargetCoordinates.py | 1 | 3639 | import os
import requests # pip install requests
# The authentication key (API Key).
# Get your own by registering at https://app.pdf.co/documentation/api
API_KEY = "**************************************"
# Base URL for PDF.co Web API requests
BASE_URL = "https://api.pdf.co/v1"
# Direct URL of source PDF file.
SourceFileUrl = "https://bytescout-com.s3.amazonaws.com/files/demo-files/cloud-api/pdf-edit/sample.pdf"
# Search string.
SearchString = 'Your Company Name'
# Comma-separated list of page indices (or ranges) to process. Leave empty for all pages. Example: '0,2-5,7-'.
Pages = ""
# PDF document password. Leave empty for unprotected documents.
Password = ""
# Destination PDF file name
DestinationFile = ".//result.pdf"
# Image params
Type = "image"
Width = 119
Height = 32
ImageUrl = "https://bytescout-com.s3.amazonaws.com/files/demo-files/cloud-api/pdf-edit/logo.png"
def main(args = None):
# First of all try to find Text within input PDF file
res = findTextWithinPDF(SourceFileUrl, SearchString)
if res:
addImageToPDF(DestinationFile, res['top'], res['left'])
else:
print("No result found!")
def findTextWithinPDF(sourceFile, searchText):
# Prepare URL for PDF text search API call
# See documentation: https://app.pdf.co/documentation/api/1.0/pdf/find.html
retVal = dict()
# Prepare requests params as JSON
# See documentation: https://apidocs.pdf.co
parameters = {}
parameters["url"] = sourceFile
parameters["searchString"] = searchText
url = "{}/pdf/find".format(BASE_URL)
# Execute request and get response as JSON
response = requests.post(url, data=parameters, headers={"x-api-key": API_KEY})
if (response.status_code == 200):
json = response.json()
if json["error"] == False:
# print(json)
if json["body"]:
retVal['top'] = json["body"][0]['top']
retVal['left'] = json["body"][0]['left']
else:
# Show service reported error
print(json["message"])
else:
print(f"Request error: {response.status_code} {response.reason}")
return retVal
def addImageToPDF(destinationFile, top, left):
"""Add image using PDF.co Web API"""
# Prepare requests params as JSON
# See documentation: https://apidocs.pdf.co
parameters = {}
parameters["name"] = os.path.basename(destinationFile)
parameters["password"] = Password
parameters["pages"] = Pages
parameters["url"] = SourceFileUrl
parameters["type"] = Type
parameters["x"] = top + 300
parameters["y"] = left
parameters["width"] = Width
parameters["height"] = Height
parameters["urlimage"] = ImageUrl
# Prepare URL for 'PDF Edit' API request
url = "{}/pdf/edit/add".format(BASE_URL)
# Execute request and get response as JSON
response = requests.post(url, data=parameters, headers={"x-api-key": API_KEY})
if (response.status_code == 200):
json = response.json()
if json["error"] == False:
# Get URL of result file
resultFileUrl = json["url"]
# Download result file
r = requests.get(resultFileUrl, stream=True)
with open(destinationFile, 'wb') as file:
for chunk in r:
file.write(chunk)
print(f"Result file saved as \"{destinationFile}\" file.")
else:
# Show service reported error
print(json["message"])
else:
print(f"Request error: {response.status_code} {response.reason}")
if __name__ == '__main__':
main() | apache-2.0 | 3,477,542,829,167,449,600 | 29.082645 | 110 | 0.623523 | false |
numberly/graphitesend | tests/test_all.py | 1 | 10724 | #!/usr/bin/env python
from graphitesend import graphitesend
import unittest
import socket
import os
class TestAll(unittest.TestCase):
""" Basic tests ( better than nothing ) """
def setUp(self):
""" reset graphitesend """
# Drop any connections or modules that have been setup from other tests
graphitesend.reset()
# Monkeypatch the graphitesend so that it points at a graphite service
# running on one of my ([email protected]) systems.
# graphitesend.default_graphite_server = 'graphite.dansysadm.com'
graphitesend.default_graphite_server = 'localhost'
self.hostname = os.uname()[1]
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server.bind(('localhost', 2003))
self.server.listen(5)
def tearDown(self):
""" reset graphitesend """
# Drop any connections or modules that have been setup from other tests
graphitesend.reset()
try:
self.server.shutdown(socket.SHUT_RD)
self.server.close()
except Exception:
pass
self.server = None
def test_connect_exception_on_badhost(self):
bad_graphite_server = 'missinggraphiteserver.example.com'
graphitesend.default_graphite_server = bad_graphite_server
with self.assertRaises(graphitesend.GraphiteSendException):
graphitesend.init()
def test_set_lowercase_metric_names(self):
g = graphitesend.init(lowercase_metric_names=True)
self.assertEqual(g.lowercase_metric_names, True)
def test_lowercase_metric_names(self):
g = graphitesend.init(lowercase_metric_names=True)
send_data = g.send('METRIC', 1)
self.assertEqual('metric' in send_data, True)
self.assertEqual('METRIC' in send_data, False)
def test_create_graphitesend_instance(self):
g = graphitesend.init()
expected_type = type(graphitesend.GraphiteClient())
g_type = type(g)
self.assertEqual(g_type, expected_type)
def test_monkey_patch_of_graphitehost(self):
g = graphitesend.init()
custom_prefix = g.addr[0]
self.assertEqual(custom_prefix, 'localhost')
def test_fqdn_squash(self):
g = graphitesend.init(fqdn_squash=True)
custom_prefix = g.prefix
expected_results = 'systems.%s.' % self.hostname.replace('.', '_')
self.assertEqual(custom_prefix, expected_results)
def test_noprefix(self):
g = graphitesend.init()
custom_prefix = g.prefix
self.assertEqual(custom_prefix, 'systems.%s.' % self.hostname)
def test_system_name(self):
g = graphitesend.init(system_name='remote_host')
custom_prefix = g.prefix
expected_prefix = 'systems.remote_host.'
self.assertEqual(custom_prefix, expected_prefix)
def test_empty_system_name(self):
g = graphitesend.init(system_name='')
custom_prefix = g.prefix
expected_prefix = 'systems.'
self.assertEqual(custom_prefix, expected_prefix)
def test_no_system_name(self):
g = graphitesend.init(group='foo')
custom_prefix = g.prefix
expected_prefix = 'systems.%s.foo.' % self.hostname
self.assertEqual(custom_prefix, expected_prefix)
def test_prefix(self):
g = graphitesend.init(prefix='custom_prefix')
custom_prefix = g.prefix
self.assertEqual(custom_prefix, 'custom_prefix.%s.' % self.hostname)
def test_prefix_double_dot(self):
g = graphitesend.init(prefix='custom_prefix.')
custom_prefix = g.prefix
self.assertEqual(custom_prefix, 'custom_prefix.%s.' % self.hostname)
def test_prefix_remove_spaces(self):
g = graphitesend.init(prefix='custom prefix')
custom_prefix = g.prefix
self.assertEqual(custom_prefix, 'custom_prefix.%s.' % self.hostname)
def test_set_prefix_group(self):
g = graphitesend.init(prefix='prefix', group='group')
custom_prefix = g.prefix
expected_prefix = 'prefix.%s.group.' % self.hostname
self.assertEqual(custom_prefix, expected_prefix)
def test_set_prefix_group_system(self):
g = graphitesend.init(prefix='prefix', system_name='system',
group='group')
custom_prefix = g.prefix
expected_prefix = 'prefix.system.group.'
self.assertEqual(custom_prefix, expected_prefix)
def test_set_suffix(self):
g = graphitesend.init(suffix='custom_suffix')
custom_suffix = g.suffix
self.assertEqual(custom_suffix, 'custom_suffix')
def test_set_group_prefix(self):
g = graphitesend.init(group='custom_group')
expected_prefix = "systems.%s.custom_group." % self.hostname
custom_prefix = g.prefix
self.assertEqual(custom_prefix, expected_prefix)
def test_default_prefix(self):
g = graphitesend.init()
expected_prefix = "systems.%s." % self.hostname
custom_prefix = g.prefix
self.assertEqual(custom_prefix, expected_prefix)
def test_leave_suffix(self):
g = graphitesend.init()
default_suffix = g.suffix
self.assertEqual(default_suffix, '')
def test_clean_metric(self):
g = graphitesend.init()
#
metric_name = g.clean_metric_name('test(name)')
self.assertEqual(metric_name, 'test_name')
#
metric_name = g.clean_metric_name('test name')
self.assertEqual(metric_name, 'test_name')
#
metric_name = g.clean_metric_name('test name')
self.assertEqual(metric_name, 'test__name')
def test_reset(self):
graphitesend.init()
graphitesend.reset()
graphite_instance = graphitesend._module_instance
self.assertEqual(graphite_instance, None)
def test_force_failure_on_send(self):
graphite_instance = graphitesend.init()
graphite_instance.disconnect()
with self.assertRaises(graphitesend.GraphiteSendException):
graphite_instance.send('metric', 0)
def test_force_unknown_failure_on_send(self):
graphite_instance = graphitesend.init()
graphite_instance.socket = None
with self.assertRaises(graphitesend.GraphiteSendException):
graphite_instance.send('metric', 0)
def test_send_list_metric_value(self):
graphite_instance = graphitesend.init(prefix='test', system_name='local')
response = graphite_instance.send_list([('metric', 1)])
self.assertEqual('long message: test.local.metric 1' in response, True)
self.assertEqual('1.00000' in response, True)
def test_send_list_metric_value_single_timestamp(self):
# Make sure it can handle custom timestamp
graphite_instance = graphitesend.init(prefix='test')
response = graphite_instance.send_list([('metric', 1)], timestamp=1)
# self.assertEqual('sent 23 long message: test.metric' in response,
# True)
self.assertEqual('1.00000' in response, True)
self.assertEqual(response.endswith('1\n'), True)
def test_send_list_metric_value_timestamp(self):
graphite_instance = graphitesend.init(prefix='test')
# Make sure it can handle custom timestamp
response = graphite_instance.send_list([('metric', 1, 1)])
# self.assertEqual('sent 23 long message: test.metric' in response,
# True)
self.assertEqual('1.00000' in response, True)
self.assertEqual(response.endswith('1\n'), True)
def test_send_list_metric_value_timestamp_2(self):
graphite_instance = graphitesend.init(prefix='test', system_name='')
# Make sure it can handle custom timestamp
response = graphite_instance.send_list(
[('metric', 1, 1), ('metric', 1, 2)])
# self.assertEqual('sent 46 long message:' in response, True)
self.assertEqual('test.metric 1.000000 1' in response, True)
self.assertEqual('test.metric 1.000000 2' in response, True)
def test_send_list_metric_value_timestamp_3(self):
graphite_instance = graphitesend.init(prefix='test', system_name='')
# Make sure it can handle custom timestamp, fill in the missing with
# the current time.
response = graphite_instance.send_list(
[
('metric', 1, 1),
('metric', 2),
]
)
# self.assertEqual('sent 46 long message:' in response, True)
self.assertEqual('test.metric 1.000000 1' in response, True)
self.assertEqual('test.metric 2.000000 2' not in response, True)
def test_send_list_metric_value_timestamp_default(self):
graphite_instance = graphitesend.init(prefix='test', system_name='bar')
# Make sure it can handle custom timestamp, fill in the missing with
# the current time.
response = graphite_instance.send_list(
[
('metric', 1, 1),
('metric', 2),
],
timestamp='4'
)
# self.assertEqual('sent 69 long message:' in response, True)
self.assertEqual('test.bar.metric 1.000000 1' in response, True)
self.assertEqual('test.bar.metric 2.000000 4' in response, True)
def test_send_list_metric_value_timestamp_default_2(self):
graphite_instance = graphitesend.init(prefix='test', system_name='foo')
# Make sure it can handle custom timestamp, fill in the missing with
# the current time.
(c, addr) = self.server.accept()
response = graphite_instance.send_list(
[
('metric', 1),
('metric', 2, 2),
],
timestamp='4'
)
# self.assertEqual('sent 69 long message:' in response, True)
self.assertEqual('test.foo.metric 1.000000 4' in response, True)
self.assertEqual('test.foo.metric 2.000000 2' in response, True)
sent_on_socket = c.recv(69)
self.assertEqual('test.foo.metric 1.000000 4' in sent_on_socket, True)
self.assertEqual('test.foo.metric 2.000000 2' in sent_on_socket, True)
# self.server.shutdown(socket.SHUT_RD)
# self.server.close()
def test_send_value_as_string(self):
# Make sure it can handle custom timestamp
graphite_instance = graphitesend.init(prefix='')
response = graphite_instance.send("metric", "1", "1")
self.assertEqual('1.00000' in response, True)
print response
self.assertEqual(response.endswith('1\n'), True)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -6,224,074,902,658,010,000 | 39.014925 | 81 | 0.631854 | false |
reedessick/pointy-Poisson | stripRawUnsafe.py | 1 | 1801 | #!/usr/bin/python
usage = "stripRawUnsafe.py unsafe.txt interesting.txt"
description = "reads in a list of unsafe channels from unsafe.txt. If these are not \"raw\" channel names, it converts them to that form. I then reads in a channel list from interesting.txt and performs a filter based on the unsafe channels. Channels not flagged as unsafe are printed to stdout while channels flagged as unsafe are printed to stderr"
author = "[email protected]"
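# Example invocation (redirection is optional; as described above, channels
# not flagged as unsafe go to stdout and unsafe ones go to stderr):
#   python stripRawUnsafe.py unsafe.txt interesting.txt > safe.txt 2> flagged.txt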
import sys
from collections import defaultdict
from optparse import OptionParser
#-------------------------------------------------
parser = OptionParser(usage=usage, description=description)
opts, args = parser.parse_args()
if len(args)!=2:
raise ValueError("Please supply exactly 2 input arguments\n%s"%(usage))
unsafe, interesting = args
#-------------------------------------------------
### read in unsafe channel list
file_obj = open(unsafe, "r")
unsafe_chans = defaultdict(set)
for chan in file_obj:
chan = chan.strip()
if chan[2] == "-": ### interpret at KW channel name -> convert!
chan = chan.split("_")
ifo, chan = chan[0], "%s"%("_".join(chan[1:-2]))
else:
ifo, chan = chan.split(":")
unsafe_chans[ifo].add( chan )
file_obj.close()
#-------------------------------------------------
### read in interesting channel list and parse
file_obj = open(interesting, "r")
for channel in file_obj:
channel = channel.strip()
chan = channel
if chan[2] == "-": ### interpret at KW channel name -> convert!
chan = chan.split("_")
ifo, chan = chan[0], "%s"%("_".join(chan[1:-2]))
else:
ifo, chan = chan.split(":")
if chan in unsafe_chans[ifo]:
print >> sys.stderr, channel
else:
print >> sys.stdout, channel
file_obj.close()
| mit | -4,477,093,486,662,943,000 | 33.634615 | 350 | 0.60633 | false |
terasaur/tstracker | mqclient/src/tstracker/stats_db.py | 1 | 2493 | #
# Copyright 2012 ibiblio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import terasaur.db.mongodb_db as mongodb_db
import pymongo
from datetime import datetime
import pytz
"""
Functions for storing and retrieving torrent statistics data. See
torrent_stats module for details about data model.
"""
STATS_CONTROL_COLLECTION = 'stats_control'
STATS_DATA_MINUTE = 'stats_minute'
STATS_DATA_HOUR = 'stats_hour'
STATS_DATA_DAY = 'stats_day'
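# Illustrative call pattern for the control-value helpers below (the key name
# matches one seeded by initialize(); the timestamp is only an example):
#   set_control_value('last_incremental', datetime(1970, 1, 1, tzinfo=pytz.utc))
#   last_run = get_control_value('last_incremental')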
def get_control_value(key):
result = mongodb_db.get(STATS_CONTROL_COLLECTION, {'_id': key})
if result:
return result['v']
else:
return None
def set_control_value(key, value):
query = {'_id': key}
data = {"$set": {'v': value}}
mongodb_db.update(STATS_CONTROL_COLLECTION, query, data)
def get_conn():
return mongodb_db.get_db_conn()
def get_minute_stats(torrent):
return _get_stats(STATS_DATA_MINUTE, torrent)
def get_hour_stats(torrent):
return _get_stats(STATS_DATA_HOUR, torrent)
def get_day_stats(torrent):
return _get_stats(STATS_DATA_DAY, torrent)
def _get_stats(timeframe, torrent):
conn = get_conn()
db = conn[mongodb_db.DB_PARAMS['db_name']]
res = db[timeframe].find({'ih':torrent.info_hash}).sort('ih')
return res
def initialize():
conn = mongodb_db.get_db_conn()
db = conn[mongodb_db.DB_PARAMS['db_name']]
# info hash index
db[STATS_DATA_MINUTE].ensure_index('info_hash')
db[STATS_DATA_HOUR].ensure_index('info_hash')
db[STATS_DATA_DAY].ensure_index('info_hash')
# control keys
_initialize_date(db, 'last_incremental')
_initialize_date(db, 'last_capture_minute')
_initialize_date(db, 'last_capture_hour')
_initialize_date(db, 'last_capture_day')
conn.end_request()
def _initialize_date(db, key):
value = get_control_value(key)
if value is None:
zero_date = datetime(1970, 1, 1, 0, 0, 0, 0, pytz.utc)
data = {'_id': key, 'v': zero_date}
db[STATS_CONTROL_COLLECTION].save(data)
| apache-2.0 | 6,713,933,945,272,915,000 | 29.777778 | 74 | 0.686723 | false |
ESSS/qmxgraph | qmxgraph/widget.py | 1 | 23408 | from __future__ import absolute_import
import json
import os
import weakref
from PyQt5.QtCore import QDataStream, QIODevice, QObject, Qt, pyqtSignal
from PyQt5.QtGui import QPainter
from PyQt5.QtWidgets import QDialog, QGridLayout, QShortcut, QSizePolicy, \
QWidget, QStyleOption, QStyle
from qmxgraph import constants, render
from qmxgraph.api import QmxGraphApi
from qmxgraph.configuration import GraphOptions, GraphStyles
from ._web_view import QWebViewWithDragDrop
# Some ugliness to successfully build the doc on ReadTheDocs...
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if not on_rtd:
from qmxgraph import resource_mxgraph, resource_qmxgraph # noqa
class QmxGraph(QWidget):
"""
A graph widget that is actually an web view using as backend mxGraph_,
a very feature rich JS graph library which is also used as backend to
the powerful Google Drive's draw.io widget.
**Tags**
Tags don't have any impact or influence on QmxGraph features. It is just a
feature so client code can associate custom data with cells created in a
graph.
Tags can be helpful, for instance, to be able to infer an
application-specific type of a dragged & dropped new cell. When added cell
events are handled, client code can just query tags to know this
information. Without tags, it would need to infer based on unreliable
heuristics like current style or label.
An important observation is that tag values are *always* strings. If a
value of other type is used it will raise an error.
**Debug/Inspection**
It is possible to open a web inspector for underlying graph drawing page by
typing `F12` with widget focused.
.. _mxGraph: https://jgraph.github.io/mxgraph/
"""
# Signal fired when underlying web view finishes loading. Argument
# indicates if loaded successfully.
loadFinished = pyqtSignal(bool)
def __init__(
self,
options=None,
styles=None,
stencils=tuple(),
auto_load=True,
parent=None,
):
"""
:param qmxgraph.configuration.GraphOptions|None options: Features
enabled in graph drawing widget. If none given, uses defaults.
:param qmxgraph.configuration.GraphStyles|None styles: Additional
styles made available for graph drawing widget besides mxGraph's
default ones. If none given only mxGraph defaults are available.
:param iterable[str] stencils: A sequence of XMLs available in Qt
resource collections. Each XML must respect format defined by
mxGraph (see
https://jgraph.github.io/mxgraph/docs/js-api/files/shape/mxStencil-js.html#mxStencil
and
https://jgraph.github.io/mxgraph/javascript/examples/stencils.xml
for reference).
:param bool auto_load: If should load page as soon as widget is
initialized.
:param QWidget|None parent: Parent widget.
"""
QWidget.__init__(self, parent)
self._own_path = ':/qmxgraph'
self._mxgraph_path = ':/mxgraph'
if options is None:
options = GraphOptions()
self._options = options
if styles is None:
styles = GraphStyles(styles={})
self._styles = styles
self._stencils = stencils
# Web view fills whole widget area
self._layout = QGridLayout(self)
self._layout.setContentsMargins(0, 0, 0, 0) # no margin to web view
self._web_view = QWebViewWithDragDrop()
self._web_view.setSizePolicy(
QSizePolicy.Expanding, QSizePolicy.Expanding)
# Starts disabled, only enable once finished loading page (as user
# interaction before that would be unsafe)
        # TODO: widget remains with a disabled appearance even after enabled
# self.setEnabled(False)
self._layout.addWidget(self._web_view, 0, 0, 1, 1)
self._error_bridge = None
self._events_bridge = None
self._drag_drop_handler = None
# Similar to a browser, QmxGraph widget is going to allow inspection by
# typing F12
self._inspector_dialog = None
inspector_shortcut = QShortcut(self)
inspector_shortcut.setKey("F12")
inspector_shortcut.activated.connect(self.toggle_inspector)
self._execute_on_load_finished()
self._api = QmxGraphApi(graph=self)
self._web_view.on_drag_enter_event.connect(self._on_drag_enter)
self._web_view.on_drag_move_event.connect(self._on_drag_move)
self._web_view.on_drop_event.connect(self._on_drop)
self._double_click_bridge = _DoubleClickBridge()
self._popup_menu_bridge = _PopupMenuBridge()
if auto_load:
self._load_graph_page()
def paintEvent(self, paint_event):
"""
A simple override to the `QWidget.paintEvent` required soo the QSS
rules have effect over `QWidget` subclasses.
From: http://doc.qt.io/qt-5/stylesheet-reference.html#qwidget-widget
:type paint_event: PyQt5.QtGui.QPaintEvent
"""
opt = QStyleOption()
opt.initFrom(self)
p = QPainter(self)
self.style().drawPrimitive(QStyle.PE_Widget, opt, p, self)
def load(self):
"""
Load graph drawing page, if not yet loaded.
"""
        if not self.is_loaded() and not self._web_view.is_loading():
self._load_graph_page()
def is_loaded(self):
"""
:rtype: bool
:return: Is graph page already loaded?
"""
        # If graph initialization failed and it isn't running, do not
        # consider it loaded, as the graph and its API aren't safe for use
return self._web_view.is_loaded() and \
self._web_view.eval_js('graphs.isRunning()')
def blank(self):
"""
Blanks the graph drawing page, effectively clearing/unloading currently
displayed graph.
"""
if self._inspector_dialog:
self._inspector_dialog.close()
self._inspector_dialog = None
self._web_view.blank()
def set_error_bridge(self, bridge):
"""
Redirects errors on JavaScript code from graph drawing widget to
bridge.
:param ErrorHandlingBridge bridge: Handler for errors.
"""
self._error_bridge = bridge
if self.is_loaded():
self._web_view.add_to_js_window('bridge_error_handler', bridge)
def set_events_bridge(self, bridge):
"""
Redirects events fired by graph on JavaScript code to Python/Qt side
by using a bridge.
:param EventsBridge bridge: Bridge with event handlers.
"""
self._events_bridge = bridge
if self.is_loaded():
self._web_view.add_to_js_window('bridge_events_handler', bridge)
# Bind all known Python/Qt event handlers to JavaScript events
self.api.on_cells_added('bridge_events_handler.on_cells_added')
self.api.on_cells_removed('bridge_events_handler.on_cells_removed')
self.api.on_label_changed('bridge_events_handler.on_label_changed')
self.api.on_selection_changed(
'bridge_events_handler.on_selection_changed')
self.api.on_terminal_changed(
'bridge_events_handler.on_terminal_changed')
self.api.on_terminal_with_port_changed(
'bridge_events_handler.on_terminal_with_port_changed')
self.api.on_view_update(
'bridge_events_handler.on_view_update')
self.api.on_cells_bounds_changed(
'bridge_events_handler.on_cells_bounds_changed')
def set_double_click_handler(self, handler):
"""
Set the handler used for double click in cells of graph.
        Unlike other event handlers, double click is exclusive to a single
        handler. This follows the underlying mxGraph implementation, which
        works in this manner with the likely intention of enforcing a single
        side effect when a cell is double clicked.
:param callable|None handler: Handler that receives double clicked
cell id as only argument. If None it disconnects double click
handler from graph.
"""
self._set_private_bridge_handler(
self._double_click_bridge.on_double_click,
handler=handler,
setter=self._set_double_click_bridge,
)
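    # A hedged sketch of a double click handler (hypothetical client code):
    #
    #     def on_double_click(cell_id):
    #         print('double clicked cell', cell_id)
    #
    #     widget.set_double_click_handler(on_double_click)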
def set_popup_menu_handler(self, handler):
"""
Set the handler used for popup menu (i.e. right-click) in cells of
graph.
        Unlike other event handlers, popup menu is exclusive to a single
        handler. This follows the underlying mxGraph implementation, which
        works in this manner with the likely intention of enforcing a single
        side effect when a cell is right-clicked.
:param callable|None handler: Handler that receives, respectively, id
of cell that was right-clicked, X coordinate in screen coordinates
and Y coordinate in screen coordinates as its three arguments. If
None it disconnects handler from graph.
"""
self._set_private_bridge_handler(
self._popup_menu_bridge.on_popup_menu,
handler=handler,
setter=self._set_popup_menu_bridge,
)
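    # Likewise, a sketch of a popup menu handler (hypothetical client code;
    # coordinates arrive in screen coordinates, as documented above):
    #
    #     def on_popup_menu(cell_id, x, y):
    #         print('right clicked cell', cell_id, 'at', (x, y))
    #
    #     widget.set_popup_menu_handler(on_popup_menu)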
@property
def api(self):
"""
:rtype: qmxgraph.api.QmxGraphApi
:return: Proxy to API to manipulate graph.
"""
return self._api
# Web inspector -----------------------------------------------------------
def show_inspector(self):
"""
Show web inspector bound to QmxGraph page.
"""
if not self._inspector_dialog:
from PyQt5.QtWebKit import QWebSettings
QWebSettings.globalSettings().setAttribute(
QWebSettings.DeveloperExtrasEnabled, True)
dialog = self._inspector_dialog = QDialog(self)
dialog.setWindowTitle("Web Inspector")
dialog.setWindowFlags(
dialog.windowFlags() | Qt.WindowMaximizeButtonHint)
dialog.resize(800, 600)
layout = QGridLayout(dialog)
layout.setContentsMargins(0, 0, 0, 0) # no margin to web view
from PyQt5.QtWebKitWidgets import QWebInspector
inspector = QWebInspector(dialog)
inspector.setSizePolicy(
QSizePolicy.Expanding, QSizePolicy.Expanding)
inspector.setPage(self.inner_web_view().page())
inspector.setVisible(True)
layout.addWidget(inspector)
self._inspector_dialog.show()
def hide_inspector(self):
"""
Hide web inspector bound to QmxGraph page.
"""
if not self._inspector_dialog:
return
self._inspector_dialog.hide()
def toggle_inspector(self):
"""
Toggle visibility state of web inspector bound to QmxGraph page.
"""
if not self._inspector_dialog or \
not self._inspector_dialog.isVisible():
self.show_inspector()
else:
self.hide_inspector()
# Accessors recommended for debugging/testing only ------------------------
def inner_web_view(self):
"""
:rtype: QWebViewWithDragDrop
:return: Web view widget showing graph drawing page.
"""
return self._web_view
# Overridden events -------------------------------------------------------
def resizeEvent(self, event):
if self.is_loaded():
# Whenever graph widget is resized, it is going to resize
# underlying graph in JS to fit widget as well as possible.
width = event.size().width()
height = event.size().height()
self.api.resize_container(width, height)
event.ignore()
# Protected plumbing methods ----------------------------------------------
def _load_graph_page(self):
"""
Loads the graph drawing page in Qt's web view widget.
"""
mxgraph_path = self._mxgraph_path
own_path = self._own_path
html = render.render_embedded_html(
options=self._options,
styles=self._styles,
stencils=self._stencils,
mxgraph_path=mxgraph_path,
own_path=own_path,
)
from PyQt5.QtCore import QUrl
self._web_view.setHtml(html, baseUrl=QUrl('qrc:/'))
def _execute_on_load_finished(self):
"""
Several actions must be delayed until page finishes loading to take
effect.
"""
self_ref = weakref.ref(self)
def post_load(ok):
self_ = self_ref()
if not self_:
return
if ok:
                # TODO: widget remains w/ disabled appearance even after enabled
# Allow user to interact with page again
# self_._web_view.setEnabled(True)
# There is a chance error handler is set before loaded. If so,
# register it on JS once page finishes loading.
if self_._error_bridge:
self_.set_error_bridge(self_._error_bridge)
if self_._events_bridge:
self_.set_events_bridge(self_._events_bridge)
self_._set_double_click_bridge()
self_._set_popup_menu_bridge()
width = self_.width()
height = self_.height()
self_.api.resize_container(width, height)
self_.loadFinished.emit(bool(ok and self_.is_loaded()))
self._web_view.loadFinished.connect(post_load)
def _on_drag_enter(self, event):
"""
:type event: QDragEnterEvent
"""
self._approve_only_dd_mime_type(event)
def _on_drag_move(self, event):
"""
:type event: QDragMoveEvent
"""
self._approve_only_dd_mime_type(event)
def _approve_only_dd_mime_type(self, event):
"""
Only approve events that contain QmxGraph's drag&drop MIME type.
:type event: QDragEnterEvent|QDragMoveEvent
"""
data = event.mimeData().data(constants.QGRAPH_DD_MIME_TYPE)
if not data.isNull():
event.acceptProposedAction()
else:
event.ignore()
def _on_drop(self, event):
"""
        Adds to the graph the contents read from the drop event's MIME data.
        Note that new vertices are added centered at the current mouse
        position.
:type event: QDropEvent
"""
data = event.mimeData().data(constants.QGRAPH_DD_MIME_TYPE)
if not data.isNull():
data_stream = QDataStream(data, QIODevice.ReadOnly)
parsed = json.loads(data_stream.readString().decode('utf8'))
# Refer to `mime.py` for docs about format
version = parsed['version']
if version not in (1, 2):
raise ValueError(
"Unsupported version of QmxGraph MIME data: {}".format(
version))
x = event.pos().x()
y = event.pos().y()
if version in (1, 2):
vertices = parsed.get('vertices', [])
scale = self.api.get_zoom_scale()
for v in vertices:
# place vertices with an offset so their center falls
# in the event point.
vertex_x = x + (v['dx'] - v['width'] * 0.5) * scale
vertex_y = y + (v['dy'] - v['height'] * 0.5) * scale
self.api.insert_vertex(
x=vertex_x,
y=vertex_y,
width=v['width'],
height=v['height'],
label=v['label'],
style=v.get('style', None),
tags=v.get('tags', {}),
)
if version in (2,):
decorations = parsed.get('decorations', [])
for v in decorations:
self.api.insert_decoration(
x=x,
y=y,
width=v['width'],
height=v['height'],
label=v['label'],
style=v.get('style', None),
tags=v.get('tags', {}),
)
event.acceptProposedAction()
else:
event.ignore()
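    # For reference, a sketch of a payload the code above would accept
    # (inferred from the parsing logic; see `mime.py` for the authoritative
    # format):
    #
    #     {
    #         "version": 2,
    #         "vertices": [
    #             {"dx": 0, "dy": 0, "width": 64, "height": 64,
    #              "label": "A", "tags": {"type": "pump"}}
    #         ],
    #         "decorations": []
    #     }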
def _set_double_click_bridge(self):
"""
Redirects double click events fired by graph on JavaScript code to
Python/Qt side by using a private bridge.
"""
if self.is_loaded():
bridge = self._double_click_bridge
self._web_view.add_to_js_window(
'bridge_double_click_handler', bridge)
self.api.set_double_click_handler(
'bridge_double_click_handler.on_double_click')
def _set_popup_menu_bridge(self):
"""
Redirects popup menu (i.e. right click) events fired by graph on
JavaScript code to Python/Qt side by using a private bridge.
"""
if self.is_loaded():
bridge = self._popup_menu_bridge
self._web_view.add_to_js_window(
'bridge_popup_menu_handler', bridge)
self.api.set_popup_menu_handler(
'bridge_popup_menu_handler.on_popup_menu')
def _set_private_bridge_handler(self, bridge_signal, handler, setter):
"""
        Helper method to set handler for private bridges like the ones used
        for double click and popup menu events.
:param pyqtSignal bridge_signal: A Qt signal in bridge object.
:param callable|None handler: Handler of signal. If None it
disconnects handler from graph.
:param callable setter: Internal setter method used to set bridge in
QmxGraph object, only if already loaded.
"""
try:
bridge_signal.disconnect()
except TypeError:
# It fails if tries to disconnect without any handler connected.
pass
if handler:
bridge_signal.connect(handler)
if self.is_loaded():
setter()
class ErrorHandlingBridge(QObject):
"""
    Error handler on JavaScript side will use the `on_error` signal to
    communicate to Python any error that may have happened.
Client code must connect to signal and handle messages in whatever manner
desired.
"""
# JavaScript client code emits this signal whenever an error happens
#
# Arguments:
# msg: str
# url: str
# line: int
# column: int
on_error = pyqtSignal(str, str, int, int, name='on_error')
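# A possible way to wire this bridge, as a sketch (the lambda below is
# hypothetical client code):
#
#     error_bridge = ErrorHandlingBridge()
#     error_bridge.on_error.connect(
#         lambda msg, url, line, column: print(msg, url, line, column))
#     widget.set_error_bridge(error_bridge)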
class EventsBridge(QObject):
"""
A bridge object between Python/Qt and JavaScript that provides a series
of signals that are connected to events fired on JavaScript.
:ivar pyqtSignal on_cells_removed: JavaScript client code emits this
signal when cells are removed from graph. Arguments:
- cell_ids: QVariantList
:ivar pyqtSignal on_cells_added: JavaScript client code emits this
signal when cells are added to graph. Arguments:
- cell_ids: QVariantList
:ivar pyqtSignal on_label_changed: JavaScript client code emits this
signal when cell is renamed. Arguments:
- cell_id: str
- new_label: str
- old_label: str
:ivar pyqtSignal on_selection_changed: JavaScript client code emits
this signal when the current selection change. Arguments:
- cell_ids: QVariantList
:ivar pyqtSignal on_terminal_changed: JavaScript client code emits
this signal when a cell terminal change. Arguments:
- cell_id: str
- terminal_type: str
- new_terminal_id: str
- old_terminal_id: str
:ivar pyqtSignal on_terminal_with_port_changed: JavaScript client code emits
this signal when a cell terminal change with port information. Arguments:
- cell_id: str
- terminal_type: str
- new_terminal_id: str
- new_terminal_port_id: str
- old_terminal_id: str
- old_terminal_port_id: str
:ivar pyqtSignal on_view_update: JavaScript client code emits this
signal when the view is updated. Arguments:
- graph_view: str
- scale_and_translation: QVariantList
:ivar pyqtSignal on_cells_bounds_changed: JavaScript client code emits
        this signal when some cells' bounds change. The argument `dict`
maps the affected `cell_id`s
to :class:`qmxgraph.cell_bounds.CellBounds` dict representations:
- changed_bounds: dict
    Using this object, connecting to events from JavaScript basically becomes
    a matter of using Qt signals.
.. code-block::
def on_cells_added_handler(cell_ids):
print(f'added {cell_ids}')
def on_terminal_changed_handler(
cell_id, terminal_type, new_terminal_id, old_terminal_id):
print(
f'{terminal_type} of {cell_id} changed from'
f' {old_terminal_id} to {new_terminal_id}'
)
def on_cells_removed_handler(cell_ids):
print(f'removed {cell_ids}')
events_bridge = EventsBridge()
widget = ...
widget.set_events_bridge(events_bridge)
events_bridge.on_cells_added.connect(on_cells_added_handler)
events_bridge.on_cells_removed.connect(on_cells_removed_handler)
events_bridge.on_terminal_changed.connect(on_terminal_changed_handler)
"""
on_cells_removed = pyqtSignal('QVariantList', name='on_cells_removed')
on_cells_added = pyqtSignal('QVariantList', name='on_cells_added')
on_label_changed = pyqtSignal(str, str, str, name='on_label_changed')
on_selection_changed = pyqtSignal(
'QVariantList', name='on_selection_changed')
on_terminal_changed = pyqtSignal(
str, str, str, str, name='on_terminal_changed')
on_terminal_with_port_changed = pyqtSignal(
str, str, str, str, str, str, name='on_terminal_with_port_changed')
on_view_update = pyqtSignal(str, 'QVariantList', name='on_view_update')
on_cells_bounds_changed = pyqtSignal('QVariant', name='on_cells_bounds_changed')
class _DoubleClickBridge(QObject):
"""
A private bridge used for double click events in JavaScript graph.
    It is private so `QmxGraph` can make sure only a single double click
    handler is registered, ensuring it doesn't violate what is stated in the
    `set_double_click_handler` docs of the `api` module.
"""
# Arguments:
# cell_id: str
on_double_click = pyqtSignal(str, name='on_double_click')
class _PopupMenuBridge(QObject):
"""
A private bridge used for popup menu events in JavaScript graph.
    It is private so `QmxGraph` can make sure only a single popup menu handler
    is registered, ensuring it doesn't violate what is stated in the
    `set_popup_menu_handler` docs of the `api` module.
"""
# Arguments:
# cell_id: str
# x: int
# y: int
on_popup_menu = pyqtSignal(str, int, int, name='on_popup_menu')
| mit | -7,056,639,670,278,203,000 | 34.253012 | 96 | 0.600436 | false |
google-research/nisaba | nisaba/brahmic/fst_properties_test.py | 1 | 3617 | # Copyright 2021 Nisaba Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that the sigma FST has the expected set of FST properties."""
import itertools
import pynini
from absl.testing import absltest
from absl.testing import parameterized
from nisaba.brahmic import util as u
from nisaba.utils import test_util
class FstPropertiesTest(parameterized.TestCase,
test_util.FstPropertiesTestCase):
@parameterized.parameters(
itertools.product(
u.SCRIPTS,
(pynini.ACYCLIC, pynini.UNWEIGHTED, pynini.I_DETERMINISTIC,
pynini.NO_EPSILONS, pynini.ACCESSIBLE, pynini.COACCESSIBLE,
pynini.ACCEPTOR)))
def test_sigma_utf8(self, script: str, prop: pynini.FstProperties):
fst = u.OpenFstFromBrahmicFar('sigma', script, token_type='utf8')
self.assertFstCompliesWithProperties(fst, prop)
@parameterized.parameters(
itertools.product(
u.SCRIPTS,
(pynini.CYCLIC, pynini.UNWEIGHTED, pynini.I_DETERMINISTIC,
pynini.NO_EPSILONS, pynini.ACCESSIBLE, pynini.COACCESSIBLE,
pynini.ACCEPTOR), ('byte', 'utf8')))
def test_wellformed(self, script: str, prop: pynini.FstProperties,
token_type: str):
fst = u.OpenFstFromBrahmicFar('wellformed', script, token_type=token_type)
self.assertFstCompliesWithProperties(fst, prop)
@parameterized.parameters(
itertools.product(u.SCRIPTS + ['Brahmic'],
(pynini.UNWEIGHTED, pynini.NO_EPSILONS, pynini.CYCLIC,
pynini.ACCESSIBLE, pynini.COACCESSIBLE),
('byte', 'utf8')))
def test_nfc(self, script: str, prop: pynini.FstProperties, token_type: str):
fst = u.OpenFstFromBrahmicFar('nfc', script, token_type=token_type)
self.assertFstCompliesWithProperties(fst, prop)
@parameterized.parameters(
itertools.product(u.SCRIPTS,
(pynini.UNWEIGHTED, pynini.NO_EPSILONS, pynini.CYCLIC,
pynini.ACCESSIBLE, pynini.COACCESSIBLE),
('byte', 'utf8')))
def test_visual_norm(self, script: str, prop: pynini.FstProperties,
token_type: str):
fst = u.OpenFstFromBrahmicFar('visual_norm', script, token_type=token_type)
self.assertFstCompliesWithProperties(fst, prop)
@parameterized.parameters(
itertools.product(u.FIXED_RULE_SCRIPTS,
(pynini.ACCESSIBLE,
pynini.COACCESSIBLE,
pynini.CYCLIC,
# TODO: Investigate why it is not deterministic
# pynini.I_DETERMINISTIC,
pynini.NO_EPSILONS,
pynini.UNWEIGHTED),
('byte', 'utf8')))
def test_fixed(self,
script: str,
prop: pynini.FstProperties,
token_type: str):
fst = u.OpenFstFromBrahmicFar('fixed', script, token_type=token_type)
self.assertFstCompliesWithProperties(fst, prop)
if __name__ == '__main__':
absltest.main()
| apache-2.0 | 7,504,591,750,468,167,000 | 39.640449 | 79 | 0.641416 | false |