repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
stringlengths 5..92 | stringlengths 4..232 | stringclasses 19 values | stringlengths 4..7 | stringlengths 721..1.04M | stringclasses 15 values | int64 -9,223,277,421,539,062,000..9,223,102,107B | float64 6.51..99.9 | int64 15..997 | float64 0.25..0.97 | bool 1 class
saeedghsh/SSRR13 | Andreas/slam6d/3rdparty/lastools/ArcGIS_toolbox/scripts/lasground.py | 2 | 6337 | #
# lasground.py
#
# (c) 2012, Martin Isenburg
# LASSO - rapid tools to catch reality
#
# uses lasground.exe to extract the bare earth by classifying LIDAR
# points into ground (class = 2) and non-ground points (class = 1).
#
# The LiDAR input can be in LAS/LAZ/BIN/TXT/SHP/... format.
# The LiDAR output can be in LAS/LAZ/BIN/TXT format.
#
# for licensing details see http://rapidlasso.com/download/LICENSE.txt
#
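# A command line assembled by this wrapper typically looks like the following
# (an illustrative sketch only: the flags mirror the options handled below,
# and the file names and paths are hypothetical):
#
#   C:\lastools\bin\lasground.exe -v -i tile.laz -town -fine -olaz -odir C:\out
#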
import sys, os, arcgisscripting, subprocess
def return_classification(classification):
if (classification == "created, never classified (0)"):
return "0"
if (classification == "unclassified (1)"):
return "1"
if (classification == "ground (2)"):
return "2"
if (classification == "low vegetation (3)"):
return "3"
if (classification == "medium vegetation (4)"):
return "4"
if (classification == "high vegetation (5)"):
return "5"
if (classification == "building (6)"):
return "6"
if (classification == "low point (7)"):
return "7"
if (classification == "keypoint (8)"):
return "8"
if (classification == "water (9)"):
return "9"
if (classification == "high point (10)"):
return "10"
if (classification == "(11)"):
return "11"
if (classification == "overlap point (12)"):
return "12"
if (classification == "(13)"):
return "13"
if (classification == "(14)"):
return "14"
if (classification == "(15)"):
return "15"
if (classification == "(16)"):
return "16"
if (classification == "(17)"):
return "17"
if (classification == "(18)"):
return "18"
return "unknown"
def check_output(command,console):
if console == True:
process = subprocess.Popen(command)
else:
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
output,error = process.communicate()
returncode = process.poll()
return returncode,output
### create the geoprocessor object
gp = arcgisscripting.create(9.3)
### report that something is happening
gp.AddMessage("Starting lasground ...")
### get number of arguments
argc = len(sys.argv)
### report arguments (for debug)
#gp.AddMessage("Arguments:")
#for i in range(0, argc):
# gp.AddMessage("[" + str(i) + "]" + sys.argv[i])
### get the path to the LAStools binaries
lastools_path = os.path.dirname(os.path.dirname(os.path.dirname(sys.argv[0])))+"\\bin"
### check if path exists
if os.path.exists(lastools_path) == False:
gp.AddMessage("Cannot find .\lastools\bin at " + lastools_path)
sys.exit(1)
else:
gp.AddMessage("Found " + lastools_path + " ...")
### create the full path to the lasground executable
lasground_path = lastools_path+"\\lasground.exe"
### check if executable exists
if os.path.exists(lasground_path) == False:
gp.AddMessage("Cannot find lasground.exe at " + lasground_path)
sys.exit(1)
else:
gp.AddMessage("Found " + lasground_path + " ...")
### create the command string for lasground.exe
command = [lasground_path]
### maybe use '-verbose' option
if sys.argv[argc-1] == "true":
command.append("-v")
### add input LiDAR
command.append("-i")
command.append(sys.argv[1])
### maybe horizontal feet
if sys.argv[2] == "true":
command.append("-feet")
### maybe vertical feet
if sys.argv[3] == "true":
command.append("-elevation_feet")
### what type of terrain do we have
if sys.argv[4] == "city or warehouses":
command.append("-city")
elif sys.argv[4] == "towns or flats":
command.append("-town")
elif sys.argv[4] == "metropolis":
command.append("-metro")
### what granularity should we operate with
if sys.argv[5] == "fine":
command.append("-fine")
elif sys.argv[5] == "extra fine":
command.append("-extra_fine")
elif sys.argv[5] == "ultra fine":
command.append("-ultra_fine")
### maybe we should ignore/preserve some existing classifications when classifying
if sys.argv[6] != "#":
command.append("-ignore_class")
command.append(return_classification(sys.argv[6]))
### maybe we should ignore/preserve some more existing classifications when classifying
if sys.argv[7] != "#":
command.append("-ignore_class")
command.append(return_classification(sys.argv[7]))
### this is where the output arguments start
out = 8
### maybe an output format was selected
if sys.argv[out] != "#":
if sys.argv[out] == "las":
command.append("-olas")
elif sys.argv[out] == "laz":
command.append("-olaz")
elif sys.argv[out] == "bin":
command.append("-obin")
elif sys.argv[out] == "xyzc":
command.append("-otxt")
command.append("-oparse")
command.append("xyzc")
elif sys.argv[out] == "xyzci":
command.append("-otxt")
command.append("-oparse")
command.append("xyzci")
elif sys.argv[out] == "txyzc":
command.append("-otxt")
command.append("-oparse")
command.append("txyzc")
elif sys.argv[out] == "txyzci":
command.append("-otxt")
command.append("-oparse")
command.append("txyzci")
### maybe an output file name was selected
if sys.argv[out+1] != "#":
command.append("-o")
command.append(sys.argv[out+1])
### maybe an output directory was selected
if sys.argv[out+2] != "#":
command.append("-odir")
command.append(sys.argv[out+2])
### maybe an output appendix was selected
if sys.argv[out+3] != "#":
command.append("-odix")
command.append(sys.argv[out+3])
### report command string
gp.AddMessage("LAStools command line:")
command_length = len(command)
command_string = str(command[0])
for i in range(1, command_length):
command_string = command_string + " " + str(command[i])
gp.AddMessage(command_string)
### run command
returncode,output = check_output(command, False)
### report output of lasground
gp.AddMessage(str(output))
### check return code
if returncode != 0:
gp.AddMessage("Error. lasground failed.")
sys.exit(1)
### report happy end
gp.AddMessage("Success. lasground done.")
| bsd-3-clause | -3,070,467,438,095,268,000 | 28.17619 | 130 | 0.614644 | false |
mgracik/alda | alda/test/test_alda.py | 1 | 4673 | # Copyright (C) 2012 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Author(s): Martin Gracik <[email protected]>
#
import os
import unittest
import alda
# Packages.
BASESYSTEM = set([alda.Package(name='dummy-basesystem', arch=None)])
BASH = set([alda.Package(name='dummy-bash', arch=None)])
class ALDATestCase(unittest.TestCase):
repodir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'repo')
repodict = {'alda-repo': repodir}
def get_alda(self, options=None, arch=None):
alda_ = alda.ALDA(self.repodict, options)
alda_.load_sack(arch=arch)
return alda_
class TestDefault(ALDATestCase):
'''
Options:
greedy=False,
source=True,
selfhosting=False,
debuginfo=True,
fulltree=False,
'''
def setUp(self):
self.alda = self.get_alda(arch='x86_64')
def test_basesystem(self):
self.alda.resolve_dependencies(BASESYSTEM)
self.assertEqual(['dummy-basesystem-10.0-6.noarch', 'dummy-basesystem-10.0-6.src',
'dummy-filesystem-3-2.src', 'dummy-filesystem-3-2.x86_64',
'dummy-setup-2.8.48-1.noarch', 'dummy-setup-2.8.48-1.src'],
sorted(self.alda.installs_as_strings))
def test_bash(self):
self.alda.resolve_dependencies(BASH)
self.assertEqual(['dummy-bash-4.2.24-2.src', 'dummy-bash-4.2.24-2.x86_64',
'dummy-bash-debuginfo-4.2.24-2.x86_64'],
sorted(self.alda.installs_as_strings))
class TestNoSource(ALDATestCase):
def setUp(self):
self.alda = self.get_alda(options=dict(source=False), arch='x86_64')
def test_basesystem(self):
self.alda.resolve_dependencies(BASESYSTEM)
self.assertEqual(['dummy-basesystem-10.0-6.noarch',
'dummy-filesystem-3-2.x86_64',
'dummy-setup-2.8.48-1.noarch'],
sorted(self.alda.installs_as_strings))
def test_bash(self):
self.alda.resolve_dependencies(BASH)
self.assertEqual(['dummy-bash-4.2.24-2.x86_64',
'dummy-bash-debuginfo-4.2.24-2.x86_64'],
sorted(self.alda.installs_as_strings))
class TestSelfHosting(ALDATestCase):
def setUp(self):
self.alda = self.get_alda(options=dict(selfhosting=True), arch='x86_64')
def test_basesystem(self):
self.alda.resolve_dependencies(BASESYSTEM)
self.assertEqual(['dummy-basesystem-10.0-6.noarch', 'dummy-basesystem-10.0-6.src',
'dummy-bash-4.2.24-2.src', 'dummy-bash-4.2.24-2.x86_64',
'dummy-bash-debuginfo-4.2.24-2.x86_64',
'dummy-filesystem-3-2.src', 'dummy-filesystem-3-2.x86_64',
'dummy-setup-2.8.48-1.noarch', 'dummy-setup-2.8.48-1.src'],
sorted(self.alda.installs_as_strings))
class TestNoSourceSelfHosting(ALDATestCase):
def setUp(self):
self.alda = self.get_alda(options=dict(source=False, selfhosting=True), arch='x86_64')
def test_basesystem(self):
self.alda.resolve_dependencies(BASESYSTEM)
self.assertEqual(['dummy-basesystem-10.0-6.noarch',
'dummy-bash-4.2.24-2.x86_64',
'dummy-bash-debuginfo-4.2.24-2.x86_64',
'dummy-filesystem-3-2.x86_64',
'dummy-setup-2.8.48-1.noarch'],
sorted(self.alda.installs_as_strings))
class TestNoDebuginfo(ALDATestCase):
def setUp(self):
self.alda = self.get_alda(options=dict(debuginfo=False), arch='x86_64')
def test_bash(self):
self.alda.resolve_dependencies(BASH)
self.assertEqual(['dummy-bash-4.2.24-2.src', 'dummy-bash-4.2.24-2.x86_64'],
sorted(self.alda.installs_as_strings))
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -4,398,122,757,323,748,000 | 34.401515 | 94 | 0.607105 | false |
jhanley634/testing-tools | problem/pixel/volcanic_voxels.py | 1 | 6745 | #! /usr/bin/env python
# Copyright 2020 John Hanley.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# The software is provided "AS IS", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall
# the authors or copyright holders be liable for any claim, damages or
# other liability, whether in an action of contract, tort or otherwise,
# arising from, out of or in connection with the software or the use or
# other dealings in the software.
from collections import namedtuple
from operator import itemgetter
from typing import List
import random
import numpy as np
Voxel = namedtuple('Voxel', 'x y z')
class Voxels:
"""Turns text input into a list of Voxels."""
def __init__(self, text: str):
depth = 1 + int(max(text))
lines = text.splitlines()
height = len(lines)
width = max(map(len, lines))
self.model = np.zeros((width, height, depth), int)
voxels = []
for j, line in enumerate(lines):
y = len(lines) - j # Origin is at lower left.
for x, ch in enumerate(line):
if ch.isnumeric(): # We ignore ocean pixels.
assert ch > '0', (ch, line)
for z in range(1, int(ch) + 1):
voxel = Voxel(x, y, z)
self.model[voxel] = True
voxels.append(voxel)
random.shuffle(voxels)
self.voxels = voxels
def render(self) -> str:
for voxel in self.voxels:
assert self.model[voxel], voxel
width, height, depth = self.model.shape
return '\n'.join(self._raster(height - 1 - y) # origin LL
for y in range(height))
def _raster(self, y) -> str:
width = self.model.shape[0]
return ''.join(self._depth_val(x, y)
for x in range(width))
def _depth_val(self, x, y) -> str:
"""Returns blank for ocean, or 1..3 for coast..mountain."""
depth = self.model.shape[2]
val = ' ' # Ocean, by default.
for z in range(depth):
if self.model[(x, y, z)]:
val = str(z)
return val
class PrintedModel:
def __init__(self, voxels: List[Voxel]):
self.model = self._get_zeros(voxels)
self._cur = voxels[0] # 3-D print head position
self.elapsed = 1 # Unit cost to move print head to initial voxel.
self._print(voxels)
def _print(self, voxels: List[Voxel]) -> None:
for voxel in voxels:
self._verify_feasible(*voxel)
self.elapsed += _manhattan_distance(self._cur, voxel)
self.model[voxel] = self.elapsed
self._cur = voxel
def _verify_feasible(self, x, y, z):
"""Ensure there is a foundation to print a mountain top upon."""
for z1 in range(1, z):
if not self.model[(x, y, z1)]:
raise ValueError(f'No support for ({x}, {y}, {z})')
@staticmethod
def _get_zeros(voxels: List[Voxel]):
assert len(voxels)
width = 1 + max(map(itemgetter(0), voxels))
height = 1 + max(map(itemgetter(1), voxels))
depth = 1 + max(map(itemgetter(2), voxels))
return np.zeros((width, height, depth), int)
def render(self):
height = self.model.shape[1]
return '\n'.join(self._raster(height - 1 - y, bool(y % 2))
for y in range(height))
def _raster(self, y, first_bold=False):
bold = first_bold
raster = []
for x in range(self.model.shape[0]):
raster.append(self._cell(x, y, bold))
bold = not bold
return ''.join(raster)
def _cell(self, x, y, bold):
cell = '..' # ocean
for z in range(self.model.shape[2]):
if self.model[(x, y, z)]:
elapsed = self.model[(x, y, z)]
cell = f'{elapsed % 100:02d}'
if bold:
esc = chr(27)
cell = f'{esc}[1m{cell}{esc}[0m'
return cell
def three_d_print(voxels: List[Voxel]) -> str:
pm = PrintedModel(voxels)
return pm.elapsed, pm.model
def _manhattan_distance(a: Voxel, b: Voxel) -> int:
return (abs(a.x - b.x)
+ abs(a.y - b.y)
+ abs(a.z - b.z))
def xyz(coord):
return coord
def xzy(coord):
return coord.x, coord.z, coord.y
def yxz(coord):
return coord.y, coord.x, coord.z
def yzx(coord):
return coord.y, coord.z, coord.x
def zyx(coord):
return tuple(reversed(coord))
def zxy(coord):
return coord.z, coord.x, coord.y
islands = Voxels("""
1
111 1121
1112211 11223211
1112233211 112321
122211 13
1211 1 1 11
1 1211 12321
1123211 121
1121 1
11
""")
# Name these islands:
# A B C D
if __name__ == '__main__':
t1, out1 = three_d_print(sorted(islands.voxels, key=xyz))
t2, out2 = three_d_print(sorted(islands.voxels, key=xzy))
t3, out3 = three_d_print(sorted(islands.voxels, key=zxy))
t4, out4 = three_d_print(sorted(islands.voxels, key=yxz))
t5, out5 = three_d_print(sorted(islands.voxels, key=yzx))
t6, out6 = three_d_print(sorted(islands.voxels, key=zyx))
# output: 246 246 406 542 760 827 False False False False False
print(t1, t2, t3, t4, t5, t6,
np.array_equal(out1, out2),
np.array_equal(out1, out3),
np.array_equal(out1, out4),
np.array_equal(out1, out5),
np.array_equal(out1, out6))
# print(three_d_print(islands.voxels)) # fails due to No Support
pm = PrintedModel(sorted(islands.voxels))
print(pm.render())
# volcanic voxels
#
# Some volcanic islands are depicted above.
# A 3-D printer will create a model of them.
# The input of (x, y, z) voxels is now in a randomly permuted order.
# Write a function that puts the voxels in "better than naïve" order.
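#
# One "better than naive" ordering, sketched below (an illustration, not the
# author's intended answer): print layer by layer so every voxel already rests
# on support, and snake through each layer, alternating x direction from row
# to row, to reduce print-head travel.
def serpentine_order(voxels):
    # Lower layers first guarantees support; the alternating x key gives a
    # boustrophedon sweep within each layer.
    return sorted(voxels, key=lambda v: (v.z, v.y, v.x if v.y % 2 == 0 else -v.x))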
| mit | 140,680,134,709,253,020 | 31.897561 | 76 | 0.575326 | false |
gjhiggins/sprox | tests/test_fillerbase.py | 1 | 3808 | from sprox.fillerbase import FillerBase, TableFiller, EditFormFiller, AddFormFiller, FormFiller, ConfigBaseError
from sprox.test.base import setup_database, sorted_user_columns, SproxTest, User, Example
from nose.tools import raises, eq_
session = None
engine = None
connection = None
trans = None
def setup():
global session, engine, metadata, trans
session, engine, metadata = setup_database()
class UserFiller(TableFiller):
__entity__ = User
class TestFillerBase(SproxTest):
def setup(self):
super(TestFillerBase, self).setup()
class UserFiller(FillerBase):
__entity__ = User
self.filler = UserFiller(session)
def test_get_value(self):
value = self.filler.get_value()
assert value =={}, value
class TestTableFiller(SproxTest):
def setup(self):
super(TestTableFiller, self).setup()
self.filler = UserFiller(session)
def test_create(self):
pass
def test_get_value(self):
value = self.filler.get_value()
eq_(len(value), 1)
value = value[0]
eq_(value['groups'], u'4')
eq_(value['town'], 'Arvada')
def test_get_value_with_binary_field(self):
class ExampleFiller(TableFiller):
__entity__ = Example
example = Example(binary=b'datadatadata')
session.add(example)
filler = ExampleFiller(session)
value = filler.get_value()
eq_(value[0]['binary'], '<file>')
def test_get_list_data_value_array_values(self):
r = self.filler._get_list_data_value(User, ['something', 'something else'])
assert r == ['something', 'something else'], r
@raises(ConfigBaseError)
def test_count_without_get(self):
self.filler.get_count()
def test_count(self):
self.filler.get_value()
c = self.filler.get_count()
assert c == 1, c
def test_possible_field_name_dict(self):
class UserFiller(TableFiller):
__entity__ = User
__possible_field_names__ = {'groups': 'group_name'}
filler = UserFiller(session)
value = filler.get_value()
eq_(value[0]['groups'], '5')
def test_possible_field_name_list(self):
class UserFiller(TableFiller):
__entity__ = User
__possible_field_names__ = ['_name']
filler = UserFiller(session)
value = filler.get_value()
eq_(value[0]['groups'], '4')
def test_possible_field_name_default(self):
class UserFiller(TableFiller):
__entity__ = User
__possible_field_names__ = {}
filler = UserFiller(session)
value = filler.get_value()
eq_(value[0]['groups'], '5')
class TestEditFormFiller(SproxTest):
def setup(self):
super(TestEditFormFiller, self).setup()
self.filler = EditFormFiller(session)
self.filler.__entity__ = User
def test_create(self):
pass
def test_get_value(self):
value = self.filler.get_value(values={'user_id':1})
eq_(value['groups'], [5])
eq_(value['town'], 1)
def test_get_value_method(self):
class FillerWithMethod(EditFormFiller):
__entity__ = User
def town(self, obj):
return 'Unionville'
filler = FillerWithMethod(session)
value = filler.get_value(values={'user_id':1})
assert value['town']== 'Unionville', value['town']
class TestAddFormFiller(SproxTest):
def setup(self):
super(TestAddFormFiller, self).setup()
self.filler = AddFormFiller(session)
self.filler.__entity__ = User
def test_create(self):
pass
def test_get_value(self):
value = self.filler.get_value(values={'user_id':1})
eq_(value['user_id'], 1)
| mit | 5,949,031,857,149,141,000 | 28.984252 | 112 | 0.59979 | false |
AlexHill/django | django/db/models/fields/related.py | 1 | 77816 | from operator import attrgetter
from django.db import connection, connections, router, transaction
from django.db.backends import utils
from django.db.models import signals, Q
from django.db.models.fields import (AutoField, Field, IntegerField,
PositiveIntegerField, PositiveSmallIntegerField, FieldDoesNotExist)
from django.db.models.related import RelatedObject, PathInfo
from django.db.models.query import QuerySet
from django.db.models.deletion import CASCADE
from django.utils.encoding import smart_text
from django.utils import six
from django.utils.deprecation import RenameMethodsBase
from django.utils.translation import ugettext_lazy as _
from django.utils.functional import curry, cached_property
from django.core import exceptions
from django import forms
RECURSIVE_RELATIONSHIP_CONSTANT = 'self'
def add_lazy_relation(cls, field, relation, operation):
"""
Adds a lookup on ``cls`` when a related field is defined using a string,
i.e.::
class MyModel(Model):
fk = ForeignKey("AnotherModel")
This string can be:
* RECURSIVE_RELATIONSHIP_CONSTANT (i.e. "self") to indicate a recursive
relation.
* The name of a model (i.e. "AnotherModel") to indicate another model in
the same app.
* An app-label and model name (i.e. "someapp.AnotherModel") to indicate
another model in a different app.
If the other model hasn't yet been loaded -- almost a given if you're using
lazy relationships -- then the relation won't be set up until the
class_prepared signal fires at the end of model initialization.
operation is the work that must be performed once the relation can be resolved.
"""
# Check for recursive relations
if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
app_label = cls._meta.app_label
model_name = cls.__name__
else:
# Look for an "app.Model" relation
if isinstance(relation, six.string_types):
try:
app_label, model_name = relation.split(".")
except ValueError:
# If we can't split, assume a model in current app
app_label = cls._meta.app_label
model_name = relation
else:
# it's actually a model class
app_label = relation._meta.app_label
model_name = relation._meta.object_name
# Try to look up the related model, and if it's already loaded resolve the
# string right away. If get_model returns None, it means that the related
# model isn't loaded yet, so we need to pend the relation until the class
# is prepared.
model = cls._meta.app_cache.get_model(app_label, model_name,
seed_cache=False, only_installed=False)
if model:
operation(field, model, cls)
else:
key = (app_label, model_name)
value = (cls, field, operation)
cls._meta.app_cache.pending_lookups.setdefault(key, []).append(value)
def do_pending_lookups(sender, **kwargs):
"""
Handle any pending relations to the sending model. Sent from class_prepared.
"""
key = (sender._meta.app_label, sender.__name__)
for cls, field, operation in sender._meta.app_cache.pending_lookups.pop(key, []):
operation(field, sender, cls)
signals.class_prepared.connect(do_pending_lookups)
class RelatedField(Field):
def db_type(self, connection):
'''By default a related field will not have a column,
as it relates columns to another table'''
return None
def contribute_to_class(self, cls, name, virtual_only=False):
sup = super(RelatedField, self)
# Store the opts for related_query_name()
self.opts = cls._meta
if hasattr(sup, 'contribute_to_class'):
sup.contribute_to_class(cls, name, virtual_only=virtual_only)
if not cls._meta.abstract and self.rel.related_name:
related_name = self.rel.related_name % {
'class': cls.__name__.lower(),
'app_label': cls._meta.app_label.lower()
}
self.rel.related_name = related_name
other = self.rel.to
if isinstance(other, six.string_types) or other._meta.pk is None:
def resolve_related_class(field, model, cls):
field.rel.to = model
field.do_related_class(model, cls)
add_lazy_relation(cls, self, other, resolve_related_class)
else:
self.do_related_class(other, cls)
def set_attributes_from_rel(self):
self.name = self.name or (self.rel.to._meta.model_name + '_' + self.rel.to._meta.pk.name)
if self.verbose_name is None:
self.verbose_name = self.rel.to._meta.verbose_name
self.rel.set_field_name()
def do_related_class(self, other, cls):
self.set_attributes_from_rel()
self.related = RelatedObject(other, cls, self)
if not cls._meta.abstract:
self.contribute_to_related_class(other, self.related)
def related_query_name(self):
# This method defines the name that can be used to identify this
# related object in a table-spanning query. It uses the lower-cased
# object_name by default, but this can be overridden with the
# "related_name" option.
return self.rel.related_query_name or self.rel.related_name or self.opts.model_name
class RenameRelatedObjectDescriptorMethods(RenameMethodsBase):
renamed_methods = (
('get_query_set', 'get_queryset', DeprecationWarning),
('get_prefetch_query_set', 'get_prefetch_queryset', DeprecationWarning),
)
class SingleRelatedObjectDescriptor(six.with_metaclass(RenameRelatedObjectDescriptorMethods)):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# a single "remote" value, on the class pointed to by a related field.
# In the example "place.restaurant", the restaurant attribute is a
# SingleRelatedObjectDescriptor instance.
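# A sketch of the models behind that example (illustrative only, not part of
# this module):
#
#   class Place(models.Model):
#       name = models.CharField(max_length=50)
#
#   class Restaurant(models.Model):
#       place = models.OneToOneField(Place)
#
# Accessing place.restaurant then resolves through this descriptor.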
def __init__(self, related):
self.related = related
self.cache_name = related.get_cache_name()
@cached_property
def RelatedObjectDoesNotExist(self):
# The exception isn't created at initialization time for the sake of
# consistency with `ReverseSingleRelatedObjectDescriptor`.
return type(
str('RelatedObjectDoesNotExist'),
(self.related.model.DoesNotExist, AttributeError),
{}
)
def is_cached(self, instance):
return hasattr(instance, self.cache_name)
def get_queryset(self, **hints):
return self.related.model._base_manager.db_manager(hints=hints)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is not None:
raise ValueError("Custom queryset can't be used for this lookup.")
rel_obj_attr = attrgetter(self.related.field.attname)
instance_attr = lambda obj: obj._get_pk_val()
instances_dict = dict((instance_attr(inst), inst) for inst in instances)
query = {'%s__in' % self.related.field.name: instances}
qs = self.get_queryset(instance=instances[0]).filter(**query)
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
rel_obj_cache_name = self.related.field.get_cache_name()
for rel_obj in qs:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, rel_obj_cache_name, instance)
return qs, rel_obj_attr, instance_attr, True, self.cache_name
def __get__(self, instance, instance_type=None):
if instance is None:
return self
try:
rel_obj = getattr(instance, self.cache_name)
except AttributeError:
related_pk = instance._get_pk_val()
if related_pk is None:
rel_obj = None
else:
params = {}
for lh_field, rh_field in self.related.field.related_fields:
params['%s__%s' % (self.related.field.name, rh_field.name)] = getattr(instance, rh_field.attname)
try:
rel_obj = self.get_queryset(instance=instance).get(**params)
except self.related.model.DoesNotExist:
rel_obj = None
else:
setattr(rel_obj, self.related.field.get_cache_name(), instance)
setattr(instance, self.cache_name, rel_obj)
if rel_obj is None:
raise self.RelatedObjectDoesNotExist(
"%s has no %s." % (
instance.__class__.__name__,
self.related.get_accessor_name()
)
)
else:
return rel_obj
def __set__(self, instance, value):
# The similarity of the code below to the code in
# ReverseSingleRelatedObjectDescriptor is annoying, but there's a bunch
# of small differences that would make a common base class convoluted.
# If null=True, we can assign null here, but otherwise the value needs
# to be an instance of the related class.
if value is None and self.related.field.null is False:
raise ValueError(
'Cannot assign None: "%s.%s" does not allow null values.' % (
instance._meta.object_name,
self.related.get_accessor_name(),
)
)
elif value is not None and not isinstance(value, self.related.model):
raise ValueError(
'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
value,
instance._meta.object_name,
self.related.get_accessor_name(),
self.related.opts.object_name,
)
)
elif value is not None:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
elif value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
elif value._state.db is not None and instance._state.db is not None:
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
related_pk = tuple(getattr(instance, field.attname) for field in self.related.field.foreign_related_fields)
if None in related_pk:
raise ValueError(
'Cannot assign "%r": "%s" instance isn\'t saved in the database.' %
(value, instance._meta.object_name)
)
# Set the value of the related field to the value of the related object's related field
for index, field in enumerate(self.related.field.local_related_fields):
setattr(value, field.attname, related_pk[index])
# Since we already know what the related object is, seed the related
# object caches now, too. This avoids another db hit if you get the
# object you just set.
setattr(instance, self.cache_name, value)
setattr(value, self.related.field.get_cache_name(), instance)
class ReverseSingleRelatedObjectDescriptor(six.with_metaclass(RenameRelatedObjectDescriptorMethods)):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# a single "remote" value, on the class that defines the related field.
# In the example "choice.poll", the poll attribute is a
# ReverseSingleRelatedObjectDescriptor instance.
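# A sketch of the models behind that example (illustrative only, not part of
# this module):
#
#   class Poll(models.Model):
#       question = models.CharField(max_length=200)
#
#   class Choice(models.Model):
#       poll = models.ForeignKey(Poll)
#
# choice.poll resolves through this descriptor, while the reverse accessor
# poll.choice_set is handled by ForeignRelatedObjectsDescriptor further down.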
def __init__(self, field_with_rel):
self.field = field_with_rel
self.cache_name = self.field.get_cache_name()
@cached_property
def RelatedObjectDoesNotExist(self):
# The exception can't be created at initialization time since the
# related model might not be resolved yet; `rel.to` might still be
# a string model reference.
return type(
str('RelatedObjectDoesNotExist'),
(self.field.rel.to.DoesNotExist, AttributeError),
{}
)
def is_cached(self, instance):
return hasattr(instance, self.cache_name)
def get_queryset(self, **hints):
rel_mgr = self.field.rel.to._default_manager.db_manager(hints=hints)
# If the related manager indicates that it should be used for
# related fields, respect that.
if getattr(rel_mgr, 'use_for_related_fields', False):
return rel_mgr
else:
return QuerySet(self.field.rel.to, hints=hints)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is not None:
raise ValueError("Custom queryset can't be used for this lookup.")
rel_obj_attr = self.field.get_foreign_related_value
instance_attr = self.field.get_local_related_value
instances_dict = dict((instance_attr(inst), inst) for inst in instances)
related_field = self.field.foreign_related_fields[0]
# FIXME: This will need to be revisited when we introduce support for
# composite fields. In the meantime we take this practical approach to
# solve a regression on 1.6 when the reverse manager in hidden
# (related_name ends with a '+'). Refs #21410.
if self.field.rel.is_hidden():
query = {'%s__in' % related_field.name: set(instance_attr(inst)[0] for inst in instances)}
else:
query = {'%s__in' % self.field.related_query_name(): instances}
qs = self.get_queryset(instance=instances[0]).filter(**query)
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
if not self.field.rel.multiple:
rel_obj_cache_name = self.field.related.get_cache_name()
for rel_obj in qs:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, rel_obj_cache_name, instance)
return qs, rel_obj_attr, instance_attr, True, self.cache_name
def __get__(self, instance, instance_type=None):
if instance is None:
return self
try:
rel_obj = getattr(instance, self.cache_name)
except AttributeError:
val = self.field.get_local_related_value(instance)
if None in val:
rel_obj = None
else:
params = dict(
(rh_field.attname, getattr(instance, lh_field.attname))
for lh_field, rh_field in self.field.related_fields)
qs = self.get_queryset(instance=instance)
extra_filter = self.field.get_extra_descriptor_filter(instance)
if isinstance(extra_filter, dict):
params.update(extra_filter)
qs = qs.filter(**params)
else:
qs = qs.filter(extra_filter, **params)
# Assuming the database enforces foreign keys, this won't fail.
rel_obj = qs.get()
if not self.field.rel.multiple:
setattr(rel_obj, self.field.related.get_cache_name(), instance)
setattr(instance, self.cache_name, rel_obj)
if rel_obj is None and not self.field.null:
raise self.RelatedObjectDoesNotExist(
"%s has no %s." % (self.field.model.__name__, self.field.name)
)
else:
return rel_obj
def __set__(self, instance, value):
# If null=True, we can assign null here, but otherwise the value needs
# to be an instance of the related class.
if value is None and self.field.null is False:
raise ValueError(
'Cannot assign None: "%s.%s" does not allow null values.' %
(instance._meta.object_name, self.field.name)
)
elif value is not None and not isinstance(value, self.field.rel.to):
raise ValueError(
'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
value,
instance._meta.object_name,
self.field.name,
self.field.rel.to._meta.object_name,
)
)
elif value is not None:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
elif value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
elif value._state.db is not None and instance._state.db is not None:
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
# If we're setting the value of a OneToOneField to None, we need to clear
# out the cache on any old related object. Otherwise, deleting the
# previously-related object will also cause this object to be deleted,
# which is wrong.
if value is None:
# Look up the previously-related object, which may still be available
# since we've not yet cleared out the related field.
# Use the cache directly, instead of the accessor; if we haven't
# populated the cache, then we don't care - we're only accessing
# the object to invalidate the accessor cache, so there's no
# need to populate the cache just to expire it again.
related = getattr(instance, self.cache_name, None)
# If we've got an old related object, we need to clear out its
# cache. This cache also might not exist if the related object
# hasn't been accessed yet.
if related is not None:
setattr(related, self.field.related.get_cache_name(), None)
# Set the value of the related field
for lh_field, rh_field in self.field.related_fields:
try:
setattr(instance, lh_field.attname, getattr(value, rh_field.attname))
except AttributeError:
setattr(instance, lh_field.attname, None)
# Since we already know what the related object is, seed the related
# object caches now, too. This avoids another db hit if you get the
# object you just set.
setattr(instance, self.cache_name, value)
if value is not None and not self.field.rel.multiple:
setattr(value, self.field.related.get_cache_name(), instance)
def create_foreign_related_manager(superclass, rel_field, rel_model):
class RelatedManager(superclass):
def __init__(self, instance):
super(RelatedManager, self).__init__()
self.instance = instance
self.core_filters = {'%s__exact' % rel_field.name: instance}
self.model = rel_model
def __call__(self, **kwargs):
# We use **kwargs rather than a kwarg argument to enforce the
# `manager='manager_name'` syntax.
manager = getattr(self.model, kwargs.pop('manager'))
manager_class = create_foreign_related_manager(manager.__class__, rel_field, rel_model)
return manager_class(self.instance)
do_not_call_in_templates = True
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[rel_field.related_query_name()]
except (AttributeError, KeyError):
db = self._db or router.db_for_read(self.model, instance=self.instance)
empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls
qs = super(RelatedManager, self).get_queryset()
qs._add_hints(instance=self.instance)
if self._db:
qs = qs.using(self._db)
qs = qs.filter(**self.core_filters)
for field in rel_field.foreign_related_fields:
val = getattr(self.instance, field.attname)
if val is None or (val == '' and empty_strings_as_null):
return qs.none()
qs._known_related_objects = {rel_field: {self.instance.pk: self.instance}}
return qs
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super(RelatedManager, self).get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
rel_obj_attr = rel_field.get_local_related_value
instance_attr = rel_field.get_foreign_related_value
instances_dict = dict((instance_attr(inst), inst) for inst in instances)
query = {'%s__in' % rel_field.name: instances}
queryset = queryset.filter(**query)
# Since we just bypassed this class' get_queryset(), we must manage
# the reverse relation manually.
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, rel_field.name, instance)
cache_name = rel_field.related_query_name()
return queryset, rel_obj_attr, instance_attr, False, cache_name
def add(self, *objs):
objs = list(objs)
db = router.db_for_write(self.model, instance=self.instance)
with transaction.commit_on_success_unless_managed(
using=db, savepoint=False):
for obj in objs:
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected, got %r" %
(self.model._meta.object_name, obj))
setattr(obj, rel_field.name, self.instance)
obj.save()
add.alters_data = True
def create(self, **kwargs):
kwargs[rel_field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
# Update kwargs with the related object that this
# ForeignRelatedObjectsDescriptor knows about.
kwargs[rel_field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
get_or_create.alters_data = True
# remove() and clear() are only provided if the ForeignKey can have a value of null.
if rel_field.null:
def remove(self, *objs, **kwargs):
if not objs:
return
bulk = kwargs.pop('bulk', True)
val = rel_field.get_foreign_related_value(self.instance)
old_ids = set()
for obj in objs:
# Is obj actually part of this descriptor set?
if rel_field.get_local_related_value(obj) == val:
old_ids.add(obj.pk)
else:
raise rel_field.rel.to.DoesNotExist("%r is not related to %r." % (obj, self.instance))
self._clear(self.filter(pk__in=old_ids), bulk)
remove.alters_data = True
def clear(self, **kwargs):
bulk = kwargs.pop('bulk', True)
self._clear(self, bulk)
clear.alters_data = True
def _clear(self, queryset, bulk):
db = router.db_for_write(self.model, instance=self.instance)
queryset = queryset.using(db)
if bulk:
queryset.update(**{rel_field.name: None})
else:
with transaction.commit_on_success_unless_managed(using=db, savepoint=False):
for obj in queryset:
setattr(obj, rel_field.name, None)
obj.save(update_fields=[rel_field.name])
_clear.alters_data = True
return RelatedManager
class ForeignRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ForeignKey pointed at them by
# some other model. In the example "poll.choice_set", the choice_set
# attribute is a ForeignRelatedObjectsDescriptor instance.
def __init__(self, related):
self.related = related # RelatedObject instance
def __get__(self, instance, instance_type=None):
if instance is None:
return self
return self.related_manager_cls(instance)
def __set__(self, instance, value):
manager = self.__get__(instance)
# If the foreign key can support nulls, then completely clear the related set.
# Otherwise, just move the named objects into the set.
if self.related.field.null:
manager.clear()
manager.add(*value)
@cached_property
def related_manager_cls(self):
# Dynamically create a class that subclasses the related model's default
# manager.
return create_foreign_related_manager(
self.related.model._default_manager.__class__,
self.related.field,
self.related.model,
)
def create_many_related_manager(superclass, rel):
"""Creates a manager that subclasses 'superclass' (which is a Manager)
and adds behavior for many-to-many related objects."""
class ManyRelatedManager(superclass):
def __init__(self, model=None, query_field_name=None, instance=None, symmetrical=None,
source_field_name=None, target_field_name=None, reverse=False,
through=None, prefetch_cache_name=None):
super(ManyRelatedManager, self).__init__()
self.model = model
self.query_field_name = query_field_name
source_field = through._meta.get_field(source_field_name)
source_related_fields = source_field.related_fields
self.core_filters = {}
for lh_field, rh_field in source_related_fields:
self.core_filters['%s__%s' % (query_field_name, rh_field.name)] = getattr(instance, rh_field.attname)
self.instance = instance
self.symmetrical = symmetrical
self.source_field = source_field
self.target_field = through._meta.get_field(target_field_name)
self.source_field_name = source_field_name
self.target_field_name = target_field_name
self.reverse = reverse
self.through = through
self.prefetch_cache_name = prefetch_cache_name
self.related_val = source_field.get_foreign_related_value(instance)
if None in self.related_val:
raise ValueError('"%r" needs to have a value for field "%s" before '
'this many-to-many relationship can be used.' %
(instance, source_field_name))
# Even if this relation is not to pk, we require still pk value.
# The wish is that the instance has been already saved to DB,
# although having a pk value isn't a guarantee of that.
if instance.pk is None:
raise ValueError("%r instance needs to have a primary key value before "
"a many-to-many relationship can be used." %
instance.__class__.__name__)
def __call__(self, **kwargs):
# We use **kwargs rather than a kwarg argument to enforce the
# `manager='manager_name'` syntax.
manager = getattr(self.model, kwargs.pop('manager'))
manager_class = create_many_related_manager(manager.__class__, rel)
return manager_class(
model=self.model,
query_field_name=self.query_field_name,
instance=self.instance,
symmetrical=self.symmetrical,
source_field_name=self.source_field_name,
target_field_name=self.target_field_name,
reverse=self.reverse,
through=self.through,
prefetch_cache_name=self.prefetch_cache_name,
)
do_not_call_in_templates = True
def _build_remove_filters(self, removed_vals):
filters = Q(**{self.source_field_name: self.related_val})
# No need to add a subquery condition if removed_vals is a QuerySet without
# filters.
removed_vals_filters = (not isinstance(removed_vals, QuerySet) or
removed_vals._has_filters())
if removed_vals_filters:
filters &= Q(**{'%s__in' % self.target_field_name: removed_vals})
if self.symmetrical:
symmetrical_filters = Q(**{self.target_field_name: self.related_val})
if removed_vals_filters:
symmetrical_filters &= Q(
**{'%s__in' % self.source_field_name: removed_vals})
filters |= symmetrical_filters
return filters
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
qs = super(ManyRelatedManager, self).get_queryset()
qs._add_hints(instance=self.instance)
if self._db:
qs = qs.using(self._db)
return qs._next_is_sticky().filter(**self.core_filters)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super(ManyRelatedManager, self).get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
query = {'%s__in' % self.query_field_name: instances}
queryset = queryset._next_is_sticky().filter(**query)
# M2M: need to annotate the query in order to get the primary model
# that the secondary model was actually related to. We know that
# there will already be a join on the join table, so we can just add
# the select.
# For non-autocreated 'through' models, can't assume we are
# dealing with PK values.
fk = self.through._meta.get_field(self.source_field_name)
join_table = self.through._meta.db_table
connection = connections[queryset.db]
qn = connection.ops.quote_name
queryset = queryset.extra(select=dict(
('_prefetch_related_val_%s' % f.attname,
'%s.%s' % (qn(join_table), qn(f.column))) for f in fk.local_related_fields))
return (queryset,
lambda result: tuple(getattr(result, '_prefetch_related_val_%s' % f.attname) for f in fk.local_related_fields),
lambda inst: tuple(getattr(inst, f.attname) for f in fk.foreign_related_fields),
False,
self.prefetch_cache_name)
# If the ManyToMany relation has an intermediary model,
# the add and remove methods do not exist.
if rel.through._meta.auto_created:
def add(self, *objs):
self._add_items(self.source_field_name, self.target_field_name, *objs)
# If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
if self.symmetrical:
self._add_items(self.target_field_name, self.source_field_name, *objs)
add.alters_data = True
def remove(self, *objs):
self._remove_items(self.source_field_name, self.target_field_name, *objs)
remove.alters_data = True
def clear(self):
db = router.db_for_write(self.through, instance=self.instance)
signals.m2m_changed.send(sender=self.through, action="pre_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db)
filters = self._build_remove_filters(super(ManyRelatedManager, self).get_queryset().using(db))
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(sender=self.through, action="post_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db)
clear.alters_data = True
def create(self, **kwargs):
# This check needs to be done here, since we can't later remove this
# from the method lookup table, as we do with add and remove.
if not self.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use create() on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
db = router.db_for_write(self.instance.__class__, instance=self.instance)
new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
self.add(new_obj)
return new_obj
create.alters_data = True
def get_or_create(self, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = \
super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj)
return obj, created
get_or_create.alters_data = True
def _add_items(self, source_field_name, target_field_name, *objs):
# source_field_name: the PK fieldname in join table for the source object
# target_field_name: the PK fieldname in join table for the target object
# *objs - objects to add. Either object instances, or primary keys of object instances.
# If there aren't any objects, there is nothing to do.
from django.db.models import Model
if objs:
new_ids = set()
for obj in objs:
if isinstance(obj, self.model):
if not router.allow_relation(obj, self.instance):
raise ValueError(
'Cannot add "%r": instance is on database "%s", value is on database "%s"' %
(obj, self.instance._state.db, obj._state.db)
)
fk_val = self.through._meta.get_field(
target_field_name).get_foreign_related_value(obj)[0]
if fk_val is None:
raise ValueError(
'Cannot add "%r": the value for field "%s" is None' %
(obj, target_field_name)
)
new_ids.add(fk_val)
elif isinstance(obj, Model):
raise TypeError(
"'%s' instance expected, got %r" %
(self.model._meta.object_name, obj)
)
else:
new_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
vals = self.through._default_manager.using(db).values_list(target_field_name, flat=True)
vals = vals.filter(**{
source_field_name: self.related_val[0],
'%s__in' % target_field_name: new_ids,
})
new_ids = new_ids - set(vals)
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=self.through, action='pre_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db)
# Add the ones that aren't there already
self.through._default_manager.using(db).bulk_create([
self.through(**{
'%s_id' % source_field_name: self.related_val[0],
'%s_id' % target_field_name: obj_id,
})
for obj_id in new_ids
])
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=self.through, action='post_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db)
def _remove_items(self, source_field_name, target_field_name, *objs):
# source_field_name: the PK colname in join table for the source object
# target_field_name: the PK colname in join table for the target object
# *objs - objects to remove
if not objs:
return
# Check that all the objects are of the right type
old_ids = set()
for obj in objs:
if isinstance(obj, self.model):
fk_val = self.target_field.get_foreign_related_value(obj)[0]
old_ids.add(fk_val)
else:
old_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
# Send a signal to the other end if need be.
signals.m2m_changed.send(sender=self.through, action="pre_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db)
target_model_qs = super(ManyRelatedManager, self).get_queryset()
if target_model_qs._has_filters():
old_vals = target_model_qs.using(db).filter(**{
'%s__in' % self.target_field.related_field.attname: old_ids})
else:
old_vals = old_ids
filters = self._build_remove_filters(old_vals)
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(sender=self.through, action="post_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db)
return ManyRelatedManager
class ManyRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ManyToManyField pointed at them by
# some other model (rather than having a ManyToManyField themselves).
# In the example "publication.article_set", the article_set attribute is a
# ManyRelatedObjectsDescriptor instance.
def __init__(self, related):
self.related = related # RelatedObject instance
@cached_property
def related_manager_cls(self):
# Dynamically create a class that subclasses the related
# model's default manager.
return create_many_related_manager(
self.related.model._default_manager.__class__,
self.related.field.rel
)
def __get__(self, instance, instance_type=None):
if instance is None:
return self
rel_model = self.related.model
manager = self.related_manager_cls(
model=rel_model,
query_field_name=self.related.field.name,
prefetch_cache_name=self.related.field.related_query_name(),
instance=instance,
symmetrical=False,
source_field_name=self.related.field.m2m_reverse_field_name(),
target_field_name=self.related.field.m2m_field_name(),
reverse=True,
through=self.related.field.rel.through,
)
return manager
def __set__(self, instance, value):
if not self.related.field.rel.through._meta.auto_created:
opts = self.related.field.rel.through._meta
raise AttributeError("Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
manager = self.__get__(instance)
manager.clear()
manager.add(*value)
class ReverseManyRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ManyToManyField defined in their
# model (rather than having another model pointed *at* them).
# In the example "article.publications", the publications attribute is a
# ReverseManyRelatedObjectsDescriptor instance.
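# A sketch of the models behind that example (illustrative only, not part of
# this module):
#
#   class Publication(models.Model):
#       title = models.CharField(max_length=30)
#
#   class Article(models.Model):
#       publications = models.ManyToManyField(Publication)
#
# article.publications resolves through this descriptor, while the reverse
# accessor publication.article_set is handled by ManyRelatedObjectsDescriptor
# above.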
def __init__(self, m2m_field):
self.field = m2m_field
@property
def through(self):
# through is provided so that you have easy access to the through
# model (Book.authors.through) for inlines, etc. This is done as
# a property to ensure that the fully resolved value is returned.
return self.field.rel.through
@cached_property
def related_manager_cls(self):
# Dynamically create a class that subclasses the related model's
# default manager.
return create_many_related_manager(
self.field.rel.to._default_manager.__class__,
self.field.rel
)
def __get__(self, instance, instance_type=None):
if instance is None:
return self
manager = self.related_manager_cls(
model=self.field.rel.to,
query_field_name=self.field.related_query_name(),
prefetch_cache_name=self.field.name,
instance=instance,
symmetrical=self.field.rel.symmetrical,
source_field_name=self.field.m2m_field_name(),
target_field_name=self.field.m2m_reverse_field_name(),
reverse=False,
through=self.field.rel.through,
)
return manager
def __set__(self, instance, value):
if not self.field.rel.through._meta.auto_created:
opts = self.field.rel.through._meta
raise AttributeError("Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
manager = self.__get__(instance)
# clear() can change expected output of 'value' queryset, we force evaluation
# of queryset before clear; ticket #19816
value = tuple(value)
manager.clear()
manager.add(*value)
class ForeignObjectRel(object):
def __init__(self, field, to, related_name=None, limit_choices_to=None,
parent_link=False, on_delete=None, related_query_name=None):
try:
to._meta
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, six.string_types), "'to' must be either a model, a model name or the string %r" % RECURSIVE_RELATIONSHIP_CONSTANT
self.field = field
self.to = to
self.related_name = related_name
self.related_query_name = related_query_name
self.limit_choices_to = {} if limit_choices_to is None else limit_choices_to
self.multiple = True
self.parent_link = parent_link
self.on_delete = on_delete
def is_hidden(self):
"Should the related object be hidden?"
return self.related_name and self.related_name[-1] == '+'
def get_joining_columns(self):
return self.field.get_reverse_joining_columns()
def get_extra_restriction(self, where_class, alias, related_alias):
return self.field.get_extra_restriction(where_class, related_alias, alias)
def set_field_name(self):
"""
Sets the related field's name; this is not available until later stages
of app loading, so set_field_name is called from
set_attributes_from_rel()
"""
# By default foreign object doesn't relate to any remote field (for
# example custom multicolumn joins currently have no remote field).
self.field_name = None
class ManyToOneRel(ForeignObjectRel):
def __init__(self, field, to, field_name, related_name=None, limit_choices_to=None,
parent_link=False, on_delete=None, related_query_name=None):
super(ManyToOneRel, self).__init__(
field, to, related_name=related_name, limit_choices_to=limit_choices_to,
parent_link=parent_link, on_delete=on_delete, related_query_name=related_query_name)
self.field_name = field_name
def get_related_field(self):
"""
Returns the Field in the 'to' object to which this relationship is
tied.
"""
data = self.to._meta.get_field_by_name(self.field_name)
if not data[2]:
raise FieldDoesNotExist("No related field named '%s'" %
self.field_name)
return data[0]
def set_field_name(self):
self.field_name = self.field_name or self.to._meta.pk.name
class OneToOneRel(ManyToOneRel):
def __init__(self, field, to, field_name, related_name=None, limit_choices_to=None,
parent_link=False, on_delete=None, related_query_name=None):
super(OneToOneRel, self).__init__(field, to, field_name,
related_name=related_name, limit_choices_to=limit_choices_to,
parent_link=parent_link, on_delete=on_delete, related_query_name=related_query_name)
self.multiple = False
class ManyToManyRel(object):
def __init__(self, to, related_name=None, limit_choices_to=None,
symmetrical=True, through=None, db_constraint=True, related_query_name=None):
if through and not db_constraint:
raise ValueError("Can't supply a through model and db_constraint=False")
self.to = to
self.related_name = related_name
self.related_query_name = related_query_name
if limit_choices_to is None:
limit_choices_to = {}
self.limit_choices_to = limit_choices_to
self.symmetrical = symmetrical
self.multiple = True
self.through = through
self.db_constraint = db_constraint
def is_hidden(self):
"Should the related object be hidden?"
return self.related_name and self.related_name[-1] == '+'
def get_related_field(self):
"""
Returns the field in the 'to' object to which this relationship is tied
(this is always the primary key on the target model). Provided for
symmetry with ManyToOneRel.
"""
return self.to._meta.pk
class ForeignObject(RelatedField):
requires_unique_target = True
generate_reverse_relation = True
related_accessor_class = ForeignRelatedObjectsDescriptor
def __init__(self, to, from_fields, to_fields, **kwargs):
self.from_fields = from_fields
self.to_fields = to_fields
if 'rel' not in kwargs:
kwargs['rel'] = ForeignObjectRel(
self, to,
related_name=kwargs.pop('related_name', None),
related_query_name=kwargs.pop('related_query_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
parent_link=kwargs.pop('parent_link', False),
on_delete=kwargs.pop('on_delete', CASCADE),
)
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
super(ForeignObject, self).__init__(**kwargs)
def deconstruct(self):
name, path, args, kwargs = super(ForeignObject, self).deconstruct()
kwargs['from_fields'] = self.from_fields
kwargs['to_fields'] = self.to_fields
if isinstance(self.rel.to, six.string_types):
kwargs['to'] = self.rel.to
else:
kwargs['to'] = "%s.%s" % (self.rel.to._meta.app_label, self.rel.to._meta.object_name)
return name, path, args, kwargs
def resolve_related_fields(self):
if len(self.from_fields) < 1 or len(self.from_fields) != len(self.to_fields):
raise ValueError('Foreign Object from and to fields must be the same non-zero length')
if isinstance(self.rel.to, six.string_types):
            raise ValueError('Related model %r cannot be resolved' % self.rel.to)
related_fields = []
for index in range(len(self.from_fields)):
from_field_name = self.from_fields[index]
to_field_name = self.to_fields[index]
from_field = (self if from_field_name == 'self'
else self.opts.get_field_by_name(from_field_name)[0])
to_field = (self.rel.to._meta.pk if to_field_name is None
else self.rel.to._meta.get_field_by_name(to_field_name)[0])
related_fields.append((from_field, to_field))
return related_fields
@property
def related_fields(self):
if not hasattr(self, '_related_fields'):
self._related_fields = self.resolve_related_fields()
return self._related_fields
@property
def reverse_related_fields(self):
return [(rhs_field, lhs_field) for lhs_field, rhs_field in self.related_fields]
@property
def local_related_fields(self):
return tuple(lhs_field for lhs_field, rhs_field in self.related_fields)
@property
def foreign_related_fields(self):
return tuple(rhs_field for lhs_field, rhs_field in self.related_fields)
def get_local_related_value(self, instance):
return self.get_instance_value_for_fields(instance, self.local_related_fields)
def get_foreign_related_value(self, instance):
return self.get_instance_value_for_fields(instance, self.foreign_related_fields)
@staticmethod
def get_instance_value_for_fields(instance, fields):
ret = []
for field in fields:
# Gotcha: in some cases (like fixture loading) a model can have
# different values in parent_ptr_id and parent's id. So, use
# instance.pk (that is, parent_ptr_id) when asked for instance.id.
opts = instance._meta
if field.primary_key:
possible_parent_link = opts.get_ancestor_link(field.model)
if not possible_parent_link or possible_parent_link.primary_key:
ret.append(instance.pk)
continue
ret.append(getattr(instance, field.attname))
return tuple(ret)
def get_attname_column(self):
attname, column = super(ForeignObject, self).get_attname_column()
return attname, None
def get_joining_columns(self, reverse_join=False):
source = self.reverse_related_fields if reverse_join else self.related_fields
return tuple((lhs_field.column, rhs_field.column) for lhs_field, rhs_field in source)
def get_reverse_joining_columns(self):
return self.get_joining_columns(reverse_join=True)
def get_extra_descriptor_filter(self, instance):
"""
Returns an extra filter condition for related object fetching when
user does 'instance.fieldname', that is the extra filter is used in
the descriptor of the field.
The filter should be either a dict usable in .filter(**kwargs) call or
a Q-object. The condition will be ANDed together with the relation's
joining columns.
A parallel method is get_extra_restriction() which is used in
JOIN and subquery conditions.
"""
return {}
def get_extra_restriction(self, where_class, alias, related_alias):
"""
Returns a pair condition used for joining and subquery pushdown. The
condition is something that responds to as_sql(qn, connection) method.
        Note that currently referring to both the 'alias' and 'related_alias'
will not work in some conditions, like subquery pushdown.
A parallel method is get_extra_descriptor_filter() which is used in
instance.fieldname related object fetching.
"""
return None
def get_path_info(self):
"""
Get path from this field to the related model.
"""
opts = self.rel.to._meta
from_opts = self.model._meta
return [PathInfo(from_opts, opts, self.foreign_related_fields, self, False, True)]
def get_reverse_path_info(self):
"""
Get path from the related model to this field's model.
"""
opts = self.model._meta
from_opts = self.rel.to._meta
pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.rel, not self.unique, False)]
return pathinfos
def get_lookup_constraint(self, constraint_class, alias, targets, sources, lookup_type,
raw_value):
from django.db.models.sql.where import SubqueryConstraint, Constraint, AND, OR
root_constraint = constraint_class()
assert len(targets) == len(sources)
def get_normalized_value(value):
from django.db.models import Model
if isinstance(value, Model):
value_list = []
for source in sources:
# Account for one-to-one relations when sent a different model
while not isinstance(value, source.model) and source.rel:
source = source.rel.to._meta.get_field(source.rel.field_name)
value_list.append(getattr(value, source.attname))
return tuple(value_list)
elif not isinstance(value, tuple):
return (value,)
return value
is_multicolumn = len(self.related_fields) > 1
if (hasattr(raw_value, '_as_sql') or
hasattr(raw_value, 'get_compiler')):
root_constraint.add(SubqueryConstraint(alias, [target.column for target in targets],
[source.name for source in sources], raw_value),
AND)
elif lookup_type == 'isnull':
root_constraint.add(
(Constraint(alias, targets[0].column, targets[0]), lookup_type, raw_value), AND)
elif (lookup_type == 'exact' or (lookup_type in ['gt', 'lt', 'gte', 'lte']
and not is_multicolumn)):
value = get_normalized_value(raw_value)
for index, source in enumerate(sources):
root_constraint.add(
(Constraint(alias, targets[index].column, sources[index]), lookup_type,
value[index]), AND)
elif lookup_type in ['range', 'in'] and not is_multicolumn:
values = [get_normalized_value(value) for value in raw_value]
value = [val[0] for val in values]
root_constraint.add(
(Constraint(alias, targets[0].column, sources[0]), lookup_type, value), AND)
elif lookup_type == 'in':
values = [get_normalized_value(value) for value in raw_value]
for value in values:
value_constraint = constraint_class()
for index, target in enumerate(targets):
value_constraint.add(
(Constraint(alias, target.column, sources[index]), 'exact', value[index]),
AND)
root_constraint.add(value_constraint, OR)
else:
raise TypeError('Related Field got invalid lookup: %s' % lookup_type)
return root_constraint
@property
def attnames(self):
return tuple(field.attname for field in self.local_related_fields)
def get_defaults(self):
return tuple(field.get_default() for field in self.local_related_fields)
def contribute_to_class(self, cls, name, virtual_only=False):
super(ForeignObject, self).contribute_to_class(cls, name, virtual_only=virtual_only)
setattr(cls, self.name, ReverseSingleRelatedObjectDescriptor(self))
def contribute_to_related_class(self, cls, related):
# Internal FK's - i.e., those with a related name ending with '+' -
# and swapped models don't get a related descriptor.
if not self.rel.is_hidden() and not related.model._meta.swapped:
setattr(cls, related.get_accessor_name(), self.related_accessor_class(related))
if self.rel.limit_choices_to:
cls._meta.related_fkey_lookups.append(self.rel.limit_choices_to)
class ForeignKey(ForeignObject):
empty_strings_allowed = False
default_error_messages = {
'invalid': _('%(model)s instance with pk %(pk)r does not exist.')
}
description = _("Foreign Key (type determined by related field)")
def __init__(self, to, to_field=None, rel_class=ManyToOneRel,
db_constraint=True, **kwargs):
try:
to._meta.object_name.lower()
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, six.string_types), "%s(%r) is invalid. First parameter to ForeignKey must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
else:
assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name)
# For backwards compatibility purposes, we need to *try* and set
# the to_field during FK construction. It won't be guaranteed to
# be correct until contribute_to_class is called. Refs #12190.
to_field = to_field or (to._meta.pk and to._meta.pk.name)
if 'db_index' not in kwargs:
kwargs['db_index'] = True
self.db_constraint = db_constraint
kwargs['rel'] = rel_class(
self, to, to_field,
related_name=kwargs.pop('related_name', None),
related_query_name=kwargs.pop('related_query_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
parent_link=kwargs.pop('parent_link', False),
on_delete=kwargs.pop('on_delete', CASCADE),
)
super(ForeignKey, self).__init__(to, ['self'], [to_field], **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(ForeignKey, self).deconstruct()
del kwargs['to_fields']
del kwargs['from_fields']
# Handle the simpler arguments
if self.db_index:
del kwargs['db_index']
else:
kwargs['db_index'] = False
if self.db_constraint is not True:
kwargs['db_constraint'] = self.db_constraint
if self.rel.on_delete is not CASCADE:
kwargs['on_delete'] = self.rel.on_delete
# Rel needs more work.
if self.rel.field_name:
kwargs['to_field'] = self.rel.field_name
return name, path, args, kwargs
@property
def related_field(self):
return self.foreign_related_fields[0]
def get_reverse_path_info(self):
"""
Get path from the related model to this field's model.
"""
opts = self.model._meta
from_opts = self.rel.to._meta
pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.rel, not self.unique, False)]
return pathinfos
def validate(self, value, model_instance):
if self.rel.parent_link:
return
super(ForeignKey, self).validate(value, model_instance)
if value is None:
return
using = router.db_for_read(model_instance.__class__, instance=model_instance)
qs = self.rel.to._default_manager.using(using).filter(
**{self.rel.field_name: value}
)
qs = qs.complex_filter(self.rel.limit_choices_to)
if not qs.exists():
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'model': self.rel.to._meta.verbose_name, 'pk': value},
)
def get_attname(self):
return '%s_id' % self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_validator_unique_lookup_type(self):
return '%s__%s__exact' % (self.name, self.related_field.name)
def get_default(self):
"Here we check if the default value is an object and return the to_field if so."
field_default = super(ForeignKey, self).get_default()
if isinstance(field_default, self.rel.to):
return getattr(field_default, self.related_field.attname)
return field_default
def get_db_prep_save(self, value, connection):
if value == '' or value is None:
return None
else:
return self.related_field.get_db_prep_save(value,
connection=connection)
def value_to_string(self, obj):
if not obj:
# In required many-to-one fields with only one available choice,
# select that one available choice. Note: For SelectFields
# we have to check that the length of choices is *2*, not 1,
# because SelectFields always have an initial "blank" value.
if not self.blank and self.choices:
choice_list = self.get_choices_default()
if len(choice_list) == 2:
return smart_text(choice_list[1][0])
return super(ForeignKey, self).value_to_string(obj)
def contribute_to_related_class(self, cls, related):
super(ForeignKey, self).contribute_to_related_class(cls, related)
if self.rel.field_name is None:
self.rel.field_name = cls._meta.pk.name
def formfield(self, **kwargs):
db = kwargs.pop('using', None)
if isinstance(self.rel.to, six.string_types):
raise ValueError("Cannot create form field for %r yet, because "
"its related model %r has not been loaded yet" %
(self.name, self.rel.to))
defaults = {
'form_class': forms.ModelChoiceField,
'queryset': self.rel.to._default_manager.using(db).complex_filter(self.rel.limit_choices_to),
'to_field_name': self.rel.field_name,
}
defaults.update(kwargs)
return super(ForeignKey, self).formfield(**defaults)
def db_type(self, connection):
# The database column type of a ForeignKey is the column type
# of the field to which it points. An exception is if the ForeignKey
# points to an AutoField/PositiveIntegerField/PositiveSmallIntegerField,
# in which case the column type is simply that of an IntegerField.
# If the database needs similar types for key fields however, the only
# thing we can do is making AutoField an IntegerField.
rel_field = self.related_field
if (isinstance(rel_field, AutoField) or
(not connection.features.related_fields_match_type and
isinstance(rel_field, (PositiveIntegerField,
PositiveSmallIntegerField)))):
return IntegerField().db_type(connection=connection)
return rel_field.db_type(connection=connection)
def db_parameters(self, connection):
return {"type": self.db_type(connection), "check": []}
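# ---------------------------------------------------------------------------
# Usage sketch (hedged): a minimal, hypothetical pair of models illustrating
# what the ForeignKey machinery above wires up. This belongs in an
# application's models.py, not in this module, and the model names are
# invented for illustration only.
#
#     class Author(models.Model):
#         name = models.CharField(max_length=100)
#
#     class Book(models.Model):
#         author = models.ForeignKey(Author, related_name='books',
#                                    on_delete=models.CASCADE)
#
# contribute_to_class() places a ReverseSingleRelatedObjectDescriptor on
# Book.author, while contribute_to_related_class() gives Author the reverse
# accessor (Author.books here, via related_accessor_class).
# ---------------------------------------------------------------------------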
class OneToOneField(ForeignKey):
"""
A OneToOneField is essentially the same as a ForeignKey, with the exception
    that it always carries a "unique" constraint with it and the reverse relation
always returns the object pointed to (since there will only ever be one),
rather than returning a list.
"""
related_accessor_class = SingleRelatedObjectDescriptor
description = _("One-to-one relationship")
def __init__(self, to, to_field=None, **kwargs):
kwargs['unique'] = True
super(OneToOneField, self).__init__(to, to_field, OneToOneRel, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(OneToOneField, self).deconstruct()
if "unique" in kwargs:
del kwargs['unique']
return name, path, args, kwargs
def formfield(self, **kwargs):
if self.rel.parent_link:
return None
return super(OneToOneField, self).formfield(**kwargs)
def save_form_data(self, instance, data):
if isinstance(data, self.rel.to):
setattr(instance, self.name, data)
else:
setattr(instance, self.attname, data)
def create_many_to_many_intermediary_model(field, klass):
from django.db import models
managed = True
if isinstance(field.rel.to, six.string_types) and field.rel.to != RECURSIVE_RELATIONSHIP_CONSTANT:
to_model = field.rel.to
to = to_model.split('.')[-1]
def set_managed(field, model, cls):
field.rel.through._meta.managed = model._meta.managed or cls._meta.managed
add_lazy_relation(klass, field, to_model, set_managed)
elif isinstance(field.rel.to, six.string_types):
to = klass._meta.object_name
to_model = klass
managed = klass._meta.managed
else:
to = field.rel.to._meta.object_name
to_model = field.rel.to
managed = klass._meta.managed or to_model._meta.managed
name = '%s_%s' % (klass._meta.object_name, field.name)
if field.rel.to == RECURSIVE_RELATIONSHIP_CONSTANT or to == klass._meta.object_name:
from_ = 'from_%s' % to.lower()
to = 'to_%s' % to.lower()
else:
from_ = klass._meta.model_name
to = to.lower()
meta = type('Meta', (object,), {
'db_table': field._get_m2m_db_table(klass._meta),
'managed': managed,
'auto_created': klass,
'app_label': klass._meta.app_label,
'db_tablespace': klass._meta.db_tablespace,
'unique_together': (from_, to),
'verbose_name': '%(from)s-%(to)s relationship' % {'from': from_, 'to': to},
'verbose_name_plural': '%(from)s-%(to)s relationships' % {'from': from_, 'to': to},
'app_cache': field.model._meta.app_cache,
})
# Construct and return the new class.
return type(str(name), (models.Model,), {
'Meta': meta,
'__module__': klass.__module__,
from_: models.ForeignKey(klass, related_name='%s+' % name, db_tablespace=field.db_tablespace, db_constraint=field.rel.db_constraint),
to: models.ForeignKey(to_model, related_name='%s+' % name, db_tablespace=field.db_tablespace, db_constraint=field.rel.db_constraint)
})
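# ---------------------------------------------------------------------------
# Hedged sketch of what create_many_to_many_intermediary_model() builds for a
# hypothetical ``members = ManyToManyField(Person)`` declared on a ``Group``
# model in an app whose tables are prefixed 'app_' -- roughly the hand-written
# equivalent of:
#
#     class Group_members(models.Model):
#         group = models.ForeignKey(Group, related_name='Group_members+')
#         person = models.ForeignKey(Person, related_name='Group_members+')
#
#         class Meta:
#             db_table = 'app_group_members'
#             auto_created = Group
#             unique_together = ('group', 'person')
#
# For a recursive (self-referencing) m2m the two columns are named with
# 'from_'/'to_' prefixes instead so they stay distinct.
# ---------------------------------------------------------------------------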
class ManyToManyField(RelatedField):
description = _("Many-to-many relationship")
def __init__(self, to, db_constraint=True, **kwargs):
try:
assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name)
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, six.string_types), "%s(%r) is invalid. First parameter to ManyToManyField must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
# Class names must be ASCII in Python 2.x, so we forcibly coerce it here to break early if there's a problem.
to = str(to)
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
kwargs['rel'] = ManyToManyRel(to,
related_name=kwargs.pop('related_name', None),
related_query_name=kwargs.pop('related_query_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
symmetrical=kwargs.pop('symmetrical', to == RECURSIVE_RELATIONSHIP_CONSTANT),
through=kwargs.pop('through', None),
db_constraint=db_constraint,
)
self.db_table = kwargs.pop('db_table', None)
if kwargs['rel'].through is not None:
assert self.db_table is None, "Cannot specify a db_table if an intermediary model is used."
super(ManyToManyField, self).__init__(**kwargs)
def deconstruct(self):
name, path, args, kwargs = super(ManyToManyField, self).deconstruct()
# Handle the simpler arguments
if self.rel.db_constraint is not True:
kwargs['db_constraint'] = self.rel.db_constraint
if "help_text" in kwargs:
del kwargs['help_text']
# Rel needs more work.
if isinstance(self.rel.to, six.string_types):
kwargs['to'] = self.rel.to
else:
kwargs['to'] = "%s.%s" % (self.rel.to._meta.app_label, self.rel.to._meta.object_name)
return name, path, args, kwargs
def _get_path_info(self, direct=False):
"""
        Called by both direct and indirect m2m traversal.
"""
pathinfos = []
int_model = self.rel.through
linkfield1 = int_model._meta.get_field_by_name(self.m2m_field_name())[0]
linkfield2 = int_model._meta.get_field_by_name(self.m2m_reverse_field_name())[0]
if direct:
join1infos = linkfield1.get_reverse_path_info()
join2infos = linkfield2.get_path_info()
else:
join1infos = linkfield2.get_reverse_path_info()
join2infos = linkfield1.get_path_info()
pathinfos.extend(join1infos)
pathinfos.extend(join2infos)
return pathinfos
def get_path_info(self):
return self._get_path_info(direct=True)
def get_reverse_path_info(self):
return self._get_path_info(direct=False)
def get_choices_default(self):
return Field.get_choices(self, include_blank=False)
def _get_m2m_db_table(self, opts):
"Function that can be curried to provide the m2m table name for this relation"
if self.rel.through is not None:
return self.rel.through._meta.db_table
elif self.db_table:
return self.db_table
else:
return utils.truncate_name('%s_%s' % (opts.db_table, self.name),
connection.ops.max_name_length())
def _get_m2m_attr(self, related, attr):
"Function that can be curried to provide the source accessor or DB column name for the m2m table"
cache_attr = '_m2m_%s_cache' % attr
if hasattr(self, cache_attr):
return getattr(self, cache_attr)
for f in self.rel.through._meta.fields:
if hasattr(f, 'rel') and f.rel and f.rel.to == related.model:
setattr(self, cache_attr, getattr(f, attr))
return getattr(self, cache_attr)
def _get_m2m_reverse_attr(self, related, attr):
"Function that can be curried to provide the related accessor or DB column name for the m2m table"
cache_attr = '_m2m_reverse_%s_cache' % attr
if hasattr(self, cache_attr):
return getattr(self, cache_attr)
found = False
for f in self.rel.through._meta.fields:
if hasattr(f, 'rel') and f.rel and f.rel.to == related.parent_model:
if related.model == related.parent_model:
# If this is an m2m-intermediate to self,
# the first foreign key you find will be
# the source column. Keep searching for
# the second foreign key.
if found:
setattr(self, cache_attr, getattr(f, attr))
break
else:
found = True
else:
setattr(self, cache_attr, getattr(f, attr))
break
return getattr(self, cache_attr)
def value_to_string(self, obj):
data = ''
if obj:
qs = getattr(obj, self.name).all()
data = [instance._get_pk_val() for instance in qs]
else:
# In required many-to-many fields with only one available choice,
# select that one available choice.
if not self.blank:
choices_list = self.get_choices_default()
if len(choices_list) == 1:
data = [choices_list[0][0]]
return smart_text(data)
def contribute_to_class(self, cls, name):
# To support multiple relations to self, it's useful to have a non-None
# related name on symmetrical relations for internal reasons. The
# concept doesn't make a lot of sense externally ("you want me to
# specify *what* on my non-reversible relation?!"), so we set it up
# automatically. The funky name reduces the chance of an accidental
# clash.
if self.rel.symmetrical and (self.rel.to == "self" or self.rel.to == cls._meta.object_name):
self.rel.related_name = "%s_rel_+" % name
super(ManyToManyField, self).contribute_to_class(cls, name)
# The intermediate m2m model is not auto created if:
# 1) There is a manually specified intermediate, or
# 2) The class owning the m2m field is abstract.
# 3) The class owning the m2m field has been swapped out.
if not self.rel.through and not cls._meta.abstract and not cls._meta.swapped:
self.rel.through = create_many_to_many_intermediary_model(self, cls)
# Add the descriptor for the m2m relation
setattr(cls, self.name, ReverseManyRelatedObjectsDescriptor(self))
# Set up the accessor for the m2m table name for the relation
self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta)
# Populate some necessary rel arguments so that cross-app relations
# work correctly.
if isinstance(self.rel.through, six.string_types):
def resolve_through_model(field, model, cls):
field.rel.through = model
add_lazy_relation(cls, self, self.rel.through, resolve_through_model)
def contribute_to_related_class(self, cls, related):
# Internal M2Ms (i.e., those with a related name ending with '+')
# and swapped models don't get a related descriptor.
if not self.rel.is_hidden() and not related.model._meta.swapped:
setattr(cls, related.get_accessor_name(), ManyRelatedObjectsDescriptor(related))
# Set up the accessors for the column names on the m2m table
self.m2m_column_name = curry(self._get_m2m_attr, related, 'column')
self.m2m_reverse_name = curry(self._get_m2m_reverse_attr, related, 'column')
self.m2m_field_name = curry(self._get_m2m_attr, related, 'name')
self.m2m_reverse_field_name = curry(self._get_m2m_reverse_attr, related, 'name')
get_m2m_rel = curry(self._get_m2m_attr, related, 'rel')
self.m2m_target_field_name = lambda: get_m2m_rel().field_name
get_m2m_reverse_rel = curry(self._get_m2m_reverse_attr, related, 'rel')
self.m2m_reverse_target_field_name = lambda: get_m2m_reverse_rel().field_name
def set_attributes_from_rel(self):
pass
def value_from_object(self, obj):
"Returns the value of this field in the given model instance."
return getattr(obj, self.attname).all()
def save_form_data(self, instance, data):
setattr(instance, self.attname, data)
def formfield(self, **kwargs):
db = kwargs.pop('using', None)
defaults = {
'form_class': forms.ModelMultipleChoiceField,
'queryset': self.rel.to._default_manager.using(db).complex_filter(self.rel.limit_choices_to)
}
defaults.update(kwargs)
# If initial is passed in, it's a list of related objects, but the
# MultipleChoiceField takes a list of IDs.
if defaults.get('initial') is not None:
initial = defaults['initial']
if callable(initial):
initial = initial()
defaults['initial'] = [i._get_pk_val() for i in initial]
return super(ManyToManyField, self).formfield(**defaults)
def db_type(self, connection):
# A ManyToManyField is not represented by a single column,
# so return None.
return None
def db_parameters(self, connection):
return {"type": None, "check": None}
| bsd-3-clause | -1,930,050,891,207,236,900 | 44.373761 | 228 | 0.599954 | false |
jomivega/ASE4156 | tests/test_stocks.py | 1 | 3024 | """This module is for testing stocks"""
from unittest import mock
from django.test import TestCase
from stocks.models import Stock, DailyStockQuote
import pandas as pd
from yahoo_historical import Fetcher
from authentication.plaid_middleware import PlaidMiddleware
import pytest
class StocksViewTests(TestCase):
"""
Testing Stocks Model
"""
@classmethod
def setup_class(cls):
"""Setting up testing"""
cls._original_init_method = Fetcher.__init__
Fetcher.__init__ = mock.Mock(return_value=None)
PlaidMiddleware.__call__ = lambda self, request: self.get_response(request)
@classmethod
def teardown_class(cls):
"""Teardown testing"""
Fetcher.__init__ = cls._original_init_method
@mock.patch.object(
Fetcher,
'getHistorical',
mock.MagicMock(return_value=pd.DataFrame({
'Close': [1.5, 2.5],
'Date': ["2017-05-05", "2017-05-06"],
}))
)
@pytest.mark.django_db(transaction=True)
def test_api_for_real_stock(self):
"""
Testing adding stock via endpoint, asserting stock is inserted
"""
ticker = "googl"
name = "Google"
data = {'name': name, 'ticker': ticker}
request = self.client.post('/stocks/addstock/', data)
self.assertEqual(request.status_code, 200)
data = Stock.objects.all()
self.assertEqual(len(data), 1)
@mock.patch.object(
Fetcher,
'getHistorical',
mock.MagicMock(side_effect=KeyError('abc'))
)
def test_api_for_invalid_ticker(self):
"""
Testing adding stock via endpoint, asserting stock is inserted but no
data added to DailyStockQuote since ticker is invalid
"""
ticker = "xxx"
name = "Julian"
data = {'name': name, 'ticker': ticker}
request = self.client.post('/stocks/addstock/', data)
self.assertEqual(request.status_code, 500)
data = DailyStockQuote.objects.all()
self.assertEqual(len(data), 0)
def test_api_with_invalid_call(self):
"""
Endpoint only works with POST
"""
request = self.client.get('/stocks/addstock/')
self.assertEqual(request.status_code, 405)
@mock.patch.object(
Fetcher,
'getHistorical',
mock.MagicMock(return_value=pd.DataFrame({
'Close': [1.5, 2.5],
'Date': ["2017-05-05", "2017-05-06"],
}))
)
@pytest.mark.django_db(transaction=True)
def test_fill_quote_history(self):
"""
Filling data for Stock
"""
ticker = "ibm"
name = "IBM"
data = {'name': name, 'ticker': ticker}
request = self.client.get('/stocks/addstock/', data)
stock_id = request.content
data = DailyStockQuote.objects.filter(stock_id=stock_id)
stock_data = Stock.objects.filter(id=stock_id)
self.assertGreater(len(data), 0)
self.assertEqual(len(stock_data), 1)
| apache-2.0 | -3,050,215,387,650,206,700 | 31.170213 | 83 | 0.59623 | false |
erigones/esdc-ce | gui/migrations/0004_alerting_fields_initialization.py | 1 | 1297 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models, transaction
# noinspection PyUnusedLocal
def initialize_alerting_fields(apps, schema_editor):
former_user_profile = apps.get_model('gui', 'UserProfile')
if former_user_profile.objects.count() > 10000:
warning_text = '\n It looks like there is a lot of users in your database and ' \
'it would take a lot of time to update their profiles. This migration is therefore skipped. ' \
'Please perform this operation manually.'
print(warning_text)
else:
with transaction.atomic():
            # Cannot use F() expressions on joined tables
for user_profile in former_user_profile.objects.select_related('user__email').iterator():
user_profile.alerting_email = user_profile.user.email
user_profile.alerting_phone = user_profile.phone
user_profile.alerting_jabber = user_profile.jabber
user_profile.save()
class Migration(migrations.Migration):
dependencies = [
('gui', '0003_add_alerting_user_fields'),
]
operations = [
migrations.RunPython(initialize_alerting_fields, reverse_code=lambda x, y: None)
]
| apache-2.0 | 4,412,531,453,637,234,700 | 39.53125 | 118 | 0.643793 | false |
PaulWay/insights-core | insights/parsers/date.py | 1 | 3048 | """
date - Command
==============
This module provides processing for the output of the ``date`` command.
The specs handled by this command include::
"date" : CommandSpec("/bin/date"),
"date_utc" : CommandSpec("/bin/date --utc"),
Class ``Date`` parses the output of the ``date`` command. Sample output of
this command looks like::
Fri Jun 24 09:13:34 CST 2016
Class ``DateUTC`` parses the output of the ``date --utc`` command. Output is
similar to the ``date`` command except that the `Timezone` column uses UTC.
All classes utilize the same base class ``DateParser`` so the following
examples apply to all classes in this module.
Examples:
>>> from insights.parsers.date import Date, DateUTC
>>> from insights.tests import context_wrap
>>> date_content = "Mon May 30 10:49:14 CST 2016"
>>> shared = {Date: Date(context_wrap(date_content))}
>>> date_info = shared[Date]
>>> date_info.data
'Mon May 30 10:49:14 CST 2016'
>>> date_info.datetime is not None
True
>>> date_info.timezone
'CST'
>>> date_content = "Mon May 30 10:49:14 UTC 2016"
>>> shared = {DateUTC: DateUTC(context_wrap(date_content))}
>>> date_info = shared[DateUTC]
>>> date_info.data
'Mon May 30 10:49:14 UTC 2016'
>>> date_info.datetime
datetime.datetime(2016, 5, 30, 10, 49, 14)
>>> date_info.timezone
'UTC'
"""
import sys
from datetime import datetime
from .. import Parser, parser, get_active_lines
class DateParseException(Exception):
pass
class DateParser(Parser):
"""Base class implementing shared code."""
def parse_content(self, content):
"""
Parses the output of the ``date`` and ``date --utc`` command.
Sample: Fri Jun 24 09:13:34 CST 2016
Sample: Fri Jun 24 09:13:34 UTC 2016
Attributes
----------
datetime: datetime.datetime
A native datetime.datetime of the parsed date string
timezone: str
The string portion of the date string containing the timezone
Raises:
DateParseException: Raised if any exception occurs parsing the
content.
"""
self.data = get_active_lines(content, comment_char="COMMAND>")[0]
parts = self.data.split()
if not len(parts) == 6:
msg = "Expected six date parts. Got [%s]"
raise DateParseException(msg % self.data)
try:
self.timezone = parts[4]
no_tz = ' '.join(parts[:4]) + ' ' + parts[-1]
self.datetime = datetime.strptime(no_tz, '%a %b %d %H:%M:%S %Y')
except:
raise DateParseException(self.data), None, sys.exc_info()[2]
@parser("date")
class Date(DateParser):
"""
Class to parse ``date`` command output.
Sample: Fri Jun 24 09:13:34 CST 2016
"""
pass
@parser("date_utc")
class DateUTC(DateParser):
"""
Class to parse ``date --utc`` command output.
Sample: Fri Jun 24 09:13:34 UTC 2016
"""
pass
| apache-2.0 | -1,107,753,871,417,200,300 | 27.485981 | 77 | 0.603675 | false |
ihciah/xk-database | views/admin.py | 1 | 5958 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Blueprint
from flask import g, request, flash
from flask import render_template, redirect
from models import Student,Account,Course,Xk,Teacher
from forms import SearchStudentFrom,adminProfileForm,UserProfileForm,SearchForm,CourseEditForm,UseraddForm
from utils import get_current_user,require_admin,transt2line,transtea2line
__all__ = ['bp']
bp = Blueprint('admin',__name__)
@bp.route('/',methods=['GET'])
@require_admin
def home():
user_count=Account.query.filter(Account.role!=2).count()
course_count=Course.query.count()
return render_template('admin/admin.html',user_count=user_count,course_count=course_count)
@bp.route('/userlist',methods=['GET','POST'])
@require_admin
def userlist():
if request.method == 'GET':
return render_template('admin/user_search.html')
form = SearchStudentFrom(request.form)
if form.validate():
[result_student,result_teacher]=form.dosearch()
return render_template('admin/user_search_result.html',result_student=result_student,result_teacher=result_teacher)
for fieldName, errorMessages in form.errors.iteritems():
for err in errorMessages:
flash(err)
return render_template('admin/user_search.html')
@bp.route('/profile',methods=['GET','POST'])
@require_admin
def profile():
user = Student.query.get(g.user.username)
if request.method == 'GET':
return render_template('admin/profile.html')
form = adminProfileForm(request.form)
if form.validate():
form.save()
        flash(u"资料成功更新!")  # "Profile updated successfully!"
user = Student.query.get(g.user.username)
render_template('admin/profile.html')
for fieldName, errorMessages in form.errors.iteritems():
for err in errorMessages:
flash(err)
return render_template('admin/profile.html')
@bp.route('/stu-course',methods=['GET'])
@require_admin
def stu_course():
    # view a student's enrolled courses; enroll in or drop courses
uid=request.args.get('id')
if uid is None or uid=='':
return redirect("/admin/userlist")
return render_template('admin/stu_course.html',result=Student.query.get(uid),uid=uid)
@bp.route('/user-profile',methods=['GET','POST'])
@require_admin
def user_profile():
    # edit a teacher's or student's profile
if request.method == 'GET':
uid=request.args.get('id')
if uid is None:
return redirect("/admin/userlist")
user=Student.query.get(uid)
if user is None:
user=Teacher.query.get(uid)
if user is None:
return redirect("/admin/userlist")
return render_template('admin/user_profile.html',stu=None,tea=user)
return render_template('admin/user_profile.html',stu=user,tea=None)
form=UserProfileForm(request.form)
if form.validate():
form.save()
        flash(u"资料成功更新!")  # "Profile updated successfully!"
#current_app.logger.debug(3)
for fieldName, errorMessages in form.errors.iteritems():
for err in errorMessages:
flash(err)
#current_app.logger.debug(2)
if form.stuid is not None and form.stuid.data!='':
user=Student.query.get(form.stuid.data)
return render_template('admin/user_profile.html',stu=user,tea=None)
else:
user=Teacher.query.get(form.teaid.data)
return render_template('admin/user_profile.html',stu=None,tea=user)
@bp.route('/course',methods=['GET','POST'])
@require_admin
def course():
if request.method == 'GET':
return render_template('admin/courselist.html')
form = SearchForm(request.form)
if form.validate():
sres=form.search()
return render_template('admin/courselist.html',result=sres)
return render_template('admin/courselist.html')
@bp.route('/course-edit',methods=['GET','POST'])
@require_admin
def course_edit():
if request.method == 'GET':
code=request.args.get('id')
if code is None or code=='':
course=None
times=None
teas=None
            type = 1  # 1: new; 0: edit
else:
type=0
course=Course.query.get(code)
if course is None:
return redirect("/admin/course")
times=transt2line(course.ctime)
teas=transtea2line(course.teacher)
return render_template('admin/course_edit.html',type=type,course=course,times=times,teas=teas)
form = CourseEditForm(request.form)
course=times=teas=None
if form.validate():
course=form.save()
        flash(u"课程保存成功!")  # "Course saved successfully!"
else:
course=Course.query.get(form.code.data)
if course is not None:
times=transt2line(course.ctime)
teas=transtea2line(course.teacher)
for fieldName, errorMessages in form.errors.iteritems():
for err in errorMessages:
flash(err)
return render_template('admin/course_edit.html',type=0,course=course,times=times,teas=teas)
@bp.route('/useradd',methods=['GET', 'POST'])
@require_admin
def signup():
roles={1:"stu",2:"admin",3:"teacher"}
if request.method == 'GET':
return render_template('admin/useradd.html')
form = UseraddForm(request.form)
if form.validate():
uid,type=form.save()
        flash(u"用户添加成功!")  # "User added successfully!"
if type==1 or type==3:
return redirect("/admin/user-profile?id="+uid)
return redirect("/admin/useradd")
for fieldName, errorMessages in form.errors.iteritems():
for err in errorMessages:
flash(err)
return render_template('admin/useradd.html')
@bp.route('/course-stu',methods=['GET'])
@require_admin
def course_stu():
cid=request.args.get('id')
if cid is None or cid=='':
return redirect("/admin/course")
result_student=Student.query.join(Xk, Xk.stuid==Student.stuid).filter(Xk.code==cid).all()
return render_template('admin/course_user.html',result_student=result_student,result_teacher=None,courseid=cid) | gpl-2.0 | -1,314,900,180,352,519,200 | 34.810976 | 123 | 0.65344 | false |
xiang12835/python_web | py2_web2py/web2py/gluon/packages/dal/pydal/representers/postgre.py | 3 | 1368 | from ..adapters.postgres import Postgre, PostgreNew
from .base import SQLRepresenter, JSONRepresenter
from . import representers, before_type, for_type
from ..helpers.serializers import serializers
@representers.register_for(Postgre)
class PostgreRepresenter(SQLRepresenter, JSONRepresenter):
def _make_geoextra(self, field_type):
srid = 4326
geotype, params = field_type[:-1].split('(')
params = params.split(',')
if len(params) >= 2:
schema, srid = params[:2]
return {'srid': srid}
@before_type('geometry')
def geometry_extras(self, field_type):
return self._make_geoextra(field_type)
@for_type('geometry', adapt=False)
def _geometry(self, value, srid):
return "ST_GeomFromText('%s',%s)" % (value, srid)
@before_type('geography')
def geography_extras(self, field_type):
return self._make_geoextra(field_type)
@for_type('geography', adapt=False)
def _geography(self, value, srid):
return "ST_GeogFromText('SRID=%s;%s')" % (srid, value)
@for_type('jsonb', encode=True)
def _jsonb(self, value):
return serializers.json(value)
@representers.register_for(PostgreNew)
class PostgreArraysRepresenter(PostgreRepresenter):
def _listify_elements(self, elements):
return "{" + ",".join(str(el) for el in elements) + "}"
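# Illustrative expected output of the hooks above (hedged sketch derived from
# the format strings; the adapt=False decorators mean these values are emitted
# without further adapter quoting):
#
#   _geometry('POINT(1 2)', srid=4326)   -> "ST_GeomFromText('POINT(1 2)',4326)"
#   _geography('POINT(1 2)', srid=4326)  -> "ST_GeogFromText('SRID=4326;POINT(1 2)')"
#   _listify_elements([1, 2, 3])         -> "{1,2,3}"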
| apache-2.0 | 1,911,755,367,537,962,500 | 32.365854 | 63 | 0.658626 | false |
ccmbioinfo/mugqic_pipelines | bfx/trimmomatic.py | 1 | 3305 | #!/usr/bin/env python
################################################################################
# Copyright (C) 2014, 2015 GenAP, McGill University and Genome Quebec Innovation Centre
#
# This file is part of MUGQIC Pipelines.
#
# MUGQIC Pipelines is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MUGQIC Pipelines is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with MUGQIC Pipelines. If not, see <http://www.gnu.org/licenses/>.
################################################################################
# Python Standard Modules
import logging
import os
# MUGQIC Modules
from core.config import *
from core.job import *
log = logging.getLogger(__name__)
def trimmomatic(
input1,
input2,
paired_output1,
unpaired_output1,
paired_output2,
unpaired_output2,
single_output,
quality_offset,
adapter_file,
trim_log
):
if input2: # Paired end reads
inputs = [input1, input2]
outputs = [paired_output1, unpaired_output1, paired_output2, unpaired_output2]
else: # Single end reads
inputs = [input1]
outputs = [single_output]
headcrop_length = config.param('trimmomatic', 'headcrop_length', required=False, type='posint')
return Job(
inputs,
outputs + [trim_log],
[
['trimmomatic', 'module_java'],
['trimmomatic', 'module_trimmomatic']
],
# CAUTION: Trimmomatic settings order is IMPORTANT!
# FIRST Illuminaclip settings, THEN headcrop length, THEN trailing min quality, THEN minimum length
command="""\
java -XX:ParallelGCThreads=1 -Xmx{ram} -jar $TRIMMOMATIC_JAR {mode} \\
-threads {threads} \\
-phred{quality_offset} \\
{inputs} \\
{outputs} \\
ILLUMINACLIP:{adapter_file}{illumina_clip_settings}{headcrop_length} \\
TRAILING:{trailing_min_quality} \\
MINLEN:{min_length}{tophred33} \\
2> {trim_log}""".format(
ram=config.param('trimmomatic', 'ram'),
mode = "PE" if input2 else "SE",
threads=config.param('trimmomatic', 'threads', type='posint'),
quality_offset=quality_offset if quality_offset == 64 else "33",
inputs=" \\\n ".join(inputs),
outputs=" \\\n ".join(outputs),
adapter_file=adapter_file,
illumina_clip_settings=config.param('trimmomatic', 'illumina_clip_settings'),
headcrop_length=" \\\n HEADCROP:" + str(headcrop_length) if headcrop_length else "",
trailing_min_quality=config.param('trimmomatic', 'trailing_min_quality', type='int'),
min_length=config.param('trimmomatic', 'min_length', type='posint'),
tophred33=" \\\n TOPHRED33" if quality_offset == 64 else "",
trim_log=trim_log
),
removable_files=[paired_output1, unpaired_output1, paired_output2, unpaired_output2, single_output]
)
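# ----------------------------------------------------------------------------
# Usage sketch (hedged): how a pipeline step might build this job for one
# paired-end readset. The file names are hypothetical, and the call assumes
# the MUGQIC core.config module has already been initialized from the pipeline
# .ini files (every config.param() lookup above depends on that), so this is
# illustrative rather than directly runnable on its own:
#
#     job = trimmomatic(
#         "raw/sampleA.R1.fastq.gz", "raw/sampleA.R2.fastq.gz",
#         "trim/sampleA.R1.pair.fastq.gz", "trim/sampleA.R1.single.fastq.gz",
#         "trim/sampleA.R2.pair.fastq.gz", "trim/sampleA.R2.single.fastq.gz",
#         None,                       # single_output is unused for paired-end data
#         33,                         # quality_offset (phred33)
#         "adapters-truseq.fa",       # adapter_file
#         "trim/sampleA.trim.log"
#     )
# ----------------------------------------------------------------------------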
| lgpl-3.0 | 8,791,786,266,534,984,000 | 36.134831 | 107 | 0.634493 | false |
hstau/manifold-cryo | fit_1D_open_manifold_3D.py | 1 | 5015 | import numpy as np
import get_fit_1D_open_manifold_3D_param
import solve_d_R_d_tau_p_3D
import a
from scipy.io import loadmat
import matplotlib.pyplot as plt
#import matplotlib.pyplot as plt
'''
function [a,b,tau] = fit_1D_open_manifold_3D(psi)
%
% fit_1D_open_manifold_3D
%
% fit the eigenvectors for a 1D open manifold to the model
% x_ij = a_j cos(j*pi*tau_i) + b_j.
%
% j goes from 1 to 3 (this is only for 3D systems).
%
% i goes from 1 to nS where nS is the number of data points to be fitted.
%
% For a fixed set of a_j and b_j, j=1:3, tau_i for i=1:nS are
% obtained by putting dR/d(tau_i) to zero.
%
% For a fixed set of tau_i, i=1:nS, a_j and b_j for j=1:3 are
% obtained by solving 3 sets of 2x2 linear equations.
%
% Fit parameters and initial set of {\tau} are specified in
%
% get_fit_1D_open_manifold_3D_param.m
%
% copyright (c) Russell Fung 2014
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
global p nDim a b x x_fit
'''
'''
def plot_fitted_curve(hFig):
global x x_fit
h = plt.figure(hFig)
hsp = plt.subplot(2,2,1)
plot3(x(:,1),x(:,2),x(:,3),'b.','lineWidth',1);
hold on
plot3(x_fit(:,1),x_fit(:,2),x_fit(:,3),'g.','lineWidth',1);
hold off
set(hsp,'lineWidth',2,'fontSize',15);
hsp = subplot(2,2,2);
plotRF(hsp,x(:,1),x(:,2),'','','','b.');
addplotRF(hsp,x_fit(:,1),x_fit(:,2),'g.');
hsp = subplot(2,2,3);
plotRF(hsp,x(:,1),x(:,3),'','','','b.');
addplotRF(hsp,x_fit(:,1),x_fit(:,3),'g.');
hsp = subplot(2,2,4);
plotRF(hsp,x(:,2),x(:,3),'','','','b.');
addplotRF(hsp,x_fit(:,2),x_fit(:,3),'g.');
drawnow
%end
'''
eps = 1e-4
#global maxIter,delta_a_max, delta_b_max,delta_tau_max,a_b_tau_result
def op(psi):
a.init()
#global p, nDim, a, b, x, x_fit
a.nDim = 3
#tau = get_fit_1D_open_manifold_3D_param
tau = get_fit_1D_open_manifold_3D_param.op(psi)
aux = np.zeros((tau.shape[0],5)) #added
nS = a.x.shape[0]
for iter in xrange(1,a.maxIter+1):
string ='iteration ' + str(iter)
print string
'''
#%%%%%%%%%%%%%%%%%%%%%
#% solve for a and b %
#%%%%%%%%%%%%%%%%%%%%%
'''
a_old = a.a
b_old = a.b
j_pi_tau = np.dot(tau,np.pi*np.array([[1,2,3]]))
cos_j_pi_tau = np.cos(j_pi_tau)
A11 = np.sum(cos_j_pi_tau**2, axis=0)
A12 = np.sum(cos_j_pi_tau, axis=0)
A21 = A12
A22 = nS
x_cos_j_pi_tau = a.x*cos_j_pi_tau
b1 = np.sum(x_cos_j_pi_tau, axis=0)
b2 = np.sum(a.x, axis=0)
coeff = np.zeros((2,3))
for qq in xrange(3):
A = np.array([[A11[qq],A12[qq]],[A21[qq], A22]])
b = np.array([b1[qq], b2[qq]])
coeff[:,qq] = np.linalg.solve(A,b)
a.a = coeff[0,:]
a.b = coeff[1,:]
'''
%%%%%%%%%%%%%%%%%%%%%%%%%
#% plot the fitted curve %
%%%%%%%%%%%%%%%%%%%%%%%%%
'''
j_pi_tau = np.dot(np.linspace(0,1,1000).reshape(-1,1),np.array([[1,2,3]]))*np.pi
cos_j_pi_tau = np.cos(j_pi_tau)
tmp = a.a*cos_j_pi_tau
a.x_fit = tmp + a.b
#%plot_fitted_curve(iter)
'''
%%%%%%%%%%%%%%%%%
#% solve for tau %
%%%%%%%%%%%%%%%%%
'''
tau_old = tau
for a.p in xrange(nS):
tau[a.p],beta = solve_d_R_d_tau_p_3D.op() #added
for kk in xrange(beta.shape[0]):
aux[a.p,kk] = beta[kk]
'''
if iter == 0:
data = loadmat('aux0.mat') # (this is for < v7.3
elif iter == 1:
data = loadmat('aux1.mat') # (this is for < v7.3
else:
data = loadmat('aux2.mat') # (this is for < v7.3
imaux = data['aux']
plt.subplot(2, 2, 1)
plt.imshow(aux, cmap=plt.get_cmap('gray'),aspect=0.1)
plt.title('aux')
plt.subplot(2, 2, 2)
plt.imshow(imaux, cmap=plt.get_cmap('gray'), aspect=0.1)
plt.title('imaux')
plt.show()
'''
'''
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#% calculate the changes in fitting parameters %
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
'''
delta_a = np.fabs(a.a-a_old)/(np.fabs(a.a)+eps)
delta_b = np.fabs(a.b-b_old)/(np.fabs(a.b)+eps)
delta_tau = np.fabs(tau-tau_old)
delta_a = max(delta_a)*100
delta_b = max(delta_b)*100
delta_tau = max(delta_tau)
print ' changes in fitting parameters: \n'
string = ' amplitudes: '+ str(delta_a) + '\n' + \
' offsets: ' + str(delta_b) + ' \n' +\
' values of tau: ' + str(delta_tau) + ' \n'
print string
if (delta_a<a.delta_a_max) and (delta_b < a.delta_b_max) and (delta_tau < a.delta_tau_max):
break
return (a.a,a.b,tau)
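# ---------------------------------------------------------------------------
# Hedged, self-contained check of the "solve for a and b" step used inside
# op(): for a fixed set of tau_i the model x_ij = a_j*cos(j*pi*tau_i) + b_j is
# linear in (a_j, b_j), so each coordinate j reduces to a 2x2 normal-equation
# solve. The synthetic data below is hypothetical and independent of the
# global state in module ``a``; it only demonstrates that the linear solve
# recovers the amplitudes and offsets.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    nS = 200
    tau_demo = np.linspace(0.0, 1.0, nS).reshape(-1, 1)
    a_true = np.array([1.0, 0.5, 0.25])
    b_true = np.array([0.1, -0.2, 0.3])
    cos_j_pi_tau = np.cos(np.dot(tau_demo, np.pi * np.array([[1, 2, 3]])))
    x_demo = a_true * cos_j_pi_tau + b_true
    for j in range(3):
        A11 = np.sum(cos_j_pi_tau[:, j] ** 2)
        A12 = np.sum(cos_j_pi_tau[:, j])
        A = np.array([[A11, A12], [A12, nS]])
        rhs = np.array([np.sum(x_demo[:, j] * cos_j_pi_tau[:, j]),
                        np.sum(x_demo[:, j])])
        a_j, b_j = np.linalg.solve(A, rhs)
        print('coordinate %d: a=%.3f (true %.3f), b=%.3f (true %.3f)'
              % (j + 1, a_j, a_true[j], b_j, b_true[j]))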
| gpl-2.0 | 2,362,552,218,448,211,500 | 30.147436 | 99 | 0.465803 | false |
tchellomello/home-assistant | homeassistant/components/dsmr/__init__.py | 1 | 1324 | """The dsmr component."""
import asyncio
from asyncio import CancelledError
import logging
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from .const import DATA_TASK, DOMAIN, PLATFORMS
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass, config: dict):
"""Set up the DSMR platform."""
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up DSMR from a config entry."""
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = {}
for platform in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
task = hass.data[DOMAIN][entry.entry_id][DATA_TASK]
# Cancel the reconnect task
task.cancel()
try:
await task
except CancelledError:
pass
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
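# Note (hedged, not part of this module's code path): async_unload_entry above
# assumes the sensor platform stores its long-running reconnect/reader task
# under hass.data[DOMAIN][entry.entry_id][DATA_TASK], along the lines of
#
#     hass.data[DOMAIN][entry.entry_id][DATA_TASK] = hass.loop.create_task(...)
#
# so that the task can be cancelled and awaited here before the platforms are
# unloaded.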
| apache-2.0 | 6,681,127,316,997,422,000 | 23.518519 | 80 | 0.649547 | false |
tjmonsi/cmsc129-2016-repo | submissions/exercise4/pitargue/interpreter/syntax_analyzer.py | 1 | 33315 | from . import evaluate
class Parser():
def __init__(self, variables):
self.lexemes = []
self.lookahead = None
self.tokens = []
self.parse_tree = []
self.variables = variables
self.types = {
'TRUE_KEYWORD': 'boolean',
'FALSE_KEYWORD': 'boolean',
'INTEGER_LITERAL': 'int',
'FLOAT_LITERAL': 'float',
'STRING_LITERAL': 'string'
}
def nextLexeme(self):
if self.lexemes:
self.lookahead = self.lexemes.pop(0)
# print(self.lookahead[1])
else:
self.lookahead = ('eof', 'END OF FILE')
def assert_next(self, expected_value, error_message):
if self.lookahead[0] == expected_value:
self.nextLexeme()
return True
else:
print(error_message + ' before ' + self.lookahead[1])
return False
def assert_delimiter(self):
self.assert_next('SEMICOLON_KEYWORD', 'expected semicolon')
def check_next(self, expected_values):
if len(expected_values) == 1:
return self.lookahead[0] == expected_values[0]
for value in expected_values:
if self.lookahead[0] == value:
return True
return False
def parse(self, lexemes):
self.lexemes = lexemes
self.nextLexeme()
while not self.lookahead[0] == 'eof':
t = self.statement()
if isinstance(t, list):
self.parse_tree.extend(t)
else:
self.parse_tree.append(t)
return self.parse_tree
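    # ------------------------------------------------------------------
    # Usage sketch (hedged): parse() expects the lexeme list produced by the
    # accompanying lexical analyzer -- a list of (TOKEN_NAME, lexeme) pairs --
    # and returns a list of evaluate.* nodes. The tokens below are
    # illustrative; how the resulting nodes are run is defined in the
    # separate evaluate module.
    #
    #     variables = {}
    #     tree = Parser(variables).parse([
    #         ('OUTPUT_KEYWORD', 'output'),
    #         ('OPEN_PARENTHESIS_KEYWORD', '('),
    #         ('STRING_LITERAL', 'hello'),
    #         ('CLOSE_PARENTHESIS_KEYWORD', ')'),
    #         ('SEMICOLON_KEYWORD', ';'),
    #     ])
    # ------------------------------------------------------------------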
def codeblock(self):
stmts = []
self.assert_next('OPEN_CURLY_BRACE_KEYWORD', 'expected {')
while not self.check_next(['CLOSE_CURLY_BRACE_KEYWORD', 'eof']):
t = self.statement()
if isinstance(t, list):
stmts.extend(t)
else:
stmts.append(t)
self.assert_next('CLOSE_CURLY_BRACE_KEYWORD', 'expected }')
return stmts
def statement(self):
# STATEMENT := EXPRESSION | INPUT | OUTPUT | COMMENT | IFSTMT |
# SWITCHSTMT | LOOPSTMT | FUNCTIONDEC | RETURN |
# break | continue
if self.check_next(['INPUT_KEYWORD']):
return self.input()
elif self.check_next(['OUTPUT_KEYWORD']):
return self.output()
elif self.check_next(['VAR_KEYWORD']):
return self.vardec()
elif self.check_next(['SINGLE_LINE_COMMENT']):
self.nextLexeme()
elif self.check_next(['IF_KEYWORD']):
return self.ifstmt()
elif self.check_next(['SWITCH_KEYWORD']):
self.switch()
elif self.check_next(['WHILE_KEYWORD']):
return self.while_loop()
elif self.check_next(['DO_KEYWORD']):
return self.do_while_loop()
elif self.check_next(['FOR_KEYWORD']):
return self.for_loop()
elif self.check_next(['FOREACH_KEYWORD']):
self.foreach_loop()
elif self.check_next(['FUNCTION_KEYWORD']):
self.function()
elif self.check_next(['RETURN_KEYWORD']):
self.returnstmt()
elif self.check_next(['BREAK_KEYWORD']):
return self.breakstmt()
elif self.check_next(['CONTINUE_KEYWORD']):
return self.continuestmt()
elif self.check_next(['OPEN_KEYWORD']):
return self.openstmt()
elif self.check_next(['WRITE_KEYWORD']):
return self.writestmt()
elif self.check_next(['WRITELINE_KEYWORD']):
return self.writelinestmt()
elif self.check_next(['APPEND_KEYWORD']):
return self.appendstmt()
elif self.check_next(['IDENTIFIER']):
cur = self.lookahead
self.nextLexeme()
if self.check_next(['EQUAL_SIGN_KEYWORD']):
ass = self.assignment(cur[1], None)
self.assert_delimiter()
return ass;
elif self.check_next(['INCREMENT_KEYWORD']):
self.nextLexeme()
self.assert_delimiter()
return evaluate.Increment(self.variables, cur[1])
elif self.check_next(['DECREMENT_KEYWORD']):
self.nextLexeme()
self.assert_delimiter()
return evaluate.Decrement(self.variables, cur[1])
elif self.check_next(['OPEN_BRACKET_KEYWORD']):
self.nextLexeme()
pos = self.expression()
self.assert_next('CLOSE_BRACKET_KEYWORD', 'expected ]')
if self.check_next(['EQUAL_SIGN_KEYWORD']):
ass = self.assignment(cur[1], pos)
self.assert_delimiter()
return ass;
return evaluate.Variable(self.variables, cur[1], pos)
else:
print('unknown statement at ' + cur[1])
if self.check_next(['SEMICOLON_KEYWORD']):
self.nextLexeme()
else:
print('unknown statement at ' + self.lookahead[1])
self.nextLexeme()
def input(self):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
name = self.lookahead[1]
mess = None
self.assert_next('IDENTIFIER', 'expected identifier')
if self.check_next(['OPEN_BRACKET_KEYWORD']):
self.nextLexeme()
self.expression()
self.assert_next('CLOSE_BRACKET_KEYWORD', 'expected ]')
if self.check_next(['COMMA_KEYWORD']):
self.nextLexeme()
mess = self.lookahead[1]
self.assert_next('STRING_LITERAL', 'expected string literal')
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
self.assert_delimiter()
return evaluate.Input(self.variables, name, mess)
def output(self):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
expr = self.expression()
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
self.assert_delimiter()
return evaluate.Output(expr)
def appendstmt(self):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
name = self.lookahead[1]
self.assert_next('IDENTIFIER', 'expected identifier')
self.assert_next('COMMA_KEYWORD', 'expected ,')
expr = self.expression()
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
self.assert_delimiter()
return evaluate.Append(self.variables, name, expr)
def openstmt(self):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
path = None
if self.check_next(['IDENTIFIER']):
path = evaluate.Variable(self.variables, self.lookahead[1], None)
self.nextLexeme()
if self.check_next(['OPEN_BRACKET_KEYWORD']):
self.nextLexeme()
self.expression()
self.assert_next('CLOSE_BRACKET_KEYWORD', 'expected ]')
elif self.check_next(['STRING_LITERAL']):
path = self.lookahead[1]
self.nextLexeme()
else:
print('expected variable identifier or string literal before ' + self.lookahead[1])
self.assert_next('COMMA_KEYWORD', 'expected ,')
mode = self.lookahead[1]
self.assert_next('STRING_LITERAL', 'expected string literal')
self.assert_next('COMMA_KEYWORD', 'expected ,')
name = self.lookahead[1]
self.assert_next('IDENTIFIER', 'expected identifier')
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
self.assert_delimiter()
return evaluate.Open(path, mode, name, self.variables)
def writestmt(self):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
name = self.lookahead[1]
self.assert_next('IDENTIFIER', 'expected identifier')
self.assert_next('COMMA_KEYWORD', 'expected ,')
value = None
if self.check_next(['IDENTIFIER']):
source_iden = self.lookahead[1]
self.nextLexeme()
pos = None
if self.check_next(['OPEN_BRACKET_KEYWORD']):
self.nextLexeme()
pos = self.expression()
self.assert_next('CLOSE_BRACKET_KEYWORD', 'expected ]')
value = evaluate.Variable(self.variables, source_iden, pos)
elif self.check_next(['STRING_LITERAL']):
value = self.lookahead[1]
self.nextLexeme()
else:
print('expected variable identifier or string literal before ' + self.lookahead[1])
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
self.assert_delimiter()
return evaluate.Write(self.variables, name, value)
def writelinestmt(self):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
name = self.lookahead[1]
self.assert_next('IDENTIFIER', 'expected identifier')
self.assert_next('COMMA_KEYWORD', 'expected ,')
value = None
if self.check_next(['IDENTIFIER']):
source_iden = self.lookahead[1]
self.nextLexeme()
pos = None
if self.check_next(['OPEN_BRACKET_KEYWORD']):
self.nextLexeme()
pos = self.expression()
self.assert_next('CLOSE_BRACKET_KEYWORD', 'expected ]')
value = evaluate.Variable(self.variables, source_iden, pos)
elif self.check_next(['STRING_LITERAL']):
value = self.lookahead[1]
self.nextLexeme()
else:
print('expected variable identifier or string literal before ' + self.lookahead[1])
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
self.assert_delimiter()
return evaluate.WriteLine(self.variables, name, value)
def assignment(self, var_name, pos):
self.assert_next('EQUAL_SIGN_KEYWORD', 'expected =')
if self.check_next(['OPEN_BRACKET_KEYWORD']):
self.nextLexeme()
vals = [];
while not self.check_next(['CLOSE_BRACKET_KEYWORD']):
expr = self.expression()
if expr:
vals.append(expr)
if not self.check_next(['CLOSE_BRACKET_KEYWORD', 'SEMICOLON_KEYWORD', 'eof']):
self.assert_next('COMMA_KEYWORD', 'expected comma')
if self.check_next(['SEMICOLON_KEYWORD', 'eof']):
print('expected ] before ' + self.lookahead[1])
break
else:
if not self.check_next(['CLOSE_BRACKET_KEYWORD', 'SEMICOLON_KEYWORD', 'eof']):
print('expected ] before ' + self.lookahead[1])
break
self.assert_next('CLOSE_BRACKET_KEYWORD', 'expected ]')
return evaluate.Assignment(self.variables, var_name, pos, ('array', vals))
else:
expr = self.expression()
return evaluate.Assignment(self.variables, var_name, pos, ('single', expr))
def vardec(self):
self.nextLexeme()
name = self.lookahead[1]
varde = []
if self.assert_next('IDENTIFIER', 'expected identifier'):
self.variables[name] = {
'type': 'undefined',
'value': None
}
varde.append(evaluate.VarDec(self.variables, name))
if self.check_next(['EQUAL_SIGN_KEYWORD']):
# self.nextLexeme()
# if self.check_next(['OPEN_BRACKET_KEYWORD']):
# self.nextLexeme()
# while not self.check_next(['CLOSE_BRACKET_KEYWORD']):
# self.expression()
# if not self.check_next(['CLOSE_BRACKET_KEYWORD']):
# self.assert_next('COMMA_KEYWORD', 'expected comma')
# self.assert_next('CLOSE_BRACKET_KEYWORD', 'expected ]')
# else:
# self.expression()
varde.append(self.assignment(name, None))
self.assert_delimiter()
return varde
def ifstmt(self):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
cond = self.expression()
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
then = self.codeblock()
elsif_cond = None
elsif_block = None
if self.check_next(['ELSIF_KEYWORD']):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
elsif_cond = self.expression()
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
elsif_block = self.codeblock()
else_block = None
if self.check_next(['ELSE_KEYWORD']):
self.nextLexeme()
else_block = self.codeblock()
return evaluate.IfThenElse(cond, then, elsif_cond, elsif_block, else_block)
def switch(self):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
if self.variable():
self.nextLexeme()
else:
print('expected variable identifier before ' + self.lookahead[1])
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
self.assert_next('OPEN_CURLY_BRACE_KEYWORD', 'expected {')
while not self.check_next(['CLOSE_CURLY_BRACE_KEYWORD', 'eof']):
if self.check_next(['DEFAULT_KEYWORD']):
break
self.caseblock()
if self.check_next(['DEFAULT_KEYWORD']):
self.nextLexeme()
self.assert_next('COLON_KEYWORD', 'expected :')
self.codeblock()
self.assert_next('CLOSE_CURLY_BRACE_KEYWORD', 'expected }')
def caseblock(self):
self.assert_next('CASE_KEYWORD', 'expected case')
if self.literal():
self.nextLexeme()
else:
print('expected literal at ' + self.lookahead[1])
self.assert_next('COLON_KEYWORD', 'expected :')
# self.assert_next('INTEGER_LITERAL', 'expected literal')
self.codeblock()
def while_loop(self):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
cond = self.expression()
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
loop = self.codeblock()
return evaluate.WhileLoop(cond, loop)
def do_while_loop(self):
self.nextLexeme()
loop = self.codeblock()
self.assert_next('WHILE_KEYWORD', 'expected while')
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
cond = self.expression()
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
self.assert_delimiter()
return evaluate.DoWhileLoop(loop, cond)
def for_loop(self):
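        """Parse a for loop of the form (init; cond; last) followed by a code
        block, returning an evaluate.ForLoop node. With empty parentheses the
        init, cond and last parts are left as None.
        """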
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
        init = cond = last = None
        if self.check_next(['CLOSE_PARENTHESIS_KEYWORD']):
self.nextLexeme()
else:
init = self.statement()
cond = self.expression()
self.assert_delimiter()
last = self.expression()
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
loop = self.codeblock()
return evaluate.ForLoop(init, cond, loop, last)
def foreach_loop(self):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
self.assert_next('IDENTIFIER', 'expected identifier')
self.assert_next('IN_KEYWORD', 'expected in')
self.assert_next('IDENTIFIER', 'expected identifier')
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
self.codeblock()
def function(self):
self.nextLexeme()
self.assert_next('IDENTIFIER', 'expected function identifier')
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
while not self.check_next(['CLOSE_PARENTHESIS_KEYWORD']):
self.assert_next('IDENTIFIER', 'expected identifier')
if not self.check_next(['CLOSE_PARENTHESIS_KEYWORD']):
self.assert_next('COMMA_KEYWORD', 'expected comma')
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
self.codeblock()
def returnstmt(self):
self.nextLexeme()
self.expression()
self.assert_delimiter()
def breakstmt(self):
self.nextLexeme()
self.assert_delimiter()
return evaluate.Break()
def continuestmt(self):
self.nextLexeme()
self.assert_delimiter()
return evaluate.Continue()
def expression(self):
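        """Parse an expression by collecting operator and operand nodes and
        wrapping them in an evaluate.Expression.
        """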
operators = []
operands = []
self.evaluate_expression(operators, operands)
return evaluate.Expression(operators, operands)
def evaluate_expression(self, operators, operands):
if self.check_next(['OPEN_PARENTHESIS_KEYWORD']):
self.nextLexeme()
operands.append(self.expression())
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
if self.evaluate_token(operators, operands):
self.evaluate_expression(operators, operands)
def evaluate_token(self, operators, operands):
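        """Consume one expression token if present: a literal, a variable
        reference (with optional indexing, assignment, ++/-- or a call-style
        argument list), a built-in (len, rand, read, readline, sqrt, not,
        int) or an operator. Returns a truthy value when a token was
        consumed, False otherwise.
        """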
if self.literal():
lit = self.lookahead
self.nextLexeme()
operands.append(evaluate.Literal(self.types[lit[0]], lit[1]))
return True
elif self.variable():
name = self.lookahead
pos = None
self.nextLexeme()
if self.check_next(['OPEN_BRACKET_KEYWORD']):
self.nextLexeme()
pos = self.expression()
self.assert_next('CLOSE_BRACKET_KEYWORD', 'expected ]')
if self.check_next(['EQUAL_SIGN_KEYWORD']):
                return self.assignment(name[1], pos)
elif self.check_next(['INCREMENT_KEYWORD']):
self.nextLexeme()
operands.append(evaluate.Increment(self.variables, name[1]))
return True
elif self.check_next(['DECREMENT_KEYWORD']):
self.nextLexeme()
operands.append(evaluate.Decrement(self.variables, name[1]))
return True
elif self.check_next(['OPEN_PARENTHESIS_KEYWORD']):
self.nextLexeme()
while not self.check_next(['CLOSE_PARENTHESIS_KEYWORD']):
self.expression()
if not self.check_next(['CLOSE_PARENTHESIS_KEYWORD', 'SEMICOLON_KEYWORD']):
self.assert_next('COMMA_KEYWORD', 'expected comma')
if self.check_next(['SEMICOLON_KEYWORD', 'eof']):
print('expected ) before ' + self.lookahead[1])
return False
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
operands.append(evaluate.Variable(self.variables, name[1], pos))
return True
# elif self.check_next(['MINUS_KEYWORD']):
# self.nextLexeme()
# expr = self.expression()
# operands.append(evaluate.Negation(expr))
# return True
elif self.check_next(['LEN_KEYWORD']):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
expr = self.expression()
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
operands.append(evaluate.Len(expr))
return True
elif self.check_next(['RAND_KEYWORD']):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
expr1 = self.expression()
self.assert_next('COMMA_KEYWORD', 'expected ,')
expr2 = self.expression()
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
operands.append(evaluate.Random(expr1, expr2))
return True
elif self.check_next(['READ_KEYWORD']):
self.nextLexeme()
            self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
name = self.lookahead[1]
self.assert_next('IDENTIFIER', 'expected variable identifier')
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
operands.append(evaluate.Read(self.variables, name))
return True
elif self.check_next(['READLINE_KEYWORD']):
self.nextLexeme()
            self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
name = self.lookahead[1]
self.assert_next('IDENTIFIER', 'expected variable identifier')
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
operands.append(evaluate.ReadLine(self.variables, name))
return True
elif self.check_next(['SQRT_KEYWORD']):
self.nextLexeme()
            self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
expr = self.expression()
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
operands.append(evaluate.Sqrt(expr))
return True
elif self.check_next(['NOT_KEYWORD']):
self.nextLexeme()
expr = self.expression()
operands.append(evaluate.Not(expr))
return True
elif self.check_next(['INT_KEYWORD']):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
expr = self.expression()
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
operands.append(evaluate.IntegerCast(expr))
return True
elif self.check_next(['PLUS_KEYWORD', 'MINUS_KEYWORD', 'MULTIPLY_KEYWORD', 'DIVIDE_KEYWORD', 'MODULO_KEYWORD']):
self.append_math_operator(operators, operands)
return True
elif self.check_next(['GREATER_THAN_KEYWORD', 'LESS_THAN_KEYWORD', 'GREATER_THAN_OR_EQUALS_KEYWORD',
'LESS_THAN_OR_EQUALS_KEYWORD', 'AND_KEYWORD', 'OR_KEYWORD', 'EQUALS_KEYWORD',
'NOT_EQUALS_KEYWORD']):
# operators.append(self.lookahead)
# self.nextLexeme()
# operands.append(self.expression())
self.append_boolean_operator(operators, operands)
return True
else:
return False
def append_boolean_operator(self, operators, operands):
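        """Push a comparison/logical operator: first fold any pending
        arithmetic operators into MathOperation (or Negation) nodes so they
        bind tighter than the boolean operator, then parse the right-hand
        side as a single operand.
        """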
operator = self.lookahead
self.nextLexeme()
while operators and operators[0][0] in ['PLUS_KEYWORD', 'MINUS_KEYWORD', 'MULTIPLY_KEYWORD', 'DIVIDE_KEYWORD', 'MODULO_KEYWORD']:
op = operators.pop()
if op[0] == 'MINUS_KEYWORD':
if len(operands) % 2 != 0:
t1 = operands.pop()
operands.append(evaluate.Negation(t1))
else:
if len(operands) < 2:
raise evaluate.EvaluationError('Invalid expression at ' + operator[1])
else:
t2 = operands.pop()
t1 = operands.pop()
operands.append(evaluate.MathOperation(op, t1, t2))
operators.append(operator)
operands.append(self.expression())
def append_math_operator(self, operators, operands):
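        """Push an arithmetic operator, first folding a pending operator of
        higher precedence into a MathOperation (or Negation) node. As an
        illustrative trace (assuming lexemes are (type, text) pairs): for
        `2 * 3 + 1`, '*' is pushed first; when '+' arrives the pending '*'
        outranks it, so 2 and 3 are folded into MathOperation('*', 2, 3)
        before '+' is pushed, leaving operands [MathOperation, 1].
        """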
operator = self.lookahead
self.nextLexeme()
if operators:
            while operators and self.check_precedence(operators[0], operator):
op = operators.pop()
if op[0] == 'MINUS_KEYWORD':
if len(operands) % 2 != 0:
t1 = operands.pop()
operands.append(evaluate.Negation(t1))
else:
if len(operands) < 2:
raise evaluate.EvaluationError('Invalid expression at ' + operator[1])
else:
t2 = operands.pop()
t1 = operands.pop()
operands.append(evaluate.MathOperation(op, t1, t2))
else:
if len(operands) < 2:
raise evaluate.EvaluationError('Invalid expression at ' + operator[1])
else:
t2 = operands.pop()
t1 = operands.pop()
operands.append(evaluate.MathOperation(op, t1, t2))
operators.append(operator)
def check_precedence(self, op1, op2):
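        """Return True when the pending operator op1 is multiplicative and
        the incoming operator op2 is additive, i.e. op1 binds tighter; both
        arguments are lexeme tuples.
        """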
# if op1[0] in ['GREATER_THAN_KEYWORD', 'LESS_THAN_KEYWORD', 'GREATER_THAN_OR_EQUALS_KEYWORD',
# 'LESS_THAN_OR_EQUALS_KEYWORD', 'AND_KEYWORD', 'OR_KEYWORD', 'EQUALS_KEYWORD',
# 'NOT_EQUALS_KEYWORD']:
# return True
        if op1[0] in ['MULTIPLY_KEYWORD', 'DIVIDE_KEYWORD', 'MODULO_KEYWORD'] and op2[0] in ['PLUS_KEYWORD', 'MINUS_KEYWORD']:
return True
else:
return False
# def expression(self):
# return self.operation()
#
# def operation(self):
# trm = self.term()
# if trm:
# oprtr = self.operator()
# if oprtr:
# self.nextLexeme()
# oprtn = self.operation()
# if oprtn:
# if oprtr in ['GREATER_THAN_KEYWORD',
# 'LESS_THAN_KEYWORD', 'GREATER_THAN_OR_EQUALS_KEYWORD',
# 'LESS_THAN_OR_EQUALS_KEYWORD', 'AND_KEYWORD',
# 'OR_KEYWORD', 'EQUALS_KEYWORD', 'NOT_EQUALS_KEYWORD']:
# return evaluate.BooleanExpression(oprtr, trm, oprtn)
# else:
# return evaluate.MathExpression(oprtr, oprtn, trm)
# else:
# return False
# else:
# return trm
# else:
# print('expected expression at ' + self.lookahead[1])
# return False
#
# def term(self):
# op = self.operand()
# if op:
# oprtr = self.operator()
# if oprtr:
# self.nextLexeme()
# trm = self.term()
# if trm:
# if oprtr in ['GREATER_THAN_KEYWORD',
# 'LESS_THAN_KEYWORD', 'GREATER_THAN_OR_EQUALS_KEYWORD',
# 'LESS_THAN_OR_EQUALS_KEYWORD', 'AND_KEYWORD',
# 'OR_KEYWORD', 'EQUALS_KEYWORD', 'NOT_EQUALS_KEYWORD']:
# return evaluate.BooleanExpression(oprtr, op, trm)
# else:
# return evaluate.MathExpression(oprtr, trm, op)
# else:
# return False
# else:
# return op
# else:
# return False
#
#
# def operand(self):
# if self.check_next(['OPEN_PARENTHESIS_KEYWORD']):
# self.nextLexeme()
# expr = self.expression()
# self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
# return expr
# else:
# if self.literal():
# lit = self.lookahead
# self.nextLexeme()
# return evaluate.Literal(self.types[lit[0]], lit[1])
# elif self.variable():
# name = self.lookahead
# pos = None
# self.nextLexeme()
# if self.check_next(['OPEN_BRACKET_KEYWORD']):
# self.nextLexeme()
# pos = self.expression()
# self.assert_next('CLOSE_BRACKET_KEYWORD', 'expected ]')
# if self.check_next(['EQUAL_SIGN_KEYWORD']):
# return self.assignment(name)
# elif self.check_next(['INCREMENT_KEYWORD']):
# self.nextLexeme()
# return evaluate.Increment(self.variables, name[1])
# elif self.check_next(['DECREMENT_KEYWORD']):
# self.nextLexeme()
# return evaluate.Decrement(self.variables, name[1])
# elif self.check_next(['OPEN_PARENTHESIS_KEYWORD']):
# self.nextLexeme()
# while not self.check_next(['CLOSE_PARENTHESIS_KEYWORD']):
# self.expression()
# if not self.check_next(['CLOSE_PARENTHESIS_KEYWORD', 'SEMICOLON_KEYWORD']):
# self.assert_next('COMMA_KEYWORD', 'expected comma')
# if self.check_next(['SEMICOLON_KEYWORD', 'eof']):
# print('expected ) before ' + self.lookahead[1])
# return False
# self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
# return evaluate.Variable(self.variables, name[1], pos)
# elif self.check_next(['MINUS_KEYWORD']):
# self.nextLexeme()
# expr = self.expression()
# return evaluate.Negation(expr)
# elif self.check_next(['LEN_KEYWORD']):
# self.nextLexeme()
# self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
# # if self.check_next(['STRING_LITERAL']):
# # self.nextLexeme()
# # self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
# # elif self.check_next(['INTEGER_LITERAL']):
# # self.nextLexeme()
# # self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
# # elif self.check_next(['FLOAT_LITERAL']):
# # self.nextLexeme()
# # self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
# # elif self.check_next(['IDENTIFIER']):
# # self.nextLexeme()
# # if self.check_next(['OPEN_BRACKET_KEYWORD']):
# # self.nextLexeme()
# # self.expression()
# # self.assert_next('CLOSE_BRACKET_KEYWORD', 'expected ]')
# expr = self.expression()
# self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
# return evaluate.Len(expr)
# elif self.check_next(['RAND_KEYWORD']):
# self.nextLexeme()
# self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
# expr1 = self.expression()
# self.assert_next('COMMA_KEYWORD', 'expected ,')
# expr2 = self.expression()
# self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
# return evaluate.Random(expr1, expr2)
# elif self.check_next(['READ_KEYWORD']):
# self.nextLexeme()
# self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected ()')
# name = self.lookahead[1]
# self.assert_next('IDENTIFIER', 'expected variable identifier')
# self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
# return evaluate.Read(self.variables, name)
# elif self.check_next(['READLINE_KEYWORD']):
# self.nextLexeme()
# self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected ()')
# name = self.lookahead[1]
# self.assert_next('IDENTIFIER', 'expected variable identifier')
# self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
# return evaluate.ReadLine(self.variables, name)
# elif self.check_next(['SQRT_KEYWORD']):
# self.nextLexeme()
# self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected ()')
# expr = self.expression()
# self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
# return evaluate.Sqrt(expr)
# elif self.check_next(['NOT_KEYWORD']):
# self.nextLexeme()
# expr = self.expression()
# return evaluate.Not(expr)
# else:
# return False
def operator(self):
if self.check_next(['PLUS_KEYWORD', 'MINUS_KEYWORD',
'MULTIPLY_KEYWORD', 'DIVIDE_KEYWORD',
'MODULO_KEYWORD', 'GREATER_THAN_KEYWORD',
'LESS_THAN_KEYWORD', 'GREATER_THAN_OR_EQUALS_KEYWORD',
'LESS_THAN_OR_EQUALS_KEYWORD', 'AND_KEYWORD',
'OR_KEYWORD', 'EQUALS_KEYWORD', 'NOT_EQUALS_KEYWORD'
]):
return self.lookahead[0]
else:
return False
def literal(self):
return self.check_next(['INTEGER_LITERAL', 'FLOAT_LITERAL', 'STRING_LITERAL', 'TRUE_KEYWORD', 'FALSE_KEYWORD'])
def variable(self):
return self.check_next(['IDENTIFIER'])
| mit | -5,841,835,166,006,237,000 | 42.777924 | 137 | 0.536125 | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_4_0_0/models/activitydefinition_tests.py | 1 | 30643 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-05-07.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import activitydefinition
from .fhirdate import FHIRDate
class ActivityDefinitionTests(unittest.TestCase):
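    """Round-trip the example ActivityDefinition resources between JSON and
    the model classes, checking the parsed fields.
    """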
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("ActivityDefinition", js["resourceType"])
return activitydefinition.ActivityDefinition(js)
def testActivityDefinition1(self):
inst = self.instantiate_from("activitydefinition-predecessor-example.json")
self.assertIsNotNone(inst, "Must have instantiated a ActivityDefinition instance")
self.implActivityDefinition1(inst)
js = inst.as_json()
self.assertEqual("ActivityDefinition", js["resourceType"])
inst2 = activitydefinition.ActivityDefinition(js)
self.implActivityDefinition1(inst2)
def implActivityDefinition1(self, inst):
self.assertEqual(inst.approvalDate.date, FHIRDate("2016-03-12").date)
self.assertEqual(inst.approvalDate.as_json(), "2016-03-12")
self.assertEqual(inst.author[0].name, "Motive Medical Intelligence")
self.assertEqual(inst.author[0].telecom[0].system, "phone")
self.assertEqual(inst.author[0].telecom[0].use, "work")
self.assertEqual(inst.author[0].telecom[0].value, "415-362-4007")
self.assertEqual(inst.author[0].telecom[1].system, "email")
self.assertEqual(inst.author[0].telecom[1].use, "work")
self.assertEqual(inst.author[0].telecom[1].value, "[email protected]")
self.assertEqual(inst.code.coding[0].code, "306206005")
self.assertEqual(inst.code.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.code.text, "Referral to service (procedure)")
self.assertEqual(inst.contact[0].telecom[0].system, "phone")
self.assertEqual(inst.contact[0].telecom[0].use, "work")
self.assertEqual(inst.contact[0].telecom[0].value, "415-362-4007")
self.assertEqual(inst.contact[0].telecom[1].system, "email")
self.assertEqual(inst.contact[0].telecom[1].use, "work")
self.assertEqual(inst.contact[0].telecom[1].value, "[email protected]")
self.assertEqual(inst.copyright, "© Copyright 2016 Motive Medical Intelligence. All rights reserved.")
self.assertEqual(inst.date.date, FHIRDate("2017-03-03T14:06:00Z").date)
self.assertEqual(inst.date.as_json(), "2017-03-03T14:06:00Z")
self.assertEqual(inst.description, "refer to primary care mental-health integrated care program for evaluation and treatment of mental health conditions now")
self.assertEqual(inst.effectivePeriod.end.date, FHIRDate("2017-12-31").date)
self.assertEqual(inst.effectivePeriod.end.as_json(), "2017-12-31")
self.assertEqual(inst.effectivePeriod.start.date, FHIRDate("2016-01-01").date)
self.assertEqual(inst.effectivePeriod.start.as_json(), "2016-01-01")
self.assertTrue(inst.experimental)
self.assertEqual(inst.id, "referralPrimaryCareMentalHealth-initial")
self.assertEqual(inst.identifier[0].system, "http://motivemi.com/artifacts")
self.assertEqual(inst.identifier[0].use, "official")
self.assertEqual(inst.identifier[0].value, "referralPrimaryCareMentalHealth")
self.assertEqual(inst.jurisdiction[0].coding[0].code, "US")
self.assertEqual(inst.jurisdiction[0].coding[0].system, "urn:iso:std:iso:3166")
self.assertEqual(inst.kind, "ServiceRequest")
self.assertEqual(inst.lastReviewDate.date, FHIRDate("2016-08-15").date)
self.assertEqual(inst.lastReviewDate.as_json(), "2016-08-15")
self.assertEqual(inst.name, "ReferralPrimaryCareMentalHealth")
self.assertEqual(inst.participant[0].type, "practitioner")
self.assertEqual(inst.publisher, "Motive Medical Intelligence")
self.assertEqual(inst.relatedArtifact[0].display, "Practice Guideline for the Treatment of Patients with Major Depressive Disorder")
self.assertEqual(inst.relatedArtifact[0].type, "citation")
self.assertEqual(inst.relatedArtifact[0].url, "http://psychiatryonline.org/pb/assets/raw/sitewide/practice_guidelines/guidelines/mdd.pdf")
self.assertEqual(inst.relatedArtifact[1].resource, "ActivityDefinition/referralPrimaryCareMentalHealth")
self.assertEqual(inst.relatedArtifact[1].type, "successor")
self.assertEqual(inst.status, "retired")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.title, "Referral to Primary Care Mental Health")
self.assertEqual(inst.topic[0].text, "Mental Health Referral")
self.assertEqual(inst.url, "http://motivemi.com/artifacts/ActivityDefinition/referralPrimaryCareMentalHealth")
self.assertEqual(inst.useContext[0].code.code, "age")
self.assertEqual(inst.useContext[0].code.system, "http://terminology.hl7.org/CodeSystem/usage-context-type")
self.assertEqual(inst.useContext[0].valueCodeableConcept.coding[0].code, "D000328")
self.assertEqual(inst.useContext[0].valueCodeableConcept.coding[0].display, "Adult")
self.assertEqual(inst.useContext[0].valueCodeableConcept.coding[0].system, "https://meshb.nlm.nih.gov")
self.assertEqual(inst.useContext[1].code.code, "focus")
self.assertEqual(inst.useContext[1].code.system, "http://terminology.hl7.org/CodeSystem/usage-context-type")
self.assertEqual(inst.useContext[1].valueCodeableConcept.coding[0].code, "87512008")
self.assertEqual(inst.useContext[1].valueCodeableConcept.coding[0].display, "Mild major depression")
self.assertEqual(inst.useContext[1].valueCodeableConcept.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.useContext[2].code.code, "focus")
self.assertEqual(inst.useContext[2].code.system, "http://terminology.hl7.org/CodeSystem/usage-context-type")
self.assertEqual(inst.useContext[2].valueCodeableConcept.coding[0].code, "40379007")
self.assertEqual(inst.useContext[2].valueCodeableConcept.coding[0].display, "Major depression, recurrent, mild")
self.assertEqual(inst.useContext[2].valueCodeableConcept.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.useContext[3].code.code, "focus")
self.assertEqual(inst.useContext[3].code.system, "http://terminology.hl7.org/CodeSystem/usage-context-type")
self.assertEqual(inst.useContext[3].valueCodeableConcept.coding[0].code, "225444004")
self.assertEqual(inst.useContext[3].valueCodeableConcept.coding[0].display, "At risk for suicide (finding)")
self.assertEqual(inst.useContext[3].valueCodeableConcept.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.useContext[4].code.code, "focus")
self.assertEqual(inst.useContext[4].code.system, "http://terminology.hl7.org/CodeSystem/usage-context-type")
self.assertEqual(inst.useContext[4].valueCodeableConcept.coding[0].code, "306206005")
self.assertEqual(inst.useContext[4].valueCodeableConcept.coding[0].display, "Referral to service (procedure)")
self.assertEqual(inst.useContext[4].valueCodeableConcept.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.useContext[5].code.code, "user")
self.assertEqual(inst.useContext[5].code.system, "http://terminology.hl7.org/CodeSystem/usage-context-type")
self.assertEqual(inst.useContext[5].valueCodeableConcept.coding[0].code, "309343006")
self.assertEqual(inst.useContext[5].valueCodeableConcept.coding[0].display, "Physician")
self.assertEqual(inst.useContext[5].valueCodeableConcept.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.useContext[6].code.code, "venue")
self.assertEqual(inst.useContext[6].code.system, "http://terminology.hl7.org/CodeSystem/usage-context-type")
self.assertEqual(inst.useContext[6].valueCodeableConcept.coding[0].code, "440655000")
self.assertEqual(inst.useContext[6].valueCodeableConcept.coding[0].display, "Outpatient environment")
self.assertEqual(inst.useContext[6].valueCodeableConcept.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.version, "1.0.0")
def testActivityDefinition2(self):
inst = self.instantiate_from("activitydefinition-medicationorder-example.json")
self.assertIsNotNone(inst, "Must have instantiated a ActivityDefinition instance")
self.implActivityDefinition2(inst)
js = inst.as_json()
self.assertEqual("ActivityDefinition", js["resourceType"])
inst2 = activitydefinition.ActivityDefinition(js)
self.implActivityDefinition2(inst2)
def implActivityDefinition2(self, inst):
self.assertEqual(inst.approvalDate.date, FHIRDate("2016-03-12").date)
self.assertEqual(inst.approvalDate.as_json(), "2016-03-12")
self.assertEqual(inst.author[0].name, "Motive Medical Intelligence")
self.assertEqual(inst.author[0].telecom[0].system, "phone")
self.assertEqual(inst.author[0].telecom[0].use, "work")
self.assertEqual(inst.author[0].telecom[0].value, "415-362-4007")
self.assertEqual(inst.author[0].telecom[1].system, "email")
self.assertEqual(inst.author[0].telecom[1].use, "work")
self.assertEqual(inst.author[0].telecom[1].value, "[email protected]")
self.assertEqual(inst.contact[0].telecom[0].system, "phone")
self.assertEqual(inst.contact[0].telecom[0].use, "work")
self.assertEqual(inst.contact[0].telecom[0].value, "415-362-4007")
self.assertEqual(inst.contact[0].telecom[1].system, "email")
self.assertEqual(inst.contact[0].telecom[1].use, "work")
self.assertEqual(inst.contact[0].telecom[1].value, "[email protected]")
self.assertEqual(inst.contained[0].id, "citalopramMedication")
self.assertEqual(inst.contained[1].id, "citalopramSubstance")
self.assertEqual(inst.copyright, "© Copyright 2016 Motive Medical Intelligence. All rights reserved.")
self.assertEqual(inst.date.date, FHIRDate("2015-08-15").date)
self.assertEqual(inst.date.as_json(), "2015-08-15")
self.assertEqual(inst.description, "Citalopram 20 mg tablet 1 tablet oral 1 time daily now (30 table; 3 refills")
self.assertEqual(inst.dosage[0].doseAndRate[0].doseQuantity.unit, "{tbl}")
self.assertEqual(inst.dosage[0].doseAndRate[0].doseQuantity.value, 1)
self.assertEqual(inst.dosage[0].doseAndRate[0].type.coding[0].code, "ordered")
self.assertEqual(inst.dosage[0].doseAndRate[0].type.coding[0].display, "Ordered")
self.assertEqual(inst.dosage[0].doseAndRate[0].type.coding[0].system, "http://terminology.hl7.org/CodeSystem/dose-rate-type")
self.assertEqual(inst.dosage[0].route.coding[0].code, "26643006")
self.assertEqual(inst.dosage[0].route.coding[0].display, "Oral route (qualifier value)")
self.assertEqual(inst.dosage[0].route.text, "Oral route (qualifier value)")
self.assertEqual(inst.dosage[0].text, "1 tablet oral 1 time daily")
self.assertEqual(inst.dosage[0].timing.repeat.frequency, 1)
self.assertEqual(inst.dosage[0].timing.repeat.period, 1)
self.assertEqual(inst.dosage[0].timing.repeat.periodUnit, "d")
self.assertEqual(inst.dynamicValue[0].expression.description, "dispenseRequest.numberOfRepeatsAllowed is three (3)")
self.assertEqual(inst.dynamicValue[0].expression.expression, "3")
self.assertEqual(inst.dynamicValue[0].expression.language, "text/cql")
self.assertEqual(inst.dynamicValue[0].path, "dispenseRequest.numberOfRepeatsAllowed")
self.assertEqual(inst.dynamicValue[1].expression.description, "dispenseRequest.quantity is thirty (30) tablets")
self.assertEqual(inst.dynamicValue[1].expression.expression, "30 '{tbl}'")
self.assertEqual(inst.dynamicValue[1].expression.language, "text/cql")
self.assertEqual(inst.dynamicValue[1].path, "dispenseRequest.quantity")
self.assertEqual(inst.effectivePeriod.end.date, FHIRDate("2017-12-31").date)
self.assertEqual(inst.effectivePeriod.end.as_json(), "2017-12-31")
self.assertEqual(inst.effectivePeriod.start.date, FHIRDate("2016-01-01").date)
self.assertEqual(inst.effectivePeriod.start.as_json(), "2016-01-01")
self.assertTrue(inst.experimental)
self.assertEqual(inst.id, "citalopramPrescription")
self.assertEqual(inst.identifier[0].system, "http://motivemi.com")
self.assertEqual(inst.identifier[0].use, "official")
self.assertEqual(inst.identifier[0].value, "citalopramPrescription")
self.assertEqual(inst.jurisdiction[0].coding[0].code, "US")
self.assertEqual(inst.jurisdiction[0].coding[0].system, "urn:iso:std:iso:3166")
self.assertEqual(inst.kind, "MedicationRequest")
self.assertEqual(inst.lastReviewDate.date, FHIRDate("2016-08-15").date)
self.assertEqual(inst.lastReviewDate.as_json(), "2016-08-15")
self.assertEqual(inst.name, "CitalopramPrescription")
self.assertEqual(inst.publisher, "Motive Medical Intelligence")
self.assertEqual(inst.purpose, "Defines a guideline supported prescription for the treatment of depressive disorders")
self.assertEqual(inst.relatedArtifact[0].display, "Practice Guideline for the Treatment of Patients with Major Depressive Disorder")
self.assertEqual(inst.relatedArtifact[0].type, "citation")
self.assertEqual(inst.relatedArtifact[0].url, "http://psychiatryonline.org/pb/assets/raw/sitewide/practice_guidelines/guidelines/mdd.pdf")
self.assertEqual(inst.relatedArtifact[1].resource, "#citalopramMedication")
self.assertEqual(inst.relatedArtifact[1].type, "composed-of")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.title, "Citalopram Prescription")
self.assertEqual(inst.topic[0].text, "Mental Health Treatment")
self.assertEqual(inst.url, "http://motivemi.com/artifacts/ActivityDefinition/citalopramPrescription")
self.assertEqual(inst.usage, "This activity definition is used as part of various suicide risk order sets")
self.assertEqual(inst.useContext[0].code.code, "age")
self.assertEqual(inst.useContext[0].code.system, "http://terminology.hl7.org/CodeSystem/usage-context-type")
self.assertEqual(inst.useContext[0].valueCodeableConcept.coding[0].code, "D000328")
self.assertEqual(inst.useContext[0].valueCodeableConcept.coding[0].display, "Adult")
self.assertEqual(inst.useContext[0].valueCodeableConcept.coding[0].system, "https://meshb.nlm.nih.gov")
self.assertEqual(inst.useContext[1].code.code, "focus")
self.assertEqual(inst.useContext[1].code.system, "http://terminology.hl7.org/CodeSystem/usage-context-type")
self.assertEqual(inst.useContext[1].valueCodeableConcept.coding[0].code, "87512008")
self.assertEqual(inst.useContext[1].valueCodeableConcept.coding[0].display, "Mild major depression")
self.assertEqual(inst.useContext[1].valueCodeableConcept.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.useContext[2].code.code, "focus")
self.assertEqual(inst.useContext[2].code.system, "http://terminology.hl7.org/CodeSystem/usage-context-type")
self.assertEqual(inst.useContext[2].valueCodeableConcept.coding[0].code, "40379007")
self.assertEqual(inst.useContext[2].valueCodeableConcept.coding[0].display, "Major depression, recurrent, mild")
self.assertEqual(inst.useContext[2].valueCodeableConcept.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.useContext[3].code.code, "focus")
self.assertEqual(inst.useContext[3].code.system, "http://terminology.hl7.org/CodeSystem/usage-context-type")
self.assertEqual(inst.useContext[3].valueCodeableConcept.coding[0].code, "225444004")
self.assertEqual(inst.useContext[3].valueCodeableConcept.coding[0].display, "At risk for suicide (finding)")
self.assertEqual(inst.useContext[3].valueCodeableConcept.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.useContext[4].code.code, "focus")
self.assertEqual(inst.useContext[4].code.system, "http://terminology.hl7.org/CodeSystem/usage-context-type")
self.assertEqual(inst.useContext[4].valueCodeableConcept.coding[0].code, "306206005")
self.assertEqual(inst.useContext[4].valueCodeableConcept.coding[0].display, "Referral to service (procedure)")
self.assertEqual(inst.useContext[4].valueCodeableConcept.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.useContext[5].code.code, "user")
self.assertEqual(inst.useContext[5].code.system, "http://terminology.hl7.org/CodeSystem/usage-context-type")
self.assertEqual(inst.useContext[5].valueCodeableConcept.coding[0].code, "309343006")
self.assertEqual(inst.useContext[5].valueCodeableConcept.coding[0].display, "Physician")
self.assertEqual(inst.useContext[5].valueCodeableConcept.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.useContext[6].code.code, "venue")
self.assertEqual(inst.useContext[6].code.system, "http://terminology.hl7.org/CodeSystem/usage-context-type")
self.assertEqual(inst.useContext[6].valueCodeableConcept.coding[0].code, "440655000")
self.assertEqual(inst.useContext[6].valueCodeableConcept.coding[0].display, "Outpatient environment")
self.assertEqual(inst.useContext[6].valueCodeableConcept.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.version, "1.0.0")
def testActivityDefinition3(self):
inst = self.instantiate_from("activitydefinition-example.json")
self.assertIsNotNone(inst, "Must have instantiated a ActivityDefinition instance")
self.implActivityDefinition3(inst)
js = inst.as_json()
self.assertEqual("ActivityDefinition", js["resourceType"])
inst2 = activitydefinition.ActivityDefinition(js)
self.implActivityDefinition3(inst2)
def implActivityDefinition3(self, inst):
self.assertEqual(inst.approvalDate.date, FHIRDate("2017-03-01").date)
self.assertEqual(inst.approvalDate.as_json(), "2017-03-01")
self.assertEqual(inst.author[0].name, "Motive Medical Intelligence")
self.assertEqual(inst.author[0].telecom[0].system, "phone")
self.assertEqual(inst.author[0].telecom[0].use, "work")
self.assertEqual(inst.author[0].telecom[0].value, "415-362-4007")
self.assertEqual(inst.author[0].telecom[1].system, "email")
self.assertEqual(inst.author[0].telecom[1].use, "work")
self.assertEqual(inst.author[0].telecom[1].value, "[email protected]")
self.assertEqual(inst.code.coding[0].code, "306206005")
self.assertEqual(inst.code.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.code.text, "Referral to service (procedure)")
self.assertEqual(inst.contact[0].telecom[0].system, "phone")
self.assertEqual(inst.contact[0].telecom[0].use, "work")
self.assertEqual(inst.contact[0].telecom[0].value, "415-362-4007")
self.assertEqual(inst.contact[0].telecom[1].system, "email")
self.assertEqual(inst.contact[0].telecom[1].use, "work")
self.assertEqual(inst.contact[0].telecom[1].value, "[email protected]")
self.assertEqual(inst.copyright, "© Copyright 2016 Motive Medical Intelligence. All rights reserved.")
self.assertEqual(inst.date.date, FHIRDate("2017-03-03T14:06:00Z").date)
self.assertEqual(inst.date.as_json(), "2017-03-03T14:06:00Z")
self.assertEqual(inst.description, "refer to primary care mental-health integrated care program for evaluation and treatment of mental health conditions now")
self.assertEqual(inst.effectivePeriod.end.date, FHIRDate("2017-12-31").date)
self.assertEqual(inst.effectivePeriod.end.as_json(), "2017-12-31")
self.assertEqual(inst.effectivePeriod.start.date, FHIRDate("2017-03-01").date)
self.assertEqual(inst.effectivePeriod.start.as_json(), "2017-03-01")
self.assertTrue(inst.experimental)
self.assertEqual(inst.id, "referralPrimaryCareMentalHealth")
self.assertEqual(inst.identifier[0].system, "http://motivemi.com/artifacts")
self.assertEqual(inst.identifier[0].use, "official")
self.assertEqual(inst.identifier[0].value, "referralPrimaryCareMentalHealth")
self.assertEqual(inst.jurisdiction[0].coding[0].code, "US")
self.assertEqual(inst.jurisdiction[0].coding[0].system, "urn:iso:std:iso:3166")
self.assertEqual(inst.kind, "ServiceRequest")
self.assertEqual(inst.lastReviewDate.date, FHIRDate("2017-03-01").date)
self.assertEqual(inst.lastReviewDate.as_json(), "2017-03-01")
self.assertEqual(inst.name, "ReferralPrimaryCareMentalHealth")
self.assertEqual(inst.participant[0].type, "practitioner")
self.assertEqual(inst.publisher, "Motive Medical Intelligence")
self.assertEqual(inst.relatedArtifact[0].display, "Practice Guideline for the Treatment of Patients with Major Depressive Disorder")
self.assertEqual(inst.relatedArtifact[0].type, "citation")
self.assertEqual(inst.relatedArtifact[0].url, "http://psychiatryonline.org/pb/assets/raw/sitewide/practice_guidelines/guidelines/mdd.pdf")
self.assertEqual(inst.relatedArtifact[1].resource, "ActivityDefinition/referralPrimaryCareMentalHealth-initial")
self.assertEqual(inst.relatedArtifact[1].type, "predecessor")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.title, "Referral to Primary Care Mental Health")
self.assertEqual(inst.topic[0].text, "Mental Health Referral")
self.assertEqual(inst.url, "http://motivemi.com/artifacts/ActivityDefinition/referralPrimaryCareMentalHealth")
self.assertEqual(inst.useContext[0].code.code, "age")
self.assertEqual(inst.useContext[0].code.system, "http://terminology.hl7.org/CodeSystem/usage-context-type")
self.assertEqual(inst.useContext[0].valueCodeableConcept.coding[0].code, "D000328")
self.assertEqual(inst.useContext[0].valueCodeableConcept.coding[0].display, "Adult")
self.assertEqual(inst.useContext[0].valueCodeableConcept.coding[0].system, "https://meshb.nlm.nih.gov")
self.assertEqual(inst.useContext[1].code.code, "focus")
self.assertEqual(inst.useContext[1].code.system, "http://terminology.hl7.org/CodeSystem/usage-context-type")
self.assertEqual(inst.useContext[1].valueCodeableConcept.coding[0].code, "87512008")
self.assertEqual(inst.useContext[1].valueCodeableConcept.coding[0].display, "Mild major depression")
self.assertEqual(inst.useContext[1].valueCodeableConcept.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.useContext[2].code.code, "focus")
self.assertEqual(inst.useContext[2].code.system, "http://terminology.hl7.org/CodeSystem/usage-context-type")
self.assertEqual(inst.useContext[2].valueCodeableConcept.coding[0].code, "40379007")
self.assertEqual(inst.useContext[2].valueCodeableConcept.coding[0].display, "Major depression, recurrent, mild")
self.assertEqual(inst.useContext[2].valueCodeableConcept.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.useContext[3].code.code, "focus")
self.assertEqual(inst.useContext[3].code.system, "http://terminology.hl7.org/CodeSystem/usage-context-type")
self.assertEqual(inst.useContext[3].valueCodeableConcept.coding[0].code, "225444004")
self.assertEqual(inst.useContext[3].valueCodeableConcept.coding[0].display, "At risk for suicide (finding)")
self.assertEqual(inst.useContext[3].valueCodeableConcept.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.useContext[4].code.code, "focus")
self.assertEqual(inst.useContext[4].code.system, "http://terminology.hl7.org/CodeSystem/usage-context-type")
self.assertEqual(inst.useContext[4].valueCodeableConcept.coding[0].code, "306206005")
self.assertEqual(inst.useContext[4].valueCodeableConcept.coding[0].display, "Referral to service (procedure)")
self.assertEqual(inst.useContext[4].valueCodeableConcept.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.useContext[5].code.code, "user")
self.assertEqual(inst.useContext[5].code.system, "http://terminology.hl7.org/CodeSystem/usage-context-type")
self.assertEqual(inst.useContext[5].valueCodeableConcept.coding[0].code, "309343006")
self.assertEqual(inst.useContext[5].valueCodeableConcept.coding[0].display, "Physician")
self.assertEqual(inst.useContext[5].valueCodeableConcept.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.useContext[6].code.code, "venue")
self.assertEqual(inst.useContext[6].code.system, "http://terminology.hl7.org/CodeSystem/usage-context-type")
self.assertEqual(inst.useContext[6].valueCodeableConcept.coding[0].code, "440655000")
self.assertEqual(inst.useContext[6].valueCodeableConcept.coding[0].display, "Outpatient environment")
self.assertEqual(inst.useContext[6].valueCodeableConcept.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.version, "1.1.0")
def testActivityDefinition4(self):
inst = self.instantiate_from("activitydefinition-servicerequest-example.json")
self.assertIsNotNone(inst, "Must have instantiated a ActivityDefinition instance")
self.implActivityDefinition4(inst)
js = inst.as_json()
self.assertEqual("ActivityDefinition", js["resourceType"])
inst2 = activitydefinition.ActivityDefinition(js)
self.implActivityDefinition4(inst2)
def implActivityDefinition4(self, inst):
self.assertEqual(inst.bodySite[0].coding[0].code, "17401000")
self.assertEqual(inst.bodySite[0].coding[0].display, "Heart valve structure")
self.assertEqual(inst.bodySite[0].coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.code.coding[0].code, "34068001")
self.assertEqual(inst.code.coding[0].display, "Heart valve replacement")
self.assertEqual(inst.code.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.description, "Heart valve replacement")
self.assertEqual(inst.id, "heart-valve-replacement")
self.assertEqual(inst.kind, "ServiceRequest")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.participant[0].role.coding[0].code, "207RI0011X")
self.assertEqual(inst.participant[0].role.coding[0].display, "Interventional Cardiology")
self.assertEqual(inst.participant[0].role.coding[0].system, "http://nucc.org/provider-taxonomy")
self.assertEqual(inst.participant[0].role.text, "Interventional Cardiology")
self.assertEqual(inst.participant[0].type, "practitioner")
self.assertEqual(inst.purpose, "Describes the proposal to perform a Heart Valve replacement.")
self.assertEqual(inst.status, "draft")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.topic[0].coding[0].code, "34068001")
self.assertEqual(inst.topic[0].coding[0].display, "Heart valve replacement")
self.assertEqual(inst.topic[0].coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.useContext[0].code.code, "age")
self.assertEqual(inst.useContext[0].code.system, "http://terminology.hl7.org/CodeSystem/usage-context-type")
self.assertEqual(inst.useContext[0].valueCodeableConcept.coding[0].code, "D000328")
self.assertEqual(inst.useContext[0].valueCodeableConcept.coding[0].display, "Adult")
self.assertEqual(inst.useContext[0].valueCodeableConcept.coding[0].system, "https://meshb.nlm.nih.gov")
self.assertEqual(inst.useContext[1].code.code, "user")
self.assertEqual(inst.useContext[1].code.system, "http://terminology.hl7.org/CodeSystem/usage-context-type")
self.assertEqual(inst.useContext[1].valueCodeableConcept.coding[0].code, "309343006")
self.assertEqual(inst.useContext[1].valueCodeableConcept.coding[0].display, "Physician")
self.assertEqual(inst.useContext[1].valueCodeableConcept.coding[0].system, "http://snomed.info/sct")
def testActivityDefinition5(self):
inst = self.instantiate_from("activitydefinition-supplyrequest-example.json")
self.assertIsNotNone(inst, "Must have instantiated a ActivityDefinition instance")
self.implActivityDefinition5(inst)
js = inst.as_json()
self.assertEqual("ActivityDefinition", js["resourceType"])
inst2 = activitydefinition.ActivityDefinition(js)
self.implActivityDefinition5(inst2)
def implActivityDefinition5(self, inst):
self.assertEqual(inst.code.coding[0].code, "BlueTubes")
self.assertEqual(inst.code.coding[0].display, "Blood collect tubes blue cap")
self.assertEqual(inst.description, "10 Blood collect tubes blue cap")
self.assertEqual(inst.id, "blood-tubes-supply")
self.assertEqual(inst.kind, "SupplyRequest")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.purpose, "Describes a request for 10 Blood collection tubes with blue caps.")
self.assertEqual(inst.quantity.value, 10)
self.assertEqual(inst.status, "draft")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.transform, "StructureMap/supplyrequest-transform")
| bsd-3-clause | 5,508,062,426,552,343,000 | 74.841584 | 166 | 0.715992 | false |
icyflame/batman | tests/flow_tests.py | 1 | 7876 | # -*- coding: utf-8 -*-
"""Tests for the flow module."""
#
# (C) Pywikibot team, 2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
from pywikibot.exceptions import NoPage
from pywikibot.flow import Board, Topic, Post
from pywikibot.tools import PY2
from tests.aspects import (
TestCase,
)
from tests.basepage_tests import (
BasePageMethodsTestBase,
BasePageLoadRevisionsCachingTestBase,
)
if not PY2:
unicode = str
class TestBoardBasePageMethods(BasePageMethodsTestBase):
"""Test Flow board pages using BasePage-defined methods."""
family = 'mediawiki'
code = 'mediawiki'
def setUp(self):
"""Set up unit test."""
self._page = Board(self.site, 'Talk:Sandbox')
super(TestBoardBasePageMethods, self).setUp()
def test_basepage_methods(self):
"""Test basic Page methods on a Flow board page."""
self._test_invoke()
self._test_return_datatypes()
self.assertFalse(self._page.isRedirectPage())
self.assertEqual(self._page.latest_revision.parent_id, 0)
def test_content_model(self):
"""Test Flow page content model."""
self.assertEqual(self._page.content_model, 'flow-board')
class TestTopicBasePageMethods(BasePageMethodsTestBase):
"""Test Flow topic pages using BasePage-defined methods."""
family = 'mediawiki'
code = 'mediawiki'
def setUp(self):
"""Set up unit test."""
self._page = Topic(self.site, 'Topic:Sh6wgo5tu3qui1w2')
super(TestTopicBasePageMethods, self).setUp()
def test_basepage_methods(self):
"""Test basic Page methods on a Flow topic page."""
self._test_invoke()
self._test_return_datatypes()
self.assertFalse(self._page.isRedirectPage())
self.assertEqual(self._page.latest_revision.parent_id, 0)
def test_content_model(self):
"""Test Flow topic page content model."""
self.assertEqual(self._page.content_model, 'flow-board')
class TestLoadRevisionsCaching(BasePageLoadRevisionsCachingTestBase):
"""Test site.loadrevisions() caching."""
family = 'mediawiki'
code = 'mediawiki'
def setUp(self):
"""Set up unit test."""
self._page = Board(self.site, 'Talk:Sandbox')
super(TestLoadRevisionsCaching, self).setUp()
def test_page_text(self):
"""Test site.loadrevisions() with Page.text."""
self.skipTest('See T107537')
self._test_page_text()
class TestFlowLoading(TestCase):
"""Test loading of Flow objects from the API."""
family = 'mediawiki'
code = 'mediawiki'
cached = True
def test_board_uuid(self):
"""Test retrieval of Flow board UUID."""
board = Board(self.site, 'Talk:Sandbox')
self.assertEqual(board.uuid, 'rl7iby6wgksbpfno')
def test_topic_uuid(self):
"""Test retrieval of Flow topic UUID."""
topic = Topic(self.site, 'Topic:Sh6wgo5tu3qui1w2')
self.assertEqual(topic.uuid, 'sh6wgo5tu3qui1w2')
def test_post_uuid(self):
"""Test retrieval of Flow post UUID.
This doesn't really "load" anything from the API. It just tests
the property to make sure the UUID passed to the constructor is
stored properly.
"""
topic = Topic(self.site, 'Topic:Sh6wgo5tu3qui1w2')
post = Post(topic, 'sh6wgoagna97q0ia')
self.assertEqual(post.uuid, 'sh6wgoagna97q0ia')
def test_post_contents(self):
"""Test retrieval of Flow post contents."""
# Load
topic = Topic(self.site, 'Topic:Sh6wgo5tu3qui1w2')
post = Post(topic, 'sh6wgoagna97q0ia')
# Wikitext
wikitext = post.get(format='wikitext')
self.assertIn('wikitext', post._content)
self.assertNotIn('html', post._content)
self.assertIsInstance(wikitext, unicode)
self.assertNotEqual(wikitext, '')
# HTML
html = post.get(format='html')
self.assertIn('html', post._content)
self.assertIn('wikitext', post._content)
self.assertIsInstance(html, unicode)
self.assertNotEqual(html, '')
# Caching (hit)
post._content['html'] = 'something'
html = post.get(format='html')
self.assertIsInstance(html, unicode)
self.assertEqual(html, 'something')
self.assertIn('html', post._content)
# Caching (reload)
post._content['html'] = 'something'
html = post.get(format='html', force=True)
self.assertIsInstance(html, unicode)
self.assertNotEqual(html, 'something')
self.assertIn('html', post._content)
def test_topiclist(self):
"""Test loading of topiclist."""
board = Board(self.site, 'Talk:Sandbox')
i = 0
for topic in board.topics(limit=7):
i += 1
if i == 10:
break
self.assertEqual(i, 10)
class TestFlowFactoryErrors(TestCase):
"""Test errors associated with class methods generating Flow objects."""
family = 'test'
code = 'test'
cached = True
def test_illegal_arguments(self):
"""Test illegal method arguments."""
board = Board(self.site, 'Talk:Pywikibot test')
real_topic = Topic(self.site, 'Topic:Slbktgav46omarsd')
fake_topic = Topic(self.site, 'Topic:Abcdefgh12345678')
# Topic.from_topiclist_data
self.assertRaises(TypeError, Topic.from_topiclist_data, self.site, '', {})
self.assertRaises(TypeError, Topic.from_topiclist_data, board, 521, {})
self.assertRaises(TypeError, Topic.from_topiclist_data, board,
'slbktgav46omarsd', [0, 1, 2])
self.assertRaises(NoPage, Topic.from_topiclist_data, board,
'abc', {'stuff': 'blah'})
# Post.fromJSON
self.assertRaises(TypeError, Post.fromJSON, board, 'abc', {})
self.assertRaises(TypeError, Post.fromJSON, real_topic, 1234, {})
self.assertRaises(TypeError, Post.fromJSON, real_topic, 'abc', [])
self.assertRaises(NoPage, Post.fromJSON, fake_topic, 'abc',
{'posts': [], 'revisions': []})
def test_invalid_data(self):
"""Test invalid "API" data."""
board = Board(self.site, 'Talk:Pywikibot test')
real_topic = Topic(self.site, 'Topic:Slbktgav46omarsd')
# Topic.from_topiclist_data
self.assertRaises(ValueError, Topic.from_topiclist_data,
board, 'slbktgav46omarsd', {'stuff': 'blah'})
self.assertRaises(ValueError, Topic.from_topiclist_data,
board, 'slbktgav46omarsd',
{'posts': [], 'revisions': []})
self.assertRaises(ValueError, Topic.from_topiclist_data, board,
'slbktgav46omarsd',
{'posts': {'slbktgav46omarsd': ['123']},
'revisions': {'456': []}})
self.assertRaises(AssertionError, Topic.from_topiclist_data, board,
'slbktgav46omarsd',
{'posts': {'slbktgav46omarsd': ['123']},
'revisions': {'123': {'content': 789}}})
# Post.fromJSON
self.assertRaises(ValueError, Post.fromJSON, real_topic, 'abc', {})
self.assertRaises(ValueError, Post.fromJSON, real_topic, 'abc',
{'stuff': 'blah'})
self.assertRaises(ValueError, Post.fromJSON, real_topic, 'abc',
{'posts': {'abc': ['123']},
'revisions': {'456': []}})
self.assertRaises(AssertionError, Post.fromJSON, real_topic, 'abc',
{'posts': {'abc': ['123']},
'revisions': {'123': {'content': 789}}})
| mit | 6,127,447,604,697,114,000 | 34.318386 | 82 | 0.599543 | false |
jimmykiselak/lbrycrd | qa/rpc-tests/maxblocksinflight.py | 1 | 3782 | #!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import logging
'''
In this test we connect to one node over p2p, send it numerous inv's, and
compare the resulting number of getdata requests to a max allowed value. We
test for exceeding 128 blocks in flight, which was the limit a 0.9 client would
reach. [0.10 clients shouldn't request more than 16 from a single peer.]
'''
MAX_REQUESTS = 128
class TestManager(NodeConnCB):
# set up NodeConnCB callbacks, overriding base class
def on_getdata(self, conn, message):
self.log.debug("got getdata %s" % repr(message))
# Log the requests
for inv in message.inv:
if inv.hash not in self.blockReqCounts:
self.blockReqCounts[inv.hash] = 0
self.blockReqCounts[inv.hash] += 1
def on_close(self, conn):
if not self.disconnectOkay:
raise EarlyDisconnectError(0)
def __init__(self):
NodeConnCB.__init__(self)
self.log = logging.getLogger("BlockRelayTest")
def add_new_connection(self, connection):
self.connection = connection
self.blockReqCounts = {}
self.disconnectOkay = False
def run(self):
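        # Each round broadcasts a batch of invs for random block hashes and
        # then verifies that no block was requested more than once and that
        # the total number of getdata requests stays within MAX_REQUESTS.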
self.connection.rpc.generate(1) # Leave IBD
numBlocksToGenerate = [8, 16, 128, 1024]
for count in range(len(numBlocksToGenerate)):
current_invs = []
for i in range(numBlocksToGenerate[count]):
current_invs.append(CInv(2, random.randrange(0, 1 << 256)))
if len(current_invs) >= 50000:
self.connection.send_message(msg_inv(current_invs))
current_invs = []
if len(current_invs) > 0:
self.connection.send_message(msg_inv(current_invs))
# Wait and see how many blocks were requested
time.sleep(2)
total_requests = 0
with mininode_lock:
for key in self.blockReqCounts:
total_requests += self.blockReqCounts[key]
if self.blockReqCounts[key] > 1:
raise AssertionError("Error, test failed: block %064x requested more than once" % key)
if total_requests > MAX_REQUESTS:
raise AssertionError("Error, too many blocks (%d) requested" % total_requests)
print "Round %d: success (total requests: %d)" % (count, total_requests)
self.disconnectOkay = True
self.connection.disconnect_node()
class MaxBlocksInFlightTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "bitcoind"),
help="Binary to test max block requests behavior")
def setup_chain(self):
print "Initializing test directory "+self.options.tmpdir
initialize_chain_clean(self.options.tmpdir, 1)
def setup_network(self):
self.nodes = start_nodes(1, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']],
binary=[self.options.testbinary])
def run_test(self):
test = TestManager()
test.add_new_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test))
NetworkThread().start() # Start up network handling in another thread
test.run()
if __name__ == '__main__':
MaxBlocksInFlightTest().main()
| mit | 2,546,787,251,703,153,000 | 38.395833 | 110 | 0.618456 | false |
zestrada/nova-cs498cc | nova/tests/network/test_manager.py | 1 | 111961 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Rackspace
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mox
from oslo.config import cfg
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova import ipv6
from nova.network import floating_ips
from nova.network import linux_net
from nova.network import manager as network_manager
from nova.network import model as net_model
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova.openstack.common.rpc import common as rpc_common
from nova import quota
from nova import test
from nova.tests import fake_ldap
from nova.tests import fake_network
from nova.tests import matchers
from nova import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
HOST = "testhost"
FAKEUUID = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
networks = [{'id': 0,
'uuid': FAKEUUID,
'label': 'test0',
'injected': False,
'multi_host': False,
'cidr': '192.168.0.0/24',
'cidr_v6': '2001:db8::/64',
'gateway_v6': '2001:db8::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa0',
'bridge_interface': 'fake_fa0',
'gateway': '192.168.0.1',
'broadcast': '192.168.0.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'vlan': None,
'host': HOST,
'project_id': 'fake_project',
'vpn_public_address': '192.168.0.2',
'vpn_public_port': '22',
'vpn_private_address': '10.0.0.2'},
{'id': 1,
'uuid': 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'label': 'test1',
'injected': False,
'multi_host': False,
'cidr': '192.168.1.0/24',
'cidr_v6': '2001:db9::/64',
'gateway_v6': '2001:db9::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa1',
'bridge_interface': 'fake_fa1',
'gateway': '192.168.1.1',
'broadcast': '192.168.1.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'vlan': None,
'host': HOST,
'project_id': 'fake_project',
'vpn_public_address': '192.168.1.2',
'vpn_public_port': '22',
'vpn_private_address': '10.0.0.2'}]
fixed_ips = [{'id': 0,
'network_id': FAKEUUID,
'address': '192.168.0.100',
'instance_uuid': 0,
'allocated': False,
'virtual_interface_id': 0,
'floating_ips': []},
{'id': 0,
'network_id': 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'address': '192.168.1.100',
'instance_uuid': 0,
'allocated': False,
'virtual_interface_id': 0,
'floating_ips': []}]
flavor = {'id': 0,
'rxtx_cap': 3}
floating_ip_fields = {'id': 0,
'address': '192.168.10.100',
'pool': 'nova',
'interface': 'eth0',
'fixed_ip_id': 0,
'project_id': None,
'auto_assigned': False}
vifs = [{'id': 0,
'address': 'DE:AD:BE:EF:00:00',
'uuid': '00000000-0000-0000-0000-0000000000000000',
'network_id': 0,
'instance_uuid': 0},
{'id': 1,
'address': 'DE:AD:BE:EF:00:01',
'uuid': '00000000-0000-0000-0000-0000000000000001',
'network_id': 1,
'instance_uuid': 0},
{'id': 2,
'address': 'DE:AD:BE:EF:00:02',
'uuid': '00000000-0000-0000-0000-0000000000000002',
'network_id': 2,
'instance_uuid': 0}]
class FlatNetworkTestCase(test.TestCase):
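    """Tests for the flat network manager: instance network info, network
    validation and fixed IP allocation."""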
def setUp(self):
super(FlatNetworkTestCase, self).setUp()
self.tempdir = self.useFixture(fixtures.TempDir()).path
self.flags(log_dir=self.tempdir)
self.network = network_manager.FlatManager(host=HOST)
self.network.instance_dns_domain = ''
self.network.db = db
self.context = context.RequestContext('testuser', 'testproject',
is_admin=False)
def test_get_instance_nw_info(self):
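        """Check the network and info dicts built for a faked instance."""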
fake_get_instance_nw_info = fake_network.fake_get_instance_nw_info
nw_info = fake_get_instance_nw_info(self.stubs, 0, 2)
self.assertFalse(nw_info)
nw_info = fake_get_instance_nw_info(self.stubs, 1, 2)
for i, (nw, info) in enumerate(nw_info):
nid = i + 1
check = {'bridge': 'fake_br%d' % nid,
'cidr': '192.168.%s.0/24' % nid,
'cidr_v6': '2001:db8:0:%x::/64' % nid,
'id': '00000000-0000-0000-0000-00000000000000%02d' % nid,
'multi_host': False,
'injected': False,
'bridge_interface': None,
'vlan': None}
self.assertThat(nw, matchers.DictMatches(check))
check = {'broadcast': '192.168.%d.255' % nid,
'dhcp_server': '192.168.1.1',
'dns': ['192.168.%d.3' % nid, '192.168.%d.4' % nid],
'gateway': '192.168.%d.1' % nid,
'gateway_v6': 'fe80::def',
'ip6s': 'DONTCARE',
'ips': 'DONTCARE',
'label': 'test%d' % nid,
'mac': 'DE:AD:BE:EF:00:%02x' % nid,
'rxtx_cap': 30,
'vif_type': net_model.VIF_TYPE_BRIDGE,
'vif_devname': None,
'vif_uuid':
'00000000-0000-0000-0000-00000000000000%02d' % nid,
'ovs_interfaceid': None,
'qbh_params': None,
'qbg_params': None,
'should_create_vlan': False,
'should_create_bridge': False}
self.assertThat(info, matchers.DictMatches(check))
check = [{'enabled': 'DONTCARE',
'ip': '2001:db8:0:1::%x' % nid,
'netmask': 64,
'gateway': 'fe80::def'}]
self.assertThat(info['ip6s'], matchers.DictListMatches(check))
num_fixed_ips = len(info['ips'])
check = [{'enabled': 'DONTCARE',
'ip': '192.168.%d.%03d' % (nid, ip_num + 99),
'netmask': '255.255.255.0',
'gateway': '192.168.%d.1' % nid}
for ip_num in xrange(1, num_fixed_ips + 1)]
self.assertThat(info['ips'], matchers.DictListMatches(check))
def test_validate_networks(self):
self.mox.StubOutWithMock(db, 'network_get')
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'192.168.1.100')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
project_only=mox.IgnoreArg()).AndReturn(networks)
db.network_get(mox.IgnoreArg(),
mox.IgnoreArg(),
project_only=mox.IgnoreArg()).AndReturn(networks[1])
ip = fixed_ips[1].copy()
ip['instance_uuid'] = None
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(ip)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_reserved(self):
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
nets = self.network.create_networks(context_admin, 'fake',
'192.168.0.0/24', False, 1,
256, None, None, None, None, None)
self.assertEqual(1, len(nets))
network = nets[0]
self.assertEqual(3, db.network_count_reserved_ips(context_admin,
network['id']))
def test_validate_networks_none_requested_networks(self):
self.network.validate_networks(self.context, None)
def test_validate_networks_empty_requested_networks(self):
requested_networks = []
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_invalid_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [(1, "192.168.0.100.1")]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
project_only=mox.IgnoreArg()).AndReturn(networks)
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks, self.context,
requested_networks)
def test_validate_networks_empty_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [(1, "")]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
project_only=mox.IgnoreArg()).AndReturn(networks)
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks,
self.context, requested_networks)
def test_validate_networks_none_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [(1, None)]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
project_only=mox.IgnoreArg()).AndReturn(networks)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_add_fixed_ip_instance_using_id_without_vpn(self):
self.mox.StubOutWithMock(db, 'network_get')
self.mox.StubOutWithMock(db, 'network_update')
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(quota.QUOTAS, 'reserve')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
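        # Record the expected mox choreography for add_fixed_ip_to_instance:
        # allocate an address from the pool, update the fixed ip and vif
        # records, reserve quota, and rebuild the instance's network info.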
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn('192.168.0.101')
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups':
[{'id': 0}]})
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups':
[{'id': 0, 'name': 'test'}]})
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
quota.QUOTAS.reserve(mox.IgnoreArg(),
fixed_ips=mox.IgnoreArg()).AndReturn(None)
db.instance_get_by_uuid(self.context,
mox.IgnoreArg()).AndReturn({'display_name': HOST})
db.network_get(mox.IgnoreArg(),
mox.IgnoreArg(),
project_only=mox.IgnoreArg()).AndReturn(networks[0])
db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
networks[0]['id'])
def test_add_fixed_ip_instance_using_uuid_without_vpn(self):
self.mox.StubOutWithMock(db, 'network_get_by_uuid')
self.mox.StubOutWithMock(db, 'network_update')
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(quota.QUOTAS, 'reserve')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
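        # Same choreography as the id-based test above, except the network
        # is looked up by its uuid (network_get_by_uuid).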
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn('192.168.0.101')
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups':
[{'id': 0}]})
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups':
[{'id': 0, 'name': 'test'}]})
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
quota.QUOTAS.reserve(mox.IgnoreArg(),
fixed_ips=mox.IgnoreArg()).AndReturn(None)
db.instance_get_by_uuid(self.context,
mox.IgnoreArg()).AndReturn({'display_name': HOST})
db.network_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks[0])
db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
networks[0]['uuid'])
def test_mini_dns_driver(self):
zone1 = "example.org"
zone2 = "example.com"
driver = self.network.instance_dns_manager
driver.create_entry("hostone", "10.0.0.1", "A", zone1)
driver.create_entry("hosttwo", "10.0.0.2", "A", zone1)
driver.create_entry("hostthree", "10.0.0.3", "A", zone1)
driver.create_entry("hostfour", "10.0.0.4", "A", zone1)
driver.create_entry("hostfive", "10.0.0.5", "A", zone2)
driver.delete_entry("hostone", zone1)
driver.modify_address("hostfour", "10.0.0.1", zone1)
driver.modify_address("hostthree", "10.0.0.1", zone1)
names = driver.get_entries_by_address("10.0.0.1", zone1)
self.assertEqual(len(names), 2)
self.assertIn('hostthree', names)
self.assertIn('hostfour', names)
names = driver.get_entries_by_address("10.0.0.5", zone2)
self.assertEqual(len(names), 1)
self.assertIn('hostfive', names)
addresses = driver.get_entries_by_name("hosttwo", zone1)
self.assertEqual(len(addresses), 1)
self.assertIn('10.0.0.2', addresses)
self.assertRaises(exception.InvalidInput,
driver.create_entry,
"hostname",
"10.10.10.10",
"invalidtype",
zone1)
def test_mini_dns_driver_with_mixed_case(self):
zone1 = "example.org"
driver = self.network.instance_dns_manager
driver.create_entry("HostTen", "10.0.0.10", "A", zone1)
addresses = driver.get_entries_by_address("10.0.0.10", zone1)
self.assertEqual(len(addresses), 1)
for n in addresses:
driver.delete_entry(n, zone1)
addresses = driver.get_entries_by_address("10.0.0.10", zone1)
self.assertEqual(len(addresses), 0)
def test_instance_dns(self):
fixedip = '192.168.0.101'
self.mox.StubOutWithMock(db, 'network_get_by_uuid')
self.mox.StubOutWithMock(db, 'network_update')
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
self.mox.StubOutWithMock(quota.QUOTAS, 'reserve')
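        # Reuse the fixed-ip allocation choreography; afterwards the test
        # checks that DNS entries were created for both the instance display
        # name and its uuid.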
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(fixedip)
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups':
[{'id': 0}]})
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups':
[{'id': 0, 'name': 'test'}]})
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
quota.QUOTAS.reserve(mox.IgnoreArg(),
fixed_ips=mox.IgnoreArg()).AndReturn(None)
db.instance_get_by_uuid(self.context,
mox.IgnoreArg()).AndReturn({'display_name': HOST})
db.network_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks[0])
db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
networks[0]['uuid'])
instance_manager = self.network.instance_dns_manager
addresses = instance_manager.get_entries_by_name(HOST,
self.network.instance_dns_domain)
self.assertEqual(len(addresses), 1)
self.assertEqual(addresses[0], fixedip)
addresses = instance_manager.get_entries_by_name(FAKEUUID,
self.network.instance_dns_domain)
self.assertEqual(len(addresses), 1)
self.assertEqual(addresses[0], fixedip)
def test_allocate_floating_ip(self):
self.assertEqual(self.network.allocate_floating_ip(self.context,
1, None), None)
def test_deallocate_floating_ip(self):
self.assertEqual(self.network.deallocate_floating_ip(self.context,
1, None), None)
def test_associate_floating_ip(self):
self.assertEqual(self.network.associate_floating_ip(self.context,
None, None), None)
def test_disassociate_floating_ip(self):
self.assertEqual(self.network.disassociate_floating_ip(self.context,
None, None),
None)
class VlanNetworkTestCase(test.TestCase):
def setUp(self):
super(VlanNetworkTestCase, self).setUp()
self.network = network_manager.VlanManager(host=HOST)
self.network.db = db
self.context = context.RequestContext('testuser', 'testproject',
is_admin=False)
def test_vpn_allocate_fixed_ip(self):
self.mox.StubOutWithMock(db, 'fixed_ip_associate')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
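        # With vpn=True the manager should associate the network's reserved
        # VPN address, so fixed_ip_associate is expected with reserved=True.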
db.fixed_ip_associate(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
reserved=True).AndReturn('192.168.0.1')
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'display_name': HOST})
self.mox.ReplayAll()
network = dict(networks[0])
network['vpn_private_address'] = '192.168.0.2'
self.network.allocate_fixed_ip(self.context, FAKEUUID, network,
vpn=True)
def test_vpn_allocate_fixed_ip_no_network_id(self):
network = dict(networks[0])
network['vpn_private_address'] = '192.168.0.2'
network['id'] = None
instance = db.instance_create(self.context, {})
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.FixedIpNotFoundForNetwork,
self.network.allocate_fixed_ip,
context_admin,
instance['uuid'],
network,
vpn=True)
def test_allocate_fixed_ip(self):
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups':
[{'id': 0}]})
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn('192.168.0.1')
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'display_name': HOST})
self.mox.ReplayAll()
network = dict(networks[0])
network['vpn_private_address'] = '192.168.0.2'
self.network.allocate_fixed_ip(self.context, FAKEUUID, network)
def test_create_networks_too_big(self):
self.assertRaises(ValueError, self.network.create_networks, None,
num_networks=4094, vlan_start=1)
def test_create_networks_too_many(self):
self.assertRaises(ValueError, self.network.create_networks, None,
num_networks=100, vlan_start=1,
cidr='192.168.0.1/24', network_size=100)
def test_validate_networks(self):
def network_get(_context, network_id, project_only='allow_none'):
return networks[network_id]
self.stubs.Set(db, 'network_get', network_get)
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
self.mox.StubOutWithMock(db, "fixed_ip_get_by_address")
requested_networks = [("bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
"192.168.1.100")]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
project_only=mox.IgnoreArg()).AndReturn(networks)
fixed_ips[1]['network_id'] = networks[1]['id']
fixed_ips[1]['instance_uuid'] = None
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(fixed_ips[1])
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_none_requested_networks(self):
self.network.validate_networks(self.context, None)
def test_validate_networks_empty_requested_networks(self):
requested_networks = []
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_invalid_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [(1, "192.168.0.100.1")]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
project_only=mox.IgnoreArg()).AndReturn(networks)
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks, self.context,
requested_networks)
def test_validate_networks_empty_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [(1, "")]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
project_only=mox.IgnoreArg()).AndReturn(networks)
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks,
self.context, requested_networks)
def test_validate_networks_none_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [(1, None)]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
project_only=mox.IgnoreArg()).AndReturn(networks)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_floating_ip_owned_by_project(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
# raises because floating_ip project_id is None
floating_ip = {'address': '10.0.0.1',
'project_id': None}
self.assertRaises(exception.NotAuthorized,
self.network._floating_ip_owned_by_project,
ctxt,
floating_ip)
# raises because floating_ip project_id is not equal to ctxt project_id
floating_ip = {'address': '10.0.0.1',
'project_id': ctxt.project_id + '1'}
self.assertRaises(exception.NotAuthorized,
self.network._floating_ip_owned_by_project,
ctxt,
floating_ip)
# does not raise (floating ip is owned by ctxt project)
floating_ip = {'address': '10.0.0.1',
'project_id': ctxt.project_id}
self.network._floating_ip_owned_by_project(ctxt, floating_ip)
ctxt = context.RequestContext(None, None,
is_admin=True)
# does not raise (ctxt is admin)
floating_ip = {'address': '10.0.0.1',
'project_id': None}
self.network._floating_ip_owned_by_project(ctxt, floating_ip)
# does not raise (ctxt is admin)
floating_ip = {'address': '10.0.0.1',
'project_id': 'testproject'}
self.network._floating_ip_owned_by_project(ctxt, floating_ip)
def test_allocate_floating_ip(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake_allocate_address(*args, **kwargs):
return {'address': '10.0.0.1', 'project_id': ctxt.project_id}
self.stubs.Set(self.network.db, 'floating_ip_allocate_address',
fake_allocate_address)
self.network.allocate_floating_ip(ctxt, ctxt.project_id)
def test_deallocate_floating_ip(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake1(*args, **kwargs):
pass
def fake2(*args, **kwargs):
return {'address': '10.0.0.1', 'fixed_ip_id': 1}
def fake3(*args, **kwargs):
return {'address': '10.0.0.1', 'fixed_ip_id': None,
'project_id': ctxt.project_id}
self.stubs.Set(self.network.db, 'floating_ip_deallocate', fake1)
self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
# this time should raise because floating ip is associated to fixed_ip
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
self.assertRaises(exception.FloatingIpAssociated,
self.network.deallocate_floating_ip,
ctxt,
mox.IgnoreArg())
# this time should not raise
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
self.network.deallocate_floating_ip(ctxt, ctxt.project_id)
def test_associate_floating_ip(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake1(*args, **kwargs):
return {'address': '10.0.0.1', 'network': 'fakenet'}
# floating ip that's already associated
def fake2(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'interface': 'eth0',
'fixed_ip_id': 1}
# floating ip that isn't associated
def fake3(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'interface': 'eth0',
'fixed_ip_id': None}
# fixed ip with remote host
def fake4(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'instance_uuid': FAKEUUID,
'interface': 'eth0',
'network_id': 'blah'}
def fake4_network(*args, **kwargs):
return {'multi_host': False, 'host': 'jibberjabber'}
# fixed ip with local host
def fake5(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'instance_uuid': FAKEUUID,
'interface': 'eth0',
'network_id': 'blahblah'}
def fake5_network(*args, **kwargs):
return {'multi_host': False, 'host': 'testhost'}
def fake6(*args, **kwargs):
self.local = False
def fake7(*args, **kwargs):
self.local = True
def fake8(*args, **kwargs):
raise exception.ProcessExecutionError('',
'Cannot find device "em0"\n')
def fake9(*args, **kwargs):
raise test.TestingException()
# raises because interface doesn't exist
self.stubs.Set(self.network.db,
'floating_ip_fixed_ip_associate',
fake1)
self.stubs.Set(self.network.db, 'floating_ip_disassociate', fake1)
self.stubs.Set(self.network.driver, 'ensure_floating_forward', fake8)
self.assertRaises(exception.NoFloatingIpInterface,
self.network._associate_floating_ip,
ctxt,
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
# raises because floating_ip is already associated to a fixed_ip
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
self.stubs.Set(self.network, 'disassociate_floating_ip', fake9)
def fake_fixed_ip_get(context, fixed_ip_id):
return {'address': 'old', 'instance_uuid': 'fake_uuid'}
self.stubs.Set(self.network.db, 'fixed_ip_get', fake_fixed_ip_get)
# doesn't raise because we exit early if the address is the same
self.network.associate_floating_ip(ctxt, mox.IgnoreArg(), 'old')
# raises because we call disassociate which is mocked
self.assertRaises(test.TestingException,
self.network.associate_floating_ip,
ctxt,
mox.IgnoreArg(),
'new')
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
# does not raise and makes call remotely
self.local = True
self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake4)
self.stubs.Set(self.network.db, 'network_get', fake4_network)
self.stubs.Set(rpc, 'call', fake6)
self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
mox.IgnoreArg())
self.assertFalse(self.local)
# does not raise and makes call locally
self.local = False
self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake5)
self.stubs.Set(self.network.db, 'network_get', fake5_network)
self.stubs.Set(self.network, '_associate_floating_ip', fake7)
self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
mox.IgnoreArg())
self.assertTrue(self.local)
def test_add_floating_ip_nat_before_bind(self):
# Tried to verify order with documented mox record/verify
# functionality, but it doesn't seem to work since I can't make it
# fail. I'm using stubs and a flag for now, but if this mox feature
# can be made to work, it would be a better way to test this.
#
# self.mox.StubOutWithMock(self.network.driver,
# 'ensure_floating_forward')
# self.mox.StubOutWithMock(self.network.driver, 'bind_floating_ip')
#
# self.network.driver.ensure_floating_forward(mox.IgnoreArg(),
# mox.IgnoreArg(),
# mox.IgnoreArg(),
# mox.IgnoreArg())
# self.network.driver.bind_floating_ip(mox.IgnoreArg(),
# mox.IgnoreArg())
# self.mox.ReplayAll()
nat_called = [False]
def fake_nat(*args, **kwargs):
nat_called[0] = True
def fake_bind(*args, **kwargs):
self.assertTrue(nat_called[0])
self.stubs.Set(self.network.driver,
'ensure_floating_forward',
fake_nat)
self.stubs.Set(self.network.driver, 'bind_floating_ip', fake_bind)
self.network.l3driver.add_floating_ip('fakefloat',
'fakefixed',
'fakeiface',
'fakenet')
def test_floating_ip_init_host(self):
def get_all_by_host(_context, _host):
return [{'interface': 'foo',
'address': 'foo'},
{'interface': 'fakeiface',
'address': 'fakefloat',
'fixed_ip_id': 1},
{'interface': 'bar',
'address': 'bar',
'fixed_ip_id': 2}]
self.stubs.Set(self.network.db, 'floating_ip_get_all_by_host',
get_all_by_host)
def fixed_ip_get(_context, fixed_ip_id, get_network):
if fixed_ip_id == 1:
return {'address': 'fakefixed', 'network': 'fakenet'}
raise exception.FixedIpNotFound(id=fixed_ip_id)
self.stubs.Set(self.network.db, 'fixed_ip_get', fixed_ip_get)
self.mox.StubOutWithMock(self.network.l3driver, 'add_floating_ip')
self.flags(public_interface=False)
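        # public_interface is unset, so the floating IP should be bound on
        # the interface recorded with it ('fakeiface').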
self.network.l3driver.add_floating_ip('fakefloat',
'fakefixed',
'fakeiface',
'fakenet')
self.mox.ReplayAll()
self.network.init_host_floating_ips()
self.mox.UnsetStubs()
self.mox.VerifyAll()
self.mox.StubOutWithMock(self.network.l3driver, 'add_floating_ip')
self.flags(public_interface='fooiface')
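        # public_interface is set, so it overrides the recorded interface.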
self.network.l3driver.add_floating_ip('fakefloat',
'fakefixed',
'fooiface',
'fakenet')
self.mox.ReplayAll()
self.network.init_host_floating_ips()
self.mox.UnsetStubs()
self.mox.VerifyAll()
def test_disassociate_floating_ip(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake1(*args, **kwargs):
pass
# floating ip that isn't associated
def fake2(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'interface': 'eth0',
'fixed_ip_id': None}
# floating ip that is associated
def fake3(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'interface': 'eth0',
'fixed_ip_id': 1,
'project_id': ctxt.project_id}
# fixed ip with remote host
def fake4(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'instance_uuid': FAKEUUID,
'interface': 'eth0',
'network_id': 'blah'}
def fake4_network(*args, **kwargs):
return {'multi_host': False,
'host': 'jibberjabber'}
# fixed ip with local host
def fake5(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'instance_uuid': FAKEUUID,
'interface': 'eth0',
'network_id': 'blahblah'}
def fake5_network(*args, **kwargs):
return {'multi_host': False, 'host': 'testhost'}
def fake6(*args, **kwargs):
self.local = False
def fake7(*args, **kwargs):
self.local = True
def fake8(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'interface': 'eth0',
'fixed_ip_id': 1,
'auto_assigned': True,
'project_id': ctxt.project_id}
self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
# raises because floating_ip is not associated to a fixed_ip
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
self.assertRaises(exception.FloatingIpNotAssociated,
self.network.disassociate_floating_ip,
ctxt,
mox.IgnoreArg())
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
# does not raise and makes call remotely
self.local = True
self.stubs.Set(self.network.db, 'fixed_ip_get', fake4)
self.stubs.Set(self.network.db, 'network_get', fake4_network)
self.stubs.Set(rpc, 'call', fake6)
self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
self.assertFalse(self.local)
# does not raise and makes call locally
self.local = False
self.stubs.Set(self.network.db, 'fixed_ip_get', fake5)
self.stubs.Set(self.network.db, 'network_get', fake5_network)
self.stubs.Set(self.network, '_disassociate_floating_ip', fake7)
self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
self.assertTrue(self.local)
# raises because auto_assigned floating IP cannot be disassociated
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake8)
self.assertRaises(exception.CannotDisassociateAutoAssignedFloatingIP,
self.network.disassociate_floating_ip,
ctxt,
mox.IgnoreArg())
def test_add_fixed_ip_instance_without_vpn_requested_networks(self):
self.mox.StubOutWithMock(db, 'network_get')
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups': [{'id': 0}],
'availability_zone': '',
'uuid': FAKEUUID})
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn('192.168.0.101')
db.network_get(mox.IgnoreArg(),
mox.IgnoreArg(),
project_only=mox.IgnoreArg()).AndReturn(networks[0])
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'display_name': HOST})
self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
networks[0]['id'])
def test_ip_association_and_allocation_of_other_project(self):
"""Makes sure that we cannot deallocaate or disassociate
a public ip of other project"""
def network_get(_context, network_id, project_only="allow_none"):
return networks[network_id]
self.stubs.Set(db, 'network_get', network_get)
context1 = context.RequestContext('user', 'project1')
context2 = context.RequestContext('user', 'project2')
address = '1.2.3.4'
float_addr = db.floating_ip_create(context1.elevated(),
{'address': address,
'project_id': context1.project_id})
instance = db.instance_create(context1,
{'project_id': 'project1'})
fix_addr = db.fixed_ip_associate_pool(context1.elevated(),
1, instance['uuid'])
        # Associating the IP from another project's non-admin context raises
self.assertRaises(exception.NotAuthorized,
self.network.associate_floating_ip,
context2,
float_addr,
fix_addr)
        # Deallocating the address from another project's context also raises
self.assertRaises(exception.NotAuthorized,
self.network.deallocate_floating_ip,
context2,
float_addr)
        # Now associate the address within the owning project
self.network.associate_floating_ip(context1, float_addr, fix_addr)
        # Now try disassociating it from the other project's context
self.assertRaises(exception.NotAuthorized,
self.network.disassociate_floating_ip,
context2,
float_addr)
# Clean up the ip addresses
self.network.disassociate_floating_ip(context1, float_addr)
self.network.deallocate_floating_ip(context1, float_addr)
self.network.deallocate_fixed_ip(context1, fix_addr, 'fake')
db.floating_ip_destroy(context1.elevated(), float_addr)
db.fixed_ip_disassociate(context1.elevated(), fix_addr)
def test_deallocate_fixed(self):
"""Verify that release is called properly.
Ensures https://bugs.launchpad.net/nova/+bug/973442 doesn't return"""
def network_get(_context, network_id, project_only="allow_none"):
return networks[network_id]
self.stubs.Set(db, 'network_get', network_get)
def vif_get(_context, _vif_id):
return {'address': 'fake_mac'}
self.stubs.Set(db, 'virtual_interface_get', vif_get)
context1 = context.RequestContext('user', 'project1')
instance = db.instance_create(context1,
{'project_id': 'project1'})
elevated = context1.elevated()
fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
values = {'allocated': True,
'virtual_interface_id': 3}
db.fixed_ip_update(elevated, fix_addr, values)
fixed = db.fixed_ip_get_by_address(elevated, fix_addr)
network = db.network_get(elevated, fixed['network_id'])
self.flags(force_dhcp_release=True)
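        # force_dhcp_release means deallocation must release the DHCP lease;
        # the mocked linux_net.release_dhcp call below verifies this.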
self.mox.StubOutWithMock(linux_net, 'release_dhcp')
linux_net.release_dhcp(network['bridge'], fixed['address'], 'fake_mac')
self.mox.ReplayAll()
self.network.deallocate_fixed_ip(context1, fix_addr, 'fake')
fixed = db.fixed_ip_get_by_address(elevated, fix_addr)
self.assertFalse(fixed['allocated'])
def test_deallocate_fixed_deleted(self):
        # Verify that a deleted fixed_ip on a deleted network is not
        # deallocated.
def network_get(_context, network_id, project_only="allow_none"):
return networks[network_id]
def teardown_network_on_host(_context, network):
if network['id'] == 0:
raise test.TestingException()
self.stubs.Set(db, 'network_get', network_get)
self.stubs.Set(self.network, '_teardown_network_on_host',
teardown_network_on_host)
context1 = context.RequestContext('user', 'project1')
instance = db.instance_create(context1,
{'project_id': 'project1'})
elevated = context1.elevated()
fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
db.fixed_ip_update(elevated, fix_addr, {'deleted': 1})
elevated.read_deleted = 'yes'
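        # Read the deleted fixed ip back so a matching address can be created
        # on network 0, whose stubbed teardown raises TestingException.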
delfixed = db.fixed_ip_get_by_address(elevated, fix_addr)
values = {'address': fix_addr,
'network_id': 0,
'instance_uuid': delfixed['instance_uuid']}
db.fixed_ip_create(elevated, values)
elevated.read_deleted = 'no'
newfixed = db.fixed_ip_get_by_address(elevated, fix_addr)
elevated.read_deleted = 'yes'
deallocate = self.network.deallocate_fixed_ip
self.assertRaises(test.TestingException, deallocate, context1,
fix_addr, 'fake')
def test_deallocate_fixed_no_vif(self):
"""Verify that deallocate doesn't raise when no vif is returned.
Ensures https://bugs.launchpad.net/nova/+bug/968457 doesn't return"""
def network_get(_context, network_id, project_only="allow_none"):
return networks[network_id]
self.stubs.Set(db, 'network_get', network_get)
def vif_get(_context, _vif_id):
return None
self.stubs.Set(db, 'virtual_interface_get', vif_get)
context1 = context.RequestContext('user', 'project1')
instance = db.instance_create(context1,
{'project_id': 'project1'})
elevated = context1.elevated()
fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
values = {'allocated': True,
'virtual_interface_id': 3}
db.fixed_ip_update(elevated, fix_addr, values)
self.flags(force_dhcp_release=True)
self.network.deallocate_fixed_ip(context1, fix_addr, 'fake')
def test_fixed_ip_cleanup_fail(self):
# Verify IP is not deallocated if the security group refresh fails.
def network_get(_context, network_id, project_only="allow_none"):
return networks[network_id]
self.stubs.Set(db, 'network_get', network_get)
context1 = context.RequestContext('user', 'project1')
instance = db.instance_create(context1,
{'project_id': 'project1'})
elevated = context1.elevated()
fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
values = {'allocated': True,
'virtual_interface_id': 3}
db.fixed_ip_update(elevated, fix_addr, values)
fixed = db.fixed_ip_get_by_address(elevated, fix_addr)
network = db.network_get(elevated, fixed['network_id'])
def fake_refresh(instance_uuid):
raise test.TestingException()
self.stubs.Set(self.network,
'_do_trigger_security_group_members_refresh_for_instance',
fake_refresh)
self.assertRaises(test.TestingException,
self.network.deallocate_fixed_ip,
context1, fix_addr, 'fake')
fixed = db.fixed_ip_get_by_address(elevated, fix_addr)
self.assertTrue(fixed['allocated'])
class CommonNetworkTestCase(test.TestCase):
def setUp(self):
super(CommonNetworkTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.flags(ipv6_backend='rfc2462')
ipv6.reset_backend()
def fake_create_fixed_ips(self, context, network_id, fixed_cidr=None):
return None
def test_get_instance_nw_info_client_exceptions(self):
manager = network_manager.NetworkManager()
self.mox.StubOutWithMock(manager.db,
'virtual_interface_get_by_instance')
manager.db.virtual_interface_get_by_instance(
self.context, FAKEUUID).AndRaise(exception.InstanceNotFound(
instance_id=FAKEUUID))
self.mox.ReplayAll()
self.assertRaises(rpc_common.ClientException,
manager.get_instance_nw_info,
self.context, FAKEUUID, 'fake_rxtx_factor', HOST)
def test_deallocate_for_instance_passes_host_info(self):
manager = fake_network.FakeNetworkManager()
db = manager.db
db.instance_get = lambda _x, _y: dict(uuid='ignoreduuid')
db.virtual_interface_delete_by_instance = lambda _x, _y: None
        ctx = context.RequestContext('ignore', 'ignore')
db.fixed_ip_get_by_instance = lambda x, y: [dict(address='1.2.3.4',
network_id='ignoredid')]
manager.deallocate_for_instance(
ctx, instance_id='ignore', host='somehost')
self.assertEquals([
(ctx, '1.2.3.4', 'somehost')
], manager.deallocate_fixed_ip_calls)
def test_remove_fixed_ip_from_instance(self):
manager = fake_network.FakeNetworkManager()
manager.remove_fixed_ip_from_instance(self.context, 99,
HOST,
'10.0.0.1')
self.assertEquals(manager.deallocate_called, '10.0.0.1')
def test_remove_fixed_ip_from_instance_bad_input(self):
manager = fake_network.FakeNetworkManager()
self.assertRaises(exception.FixedIpNotFoundForSpecificInstance,
manager.remove_fixed_ip_from_instance,
self.context, 99, HOST, 'bad input')
def test_validate_cidrs(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(None, 'fake', '192.168.0.0/24',
False, 1, 256, None, None, None,
None, None)
self.assertEqual(1, len(nets))
cidrs = [str(net['cidr']) for net in nets]
self.assertTrue('192.168.0.0/24' in cidrs)
def test_validate_cidrs_split_exact_in_half(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(None, 'fake', '192.168.0.0/24',
False, 2, 128, None, None, None,
None, None)
self.assertEqual(2, len(nets))
cidrs = [str(net['cidr']) for net in nets]
self.assertTrue('192.168.0.0/25' in cidrs)
self.assertTrue('192.168.0.128/25' in cidrs)
def test_validate_cidrs_split_cidr_in_use_middle_of_range(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.2.0/24'}])
self.mox.ReplayAll()
nets = manager.create_networks(None, 'fake', '192.168.0.0/16',
False, 4, 256, None, None, None,
None, None)
self.assertEqual(4, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
'192.168.4.0/24']
for exp_cidr in exp_cidrs:
self.assertTrue(exp_cidr in cidrs)
self.assertFalse('192.168.2.0/24' in cidrs)
def test_validate_cidrs_smaller_subnet_in_use(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.2.9/25'}])
self.mox.ReplayAll()
# CidrConflict: requested cidr (192.168.2.0/24) conflicts with
# existing smaller cidr
args = (None, 'fake', '192.168.2.0/24', False, 1, 256, None, None,
None, None, None)
self.assertRaises(exception.CidrConflict,
manager.create_networks, *args)
def test_validate_cidrs_split_smaller_cidr_in_use(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.2.0/25'}])
self.mox.ReplayAll()
nets = manager.create_networks(None, 'fake', '192.168.0.0/16',
False, 4, 256, None, None, None, None,
None)
self.assertEqual(4, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
'192.168.4.0/24']
for exp_cidr in exp_cidrs:
self.assertTrue(exp_cidr in cidrs)
self.assertFalse('192.168.2.0/24' in cidrs)
def test_validate_cidrs_split_smaller_cidr_in_use2(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.2.9/29'}])
self.mox.ReplayAll()
nets = manager.create_networks(None, 'fake', '192.168.2.0/24',
False, 3, 32, None, None, None, None,
None)
self.assertEqual(3, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.2.32/27', '192.168.2.64/27', '192.168.2.96/27']
for exp_cidr in exp_cidrs:
self.assertTrue(exp_cidr in cidrs)
self.assertFalse('192.168.2.0/27' in cidrs)
def test_validate_cidrs_split_all_in_use(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
in_use = [{'id': 1, 'cidr': '192.168.2.9/29'},
{'id': 2, 'cidr': '192.168.2.64/26'},
{'id': 3, 'cidr': '192.168.2.128/26'}]
manager.db.network_get_all(ctxt).AndReturn(in_use)
self.mox.ReplayAll()
args = (None, 'fake', '192.168.2.0/24', False, 3, 64, None, None,
None, None, None)
        # CidrConflict: not enough subnets available to satisfy the requested
        # num_networks - some subnets in the requested range are already
        # in use
self.assertRaises(exception.CidrConflict,
manager.create_networks, *args)
def test_validate_cidrs_one_in_use(self):
manager = fake_network.FakeNetworkManager()
args = (None, 'fake', '192.168.0.0/24', False, 2, 256, None, None,
None, None, None)
# ValueError: network_size * num_networks exceeds cidr size
self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_already_used(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.0.0/24'}])
self.mox.ReplayAll()
# CidrConflict: cidr already in use
args = (None, 'fake', '192.168.0.0/24', False, 1, 256, None, None,
None, None, None)
self.assertRaises(exception.CidrConflict,
manager.create_networks, *args)
def test_validate_cidrs_too_many(self):
manager = fake_network.FakeNetworkManager()
args = (None, 'fake', '192.168.0.0/24', False, 200, 256, None, None,
None, None, None)
        # ValueError: not enough subnets available to satisfy the requested
        # num_networks
self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_split_partial(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(None, 'fake', '192.168.0.0/16',
False, 2, 256, None, None, None, None,
None)
returned_cidrs = [str(net['cidr']) for net in nets]
self.assertTrue('192.168.0.0/24' in returned_cidrs)
self.assertTrue('192.168.1.0/24' in returned_cidrs)
def test_validate_cidrs_conflict_existing_supernet(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
fakecidr = [{'id': 1, 'cidr': '192.168.0.0/8'}]
manager.db.network_get_all(ctxt).AndReturn(fakecidr)
self.mox.ReplayAll()
args = (None, 'fake', '192.168.0.0/24', False, 1, 256, None, None,
None, None, None)
# CidrConflict: requested cidr (192.168.0.0/24) conflicts
# with existing supernet
self.assertRaises(exception.CidrConflict,
manager.create_networks, *args)
def test_create_networks(self):
cidr = '192.168.0.0/24'
manager = fake_network.FakeNetworkManager()
self.stubs.Set(manager, '_create_fixed_ips',
self.fake_create_fixed_ips)
args = [None, 'foo', cidr, None, 1, 256, 'fd00::/48', None, None,
None, None, None]
self.assertTrue(manager.create_networks(*args))
def test_create_networks_cidr_already_used(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
fakecidr = [{'id': 1, 'cidr': '192.168.0.0/24'}]
manager.db.network_get_all(ctxt).AndReturn(fakecidr)
self.mox.ReplayAll()
args = [None, 'foo', '192.168.0.0/24', None, 1, 256,
'fd00::/48', None, None, None, None, None]
self.assertRaises(exception.CidrConflict,
manager.create_networks, *args)
def test_create_networks_many(self):
cidr = '192.168.0.0/16'
manager = fake_network.FakeNetworkManager()
self.stubs.Set(manager, '_create_fixed_ips',
self.fake_create_fixed_ips)
args = [None, 'foo', cidr, None, 10, 256, 'fd00::/48', None, None,
None, None, None]
self.assertTrue(manager.create_networks(*args))
def test_get_instance_uuids_by_ip_regex(self):
manager = fake_network.FakeNetworkManager()
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
        # Greedily get everything
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '.*'})
self.assertEqual(len(res), len(_vifs))
# Doesn't exist
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '10.0.0.1'})
self.assertFalse(res)
# Get instance 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '172.16.0.2'})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 2
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '173.16.0.2'})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
# Get instance 0 and 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '172.16.0.*'})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_uuid'], _vifs[0]['instance_uuid'])
self.assertEqual(res[1]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 1 and 2
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '17..16.0.2'})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
self.assertEqual(res[1]['instance_uuid'], _vifs[2]['instance_uuid'])
def test_get_instance_uuids_by_ipv6_regex(self):
manager = fake_network.FakeNetworkManager()
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
        # Greedily get everything
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*'})
self.assertEqual(len(res), len(_vifs))
# Doesn't exist
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*1034.*'})
self.assertFalse(res)
# Get instance 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '2001:.*2'})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 2
ip6 = '2001:db8:69:1f:dead:beff:feff:ef03'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': ip6})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
# Get instance 0 and 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*ef0[1,2]'})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_uuid'], _vifs[0]['instance_uuid'])
self.assertEqual(res[1]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 1 and 2
ip6 = '2001:db8:69:1.:dead:beff:feff:ef0.'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': ip6})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
self.assertEqual(res[1]['instance_uuid'], _vifs[2]['instance_uuid'])
def test_get_instance_uuids_by_ip(self):
manager = fake_network.FakeNetworkManager()
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
# No regex for you!
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': '.*'})
self.assertFalse(res)
# Doesn't exist
ip = '10.0.0.1'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertFalse(res)
# Get instance 1
ip = '172.16.0.2'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 2
ip = '173.16.0.2'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
def test_get_network(self):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
self.mox.StubOutWithMock(manager.db, 'network_get_by_uuid')
manager.db.network_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks[0])
self.mox.ReplayAll()
uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
network = manager.get_network(fake_context, uuid)
self.assertEqual(network['uuid'], uuid)
def test_get_network_not_found(self):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
self.mox.StubOutWithMock(manager.db, 'network_get_by_uuid')
manager.db.network_get_by_uuid(
mox.IgnoreArg(),
mox.IgnoreArg()).AndRaise(
exception.NetworkNotFoundForUUID(uuid='fake')
)
self.mox.ReplayAll()
uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
self.assertRaises(exception.NetworkNotFound,
manager.get_network, fake_context, uuid)
def test_get_all_networks(self):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
self.mox.StubOutWithMock(manager.db, 'network_get_all')
manager.db.network_get_all(mox.IgnoreArg()).AndReturn(networks)
self.mox.ReplayAll()
output = manager.get_all_networks(fake_context)
self.assertEqual(len(networks), 2)
self.assertEqual(output[0]['uuid'],
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
self.assertEqual(output[1]['uuid'],
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb')
def test_disassociate_network(self):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
self.mox.StubOutWithMock(manager.db, 'network_get_by_uuid')
manager.db.network_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks[0])
self.mox.ReplayAll()
uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
manager.disassociate_network(fake_context, uuid)
def test_disassociate_network_not_found(self):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
self.mox.StubOutWithMock(manager.db, 'network_get_by_uuid')
manager.db.network_get_by_uuid(
mox.IgnoreArg(),
mox.IgnoreArg()).AndRaise(
exception.NetworkNotFoundForUUID(uuid='fake')
)
self.mox.ReplayAll()
uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
self.assertRaises(exception.NetworkNotFound,
manager.disassociate_network, fake_context, uuid)
def _test_init_host_static_fixed_range(self, net_manager):
self.flags(fake_network=True,
fixed_range='10.0.0.0/22',
routing_source_ip='192.168.0.1',
metadata_host='192.168.0.1',
public_interface='eth1',
dmz_cidr=['10.0.3.0/24'])
binary_name = linux_net.get_binary_name()
# Stub out calls we don't want to really run
self.stubs.Set(linux_net.iptables_manager, '_apply', lambda: None)
self.stubs.Set(floating_ips.FloatingIP, 'init_host_floating_ips',
lambda *args: None)
# Call the network manager init code to configure the fixed_range
net_manager.init_host()
# Get the iptables rules that got created
current_lines = []
new_lines = linux_net.iptables_manager._modify_rules(current_lines,
linux_net.iptables_manager.ipv4['nat'],
table_name='nat')
# The expected rules that should be configured based on the fixed_range
expected_lines = ['[0:0] -A %s-snat -s %s -j SNAT --to-source %s -o %s'
% (binary_name, CONF.fixed_range,
CONF.routing_source_ip,
CONF.public_interface),
'[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
% (binary_name, CONF.fixed_range,
CONF.metadata_host),
'[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
% (binary_name, CONF.fixed_range, CONF.dmz_cidr[0]),
'[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' % (binary_name,
CONF.fixed_range,
CONF.fixed_range)]
# Finally, compare the expected rules against the actual ones
for line in expected_lines:
self.assertTrue(line in new_lines)
def _test_init_host_dynamic_fixed_range(self, net_manager):
self.flags(fake_network=True,
fixed_range='',
routing_source_ip='172.16.0.1',
metadata_host='172.16.0.1',
public_interface='eth1',
dmz_cidr=['10.0.3.0/24'])
binary_name = linux_net.get_binary_name()
# Stub out calls we don't want to really run, mock the db
self.stubs.Set(linux_net.iptables_manager, '_apply', lambda: None)
self.stubs.Set(floating_ips.FloatingIP, 'init_host_floating_ips',
lambda *args: None)
self.stubs.Set(net_manager.l3driver, 'initialize_gateway',
lambda *args: None)
self.mox.StubOutWithMock(db, 'network_get_all_by_host')
db.network_get_all_by_host(mox.IgnoreArg(),
mox.IgnoreArg()).MultipleTimes().AndReturn(networks)
self.mox.ReplayAll()
# Call the network manager init code to configure the fixed_range
net_manager.init_host()
# Get the iptables rules that got created
current_lines = []
new_lines = linux_net.iptables_manager._modify_rules(current_lines,
linux_net.iptables_manager.ipv4['nat'],
table_name='nat')
# The expected rules that should be configured based on the fixed_range
expected_lines = ['[0:0] -A %s-snat -s %s -j SNAT --to-source %s -o %s'
% (binary_name, networks[0]['cidr'],
CONF.routing_source_ip,
CONF.public_interface),
'[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
% (binary_name, networks[0]['cidr'],
CONF.metadata_host),
'[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
% (binary_name, networks[0]['cidr'],
CONF.dmz_cidr[0]),
'[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' % (binary_name,
networks[0]['cidr'],
networks[0]['cidr']),
'[0:0] -A %s-snat -s %s -j SNAT --to-source %s -o %s'
% (binary_name, networks[1]['cidr'],
CONF.routing_source_ip,
CONF.public_interface),
'[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
% (binary_name, networks[1]['cidr'],
CONF.metadata_host),
'[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
% (binary_name, networks[1]['cidr'],
CONF.dmz_cidr[0]),
'[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' % (binary_name,
networks[1]['cidr'],
networks[1]['cidr'])]
# Compare the expected rules against the actual ones
for line in expected_lines:
self.assertTrue(line in new_lines)
# Add an additional network and ensure the rules get configured
new_network = {'id': 2,
'uuid': 'cccccccc-cccc-cccc-cccc-cccccccc',
'label': 'test2',
'injected': False,
'multi_host': False,
'cidr': '192.168.2.0/24',
'cidr_v6': '2001:dba::/64',
'gateway_v6': '2001:dba::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa1',
'bridge_interface': 'fake_fa1',
'gateway': '192.168.2.1',
'broadcast': '192.168.2.255',
'dns1': '192.168.2.1',
'dns2': '192.168.2.2',
'vlan': None,
'host': HOST,
'project_id': 'fake_project',
'vpn_public_address': '192.168.2.2',
'vpn_public_port': '22',
'vpn_private_address': '10.0.0.2'}
# Call the network manager init code to configure the fixed_range
ctxt = context.get_admin_context()
net_manager._setup_network_on_host(ctxt, new_network)
# Get the new iptables rules that got created from adding a new network
current_lines = []
new_lines = linux_net.iptables_manager._modify_rules(current_lines,
linux_net.iptables_manager.ipv4['nat'],
table_name='nat')
# Add the new expected rules to the old ones
expected_lines += ['[0:0] -A %s-snat -s %s -j SNAT --to-source %s -o '
'%s' % (binary_name, new_network['cidr'],
CONF.routing_source_ip,
CONF.public_interface),
'[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
% (binary_name, new_network['cidr'],
CONF.metadata_host),
'[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
% (binary_name, new_network['cidr'],
CONF.dmz_cidr[0]),
'[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack '
'! --ctstate DNAT -j ACCEPT' % (binary_name,
new_network['cidr'],
new_network['cidr'])]
# Compare the expected rules (with new network) against the actual ones
for line in expected_lines:
self.assertTrue(line in new_lines)
def test_flatdhcpmanager_static_fixed_range(self):
"""Test FlatDHCPManager NAT rules for fixed_range."""
# Set the network manager
self.network = network_manager.FlatDHCPManager(host=HOST)
self.network.db = db
# Test existing behavior:
# CONF.fixed_range is set, NAT based on CONF.fixed_range
self._test_init_host_static_fixed_range(self.network)
def test_flatdhcpmanager_dynamic_fixed_range(self):
"""Test FlatDHCPManager NAT rules for fixed_range."""
# Set the network manager
self.network = network_manager.FlatDHCPManager(host=HOST)
self.network.db = db
# Test new behavior:
# CONF.fixed_range is not set, defaults to None
# Determine networks to NAT based on lookup
self._test_init_host_dynamic_fixed_range(self.network)
def test_vlanmanager_static_fixed_range(self):
"""Test VlanManager NAT rules for fixed_range."""
# Set the network manager
self.network = network_manager.VlanManager(host=HOST)
self.network.db = db
# Test existing behavior:
# CONF.fixed_range is set, NAT based on CONF.fixed_range
self._test_init_host_static_fixed_range(self.network)
def test_vlanmanager_dynamic_fixed_range(self):
"""Test VlanManager NAT rules for fixed_range."""
# Set the network manager
self.network = network_manager.VlanManager(host=HOST)
self.network.db = db
# Test new behavior:
# CONF.fixed_range is not set, defaults to None
# Determine networks to NAT based on lookup
self._test_init_host_dynamic_fixed_range(self.network)
class TestRPCFixedManager(network_manager.RPCAllocateFixedIP,
network_manager.NetworkManager):
"""Dummy manager that implements RPCAllocateFixedIP."""
class RPCAllocateTestCase(test.TestCase):
"""Tests nova.network.manager.RPCAllocateFixedIP."""
def setUp(self):
super(RPCAllocateTestCase, self).setUp()
self.rpc_fixed = TestRPCFixedManager()
self.context = context.RequestContext('fake', 'fake')
def test_rpc_allocate(self):
"""Test to verify bug 855030 doesn't resurface.
Mekes sure _rpc_allocate_fixed_ip returns a value so the call
returns properly and the greenpool completes."""
address = '10.10.10.10'
def fake_allocate(*args, **kwargs):
return address
def fake_network_get(*args, **kwargs):
return {}
self.stubs.Set(self.rpc_fixed, 'allocate_fixed_ip', fake_allocate)
self.stubs.Set(self.rpc_fixed.db, 'network_get', fake_network_get)
rval = self.rpc_fixed._rpc_allocate_fixed_ip(self.context,
'fake_instance',
'fake_network')
self.assertEqual(rval, address)
class BackdoorPortTestCase(test.TestCase):
"""Tests nova.network.manager.get_backdoor_port."""
def setUp(self):
super(BackdoorPortTestCase, self).setUp()
self.manager = network_manager.NetworkManager()
self.manager.backdoor_port = 59697
self.context = context.RequestContext('fake', 'fake')
def test_backdoor_port(self):
port = self.manager.get_backdoor_port(self.context)
self.assertEqual(port, self.manager.backdoor_port)
class TestFloatingIPManager(floating_ips.FloatingIP,
network_manager.NetworkManager):
"""Dummy manager that implements FloatingIP."""
class AllocateTestCase(test.TestCase):
def setUp(self):
super(AllocateTestCase, self).setUp()
self.conductor = self.start_service(
'conductor', manager=CONF.conductor.manager)
self.compute = self.start_service('compute')
self.network = self.start_service('network')
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id,
self.project_id,
is_admin=True)
def test_allocate_for_instance(self):
address = "10.10.10.10"
self.flags(auto_assign_floating_ip=True)
db.floating_ip_create(self.context,
{'address': address,
'pool': 'nova'})
inst = db.instance_create(self.context, {'host': self.compute.host,
'display_name': HOST,
'instance_type_id': 1})
networks = db.network_get_all(self.context)
for network in networks:
db.network_update(self.context, network['id'],
{'host': self.network.host})
project_id = self.context.project_id
nw_info = self.network.allocate_for_instance(self.context,
instance_id=inst['id'], instance_uuid=inst['uuid'],
host=inst['host'], vpn=None, rxtx_factor=3,
project_id=project_id, macs=None)
self.assertEquals(1, len(nw_info))
fixed_ip = nw_info.fixed_ips()[0]['address']
self.assertTrue(utils.is_valid_ipv4(fixed_ip))
self.network.deallocate_for_instance(self.context,
instance_id=inst['id'],
fixed_ips=fixed_ip,
host=self.network.host,
project_id=project_id)
def test_allocate_for_instance_with_mac(self):
available_macs = set(['ca:fe:de:ad:be:ef'])
inst = db.instance_create(self.context, {'host': self.compute.host,
'display_name': HOST,
'instance_type_id': 1})
networks = db.network_get_all(self.context)
for network in networks:
db.network_update(self.context, network['id'],
{'host': self.network.host})
project_id = self.context.project_id
nw_info = self.network.allocate_for_instance(self.context,
instance_id=inst['id'], instance_uuid=inst['uuid'],
host=inst['host'], vpn=None, rxtx_factor=3,
project_id=project_id, macs=available_macs)
assigned_macs = [vif['address'] for vif in nw_info]
self.assertEquals(1, len(assigned_macs))
self.assertEquals(available_macs.pop(), assigned_macs[0])
self.network.deallocate_for_instance(self.context,
instance_id=inst['id'],
host=self.network.host,
project_id=project_id)
def test_allocate_for_instance_not_enough_macs(self):
available_macs = set()
inst = db.instance_create(self.context, {'host': self.compute.host,
'display_name': HOST,
'instance_type_id': 1})
networks = db.network_get_all(self.context)
for network in networks:
db.network_update(self.context, network['id'],
{'host': self.network.host})
project_id = self.context.project_id
self.assertRaises(exception.VirtualInterfaceCreateException,
self.network.allocate_for_instance, self.context,
instance_id=inst['id'], instance_uuid=inst['uuid'],
host=inst['host'], vpn=None, rxtx_factor=3,
project_id=project_id, macs=available_macs)
class FloatingIPTestCase(test.TestCase):
"""Tests nova.network.manager.FloatingIP."""
def setUp(self):
super(FloatingIPTestCase, self).setUp()
self.tempdir = self.useFixture(fixtures.TempDir()).path
self.flags(log_dir=self.tempdir)
self.network = TestFloatingIPManager()
self.network.db = db
self.project_id = 'testproject'
self.context = context.RequestContext('testuser', self.project_id,
is_admin=False)
def test_disassociate_floating_ip_multi_host_calls(self):
floating_ip = {
'fixed_ip_id': 12
}
fixed_ip = {
'network_id': None,
'instance_uuid': 'instance-uuid'
}
network = {
'multi_host': True
}
instance = {
'host': 'some-other-host'
}
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
self.stubs.Set(self.network.db,
'floating_ip_get_by_address',
lambda _x, _y: floating_ip)
self.stubs.Set(self.network,
'_floating_ip_owned_by_project',
lambda _x, _y: True)
self.stubs.Set(self.network.db,
'fixed_ip_get',
lambda _x, _y: fixed_ip)
self.stubs.Set(self.network.db,
'network_get',
lambda _x, _y: network)
self.stubs.Set(self.network.db,
'instance_get_by_uuid',
lambda _x, _y: instance)
self.stubs.Set(self.network.db,
'service_get_by_host_and_topic',
lambda _x, _y, _z: 'service')
self.stubs.Set(self.network.servicegroup_api,
'service_is_up',
lambda _x: True)
self.mox.StubOutWithMock(
self.network.network_rpcapi, '_disassociate_floating_ip')
self.network.network_rpcapi._disassociate_floating_ip(
ctxt, 'fl_ip', mox.IgnoreArg(), 'some-other-host', 'instance-uuid')
self.mox.ReplayAll()
self.network.disassociate_floating_ip(ctxt, 'fl_ip', True)
def test_associate_floating_ip_multi_host_calls(self):
floating_ip = {
'fixed_ip_id': None
}
fixed_ip = {
'network_id': None,
'instance_uuid': 'instance-uuid'
}
network = {
'multi_host': True
}
instance = {
'host': 'some-other-host'
}
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
self.stubs.Set(self.network.db,
'floating_ip_get_by_address',
lambda _x, _y: floating_ip)
self.stubs.Set(self.network,
'_floating_ip_owned_by_project',
lambda _x, _y: True)
self.stubs.Set(self.network.db,
'fixed_ip_get_by_address',
lambda _x, _y: fixed_ip)
self.stubs.Set(self.network.db,
'network_get',
lambda _x, _y: network)
self.stubs.Set(self.network.db,
'instance_get_by_uuid',
lambda _x, _y: instance)
self.mox.StubOutWithMock(
self.network.network_rpcapi, '_associate_floating_ip')
self.network.network_rpcapi._associate_floating_ip(
ctxt, 'fl_ip', 'fix_ip', mox.IgnoreArg(), 'some-other-host',
'instance-uuid')
self.mox.ReplayAll()
self.network.associate_floating_ip(ctxt, 'fl_ip', 'fix_ip', True)
def test_double_deallocation(self):
instance_ref = db.api.instance_create(self.context,
{"project_id": self.project_id})
        # Run it twice so it will fail if it does not handle
        # instances without fixed networks.
        # If either call fails, deallocation does not handle having no addresses.
self.network.deallocate_for_instance(self.context,
instance_id=instance_ref['id'])
self.network.deallocate_for_instance(self.context,
instance_id=instance_ref['id'])
def test_deallocation_deleted_instance(self):
self.stubs.Set(self.network, '_teardown_network_on_host',
lambda *args, **kwargs: None)
instance = db.api.instance_create(self.context, {
'project_id': self.project_id, 'deleted': True})
network = db.api.network_create_safe(self.context.elevated(), {
'project_id': self.project_id})
addr = db.fixed_ip_create(self.context, {'allocated': True,
'instance_uuid': instance['uuid'], 'address': '10.1.1.1',
'network_id': network['id']})
fixed = db.fixed_ip_get_by_address(
self.context.elevated(read_deleted='yes'), addr)
db.api.floating_ip_create(self.context, {
'address': '10.10.10.10', 'instance_uuid': instance['uuid'],
'fixed_ip_id': fixed['id'],
'project_id': self.project_id})
self.network.deallocate_for_instance(self.context,
instance_id=instance['uuid'])
def test_deallocation_duplicate_floating_ip(self):
self.stubs.Set(self.network, '_teardown_network_on_host',
lambda *args, **kwargs: None)
instance = db.api.instance_create(self.context, {
'project_id': self.project_id})
network = db.api.network_create_safe(self.context.elevated(), {
'project_id': self.project_id})
addr = db.fixed_ip_create(self.context, {'allocated': True,
'instance_uuid': instance['uuid'], 'address': '10.1.1.1',
'network_id': network['id']})
fixed = db.fixed_ip_get_by_address(
self.context.elevated(read_deleted='yes'), addr)
db.api.floating_ip_create(self.context, {
'address': '10.10.10.10',
'deleted': True})
db.api.floating_ip_create(self.context, {
'address': '10.10.10.10', 'instance_uuid': instance['uuid'],
'fixed_ip_id': fixed['id'],
'project_id': self.project_id})
self.network.deallocate_for_instance(self.context,
instance_id=instance['uuid'])
def test_migrate_instance_start(self):
called = {'count': 0}
def fake_floating_ip_get_by_address(context, address):
return {'address': address,
'fixed_ip_id': 0}
def fake_is_stale_floating_ip_address(context, floating_ip):
return floating_ip['address'] == '172.24.4.23'
def fake_fixed_ip_get(context, fixed_ip_id, get_network):
return {'instance_uuid': 'fake_uuid',
'address': '10.0.0.2',
'network': 'fakenet'}
def fake_remove_floating_ip(floating_addr, fixed_addr, interface,
network):
called['count'] += 1
def fake_floating_ip_update(context, address, args):
pass
self.stubs.Set(self.network.db, 'floating_ip_get_by_address',
fake_floating_ip_get_by_address)
self.stubs.Set(self.network, '_is_stale_floating_ip_address',
fake_is_stale_floating_ip_address)
self.stubs.Set(self.network.db, 'fixed_ip_get', fake_fixed_ip_get)
self.stubs.Set(self.network.db, 'floating_ip_update',
fake_floating_ip_update)
self.stubs.Set(self.network.l3driver, 'remove_floating_ip',
fake_remove_floating_ip)
self.mox.ReplayAll()
addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
self.network.migrate_instance_start(self.context,
instance_uuid=FAKEUUID,
floating_addresses=addresses,
rxtx_factor=3,
project_id=self.project_id,
source='fake_source',
dest='fake_dest')
self.assertEqual(called['count'], 2)
def test_migrate_instance_finish(self):
called = {'count': 0}
def fake_floating_ip_get_by_address(context, address):
return {'address': address,
'fixed_ip_id': 0}
def fake_is_stale_floating_ip_address(context, floating_ip):
return floating_ip['address'] == '172.24.4.23'
def fake_fixed_ip_get(context, fixed_ip_id, get_network):
return {'instance_uuid': 'fake_uuid',
'address': '10.0.0.2',
'network': 'fakenet'}
def fake_add_floating_ip(floating_addr, fixed_addr, interface,
network):
called['count'] += 1
def fake_floating_ip_update(context, address, args):
pass
self.stubs.Set(self.network.db, 'floating_ip_get_by_address',
fake_floating_ip_get_by_address)
self.stubs.Set(self.network, '_is_stale_floating_ip_address',
fake_is_stale_floating_ip_address)
self.stubs.Set(self.network.db, 'fixed_ip_get', fake_fixed_ip_get)
self.stubs.Set(self.network.db, 'floating_ip_update',
fake_floating_ip_update)
self.stubs.Set(self.network.l3driver, 'add_floating_ip',
fake_add_floating_ip)
self.mox.ReplayAll()
addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
self.network.migrate_instance_finish(self.context,
instance_uuid=FAKEUUID,
floating_addresses=addresses,
host='fake_dest',
rxtx_factor=3,
project_id=self.project_id,
source='fake_source')
self.assertEqual(called['count'], 2)
def test_floating_dns_create_conflict(self):
zone = "example.org"
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
self.network.add_dns_entry(self.context, address1, name1, "A", zone)
self.assertRaises(exception.FloatingIpDNSExists,
self.network.add_dns_entry, self.context,
address1, name1, "A", zone)
def test_floating_create_and_get(self):
zone = "example.org"
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
entries = self.network.get_dns_entries_by_address(self.context,
address1, zone)
self.assertFalse(entries)
self.network.add_dns_entry(self.context, address1, name1, "A", zone)
self.network.add_dns_entry(self.context, address1, name2, "A", zone)
entries = self.network.get_dns_entries_by_address(self.context,
address1, zone)
self.assertEquals(len(entries), 2)
self.assertEquals(entries[0], name1)
self.assertEquals(entries[1], name2)
entries = self.network.get_dns_entries_by_name(self.context,
name1, zone)
self.assertEquals(len(entries), 1)
self.assertEquals(entries[0], address1)
def test_floating_dns_delete(self):
zone = "example.org"
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
self.network.add_dns_entry(self.context, address1, name1, "A", zone)
self.network.add_dns_entry(self.context, address1, name2, "A", zone)
self.network.delete_dns_entry(self.context, name1, zone)
entries = self.network.get_dns_entries_by_address(self.context,
address1, zone)
self.assertEquals(len(entries), 1)
self.assertEquals(entries[0], name2)
self.assertRaises(exception.NotFound,
self.network.delete_dns_entry, self.context,
name1, zone)
def test_floating_dns_domains_public(self):
zone1 = "testzone"
domain1 = "example.org"
domain2 = "example.com"
address1 = '10.10.10.10'
entryname = 'testentry'
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.AdminRequired,
self.network.create_public_dns_domain, self.context,
domain1, zone1)
self.network.create_public_dns_domain(context_admin, domain1,
'testproject')
self.network.create_public_dns_domain(context_admin, domain2,
'fakeproject')
domains = self.network.get_dns_domains(self.context)
self.assertEquals(len(domains), 2)
self.assertEquals(domains[0]['domain'], domain1)
self.assertEquals(domains[1]['domain'], domain2)
self.assertEquals(domains[0]['project'], 'testproject')
self.assertEquals(domains[1]['project'], 'fakeproject')
self.network.add_dns_entry(self.context, address1, entryname,
'A', domain1)
entries = self.network.get_dns_entries_by_name(self.context,
entryname, domain1)
self.assertEquals(len(entries), 1)
self.assertEquals(entries[0], address1)
self.assertRaises(exception.AdminRequired,
self.network.delete_dns_domain, self.context,
domain1)
self.network.delete_dns_domain(context_admin, domain1)
self.network.delete_dns_domain(context_admin, domain2)
# Verify that deleting the domain deleted the associated entry
entries = self.network.get_dns_entries_by_name(self.context,
entryname, domain1)
self.assertFalse(entries)
def test_delete_all_by_ip(self):
domain1 = "example.org"
domain2 = "example.com"
address = "10.10.10.10"
name1 = "foo"
name2 = "bar"
def fake_domains(context):
return [{'domain': 'example.org', 'scope': 'public'},
{'domain': 'example.com', 'scope': 'public'},
{'domain': 'test.example.org', 'scope': 'public'}]
self.stubs.Set(self.network, 'get_dns_domains', fake_domains)
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.network.create_public_dns_domain(context_admin, domain1,
'testproject')
self.network.create_public_dns_domain(context_admin, domain2,
'fakeproject')
domains = self.network.get_dns_domains(self.context)
for domain in domains:
self.network.add_dns_entry(self.context, address,
name1, "A", domain['domain'])
self.network.add_dns_entry(self.context, address,
name2, "A", domain['domain'])
entries = self.network.get_dns_entries_by_address(self.context,
address,
domain['domain'])
self.assertEquals(len(entries), 2)
self.network._delete_all_entries_for_ip(self.context, address)
for domain in domains:
entries = self.network.get_dns_entries_by_address(self.context,
address,
domain['domain'])
self.assertFalse(entries)
self.network.delete_dns_domain(context_admin, domain1)
self.network.delete_dns_domain(context_admin, domain2)
def test_mac_conflicts(self):
# Make sure MAC collisions are retried.
self.flags(create_unique_mac_address_attempts=3)
ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
macs = ['bb:bb:bb:bb:bb:bb', 'aa:aa:aa:aa:aa:aa']
# Create a VIF with aa:aa:aa:aa:aa:aa
crash_test_dummy_vif = {
'address': macs[1],
'instance_uuid': 'fake_uuid',
'network_id': 'fake_net',
'uuid': 'fake_uuid',
}
self.network.db.virtual_interface_create(ctxt, crash_test_dummy_vif)
# Hand out a collision first, then a legit MAC
def fake_gen_mac():
return macs.pop()
self.stubs.Set(utils, 'generate_mac_address', fake_gen_mac)
# SQLite doesn't seem to honor the uniqueness constraint on the
# address column, so fake the collision-avoidance here
def fake_vif_save(vif):
if vif.address == crash_test_dummy_vif['address']:
raise db_exc.DBError("If you're smart, you'll retry!")
self.stubs.Set(models.VirtualInterface, 'save', fake_vif_save)
# Attempt to add another and make sure that both MACs are consumed
# by the retry loop
self.network._add_virtual_interface(ctxt, 'fake_uuid', 'fake_net')
self.assertEqual(macs, [])
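    # Illustrative sketch (an assumption, not code from this module; names
    # below are placeholders): the behaviour exercised above amounts to a
    # bounded retry loop that regenerates a MAC and retries the save whenever
    # the (faked) unique constraint rejects a duplicate address, roughly:
    #
    #     for _ in range(CONF.create_unique_mac_address_attempts):
    #         vif['address'] = utils.generate_mac_address()
    #         try:
    #             save_vif(vif)
    #             break
    #         except db_exc.DBError:
    #             continue
    #     else:
    #         raise exception.VirtualInterfaceCreateException()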
def test_deallocate_client_exceptions(self):
# Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
self.network.db.floating_ip_get_by_address(
self.context, '1.2.3.4').AndRaise(
exception.FloatingIpNotFoundForAddress(address='fake'))
self.mox.ReplayAll()
self.assertRaises(rpc_common.ClientException,
self.network.deallocate_floating_ip,
self.context, '1.2.3.4')
def test_associate_client_exceptions(self):
# Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
self.network.db.floating_ip_get_by_address(
self.context, '1.2.3.4').AndRaise(
exception.FloatingIpNotFoundForAddress(address='fake'))
self.mox.ReplayAll()
self.assertRaises(rpc_common.ClientException,
self.network.associate_floating_ip,
self.context, '1.2.3.4', '10.0.0.1')
def test_disassociate_client_exceptions(self):
# Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
self.network.db.floating_ip_get_by_address(
self.context, '1.2.3.4').AndRaise(
exception.FloatingIpNotFoundForAddress(address='fake'))
self.mox.ReplayAll()
self.assertRaises(rpc_common.ClientException,
self.network.disassociate_floating_ip,
self.context, '1.2.3.4')
def test_get_floating_ip_client_exceptions(self):
# Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get')
self.network.db.floating_ip_get(self.context, 'fake-id').AndRaise(
exception.FloatingIpNotFound(id='fake'))
self.mox.ReplayAll()
self.assertRaises(rpc_common.ClientException,
self.network.get_floating_ip,
self.context, 'fake-id')
class InstanceDNSTestCase(test.TestCase):
"""Tests nova.network.manager instance DNS."""
def setUp(self):
super(InstanceDNSTestCase, self).setUp()
self.tempdir = self.useFixture(fixtures.TempDir()).path
self.flags(log_dir=self.tempdir)
self.network = TestFloatingIPManager()
self.network.db = db
self.project_id = 'testproject'
self.context = context.RequestContext('testuser', self.project_id,
is_admin=False)
def test_dns_domains_private(self):
zone1 = 'testzone'
domain1 = 'example.org'
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.AdminRequired,
self.network.create_private_dns_domain, self.context,
domain1, zone1)
self.network.create_private_dns_domain(context_admin, domain1, zone1)
domains = self.network.get_dns_domains(self.context)
self.assertEquals(len(domains), 1)
self.assertEquals(domains[0]['domain'], domain1)
self.assertEquals(domains[0]['availability_zone'], zone1)
self.assertRaises(exception.AdminRequired,
self.network.delete_dns_domain, self.context,
domain1)
self.network.delete_dns_domain(context_admin, domain1)
domain1 = "example.org"
domain2 = "example.com"
class LdapDNSTestCase(test.TestCase):
"""Tests nova.network.ldapdns.LdapDNS."""
def setUp(self):
super(LdapDNSTestCase, self).setUp()
self.useFixture(test.ReplaceModule('ldap', fake_ldap))
dns_class = 'nova.network.ldapdns.LdapDNS'
self.driver = importutils.import_object(dns_class)
attrs = {'objectClass': ['domainrelatedobject', 'dnsdomain',
'domain', 'dcobject', 'top'],
'associateddomain': ['root'],
'dc': ['root']}
self.driver.lobj.add_s("ou=hosts,dc=example,dc=org", attrs.items())
self.driver.create_domain(domain1)
self.driver.create_domain(domain2)
def tearDown(self):
self.driver.delete_domain(domain1)
self.driver.delete_domain(domain2)
super(LdapDNSTestCase, self).tearDown()
def test_ldap_dns_domains(self):
domains = self.driver.get_domains()
self.assertEqual(len(domains), 2)
self.assertIn(domain1, domains)
self.assertIn(domain2, domains)
def test_ldap_dns_create_conflict(self):
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
self.driver.create_entry(name1, address1, "A", domain1)
self.assertRaises(exception.FloatingIpDNSExists,
self.driver.create_entry,
name1, address1, "A", domain1)
def test_ldap_dns_create_and_get(self):
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
entries = self.driver.get_entries_by_address(address1, domain1)
self.assertFalse(entries)
self.driver.create_entry(name1, address1, "A", domain1)
self.driver.create_entry(name2, address1, "A", domain1)
entries = self.driver.get_entries_by_address(address1, domain1)
self.assertEquals(len(entries), 2)
self.assertEquals(entries[0], name1)
self.assertEquals(entries[1], name2)
entries = self.driver.get_entries_by_name(name1, domain1)
self.assertEquals(len(entries), 1)
self.assertEquals(entries[0], address1)
def test_ldap_dns_delete(self):
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
self.driver.create_entry(name1, address1, "A", domain1)
self.driver.create_entry(name2, address1, "A", domain1)
entries = self.driver.get_entries_by_address(address1, domain1)
self.assertEquals(len(entries), 2)
self.driver.delete_entry(name1, domain1)
entries = self.driver.get_entries_by_address(address1, domain1)
LOG.debug("entries: %s" % entries)
self.assertEquals(len(entries), 1)
self.assertEquals(entries[0], name2)
self.assertRaises(exception.NotFound,
self.driver.delete_entry,
name1, domain1)
| apache-2.0 | -7,081,629,825,911,022,000 | 42.717688 | 79 | 0.536374 | false |
botify-labs/mangrove | mangrove/pool.py | 1 | 11546 | from abc import ABCMeta
from multiprocessing import cpu_count
from concurrent.futures import ThreadPoolExecutor
from boto import ec2
from mangrove.declarative import ServiceDeclaration, ServicePoolDeclaration
from mangrove.mappings import ConnectionsMapping
from mangrove.utils import get_boto_module
from mangrove.exceptions import (
MissingMethodError,
DoesNotExistError,
NotConnectedError
)
class ServicePool(object):
"""Aws service connection pool wrapper
    ServicePool class should be subclassed to provide
    an amazon aws service connection pool. To do so,
    create a brand new class subclassing this one and
    set the ``service`` class attribute to an existing
    boto service name; that should be enough.
    * *Examples*: please take a look at the mangrove.services
    modules.
    * *Nota*: To be as efficient as possible, every selected
    region's connection will be made asynchronously using the
    backported python3.2 concurrent.futures module.
    :param regions: AWS regions to connect the service to;
    by default every region will be used.
    :type regions: list of strings
:param default_region: region to be used as a default
:type default_region: string
:param aws_access_key_id: aws access key token (if not provided
AWS_ACCESS_KEY_ID will be fetched from
environment)
:type aws_access_key_id: string
:param aws_secret_access_key: aws secret access key (if not provided
AWS_SECRET_ACCESS_KEY will be fetched from
environment)
:type aws_secret_access_key: string
"""
__meta__ = ABCMeta
# Name of the boto python module to be used. Just in case
# you'd wanna use a fork instead.
_boto_module_name = 'boto'
# Boto aws service name to bind the regionalized
# pool to.
service = None
def __init__(self, connect=False, regions=None, default_region=None,
aws_access_key_id=None, aws_secret_access_key=None):
self._service_declaration = ServiceDeclaration(self.service)
self._service_declaration.regions = regions
self._service_declaration.default_region = default_region
self.module = self._service_declaration.module
self._executor = ThreadPoolExecutor(max_workers=cpu_count())
self._connections = ConnectionsMapping()
# _default_region private property setting should
# always be called after the _regions_names is set
self._regions_names = regions
self._default_region = default_region
if connect is True:
self.connect(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key
)
def connect(self, aws_access_key_id=None, aws_secret_access_key=None):
"""Starts connections to pool's services
:param aws_access_key_id: aws access key token (if not provided
AWS_ACCESS_KEY_ID will be fetched from
environment)
:type aws_access_key_id: string
:param aws_secret_access_key: aws secret access key (if not provided
AWS_SECRET_ACCESS_KEY will be fetched from
environment)
:type aws_secret_access_key: string
"""
        # For performance reasons, all region connections are
        # made concurrently through the concurrent.futures library.
for region in self._service_declaration.regions:
self._connections[region] = self._executor.submit(
self._connect_module_to_region,
region,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key
)
if self._default_region is not None:
self._connections.default = self._service_declaration.default_region
def _connect_module_to_region(self, region, aws_access_key_id=None,
aws_secret_access_key=None):
"""Calls the connect_to_region method over the service's
module
:param region: AWS region to connect the service to.
:type region: list of strings
:param aws_access_key_id: aws access key token (if not provided
AWS_ACCESS_KEY_ID will be fetched from
environment)
:type aws_access_key_id: string
:param aws_secret_access_key: aws secret access key (if not provided
AWS_SECRET_ACCESS_KEY will be fetched from
environment)
:type aws_secret_access_key: string
"""
        return self.module.connect_to_region(
            region,
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key
        )
@property
def regions(self):
return self._connections
def region(self, region_name):
"""Access a pools specific region connections
:param region_name: region connection to be accessed
:type region_name: string
"""
        if region_name not in self._connections:
raise NotConnectedError(
"No active connexion found for {} region, "
"please use .connect() method to proceed.".format(region_name)
)
return self._connections[region_name]
def add_region(self, region_name):
"""Connect the pool to a new region
:param region_name: Name of the region to connect to
:type region_name: string
"""
region_client = self._connect_module_to_region(region_name)
self._connections[region_name] = region_client
self._service_declaration.regions.append(region_name)
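# Illustrative sketch (not part of the original module): a concrete pool is
# declared by subclassing ServicePool and setting the ``service`` attribute;
# the class name and regions below are assumptions for demonstration only.
#
#     class Ec2Pool(ServicePool):
#         service = 'ec2'
#
#     pool = Ec2Pool(connect=True,
#                    regions=['eu-west-1', 'us-east-1'],
#                    default_region='eu-west-1')
#     pool.region('eu-west-1').get_all_instances()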
class ServiceMixinPool(object):
"""Multiple AWS services connection pool wrapper class
    ServiceMixinPool mixes ServicePool subclass instances
    into an independent pool. It can be pretty useful when you need
    to build your own custom pool exposing multiple services in
    multiple regions.
    For example, instead of instantiating different pools for each
    and every service you want to use, subclassing ServiceMixinPool
    allows you to create a pool exposing them transparently like
    so:
    .. code-block:: python
class MyPool(ServiceMixinPool):
services = {
'ec2': {
'regions': '*', # Wildcard for "every regions"
'default_region': 'eu-west-1'
},
'sqs': {
'regions': ['us-east-1', 'us-west-1', 'eu-west-1'],
'default_region': 'us-west-1',
},
}
pool = MyPool()
pool.ec2.eu_west_1.get_all_instances()
    pool.sqs.us_west_1.get_all_queues()
...
    :param connect: Should the pool initialize its services' region
    connections on instantiation.
    :type connect: bool
:param aws_access_key_id: aws access key token (if not provided
AWS_ACCESS_KEY_ID will be fetched from
environment)
:type aws_access_key_id: string
:param aws_secret_access_key: aws secret access key (if not provided
AWS_SECRET_ACCESS_KEY will be fetched from
environment)
:type aws_secret_access_key: string
"""
__meta__ = ABCMeta
# aws services to be added to the mixin pool. To add one, please
# respect the following pattern:
    # 'service_name': {'regions': [...], 'default_region': '...'}
# * regions parameter should be whether a list of aws regions names,
# or the '*' wildcard (['*'])
# * default_region parameter should be an aws region part of
# the provided regions parameters
services = {}
def __init__(self, connect=False,
aws_access_key_id=None, aws_secret_access_key=None):
self._executor = ThreadPoolExecutor(max_workers=cpu_count())
self._services_declaration = ServicePoolDeclaration(self.services)
self._services_store = {}
        self._load_services(connect, aws_access_key_id,
                            aws_secret_access_key)
def _load_services(self, connect=None, aws_access_key_id=None,
aws_secret_access_key=None):
"""Helper private method adding every services referenced services
to mixin pool
:param connect: Should the pool being connected to remote services
at startup.
:type connect: boolean
:param aws_access_key_id: aws access key token (if not provided
AWS_ACCESS_KEY_ID will be fetched from
environment)
:type aws_access_key_id: string
:param aws_secret_access_key: aws secret access key (if not provided
AWS_SECRET_ACCESS_KEY will be fetched from
environment)
:type aws_secret_access_key: string
"""
for service_name, localisation in self._services_declaration.iteritems():
self.add_service(
service_name,
connect=connect,
regions=localisation.regions,
default_region=localisation.default_region,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key
)
def connect(self):
"""Connects every services in the pool"""
for name, pool in self._services_store.iteritems():
pool.connect()
def add_service(self, service_name, connect=False,
regions=None, default_region=None,
aws_access_key_id=None, aws_secret_access_key=None):
"""Adds a service connection to the services pool
:param service_name: name of the AWS service to add
:type service_name: string
:param regions: AWS regions to connect the service to.
:type regions: list of strings
:param aws_access_key_id: aws access key token (if not provided
AWS_ACCESS_KEY_ID will be fetched from
environment)
:type aws_access_key_id: string
:param aws_secret_access_key: aws secret access key (if not provided
AWS_SECRET_ACCESS_KEY will be fetched from
environment)
:type aws_secret_access_key: string
"""
service_pool_kls = type(service_name.capitalize(), (ServicePool,), {})
service_pool_kls.service = service_name
service_pool_instance = service_pool_kls(
connect=False,
regions=regions,
default_region=default_region,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key
)
setattr(self, service_name, service_pool_instance)
if service_name not in self._services_store:
self._services_store[service_name] = service_pool_instance
if service_name not in self._services_declaration:
self._services_declaration[service_name].regions = regions or '*'
if default_region is not None:
self._services_declaration[service_name].default_region = default_region
return service_pool_instance
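# Illustrative sketch (an assumption, not from the original source): services
# can also be attached to a mixin pool at runtime through add_service();
# the service name and regions below are for demonstration only.
#
#     pool = ServiceMixinPool()
#     pool.add_service('ec2', regions=['eu-west-1'],
#                      default_region='eu-west-1')
#     pool.connect()
#     pool.ec2.region('eu-west-1').get_all_instances()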
| mit | -903,598,347,621,880,300 | 38.272109 | 88 | 0.597696 | false |
tehamalab/dgs | goals/models.py | 1 | 39889 | import json
from django.db import models
from django.db.models.signals import m2m_changed
from django.dispatch import receiver
from django.core.exceptions import ValidationError
from django.contrib.postgres.fields import HStoreField, ArrayField
from django.utils.translation import ugettext_lazy as _
from django.template.defaultfilters import slugify, truncatechars
from django.utils.functional import cached_property
from django.core.urlresolvers import reverse
from mptt.models import MPTTModel, TreeForeignKey
from mptt.signals import node_moved
from imagekit.models import ImageSpecField
from imagekit.processors import ResizeToFit
def area_type_topo_path(instance, filename):
return 'topojson/areatype/{0}/{1}'.format(instance.code, filename)
class AreaType(models.Model):
code = models.CharField(_('Code'), max_length=20, unique=True)
name = models.CharField(_('Name'), max_length=255)
description = models.TextField(_('Description'), blank=True)
topojson = models.FileField(_('TopoJSON'), blank=True, null=True,
upload_to=area_type_topo_path)
created = models.DateTimeField(_('Created'), auto_now_add=True)
last_modified = models.DateTimeField(_('Last modified'),
auto_now=True)
extras = HStoreField(_('Extras'), blank=True, null=True, default={})
class Meta:
verbose_name = _('Area Type')
verbose_name_plural = _('Area Types')
def __str__(self):
return self.name
class Area(MPTTModel):
parent = TreeForeignKey('self', null=True, blank=True,
related_name='children', db_index=True)
code = models.CharField(_('Area code'), max_length=20, unique=True)
name = models.CharField(_('Area name'), max_length=255)
type = models.ForeignKey('goals.AreaType',
verbose_name=_('Area type'),
related_name='areas')
description = models.TextField(_('Area description'), blank=True)
image = models.ImageField(_('Image'),
upload_to='goals/areas/images',
blank=True, null=True)
image_small = ImageSpecField(source='image',
processors=[ResizeToFit(100, 100)],
format='PNG',
options={'quality': 90})
image_medium = ImageSpecField(source='image',
processors=[ResizeToFit(250, 250)],
format='PNG',
options={'quality': 90})
image_large = ImageSpecField(source='image',
processors=[ResizeToFit(700)],
options={'quality': 80})
slug = models.SlugField(_('Slug'), blank=True)
created = models.DateTimeField(_('Created'), auto_now_add=True)
last_modified = models.DateTimeField(_('Last modified'),
auto_now=True)
extras = HStoreField(_('Extras'), blank=True, null=True, default={})
class Meta:
verbose_name = _('Area')
verbose_name_plural = _('Areas')
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if not self.slug:
self.slug = self.get_slug()
if self.type:
self.extras['type_code'] = self.type.code
self.extras['type_name'] = self.type.name
super(Area, self).save(*args, **kwargs)
def get_slug(self):
if not self.slug:
slug = slugify(self.name[:50])
return slug
return self.slug
@cached_property
def api_url(self):
try:
return reverse('area-detail', args=[self.pk])
except:
# API isn't installed
# FIXME: Catch a specific exception
return ''
@cached_property
def type_code(self):
if self.type:
return self.extras.get('type_code', '') or self.type.code
return ""
@cached_property
def type_name(self):
if self.type:
return self.extras.get('type_name', '') or self.type.name
return ""
class SectorType(models.Model):
code = models.CharField(_('Code'), max_length=20, unique=True)
name = models.CharField(_('Name'), max_length=255)
description = models.TextField(_('Description'), blank=True)
created = models.DateTimeField(_('Created'), auto_now_add=True)
last_modified = models.DateTimeField(_('Last modified'),
auto_now=True)
extras = HStoreField(_('Extras'), blank=True, null=True, default={})
class Meta:
verbose_name = _('Sector Type')
verbose_name_plural = _('Sector Types')
def __str__(self):
return self.name
class Sector(MPTTModel):
themes = models.ManyToManyField('goals.Theme', verbose_name='Themes',
related_name='sectors')
parent = TreeForeignKey('self', null=True, blank=True,
related_name='children', db_index=True)
name = models.CharField(_('Sector name'), max_length=255)
code = models.CharField(_('Sector code'), max_length=20)
type = models.ForeignKey('goals.SectorType',
verbose_name=_('Sector type'),
                             related_name='sectors')
description = models.TextField(_('Sector description'), blank=True)
image = models.ImageField(_('Image'),
upload_to='goals/sectors/images',
blank=True, null=True)
image_small = ImageSpecField(source='image',
processors=[ResizeToFit(100, 100)],
format='PNG',
options={'quality': 90})
image_medium = ImageSpecField(source='image',
processors=[ResizeToFit(250, 250)],
format='PNG',
options={'quality': 90})
image_large = ImageSpecField(source='image',
processors=[ResizeToFit(700)],
options={'quality': 80})
slug = models.SlugField(_('Slug'), blank=True)
created = models.DateTimeField(_('Created'), auto_now_add=True)
last_modified = models.DateTimeField(_('Last modified'),
auto_now=True)
extras = HStoreField(_('Extras'), blank=True, null=True, default={})
class Meta:
verbose_name = _('Sector')
verbose_name_plural = _('Sectors')
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if not self.slug:
self.slug = self.get_slug()
if self.type:
self.extras['type_code'] = self.type.code
self.extras['type_name'] = self.type.name
if self.parent:
self.extras['parent_name'] = self.parent.name
super(Sector, self).save(*args, **kwargs)
def get_slug(self):
if not self.slug:
slug = slugify(self.name[:50])
return slug
return self.slug
@cached_property
def image_url(self):
if self.image:
return self.image.url
@cached_property
def image_small_url(self):
if self.image_small:
return self.image_small.url
@cached_property
def image_medium_url(self):
if self.image_medium:
return self.image_medium.url
@cached_property
def image_large_url(self):
if self.image_large:
return self.image_large.url
@cached_property
def type_code(self):
return self.extras.get('type_code', '') or self.type.code
@cached_property
def type_name(self):
return self.extras.get('type_name', '') or self.type.name
@cached_property
def parent_name(self):
if self.parent:
return self.extras.get('parent_name', '') or self.parent.name
return ''
@cached_property
def ancestors_ids(self):
return json.loads(self.extras.get('ancestors_ids', '[]'))\
or [ancestor.id for ancestor in self.get_ancestors()]
@cached_property
def ancestors_codes(self):
return json.loads(self.extras.get('ancestors_codes', '[]'))\
or [ancestor.code for ancestor in self.get_ancestors()]
@cached_property
def ancestors_names(self):
return json.loads(self.extras.get('ancestors_names', '[]'))\
or [ancestor.name for ancestor in self.get_ancestors()]
@cached_property
def themes_codes(self):
return json.loads(self.extras.get('themes_codes', '[]'))
@cached_property
def themes_names(self):
return json.loads(self.extras.get('themes_names', '[]'))
@cached_property
def plans_ids(self):
return json.loads(self.extras.get('plans_ids', '[]'))
@cached_property
def plans_codes(self):
return json.loads(self.extras.get('plans_codes', '[]'))
@cached_property
def plans_names(self):
return json.loads(self.extras.get('plans_names', '[]'))
@cached_property
def api_url(self):
try:
return reverse('sector-detail', args=[self.pk])
except:
# API isn't installed
# FIXME: Catch a specific exception
return ''
class Plan(models.Model):
code = models.CharField(_('code'), max_length=10,
unique=True)
name = models.CharField(_('Name'), max_length=255)
caption = models.CharField(_('Caption'), max_length=255, blank=True)
description = models.TextField(_('Description'), blank=True)
image = models.ImageField(_('Image'),
upload_to='goals/goals/images',
blank=True, null=True)
image_small = ImageSpecField(source='image',
processors=[ResizeToFit(100, 100)],
format='PNG',
options={'quality': 90})
image_medium = ImageSpecField(source='image',
processors=[ResizeToFit(250, 250)],
format='PNG',
options={'quality': 90})
image_large = ImageSpecField(source='image',
processors=[ResizeToFit(700)],
options={'quality': 80})
slug = models.SlugField(_('Slug'), blank=True)
created = models.DateTimeField(_('Created'), auto_now_add=True)
last_modified = models.DateTimeField(_('Last modified'),
auto_now=True)
extras = HStoreField(_('Extras'), blank=True, null=True, default={})
class Meta:
verbose_name = _('Plan')
verbose_name_plural = _('Plans')
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if not self.slug:
self.slug = self.get_slug()
super(Plan, self).save(*args, **kwargs)
def get_slug(self):
if not self.slug:
slug = slugify(self.name[:50])
return slug
return self.slug
@cached_property
def api_url(self):
try:
return reverse('plan-detail', args=[self.pk])
except:
# API isn't installed
# FIXME: Catch a specific exception
return ''
@cached_property
def image_url(self):
if self.image:
return self.image.url
@cached_property
def image_small_url(self):
if self.image_small:
return self.image_small.url
@cached_property
def image_medium_url(self):
if self.image_medium:
return self.image_medium.url
@cached_property
def image_large_url(self):
if self.image_large:
return self.image_large.url
class Theme(models.Model):
plan = models.ForeignKey('goals.Plan', verbose_name='Plans',
related_name='themes')
name = models.CharField(_('Theme name'), max_length=255)
code = models.CharField(_('Theme number'), max_length=10)
caption = models.CharField(_('Caption'), max_length=255, blank=True)
description = models.TextField(_('Theme description'), blank=True)
image = models.ImageField(_('Image'),
upload_to='goals/themes/images',
blank=True, null=True)
image_small = ImageSpecField(source='image',
processors=[ResizeToFit(100, 100)],
format='PNG',
options={'quality': 90})
image_medium = ImageSpecField(source='image',
processors=[ResizeToFit(250, 250)],
format='PNG',
options={'quality': 90})
image_large = ImageSpecField(source='image',
processors=[ResizeToFit(700)],
options={'quality': 80})
slug = models.SlugField(_('Slug'), blank=True)
created = models.DateTimeField(_('Created'), auto_now_add=True)
last_modified = models.DateTimeField(_('Last modified'),
auto_now=True)
extras = HStoreField(_('Extras'), blank=True, null=True, default={})
class Meta:
verbose_name = _('Theme')
verbose_name_plural = _('Themes')
def __str__(self):
return self.name
def save(self, *args, **kwargs):
self.extras['plan_name'] = self.plan.name
self.extras['plan_code'] = self.plan.code
if not self.slug:
self.slug = self.get_slug()
super(Theme, self).save(*args, **kwargs)
@cached_property
def image_url(self):
if self.image:
return self.image.url
@cached_property
def image_small_url(self):
if self.image_small:
return self.image_small.url
@cached_property
def image_medium_url(self):
if self.image_medium:
return self.image_medium.url
@cached_property
def image_large_url(self):
if self.image_large:
return self.image_large.url
@cached_property
def plan_name(self):
return self.extras.get('plan_name', '') or self.plan.name
@cached_property
def plan_code(self):
return self.extras.get('plan_code', '') or self.plan.code
@cached_property
def api_url(self):
try:
return reverse('theme-detail', args=[self.pk])
except:
# API isn't installed
# FIXME: Catch a specific exception
return ''
class Goal(models.Model):
plan = models.ForeignKey('goals.Plan', verbose_name='plan',
related_name='goals')
code = models.CharField(_('Goal number'), max_length=10)
name = models.CharField(_('Goal name'), max_length=255)
caption = models.CharField(_('Caption'), max_length=255, blank=True)
description = models.TextField(_('Goal description'), blank=True)
image = models.ImageField(_('Image'),
upload_to='goals/goals/images',
blank=True, null=True)
image_small = ImageSpecField(source='image',
processors=[ResizeToFit(100, 100)],
format='PNG',
options={'quality': 90})
image_medium = ImageSpecField(source='image',
processors=[ResizeToFit(250, 250)],
format='PNG',
options={'quality': 90})
image_large = ImageSpecField(source='image',
processors=[ResizeToFit(700)],
options={'quality': 80})
slug = models.SlugField(_('Slug'), blank=True)
created = models.DateTimeField(_('Created'), auto_now_add=True)
last_modified = models.DateTimeField(_('Last modified'),
auto_now=True)
extras = HStoreField(_('Extras'), blank=True, null=True, default={})
class Meta:
verbose_name = _('Goal')
verbose_name_plural = _('Goals')
unique_together = ['code', 'plan']
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if not self.slug:
self.slug = self.get_slug()
self.extras['plan_name'] = self.plan.name
self.extras['plan_code'] = self.plan.code
super(Goal, self).save(*args, **kwargs)
def get_slug(self):
if not self.slug:
slug = slugify(self.name[:50])
return slug
return self.slug
@cached_property
def api_url(self):
try:
return reverse('goal-detail', args=[self.pk])
except:
# API isn't installed
# FIXME: Catch a specific exception
return ''
@cached_property
def image_url(self):
if self.image:
return self.image.url
@cached_property
def image_small_url(self):
if self.image_small:
return self.image_small.url
@cached_property
def image_medium_url(self):
if self.image_medium:
return self.image_medium.url
@cached_property
def image_large_url(self):
if self.image_large:
return self.image_large.url
@cached_property
def plan_name(self):
return self.extras.get('plan_name', '') or self.plan.name
@cached_property
def plan_code(self):
return self.extras.get('plan_code', '') or self.plan.code
class Target(models.Model):
goal = models.ForeignKey(Goal, verbose_name=_('Goal'),
related_name='targets')
code = models.CharField(_('Target number'), max_length=10)
name = models.CharField(_('Target'), max_length=255)
description = models.TextField(_('Target description'),
blank=True)
image = models.ImageField(_('Image'),
upload_to='goals/targets/images',
blank=True, null=True)
image_small = ImageSpecField(source='image',
processors=[ResizeToFit(100, 100)],
format='PNG',
options={'quality': 90})
image_medium = ImageSpecField(source='image',
processors=[ResizeToFit(250, 250)],
format='PNG',
options={'quality': 90})
image_large = ImageSpecField(source='image',
processors=[ResizeToFit(700)],
options={'quality': 80})
slug = models.SlugField(_('Slug'), blank=True)
created = models.DateTimeField(_('Created'), auto_now_add=True)
last_modified = models.DateTimeField(_('Last modified'),
auto_now=True)
extras = HStoreField(_('Extras'), blank=True, null=True, default={})
class Meta:
verbose_name = _('Target')
verbose_name_plural = _('Targets')
unique_together = ['code', 'goal']
def __str__(self):
return '%s %s : %s' % (self.plan_code, self.code, truncatechars(self.description, 50))
def save(self, *args, **kwargs):
if not self.slug:
self.slug = self.get_slug()
self.extras['goal_code'] = self.goal.code
self.extras['goal_name'] = self.goal.name
self.extras['plan_id'] = self.plan.id
self.extras['plan_code'] = self.plan.code
self.extras['plan_name'] = self.plan.name
super(Target, self).save(*args, **kwargs)
def get_slug(self):
if not self.slug:
slug = slugify(self.name[:50])
return slug
return self.slug
@cached_property
def api_url(self):
try:
return reverse('target-detail', args=[self.pk])
except:
# API isn't installed
# FIXME: Catch a specific exception
return ''
@cached_property
def image_url(self):
if self.image:
return self.image.url
@cached_property
def image_small_url(self):
if self.image_small:
return self.image_small.url
@cached_property
def image_medium_url(self):
if self.image_medium:
return self.image_medium.url
@cached_property
def image_large_url(self):
if self.image_large:
return self.image_large.url
@cached_property
def goal_code(self):
return self.extras.get('goal_code', '') or self.goal.code
@cached_property
def goal_name(self):
return self.extras.get('goal_name', '') or self.goal.name
@cached_property
def plan(self):
return self.goal.plan
@cached_property
def plan_id(self):
return int(self.extras.get('plan_id', '0')) or self.goal.plan_id
@cached_property
def plan_code(self):
return self.extras.get('plan_code', '') or self.goal.plan_code
@cached_property
def plan_name(self):
return self.extras.get('plan_name', '') or self.goal.plan_name
class Indicator(models.Model):
theme = models.ForeignKey('goals.Theme', verbose_name=_('Theme'),
related_name='indicators', null=True, blank=True)
sector = models.ForeignKey('goals.Sector', verbose_name=_('Sector'),
related_name='indicators', null=True, blank=True)
target = models.ForeignKey(Target, verbose_name=_('Target'),
related_name='indicators', null=True, blank=True)
name = models.CharField(_('Indicator'), max_length=255)
code = models.CharField(_('Indicator number'), max_length=10)
description = models.TextField(_('Indicator description'),
blank=True)
image = models.ImageField(_('Image'),
upload_to='goals/indicators/images',
blank=True, null=True)
image_small = ImageSpecField(source='image',
processors=[ResizeToFit(100, 100)],
format='PNG',
options={'quality': 90})
image_medium = ImageSpecField(source='image',
processors=[ResizeToFit(250, 250)],
format='PNG',
options={'quality': 90})
image_large = ImageSpecField(source='image',
processors=[ResizeToFit(700)],
options={'quality': 80})
slug = models.SlugField(_('Slug'), blank=True)
created = models.DateTimeField(_('Created'), auto_now_add=True)
last_modified = models.DateTimeField(_('Last modified'),
auto_now=True)
extras = HStoreField(_('Extras'), blank=True, null=True, default={})
sectors_ids = ArrayField(
models.IntegerField(), null=True, blank=True, editable=False,
verbose_name=_('Sectors ids'), default=[])
plan_id = models.IntegerField(_('Plan ID'), null=True, blank=True,
editable=False)
class Meta:
verbose_name = _('Indicator')
verbose_name_plural = _('Indicators')
unique_together = ['code', 'target', 'sector', 'theme']
def __str__(self):
return '%s %s : %s' \
% (self.plan_code, self.code, self.name)
def save(self, *args, **kwargs):
self.clean()
if not self.slug:
self.slug = self.get_slug()
if self.theme:
self.extras['theme_code'] = self.theme.code
self.extras['theme_name'] = self.theme.name
if self.sector:
self.sectors_ids = self.sector.ancestors_ids + [self.sector_id]
self.extras['sector_code'] = self.sector.code
self.extras['sector_name'] = self.sector.name
self.extras['sectors_codes'] = json.dumps(self.sector.ancestors_codes + [self.sector.code])
self.extras['sectors_names'] = json.dumps(self.sector.ancestors_names + [self.sector.name])
self.extras['sector_type_code'] = self.sector.type.code
self.extras['sector_type_name'] = self.sector.type.name
self.extras['root_sector_id'] = self.sector.get_root().id
self.extras['root_sector_code'] = self.sector.get_root().code
self.extras['root_sector_name'] = self.sector.get_root().name
if self.target:
self.extras['target_code'] = self.target.code
self.extras['target_name'] = self.target.name
if self.goal:
self.extras['goal_id'] = self.goal.id
self.extras['goal_code'] = self.goal.code
self.extras['goal_name'] = self.goal.name
if self.plan:
self.plan_id = self.plan.id
self.extras['plan_code'] = self.plan.code
self.extras['plan_name'] = self.plan.name
super(Indicator, self).save(*args, **kwargs)
def clean(self):
if self.theme and self.target:
if self.theme.plan_id != self.target.goal.plan_id:
raise ValidationError(
_('Theme and Target must belong to the same plan'))
def get_slug(self):
if not self.slug:
slug = slugify(self.name[:50])
return slug
return self.slug
@cached_property
def api_url(self):
try:
return reverse('indicator-detail', args=[self.pk])
except:
# API isn't installed
# FIXME: Catch a specific exception
return ''
@cached_property
def image_url(self):
if self.image:
return self.image.url
@cached_property
def image_small_url(self):
if self.image_small:
return self.image_small.url
@cached_property
def image_medium_url(self):
if self.image_medium:
return self.image_medium.url
@cached_property
def image_large_url(self):
if self.image_large:
return self.image_large.url
@cached_property
def theme_code(self):
return self.extras.get('theme_code', '')
@cached_property
def theme_name(self):
return self.extras.get('theme_name', '')
@cached_property
def sectors_names(self):
if self.sector:
return json.loads(self.extras.get('sectors_names', '[]'))
return []
@cached_property
def sectors_codes(self):
if self.sector:
return json.loads(self.extras.get('sectors_codes', '[]'))
return []
@cached_property
def sector_type_code(self):
return self.extras.get('sector_type_code', '')
@cached_property
def sector_type_name(self):
return self.extras.get('sector_type_name', '')
@cached_property
def sector_code(self):
return self.extras.get('sector_code', '')
@cached_property
def sector_name(self):
return self.extras.get('sector_name', '')
@cached_property
def root_sector_id(self):
return int(self.extras.get('root_sector_id', '0')) or None
@cached_property
def root_sector_code(self):
return self.extras.get('root_sector_code', '')
@cached_property
def root_sector_name(self):
return self.extras.get('root_sector_name', '')
@cached_property
def target_code(self):
return self.extras.get('target_code', '')
@cached_property
def target_name(self):
return self.extras.get('target_name', '')
@cached_property
def goal(self):
if self.target:
return self.target.goal
return None
@cached_property
def goal_id(self):
return int(self.extras.get('goal_id', '0')) or None
@cached_property
def goal_code(self):
return self.extras.get('goal_code', '')
@cached_property
def goal_name(self):
return self.extras.get('goal_name', '')
@cached_property
def plan(self):
if self.target:
return self.target.goal.plan
elif self.theme:
return self.theme.plan
return None
@cached_property
def plan_code(self):
return self.extras.get('plan_code', '')
@cached_property
def plan_name(self):
return self.extras.get('plan_name', '')
def get_progress_count(self):
return Progress.objects.filter(component__indicators=self.id).count()
def get_progress_preview(self):
return Progress.objects.filter(component__indicators=self.id)\
.order_by('component__indicators', '-year')\
.distinct('component__indicators')
class Component(models.Model):
YES = 'YES'
NO = 'NO'
PARTIALLY = 'PARTIALLY'
UNKNOWN = 'UNKNOWN'
STATS_AVAILABLE_CHOICES = (
(YES, _('Yes')),
(NO, _('No')),
(PARTIALLY, _('Partially')),
(UNKNOWN, _('Unknown')),
)
indicators = models.ManyToManyField('goals.Indicator',
verbose_name=_('Indicators'),
related_name='components')
code = models.CharField(_('Component number'), max_length=10,
unique=True)
name = models.CharField(_('Component name'), max_length=255)
description = models.TextField(_('Component description'),
blank=True)
image = models.ImageField(_('Image'),
upload_to='goals/components/images',
blank=True, null=True)
image_small = ImageSpecField(source='image',
processors=[ResizeToFit(100, 100)],
format='PNG',
options={'quality': 90})
image_medium = ImageSpecField(source='image',
processors=[ResizeToFit(250, 250)],
format='PNG',
options={'quality': 90})
image_large = ImageSpecField(source='image',
processors=[ResizeToFit(700)],
options={'quality': 80})
target_value = models.FloatField(_('Target value'), blank=True,
null=True)
value_unit = models.CharField(_('Value unit'), blank=True,
max_length=50)
stats_available = models.CharField(
_('Statistics availble'), max_length=50, blank=True,
choices=STATS_AVAILABLE_CHOICES, default=UNKNOWN)
data_source = models.CharField(_('Data source'), max_length=255,
blank=True)
agency = models.CharField(_('Agency'), max_length=255, blank=True)
slug = models.SlugField(_('Slug'), blank=True)
created = models.DateTimeField(_('Created'), auto_now_add=True)
last_modified = models.DateTimeField(_('Last modified'),
auto_now=True)
extras = HStoreField(_('Extras'), blank=True, null=True, default={})
class Meta:
verbose_name = _('Component')
verbose_name_plural = _('Components')
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if not self.slug:
self.slug = self.get_slug()
super(Component, self).save(*args, **kwargs)
def get_slug(self):
if not self.slug:
slug = slugify(self.name[:50])
return slug
return self.slug
@cached_property
def api_url(self):
try:
return reverse('component-detail', args=[self.pk])
except:
# API isn't installed
# FIXME: Catch a specific exception
return ''
@cached_property
def image_url(self):
if self.image:
return self.image.url
@cached_property
def image_small_url(self):
if self.image_small:
return self.image_small.url
@cached_property
def image_medium_url(self):
if self.image_medium:
return self.image_medium.url
@cached_property
def image_large_url(self):
if self.image_large:
return self.image_large.url
@cached_property
def indicators_codes(self):
return json.loads(self.extras.get('indicators_codes', '[]')) \
or list(self.indicators.values_list('code', flat=True))
@cached_property
def indicators_names(self):
return json.loads(self.extras.get('indicators_names', '[]')) \
or list(self.indicators.values_list('name', flat=True))
@cached_property
def targets_ids(self):
return json.loads(self.extras.get('targets_ids', '[]'))
@cached_property
def targets_codes(self):
return json.loads(self.extras.get('targets_codes', '[]'))
@cached_property
def targets_names(self):
return json.loads(self.extras.get('targets_names', '[]'))
@cached_property
def goals_ids(self):
return json.loads(self.extras.get('goals_ids', '[]'))
@cached_property
def goals_codes(self):
return json.loads(self.extras.get('goals_codes', '[]'))
@cached_property
def goals_names(self):
return json.loads(self.extras.get('goals_names', '[]'))
@cached_property
def plans_ids(self):
return json.loads(self.extras.get('plans_ids', '[]'))
@cached_property
def plans_codes(self):
return json.loads(self.extras.get('plans_codes', '[]'))
@cached_property
def plans_names(self):
return json.loads(self.extras.get('plans_names', '[]'))
def get_progress_count(self):
return Progress.objects.filter(component=self.id).count()
class Progress(models.Model):
component = models.ForeignKey(Component,
verbose_name=_('Component'),
related_name='progress')
area = models.ForeignKey(Area, verbose_name=_('Area'),
related_name='progress')
groups = ArrayField(
models.CharField(max_length=50, blank=True), null=True,
blank=True, verbose_name=_('Groups'), default=[])
year = models.PositiveIntegerField(_('Year'))
fiscal_year = models.CharField(_('Fiscal year'), max_length=9,
blank=True)
value = models.FloatField(_('Value'))
remarks = models.TextField(_('Remarks'), blank=True)
created = models.DateTimeField(_('Created'), auto_now_add=True)
last_modified = models.DateTimeField(_('Last modified'),
auto_now=True)
extras = HStoreField(_('Extras'), blank=True, null=True, default={})
class Meta:
verbose_name = _('Progress')
verbose_name_plural = _('Progress')
def __str__(self):
return '%d:%d' % (self.year, self.value)
def save(self, *args, **kwargs):
self.extras['area_code'] = self.area.code
self.extras['area_name'] = self.area.name
self.extras['area_type_id'] = self.area.type_id
self.extras['area_type_code'] = self.area.type_code
self.extras['area_type_name'] = self.area.type_name
self.extras['component_code'] = self.component.code
self.extras['component_name'] = self.component.name
self.extras['value_unit'] = self.component.value_unit
super(Progress, self).save(*args, **kwargs)
@cached_property
def api_url(self):
try:
return reverse('progress-detail', args=[self.pk])
except:
# API isn't installed
# FIXME: Catch a specific exception
return ''
@cached_property
def component_code(self):
return self.extras.get('component_code', '')\
or self.component.code
@cached_property
def component_name(self):
return self.extras.get('component_name', '')\
or self.component.name
@cached_property
def area_code(self):
return self.extras.get('area_code', '') or self.area.code
@cached_property
def area_name(self):
return self.extras.get('area_name', '') or self.area.name
@cached_property
def area_type_id(self):
return int(self.extras.get('area_type_id', 0))\
or self.area.type_id
@cached_property
def area_type_code(self):
return self.extras.get('area_type_code', '')\
or self.area.type_code
@cached_property
def area_type_name(self):
return self.extras.get('area_type_name', '')\
or self.area.type_name
@cached_property
def value_unit(self):
return self.extras.get('value_unit', '')
@receiver(m2m_changed, sender=Sector.themes.through)
def sector_themes_changed(sender, instance, action, **kwargs):
if action == 'post_add':
themes = instance.themes.prefetch_related('plan')
instance.extras['themes_codes'] = json.dumps([t.code for t in themes])
instance.extras['themes_names'] = json.dumps([t.name for t in themes])
instance.extras['plans_ids'] = json.dumps(list(set([t.plan.id for t in themes])))
instance.extras['plans_codes'] = json.dumps(list(set([t.plan.code for t in themes])))
instance.extras['plans_names'] = json.dumps(list(set([t.plan.name for t in themes])))
Sector.objects.filter(id=instance.id).update(extras=instance.extras)
@receiver(m2m_changed, sender=Component.indicators.through)
def component_indicators_changed(sender, instance, action, **kwargs):
if action == 'post_add':
indctrs = instance.indicators\
.prefetch_related('target', 'target__goal', 'target__goal__plan')
instance.extras['indicators_codes'] = json.dumps([i.code for i in indctrs])
instance.extras['indicators_names'] = json.dumps([i.name for i in indctrs])
instance.extras['targets_ids'] = json.dumps(list(set([i.target.id for i in indctrs if i.target])))
instance.extras['targets_codes'] = json.dumps(list(set([i.target.code for i in indctrs if i.target])))
instance.extras['targets_names'] = json.dumps(list(set([i.target.name for i in indctrs if i.target])))
instance.extras['goals_ids'] = json.dumps(list(set([i.target.goal.id for i in indctrs if i.target])))
instance.extras['goals_codes'] = json.dumps(list(set([i.target.goal.code for i in indctrs if i.target])))
instance.extras['goals_names'] = json.dumps(list(set([i.target.goal.name for i in indctrs if i.target])))
instance.extras['plans_ids'] = json.dumps(list(set([i.plan.id for i in indctrs if i.plan])))
instance.extras['plans_codes'] = json.dumps(list(set([i.plan.code for i in indctrs if i.plan])))
instance.extras['plans_names'] = json.dumps(list(set([i.plan.name for i in indctrs if i.plan])))
Component.objects.filter(id=instance.id).update(extras=instance.extras)
@receiver(node_moved, sender=Sector)
def sector_node_moved(sender, instance, **kwargs):
instance.extras['ancestors_ids'] = json.dumps(
[ancestor.id for ancestor in instance.get_ancestors()])
instance.extras['ancestors_codes'] = json.dumps(
[ancestor.code for ancestor in instance.get_ancestors()])
instance.extras['ancestors_names'] = json.dumps(
[ancestor.name for ancestor in instance.get_ancestors()])
Sector.objects.filter(id=instance.id).update(extras=instance.extras)
| unlicense | -7,689,396,014,878,925,000 | 35.56187 | 113 | 0.561558 | false |
atheendra/access_keys | keystone/tests/test_v3_federation.py | 1 | 58835 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import uuid
from keystone.auth import controllers as auth_controllers
from keystone.common import dependency
from keystone.common import serializer
from keystone import config
from keystone.contrib.federation import controllers as federation_controllers
from keystone.contrib.federation import utils as mapping_utils
from keystone import exception
from keystone.openstack.common import jsonutils
from keystone.openstack.common import log
from keystone.tests import mapping_fixtures
from keystone.tests import test_v3
CONF = config.CONF
LOG = log.getLogger(__name__)
def dummy_validator(*args, **kwargs):
pass
@dependency.requires('federation_api')
class FederationTests(test_v3.RestfulTestCase):
EXTENSION_NAME = 'federation'
EXTENSION_TO_ADD = 'federation_extension'
class FederatedIdentityProviderTests(FederationTests):
"""A test class for Identity Providers."""
idp_keys = ['description', 'enabled']
default_body = {'description': None, 'enabled': True}
def base_url(self, suffix=None):
if suffix is not None:
return '/OS-FEDERATION/identity_providers/' + str(suffix)
return '/OS-FEDERATION/identity_providers'
def _fetch_attribute_from_response(self, resp, parameter,
assert_is_not_none=True):
"""Fetch single attribute from TestResponse object."""
result = resp.result.get(parameter)
if assert_is_not_none:
self.assertIsNotNone(result)
return result
def _create_and_decapsulate_response(self, body=None):
"""Create IdP and fetch it's random id along with entity."""
default_resp = self._create_default_idp(body=body)
idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
self.assertIsNotNone(idp)
idp_id = idp.get('id')
return (idp_id, idp)
def _get_idp(self, idp_id):
"""Fetch IdP entity based on it's id."""
url = self.base_url(suffix=idp_id)
resp = self.get(url)
return resp
def _create_default_idp(self, body=None):
"""Create default IdP."""
url = self.base_url(suffix=uuid.uuid4().hex)
if body is None:
body = self._http_idp_input()
resp = self.put(url, body={'identity_provider': body},
expected_status=201)
return resp
def _http_idp_input(self, **kwargs):
"""Create default input for IdP data."""
body = None
if 'body' not in kwargs:
body = self.default_body.copy()
body['description'] = uuid.uuid4().hex
else:
body = kwargs['body']
return body
def _assign_protocol_to_idp(self, idp_id=None, proto=None, url=None,
mapping_id=None, validate=True, **kwargs):
if url is None:
url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
if idp_id is None:
idp_id, _ = self._create_and_decapsulate_response()
if proto is None:
proto = uuid.uuid4().hex
if mapping_id is None:
mapping_id = uuid.uuid4().hex
body = {'mapping_id': mapping_id}
url = url % {'idp_id': idp_id, 'protocol_id': proto}
resp = self.put(url, body={'protocol': body}, **kwargs)
if validate:
self.assertValidResponse(resp, 'protocol', dummy_validator,
keys_to_check=['id', 'mapping_id'],
ref={'id': proto,
'mapping_id': mapping_id})
return (resp, idp_id, proto)
def _get_protocol(self, idp_id, protocol_id):
url = "%s/protocols/%s" % (idp_id, protocol_id)
url = self.base_url(suffix=url)
r = self.get(url)
return r
def test_create_idp(self):
"""Creates the IdentityProvider entity."""
keys_to_check = self.idp_keys
body = self._http_idp_input()
resp = self._create_default_idp(body=body)
self.assertValidResponse(resp, 'identity_provider', dummy_validator,
keys_to_check=keys_to_check,
ref=body)
def test_list_idps(self, iterations=5):
"""Lists all available IdentityProviders.
This test collects ids of created IdPs and
intersects it with the list of all available IdPs.
List of all IdPs can be a superset of IdPs created in this test,
because other tests also create IdPs.
"""
def get_id(resp):
r = self._fetch_attribute_from_response(resp,
'identity_provider')
return r.get('id')
ids = []
for _ in range(iterations):
id = get_id(self._create_default_idp())
ids.append(id)
ids = set(ids)
keys_to_check = self.idp_keys
url = self.base_url()
resp = self.get(url)
self.assertValidListResponse(resp, 'identity_providers',
dummy_validator,
keys_to_check=keys_to_check)
entities = self._fetch_attribute_from_response(resp,
'identity_providers')
entities_ids = set([e['id'] for e in entities])
ids_intersection = entities_ids.intersection(ids)
self.assertEqual(ids_intersection, ids)
def test_check_idp_uniqueness(self):
"""Add same IdP twice.
Expect HTTP 409 code for the latter call.
"""
url = self.base_url(suffix=uuid.uuid4().hex)
body = self._http_idp_input()
self.put(url, body={'identity_provider': body},
expected_status=201)
self.put(url, body={'identity_provider': body},
expected_status=409)
def test_get_idp(self):
"""Create and later fetch IdP."""
body = self._http_idp_input()
default_resp = self._create_default_idp(body=body)
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
url = self.base_url(suffix=idp_id)
resp = self.get(url)
self.assertValidResponse(resp, 'identity_provider',
dummy_validator, keys_to_check=body.keys(),
ref=body)
def test_get_nonexisting_idp(self):
"""Fetch nonexisting IdP entity.
Expected HTTP 404 status code.
"""
idp_id = uuid.uuid4().hex
self.assertIsNotNone(idp_id)
url = self.base_url(suffix=idp_id)
self.get(url, expected_status=404)
def test_delete_existing_idp(self):
"""Create and later delete IdP.
Expect HTTP 404 for the GET IdP call.
"""
default_resp = self._create_default_idp()
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
self.assertIsNotNone(idp_id)
url = self.base_url(suffix=idp_id)
self.delete(url)
self.get(url, expected_status=404)
def test_delete_nonexisting_idp(self):
"""Delete nonexisting IdP.
Expect HTTP 404 for the GET IdP call.
"""
idp_id = uuid.uuid4().hex
url = self.base_url(suffix=idp_id)
self.delete(url, expected_status=404)
def test_update_idp_mutable_attributes(self):
"""Update IdP's mutable parameters."""
default_resp = self._create_default_idp()
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
url = self.base_url(suffix=idp_id)
self.assertIsNotNone(idp_id)
_enabled = not default_idp.get('enabled')
body = {'description': uuid.uuid4().hex, 'enabled': _enabled}
body = {'identity_provider': body}
resp = self.patch(url, body=body)
updated_idp = self._fetch_attribute_from_response(resp,
'identity_provider')
body = body['identity_provider']
for key in body.keys():
self.assertEqual(body[key], updated_idp.get(key))
resp = self.get(url)
updated_idp = self._fetch_attribute_from_response(resp,
'identity_provider')
for key in body.keys():
self.assertEqual(body[key], updated_idp.get(key))
def test_update_idp_immutable_attributes(self):
"""Update IdP's immutable parameters.
Expect HTTP 403 code.
"""
default_resp = self._create_default_idp()
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
self.assertIsNotNone(idp_id)
body = self._http_idp_input()
body['id'] = uuid.uuid4().hex
body['protocols'] = [uuid.uuid4().hex, uuid.uuid4().hex]
url = self.base_url(suffix=idp_id)
self.patch(url, body={'identity_provider': body}, expected_status=403)
def test_update_nonexistent_idp(self):
"""Update nonexistent IdP
Expect HTTP 404 code.
"""
idp_id = uuid.uuid4().hex
url = self.base_url(suffix=idp_id)
body = self._http_idp_input()
body['enabled'] = False
body = {'identity_provider': body}
self.patch(url, body=body, expected_status=404)
def test_assign_protocol_to_idp(self):
"""Assign a protocol to existing IdP."""
self._assign_protocol_to_idp(expected_status=201)
def test_protocol_composite_pk(self):
"""Test whether Keystone let's add two entities with identical
names, however attached to different IdPs.
1. Add IdP and assign it protocol with predefined name
2. Add another IdP and assign it a protocol with same name.
Expect HTTP 201 code
"""
url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
kwargs = {'expected_status': 201}
self._assign_protocol_to_idp(proto='saml2',
url=url, **kwargs)
self._assign_protocol_to_idp(proto='saml2',
url=url, **kwargs)
def test_protocol_idp_pk_uniqueness(self):
"""Test whether Keystone checks for unique idp/protocol values.
        Add the same protocol twice, expect Keystone to reject the latter call and
return HTTP 409 code.
"""
url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
kwargs = {'expected_status': 201}
resp, idp_id, proto = self._assign_protocol_to_idp(proto='saml2',
url=url, **kwargs)
kwargs = {'expected_status': 409}
resp, idp_id, proto = self._assign_protocol_to_idp(idp_id=idp_id,
proto='saml2',
validate=False,
url=url, **kwargs)
def test_assign_protocol_to_nonexistent_idp(self):
"""Assign protocol to IdP that doesn't exist.
Expect HTTP 404 code.
"""
idp_id = uuid.uuid4().hex
kwargs = {'expected_status': 404}
self._assign_protocol_to_idp(proto='saml2',
idp_id=idp_id,
validate=False,
**kwargs)
def test_get_protocol(self):
"""Create and later fetch protocol tied to IdP."""
resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
proto_id = self._fetch_attribute_from_response(resp, 'protocol')['id']
url = "%s/protocols/%s" % (idp_id, proto_id)
url = self.base_url(suffix=url)
resp = self.get(url)
reference = {'id': proto_id}
self.assertValidResponse(resp, 'protocol',
dummy_validator,
keys_to_check=reference.keys(),
ref=reference)
def test_list_protocols(self):
"""Create set of protocols and later list them.
Compare input and output id sets.
"""
resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
iterations = random.randint(0, 16)
protocol_ids = []
for _ in range(iterations):
resp, _, proto = self._assign_protocol_to_idp(idp_id=idp_id,
expected_status=201)
proto_id = self._fetch_attribute_from_response(resp, 'protocol')
proto_id = proto_id['id']
protocol_ids.append(proto_id)
url = "%s/protocols" % idp_id
url = self.base_url(suffix=url)
resp = self.get(url)
self.assertValidListResponse(resp, 'protocols',
dummy_validator,
keys_to_check=['id'])
entities = self._fetch_attribute_from_response(resp, 'protocols')
entities = set([entity['id'] for entity in entities])
protocols_intersection = entities.intersection(protocol_ids)
self.assertEqual(protocols_intersection, set(protocol_ids))
def test_update_protocols_attribute(self):
"""Update protocol's attribute."""
resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
new_mapping_id = uuid.uuid4().hex
url = "%s/protocols/%s" % (idp_id, proto)
url = self.base_url(suffix=url)
body = {'mapping_id': new_mapping_id}
resp = self.patch(url, body={'protocol': body})
self.assertValidResponse(resp, 'protocol', dummy_validator,
keys_to_check=['id', 'mapping_id'],
ref={'id': proto,
'mapping_id': new_mapping_id}
)
def test_delete_protocol(self):
"""Delete protocol.
Expect HTTP 404 code for the GET call after the protocol is deleted.
"""
url = self.base_url(suffix='/%(idp_id)s/'
'protocols/%(protocol_id)s')
resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
url = url % {'idp_id': idp_id,
'protocol_id': proto}
self.delete(url)
self.get(url, expected_status=404)
class MappingCRUDTests(FederationTests):
"""A class for testing CRUD operations for Mappings."""
MAPPING_URL = '/OS-FEDERATION/mappings/'
def assertValidMappingListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'mappings',
self.assertValidMapping,
keys_to_check=[],
*args,
**kwargs)
def assertValidMappingResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'mapping',
self.assertValidMapping,
keys_to_check=[],
*args,
**kwargs)
def assertValidMapping(self, entity, ref=None):
self.assertIsNotNone(entity.get('id'))
self.assertIsNotNone(entity.get('rules'))
if ref:
self.assertEqual(jsonutils.loads(entity['rules']), ref['rules'])
return entity
def _create_default_mapping_entry(self):
url = self.MAPPING_URL + uuid.uuid4().hex
resp = self.put(url,
body={'mapping': mapping_fixtures.MAPPING_LARGE},
expected_status=201)
return resp
def _get_id_from_response(self, resp):
r = resp.result.get('mapping')
return r.get('id')
def test_mapping_create(self):
resp = self._create_default_mapping_entry()
self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_LARGE)
def test_mapping_list(self):
url = self.MAPPING_URL
self._create_default_mapping_entry()
resp = self.get(url)
entities = resp.result.get('mappings')
self.assertIsNotNone(entities)
self.assertResponseStatus(resp, 200)
self.assertValidListLinks(resp.result.get('links'))
self.assertEqual(len(entities), 1)
def test_mapping_delete(self):
url = self.MAPPING_URL + '%(mapping_id)s'
resp = self._create_default_mapping_entry()
mapping_id = self._get_id_from_response(resp)
url = url % {'mapping_id': str(mapping_id)}
resp = self.delete(url)
self.assertResponseStatus(resp, 204)
self.get(url, expected_status=404)
def test_mapping_get(self):
url = self.MAPPING_URL + '%(mapping_id)s'
resp = self._create_default_mapping_entry()
mapping_id = self._get_id_from_response(resp)
url = url % {'mapping_id': mapping_id}
resp = self.get(url)
self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_LARGE)
def test_mapping_update(self):
url = self.MAPPING_URL + '%(mapping_id)s'
resp = self._create_default_mapping_entry()
mapping_id = self._get_id_from_response(resp)
url = url % {'mapping_id': mapping_id}
resp = self.patch(url,
body={'mapping': mapping_fixtures.MAPPING_SMALL})
self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_SMALL)
resp = self.get(url)
self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_SMALL)
def test_delete_mapping_dne(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.delete(url, expected_status=404)
def test_get_mapping_dne(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.get(url, expected_status=404)
def test_create_mapping_bad_requirements(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_BAD_REQ})
def test_create_mapping_no_rules(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_NO_RULES})
def test_create_mapping_no_remote_objects(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_NO_REMOTE})
def test_create_mapping_bad_value(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_BAD_VALUE})
def test_create_mapping_missing_local(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_MISSING_LOCAL})
def test_create_mapping_missing_type(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_MISSING_TYPE})
def test_create_mapping_wrong_type(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_WRONG_TYPE})
def test_create_mapping_extra_remote_properties_not_any_of(self):
url = self.MAPPING_URL + uuid.uuid4().hex
mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_NOT_ANY_OF
self.put(url, expected_status=400, body={'mapping': mapping})
def test_create_mapping_extra_remote_properties_any_one_of(self):
url = self.MAPPING_URL + uuid.uuid4().hex
mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_ANY_ONE_OF
self.put(url, expected_status=400, body={'mapping': mapping})
def test_create_mapping_extra_remote_properties_just_type(self):
url = self.MAPPING_URL + uuid.uuid4().hex
mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_JUST_TYPE
self.put(url, expected_status=400, body={'mapping': mapping})
def test_create_mapping_empty_map(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': {}})
def test_create_mapping_extra_rules_properties(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_EXTRA_RULES_PROPS})
class MappingRuleEngineTests(FederationTests):
"""A class for testing the mapping rule engine."""
def test_rule_engine_any_one_of_and_direct_mapping(self):
"""Should return user's name and group id EMPLOYEE_GROUP_ID.
The ADMIN_ASSERTION should successfully have a match in MAPPING_LARGE.
        This will test the case where `any_one_of` is valid, and there is
        a direct mapping for the user's name.
"""
mapping = mapping_fixtures.MAPPING_LARGE
assertion = mapping_fixtures.ADMIN_ASSERTION
rp = mapping_utils.RuleProcessor(mapping['rules'])
values = rp.process(assertion)
fn = assertion.get('FirstName')
ln = assertion.get('LastName')
full_name = '%s %s' % (fn, ln)
group_ids = values.get('group_ids')
name = values.get('name')
self.assertIn(mapping_fixtures.EMPLOYEE_GROUP_ID, group_ids)
self.assertEqual(name, full_name)
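    # For reference, a minimal sketch of the rule shape these fixtures follow
    # (illustrative only; the real rules live in keystone.tests.mapping_fixtures):
    # a direct user-name mapping plus a group, gated by an `any_one_of`
    # requirement on a remote attribute.
    #
    # {
    #     'local': [
    #         {'user': {'name': '{0}'}},
    #         {'group': {'id': EMPLOYEE_GROUP_ID}}
    #     ],
    #     'remote': [
    #         {'type': 'UserName'},
    #         {'type': 'orgPersonType', 'any_one_of': ['Employee']}
    #     ]
    # }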
def test_rule_engine_no_regex_match(self):
"""Should deny authorization, the email of the tester won't match.
This will not match since the email in the assertion will fail
the regex test. It is set to match any @example.com address.
But the incoming value is set to [email protected].
RuleProcessor should raise exception.Unauthorized exception.
"""
mapping = mapping_fixtures.MAPPING_LARGE
assertion = mapping_fixtures.BAD_TESTER_ASSERTION
rp = mapping_utils.RuleProcessor(mapping['rules'])
self.assertRaises(exception.Unauthorized,
rp.process, assertion)
def test_rule_engine_any_one_of_many_rules(self):
"""Should return group CONTRACTOR_GROUP_ID.
The CONTRACTOR_ASSERTION should successfully have a match in
MAPPING_SMALL. This will test the case where many rules
must be matched, including an `any_one_of`, and a direct
mapping.
"""
mapping = mapping_fixtures.MAPPING_SMALL
assertion = mapping_fixtures.CONTRACTOR_ASSERTION
rp = mapping_utils.RuleProcessor(mapping['rules'])
values = rp.process(assertion)
user_name = assertion.get('UserName')
group_ids = values.get('group_ids')
name = values.get('name')
self.assertEqual(user_name, name)
self.assertIn(mapping_fixtures.CONTRACTOR_GROUP_ID, group_ids)
def test_rule_engine_not_any_of_and_direct_mapping(self):
"""Should return user's name and email.
The CUSTOMER_ASSERTION should successfully have a match in
MAPPING_LARGE. This will test the case where a requirement
has `not_any_of`, and direct mapping to a username, no group.
"""
mapping = mapping_fixtures.MAPPING_LARGE
assertion = mapping_fixtures.CUSTOMER_ASSERTION
rp = mapping_utils.RuleProcessor(mapping['rules'])
values = rp.process(assertion)
user_name = assertion.get('UserName')
group_ids = values.get('group_ids')
name = values.get('name')
self.assertEqual(name, user_name)
self.assertEqual(group_ids, [])
def test_rule_engine_not_any_of_many_rules(self):
"""Should return group EMPLOYEE_GROUP_ID.
The EMPLOYEE_ASSERTION should successfully have a match in
MAPPING_SMALL. This will test the case where many remote
rules must be matched, including a `not_any_of`.
"""
mapping = mapping_fixtures.MAPPING_SMALL
assertion = mapping_fixtures.EMPLOYEE_ASSERTION
rp = mapping_utils.RuleProcessor(mapping['rules'])
values = rp.process(assertion)
user_name = assertion.get('UserName')
group_ids = values.get('group_ids')
name = values.get('name')
self.assertEqual(name, user_name)
self.assertIn(mapping_fixtures.EMPLOYEE_GROUP_ID, group_ids)
def _rule_engine_regex_match_and_many_groups(self, assertion):
"""Should return group DEVELOPER_GROUP_ID and TESTER_GROUP_ID.
A helper function injecting assertion passed as an argument.
Expect DEVELOPER_GROUP_ID and TESTER_GROUP_ID in the results.
"""
mapping = mapping_fixtures.MAPPING_LARGE
rp = mapping_utils.RuleProcessor(mapping['rules'])
values = rp.process(assertion)
user_name = assertion.get('UserName')
group_ids = values.get('group_ids')
name = values.get('name')
self.assertEqual(user_name, name)
self.assertIn(mapping_fixtures.DEVELOPER_GROUP_ID, group_ids)
self.assertIn(mapping_fixtures.TESTER_GROUP_ID, group_ids)
def test_rule_engine_regex_match_and_many_groups(self):
"""Should return group DEVELOPER_GROUP_ID and TESTER_GROUP_ID.
The TESTER_ASSERTION should successfully have a match in
MAPPING_LARGE. This will test a successful regex match
for an `any_one_of` evaluation type, and will have many
groups returned.
"""
self._rule_engine_regex_match_and_many_groups(
mapping_fixtures.TESTER_ASSERTION)
def test_rule_engine_discards_nonstring_objects(self):
"""Check whether RuleProcessor discards non string objects.
Despite the fact that assertion is malformed and contains
non string objects, RuleProcessor should correctly discard them and
successfully have a match in MAPPING_LARGE.
"""
self._rule_engine_regex_match_and_many_groups(
mapping_fixtures.MALFORMED_TESTER_ASSERTION)
def test_rule_engine_fails_after_discarding_nonstring(self):
"""Check whether RuleProcessor discards non string objects.
Expect RuleProcessor to discard non string object, which
is required for a correct rule match. Since no rules are
matched expect RuleProcessor to raise exception.Unauthorized
exception.
"""
mapping = mapping_fixtures.MAPPING_SMALL
rp = mapping_utils.RuleProcessor(mapping['rules'])
assertion = mapping_fixtures.CONTRACTOR_MALFORMED_ASSERTION
self.assertRaises(exception.Unauthorized,
rp.process, assertion)
class FederatedTokenTests(FederationTests):
IDP = 'ORG_IDP'
PROTOCOL = 'saml2'
AUTH_METHOD = 'saml2'
USER = 'user@ORGANIZATION'
ASSERTION_PREFIX = 'PREFIX_'
UNSCOPED_V3_SAML2_REQ = {
"identity": {
"methods": [AUTH_METHOD],
AUTH_METHOD: {
"identity_provider": IDP,
"protocol": PROTOCOL
}
}
}
AUTH_URL = '/auth/tokens'
def load_fixtures(self, fixtures):
super(FederationTests, self).load_fixtures(fixtures)
self.load_federation_sample_data()
def idp_ref(self, id=None):
idp = {
'id': id or uuid.uuid4().hex,
'enabled': True,
'description': uuid.uuid4().hex
}
return idp
def proto_ref(self, mapping_id=None):
proto = {
'id': uuid.uuid4().hex,
'mapping_id': mapping_id or uuid.uuid4().hex
}
return proto
def mapping_ref(self, rules=None):
return {
'id': uuid.uuid4().hex,
'rules': rules or self.rules['rules']
}
def _assertSerializeToXML(self, json_body):
"""Serialize JSON body to XML.
Serialize JSON body to XML, then deserialize to JSON
again. Expect both JSON dictionaries to be equal.
"""
xml_body = serializer.to_xml(json_body)
json_deserialized = serializer.from_xml(xml_body)
self.assertDictEqual(json_deserialized, json_body)
def _scope_request(self, unscoped_token_id, scope, scope_id):
return {
'auth': {
'identity': {
'methods': [
self.AUTH_METHOD
],
self.AUTH_METHOD: {
'id': unscoped_token_id
}
},
'scope': {
scope: {
'id': scope_id
}
}
}
}
def _project(self, project):
return (project['id'], project['name'])
def _roles(self, roles):
return set([(r['id'], r['name']) for r in roles])
def _check_projects_and_roles(self, token, roles, projects):
"""Check whether the projects and the roles match."""
token_roles = token.get('roles')
if token_roles is None:
raise AssertionError('Roles not found in the token')
token_roles = self._roles(token_roles)
roles_ref = self._roles(roles)
self.assertEqual(token_roles, roles_ref)
token_projects = token.get('project')
if token_projects is None:
raise AssertionError('Projects not found in the token')
token_projects = self._project(token_projects)
projects_ref = self._project(projects)
self.assertEqual(token_projects, projects_ref)
def _check_scoped_token_attributes(self, token):
def xor_project_domain(iterable):
return sum(('project' in iterable, 'domain' in iterable)) % 2
for obj in ('user', 'catalog', 'expires_at', 'issued_at',
'methods', 'roles'):
self.assertIn(obj, token)
# Check for either project or domain
if not xor_project_domain(token.keys()):
            raise AssertionError("You must specify either "
                                 "project or domain.")
def _issue_unscoped_token(self, assertion='EMPLOYEE_ASSERTION'):
api = federation_controllers.Auth()
context = {'environment': {}}
self._inject_assertion(context, assertion)
r = api.federated_authentication(context, self.IDP, self.PROTOCOL)
return r
def test_issue_unscoped_token(self):
r = self._issue_unscoped_token()
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_issue_unscoped_token_serialize_to_xml(self):
"""Issue unscoped token and serialize to XML.
Make sure common.serializer doesn't complain about
the response structure and tag names.
"""
r = self._issue_unscoped_token()
token_resp = r.json_body
# Remove 'extras' if empty or None,
# as JSON and XML (de)serializers treat
# them differently, making dictionaries
        # comparisons fail.
if not token_resp['token'].get('extras'):
token_resp['token'].pop('extras')
self._assertSerializeToXML(token_resp)
def test_issue_unscoped_token_no_groups(self):
self.assertRaises(exception.Unauthorized,
self._issue_unscoped_token,
assertion='BAD_TESTER_ASSERTION')
def test_issue_unscoped_token_malformed_environment(self):
"""Test whether non string objects are filtered out.
Put non string objects into the environment, inject
correct assertion and try to get an unscoped token.
Expect server not to fail on using split() method on
non string objects and return token id in the HTTP header.
"""
api = auth_controllers.Auth()
context = {
'environment': {
'malformed_object': object(),
'another_bad_idea': tuple(xrange(10)),
'yet_another_bad_param': dict(zip(uuid.uuid4().hex,
range(32)))
}
}
self._inject_assertion(context, 'EMPLOYEE_ASSERTION')
r = api.authenticate_for_token(context, self.UNSCOPED_V3_SAML2_REQ)
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_scope_to_project_once(self):
r = self.post(self.AUTH_URL,
body=self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE)
token_resp = r.result['token']
project_id = token_resp['project']['id']
self.assertEqual(project_id, self.proj_employees['id'])
self._check_scoped_token_attributes(token_resp)
roles_ref = [self.role_employee]
projects_ref = self.proj_employees
self._check_projects_and_roles(token_resp, roles_ref, projects_ref)
def test_scope_to_bad_project(self):
"""Scope unscoped token with a project we don't have access to."""
self.post(self.AUTH_URL,
body=self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_CUSTOMER,
expected_status=401)
def test_scope_to_project_multiple_times(self):
"""Try to scope the unscoped token multiple times.
The new tokens should be scoped to:
* Customers' project
* Employees' project
"""
bodies = (self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_ADMIN,
self.TOKEN_SCOPE_PROJECT_CUSTOMER_FROM_ADMIN)
project_ids = (self.proj_employees['id'],
self.proj_customers['id'])
for body, project_id_ref in zip(bodies, project_ids):
r = self.post(self.AUTH_URL, body=body)
token_resp = r.result['token']
project_id = token_resp['project']['id']
self.assertEqual(project_id, project_id_ref)
self._check_scoped_token_attributes(token_resp)
def test_scope_token_from_nonexistent_unscoped_token(self):
"""Try to scope token from non-existent unscoped token."""
self.post(self.AUTH_URL,
body=self.TOKEN_SCOPE_PROJECT_FROM_NONEXISTENT_TOKEN,
expected_status=404)
def test_issue_token_from_rules_without_user(self):
api = auth_controllers.Auth()
context = {'environment': {}}
self._inject_assertion(context, 'BAD_TESTER_ASSERTION')
self.assertRaises(exception.Unauthorized,
api.authenticate_for_token,
context, self.UNSCOPED_V3_SAML2_REQ)
def test_issue_token_with_nonexistent_group(self):
"""Inject assertion that matches rule issuing bad group id.
Expect server to find out that some groups are missing in the
backend and raise exception.MappedGroupNotFound exception.
"""
self.assertRaises(exception.MappedGroupNotFound,
self._issue_unscoped_token,
assertion='CONTRACTOR_ASSERTION')
def test_scope_to_domain_once(self):
r = self.post(self.AUTH_URL,
body=self.TOKEN_SCOPE_DOMAIN_A_FROM_CUSTOMER)
token_resp = r.result['token']
domain_id = token_resp['domain']['id']
self.assertEqual(domain_id, self.domainA['id'])
self._check_scoped_token_attributes(token_resp)
def test_scope_to_domain_multiple_tokens(self):
"""Issue multiple tokens scoping to different domains.
The new tokens should be scoped to:
* domainA
* domainB
* domainC
"""
bodies = (self.TOKEN_SCOPE_DOMAIN_A_FROM_ADMIN,
self.TOKEN_SCOPE_DOMAIN_B_FROM_ADMIN,
self.TOKEN_SCOPE_DOMAIN_C_FROM_ADMIN)
domain_ids = (self.domainA['id'],
self.domainB['id'],
self.domainC['id'])
for body, domain_id_ref in zip(bodies, domain_ids):
r = self.post(self.AUTH_URL, body=body)
token_resp = r.result['token']
domain_id = token_resp['domain']['id']
self.assertEqual(domain_id, domain_id_ref)
self._check_scoped_token_attributes(token_resp)
def test_list_projects(self):
url = '/OS-FEDERATION/projects'
token = (self.tokens['CUSTOMER_ASSERTION'],
self.tokens['EMPLOYEE_ASSERTION'],
self.tokens['ADMIN_ASSERTION'])
projects_refs = (set([self.proj_customers['id']]),
set([self.proj_employees['id'],
self.project_all['id']]),
set([self.proj_employees['id'],
self.project_all['id'],
self.proj_customers['id']]))
for token, projects_ref in zip(token, projects_refs):
r = self.get(url, token=token)
projects_resp = r.result['projects']
projects = set(p['id'] for p in projects_resp)
self.assertEqual(projects, projects_ref)
def test_list_domains(self):
url = '/OS-FEDERATION/domains'
tokens = (self.tokens['CUSTOMER_ASSERTION'],
self.tokens['EMPLOYEE_ASSERTION'],
self.tokens['ADMIN_ASSERTION'])
domain_refs = (set([self.domainA['id']]),
set([self.domainA['id'],
self.domainB['id']]),
set([self.domainA['id'],
self.domainB['id'],
self.domainC['id']]))
for token, domains_ref in zip(tokens, domain_refs):
r = self.get(url, token=token)
domains_resp = r.result['domains']
domains = set(p['id'] for p in domains_resp)
self.assertEqual(domains, domains_ref)
def test_full_workflow(self):
"""Test 'standard' workflow for granting access tokens.
* Issue unscoped token
* List available projects based on groups
* Scope token to a one of available projects
"""
r = self._issue_unscoped_token()
employee_unscoped_token_id = r.headers.get('X-Subject-Token')
r = self.get('/OS-FEDERATION/projects',
token=employee_unscoped_token_id)
projects = r.result['projects']
random_project = random.randint(0, len(projects)) - 1
project = projects[random_project]
v3_scope_request = self._scope_request(employee_unscoped_token_id,
'project', project['id'])
r = self.post(self.AUTH_URL, body=v3_scope_request)
token_resp = r.result['token']
project_id = token_resp['project']['id']
self.assertEqual(project_id, project['id'])
self._check_scoped_token_attributes(token_resp)
def test_workflow_with_groups_deletion(self):
"""Test full workflow with groups deletion before token scoping.
The test scenario is as follows:
- Create group ``group``
- Create and assign roles to ``group`` and ``project_all``
- Patch mapping rules for existing IdP so it issues group id
- Issue unscoped token with ``group``'s id
- Delete group ``group``
- Scope token to ``project_all``
- Expect HTTP 500 response
"""
# create group and role
group = self.new_group_ref(
domain_id=self.domainA['id'])
self.identity_api.create_group(group['id'],
group)
role = self.new_role_ref()
self.assignment_api.create_role(role['id'],
role)
# assign role to group and project_admins
self.assignment_api.create_grant(role['id'],
group_id=group['id'],
project_id=self.project_all['id'])
rules = {
'rules': [
{
'local': [
{
'group': {
'id': group['id']
}
},
{
'user': {
'name': '{0}'
}
}
],
'remote': [
{
'type': 'UserName'
},
{
'type': 'LastName',
'any_one_of': [
'Account'
]
}
]
}
]
}
self.federation_api.update_mapping(self.mapping['id'], rules)
r = self._issue_unscoped_token(assertion='TESTER_ASSERTION')
token_id = r.headers.get('X-Subject-Token')
# delete group
self.identity_api.delete_group(group['id'])
# scope token to project_all, expect HTTP 500
scoped_token = self._scope_request(
token_id, 'project',
self.project_all['id'])
self.post(self.AUTH_URL,
body=scoped_token,
expected_status=500)
def test_assertion_prefix_parameter(self):
"""Test parameters filtering based on the prefix.
        With ``assertion_prefix`` set to a fixed, non-default value,
issue an unscoped token from assertion EMPLOYEE_ASSERTION_PREFIXED.
Expect server to return unscoped token.
"""
self.config_fixture.config(group='federation',
assertion_prefix=self.ASSERTION_PREFIX)
r = self._issue_unscoped_token(assertion='EMPLOYEE_ASSERTION_PREFIXED')
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_assertion_prefix_parameter_expect_fail(self):
"""Test parameters filtering based on the prefix.
With ``assertion_prefix`` default value set to empty string
issue an unscoped token from assertion EMPLOYEE_ASSERTION.
Next, configure ``assertion_prefix`` to value ``UserName``.
Try issuing unscoped token with EMPLOYEE_ASSERTION.
        Expect the server to raise an exception.Unauthorized exception.
"""
r = self._issue_unscoped_token()
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
self.config_fixture.config(group='federation',
assertion_prefix='UserName')
self.assertRaises(exception.Unauthorized,
self._issue_unscoped_token)
def load_federation_sample_data(self):
"""Inject additional data."""
# Create and add domains
self.domainA = self.new_domain_ref()
self.assignment_api.create_domain(self.domainA['id'],
self.domainA)
self.domainB = self.new_domain_ref()
self.assignment_api.create_domain(self.domainB['id'],
self.domainB)
self.domainC = self.new_domain_ref()
self.assignment_api.create_domain(self.domainC['id'],
self.domainC)
# Create and add projects
self.proj_employees = self.new_project_ref(
domain_id=self.domainA['id'])
self.assignment_api.create_project(self.proj_employees['id'],
self.proj_employees)
self.proj_customers = self.new_project_ref(
domain_id=self.domainA['id'])
self.assignment_api.create_project(self.proj_customers['id'],
self.proj_customers)
self.project_all = self.new_project_ref(
domain_id=self.domainA['id'])
self.assignment_api.create_project(self.project_all['id'],
self.project_all)
# Create and add groups
self.group_employees = self.new_group_ref(
domain_id=self.domainA['id'])
self.identity_api.create_group(self.group_employees['id'],
self.group_employees)
self.group_customers = self.new_group_ref(
domain_id=self.domainA['id'])
self.identity_api.create_group(self.group_customers['id'],
self.group_customers)
self.group_admins = self.new_group_ref(
domain_id=self.domainA['id'])
self.identity_api.create_group(self.group_admins['id'],
self.group_admins)
# Create and add roles
self.role_employee = self.new_role_ref()
self.assignment_api.create_role(self.role_employee['id'],
self.role_employee)
self.role_customer = self.new_role_ref()
self.assignment_api.create_role(self.role_customer['id'],
self.role_customer)
self.role_admin = self.new_role_ref()
self.assignment_api.create_role(self.role_admin['id'],
self.role_admin)
# Employees can access
# * proj_employees
# * project_all
self.assignment_api.create_grant(self.role_employee['id'],
group_id=self.group_employees['id'],
project_id=self.proj_employees['id'])
self.assignment_api.create_grant(self.role_employee['id'],
group_id=self.group_employees['id'],
project_id=self.project_all['id'])
# Customers can access
# * proj_customers
self.assignment_api.create_grant(self.role_customer['id'],
group_id=self.group_customers['id'],
project_id=self.proj_customers['id'])
# Admins can access:
# * proj_customers
# * proj_employees
# * project_all
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
project_id=self.proj_customers['id'])
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
project_id=self.proj_employees['id'])
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
project_id=self.project_all['id'])
self.assignment_api.create_grant(self.role_customer['id'],
group_id=self.group_customers['id'],
domain_id=self.domainA['id'])
# Customers can access:
# * domain A
self.assignment_api.create_grant(self.role_customer['id'],
group_id=self.group_customers['id'],
domain_id=self.domainA['id'])
# Employees can access:
# * domain A
# * domain B
self.assignment_api.create_grant(self.role_employee['id'],
group_id=self.group_employees['id'],
domain_id=self.domainA['id'])
self.assignment_api.create_grant(self.role_employee['id'],
group_id=self.group_employees['id'],
domain_id=self.domainB['id'])
# Admins can access:
# * domain A
# * domain B
# * domain C
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
domain_id=self.domainA['id'])
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
domain_id=self.domainB['id'])
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
domain_id=self.domainC['id'])
self.rules = {
'rules': [
{
'local': [
{
'group': {
'id': self.group_employees['id']
}
},
{
'user': {
'name': '{0}'
}
}
],
'remote': [
{
'type': 'UserName'
},
{
'type': 'orgPersonType',
'any_one_of': [
'Employee'
]
}
]
},
{
'local': [
{
'group': {
'id': self.group_employees['id']
}
},
{
'user': {
'name': '{0}'
}
}
],
'remote': [
{
'type': self.ASSERTION_PREFIX + 'UserName'
},
{
'type': self.ASSERTION_PREFIX + 'orgPersonType',
'any_one_of': [
'SuperEmployee'
]
}
]
},
{
'local': [
{
'group': {
'id': self.group_customers['id']
}
},
{
'user': {
'name': '{0}'
}
}
],
'remote': [
{
'type': 'UserName'
},
{
'type': 'orgPersonType',
'any_one_of': [
'Customer'
]
}
]
},
{
'local': [
{
'group': {
'id': self.group_admins['id']
}
},
{
'group': {
'id': self.group_employees['id']
}
},
{
'group': {
'id': self.group_customers['id']
}
},
{
'user': {
'name': '{0}'
}
}
],
'remote': [
{
'type': 'UserName'
},
{
'type': 'orgPersonType',
'any_one_of': [
'Admin',
'Chief'
]
}
]
},
{
'local': [
{
'group': {
'id': uuid.uuid4().hex
}
},
{
'group': {
'id': self.group_customers['id']
}
},
{
'user': {
'name': '{0}'
}
}
],
'remote': [
{
'type': 'UserName',
},
{
'type': 'FirstName',
'any_one_of': [
'Jill'
]
},
{
'type': 'LastName',
'any_one_of': [
'Smith'
]
}
]
},
{
'local': [
{
'group': {
'id': 'this_group_no_longer_exists'
}
},
{
'user': {
'name': '{0}'
}
}
],
'remote': [
{
'type': 'UserName',
},
{
'type': 'Email',
'any_one_of': [
'[email protected]'
]
},
{
'type': 'orgPersonType',
'any_one_of': [
'Tester'
]
}
]
},
]
}
# Add IDP
self.idp = self.idp_ref(id=self.IDP)
self.federation_api.create_idp(self.idp['id'],
self.idp)
# Add a mapping
self.mapping = self.mapping_ref()
self.federation_api.create_mapping(self.mapping['id'],
self.mapping)
# Add protocols
self.proto_saml = self.proto_ref(mapping_id=self.mapping['id'])
self.proto_saml['id'] = self.PROTOCOL
self.federation_api.create_protocol(self.idp['id'],
self.proto_saml['id'],
self.proto_saml)
# Generate fake tokens
context = {'environment': {}}
self.tokens = {}
VARIANTS = ('EMPLOYEE_ASSERTION', 'CUSTOMER_ASSERTION',
'ADMIN_ASSERTION')
api = auth_controllers.Auth()
for variant in VARIANTS:
self._inject_assertion(context, variant)
r = api.authenticate_for_token(context, self.UNSCOPED_V3_SAML2_REQ)
self.tokens[variant] = r.headers.get('X-Subject-Token')
self.TOKEN_SCOPE_PROJECT_FROM_NONEXISTENT_TOKEN = self._scope_request(
uuid.uuid4().hex, 'project', self.proj_customers['id'])
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE = self._scope_request(
self.tokens['EMPLOYEE_ASSERTION'], 'project',
self.proj_employees['id'])
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_ADMIN = self._scope_request(
self.tokens['ADMIN_ASSERTION'], 'project',
self.proj_employees['id'])
self.TOKEN_SCOPE_PROJECT_CUSTOMER_FROM_ADMIN = self._scope_request(
self.tokens['ADMIN_ASSERTION'], 'project',
self.proj_customers['id'])
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_CUSTOMER = self._scope_request(
self.tokens['CUSTOMER_ASSERTION'], 'project',
self.proj_employees['id'])
self.TOKEN_SCOPE_DOMAIN_A_FROM_CUSTOMER = self._scope_request(
self.tokens['CUSTOMER_ASSERTION'], 'domain', self.domainA['id'])
self.TOKEN_SCOPE_DOMAIN_B_FROM_CUSTOMER = self._scope_request(
self.tokens['CUSTOMER_ASSERTION'], 'domain', self.domainB['id'])
self.TOKEN_SCOPE_DOMAIN_B_FROM_CUSTOMER = self._scope_request(
self.tokens['CUSTOMER_ASSERTION'], 'domain',
self.domainB['id'])
self.TOKEN_SCOPE_DOMAIN_A_FROM_ADMIN = self._scope_request(
self.tokens['ADMIN_ASSERTION'], 'domain', self.domainA['id'])
self.TOKEN_SCOPE_DOMAIN_B_FROM_ADMIN = self._scope_request(
self.tokens['ADMIN_ASSERTION'], 'domain', self.domainB['id'])
self.TOKEN_SCOPE_DOMAIN_C_FROM_ADMIN = self._scope_request(
self.tokens['ADMIN_ASSERTION'], 'domain',
self.domainC['id'])
def _inject_assertion(self, context, variant):
assertion = getattr(mapping_fixtures, variant)
context['environment'].update(assertion)
context['query_string'] = []
| apache-2.0 | -6,737,992,034,688,989,000 | 37.154994 | 79 | 0.516427 | false |
jstammers/EDMSuite | EDMScripts/EDMLoop.py | 1 | 21605 | # Import a whole load of stuff
from System.IO import *
from System.Drawing import *
from System.Runtime.Remoting import *
from System.Threading import *
from System.Windows.Forms import *
from System.Xml.Serialization import *
from System import *
from Analysis.EDM import *
from DAQ.Environment import *
from EDMConfig import *
def saveBlockConfig(path, config):
fs = FileStream(path, FileMode.Create)
s = XmlSerializer(BlockConfig)
s.Serialize(fs,config)
fs.Close()
def loadBlockConfig(path):
fs = FileStream(path, FileMode.Open)
s = XmlSerializer(BlockConfig)
bc = s.Deserialize(fs)
fs.Close()
return bc
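# A quick usage sketch (paths and cluster name are illustrative only):
#   settingsPath = Environs.FileSystem.Paths["settingsPath"] + "\\BlockHead\\"
#   bc = loadBlockConfig(settingsPath + "default.xml")
#   bc.Settings["cluster"] = "testCluster"
#   saveBlockConfig(settingsPath + "test.xml", bc)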
def writeLatestBlockNotificationFile(cluster, blockIndex):
fs = FileStream(Environs.FileSystem.Paths["settingsPath"] + "\\BlockHead\\latestBlock.txt", FileMode.Create)
sw = StreamWriter(fs)
sw.WriteLine(cluster + "\t" + str(blockIndex))
sw.Close()
fs.Close()
def checkYAGAndFix():
interlockFailed = hc.YAGInterlockFailed;
if (interlockFailed):
bh.StopPattern();
bh.StartPattern();
def printWaveformCode(bc, name):
print(name + ": " + str(bc.GetModulationByName(name).Waveform.Code) + " -- " + str(bc.GetModulationByName(name).Waveform.Inverted))
def prompt(text):
sys.stdout.write(text)
return sys.stdin.readline().strip()
def measureParametersAndMakeBC(cluster, eState, bState, rfState, scramblerV, measProbePwr, measPumpPwr):
fileSystem = Environs.FileSystem
print("Measuring parameters ...")
bh.StopPattern()
hc.UpdateRFPowerMonitor()
hc.UpdateRFFrequencyMonitor()
bh.StartPattern()
hc.UpdateBCurrentMonitor()
hc.UpdateVMonitor()
hc.UpdateProbeAOMFreqMonitor()
hc.UpdatePumpAOMFreqMonitor()
#hc.CheckPiMonitor()
print("Measuring polarizer angle")
hc.UpdateProbePolAngleMonitor()
hc.UpdatePumpPolAngleMonitor()
pumpPolAngle = hc.pumpPolAngle
probePolAngle = hc.probePolAngle
print("V plus: " + str(hc.CPlusMonitorVoltage * hc.CPlusMonitorScale))
print("V minus: " + str(hc.CMinusMonitorVoltage * hc.CMinusMonitorScale))
print("Bias: " + str(hc.BiasCurrent))
print("B step: " + str(abs(hc.FlipStepCurrent)))
print("DB step: " + str(abs(hc.CalStepCurrent)))
# load a default BlockConfig and customise it appropriately
settingsPath = fileSystem.Paths["settingsPath"] + "\\BlockHead\\"
bc = loadBlockConfig(settingsPath + "default.xml")
bc.Settings["cluster"] = cluster
bc.Settings["eState"] = eState
bc.Settings["bState"] = bState
bc.Settings["rfState"] = rfState
bc.Settings["phaseScramblerV"] = scramblerV
bc.Settings["probePolarizerAngle"] = probePolAngle
bc.Settings["pumpPolarizerAngle"] = pumpPolAngle
bc.Settings["ePlus"] = hc.CPlusMonitorVoltage * hc.CPlusMonitorScale
bc.Settings["eMinus"] = hc.CMinusMonitorVoltage * hc.CMinusMonitorScale
bc.Settings["pumpAOMFreq"] = hc.PumpAOMFrequencyCentre
bc.Settings["bBiasV"] = hc.SteppingBiasVoltage
bc.Settings["greenDCFM"] = hc.GreenSynthDCFM
bc.Settings["greenAmp"] = hc.GreenSynthOnAmplitude
bc.Settings["greenFreq"] = hc.GreenSynthOnFrequency
bc.Settings["measStartProbePwr"] = measProbePwr
bc.Settings["measStartPumpPwr"] = measPumpPwr
bc.GetModulationByName("B").Centre = (hc.BiasCurrent)/1000
bc.GetModulationByName("B").Step = abs(hc.FlipStepCurrent)/1000
bc.GetModulationByName("DB").Step = abs(hc.CalStepCurrent)/1000
# these next 3, seemingly redundant, lines are to preserve backward compatibility
bc.GetModulationByName("B").PhysicalCentre = (hc.BiasCurrent)/1000
bc.GetModulationByName("B").PhysicalStep = abs(hc.FlipStepCurrent)/1000
bc.GetModulationByName("DB").PhysicalStep = abs(hc.CalStepCurrent)/1000
bc.GetModulationByName("RF1A").Centre = hc.RF1AttCentre
bc.GetModulationByName("RF1A").Step = hc.RF1AttStep
bc.GetModulationByName("RF1A").PhysicalCentre = hc.RF1PowerCentre
bc.GetModulationByName("RF1A").PhysicalStep = hc.RF1PowerStep
bc.GetModulationByName("RF2A").Centre = hc.RF2AttCentre
bc.GetModulationByName("RF2A").Step = hc.RF2AttStep
bc.GetModulationByName("RF2A").PhysicalCentre = hc.RF2PowerCentre
bc.GetModulationByName("RF2A").PhysicalStep = hc.RF2PowerStep
bc.GetModulationByName("RF1F").Centre = hc.RF1FMCentre
bc.GetModulationByName("RF1F").Step = hc.RF1FMStep
bc.GetModulationByName("RF1F").PhysicalCentre = hc.RF1FrequencyCentre
bc.GetModulationByName("RF1F").PhysicalStep = hc.RF1FrequencyStep
bc.GetModulationByName("RF2F").Centre = hc.RF2FMCentre
bc.GetModulationByName("RF2F").Step = hc.RF2FMStep
bc.GetModulationByName("RF2F").PhysicalCentre = hc.RF2FrequencyCentre
bc.GetModulationByName("RF2F").PhysicalStep = hc.RF2FrequencyStep
bc.GetModulationByName("LF1").Centre = hc.probeAOMVoltage
bc.GetModulationByName("LF1").Step = hc.probeAOMStep
bc.GetModulationByName("LF1").PhysicalCentre = hc.ProbeAOMFrequencyCentre
bc.GetModulationByName("LF1").PhysicalStep = hc.ProbeAOMFrequencyStep
bc.GetModulationByName("LF2").Centre = hc.PumpAOMVoltage
bc.GetModulationByName("LF2").Centre = hc.PumpAOMStep
bc.GetModulationByName("LF2").PhysicalCentre = hc.PumpAOMFrequencyCentre
bc.GetModulationByName("LF2").PhysicalStep = hc.PumpAOMFrequencyStep
# generate the waveform codes
print("Generating waveform codes ...")
eWave = bc.GetModulationByName("E").Waveform
eWave.Name = "E"
lf1Wave = bc.GetModulationByName("LF1").Waveform
lf1Wave.Name = "LF1"
ws = WaveformSetGenerator.GenerateWaveforms( (eWave, lf1Wave), ("B","DB","PI","RF1A","RF2A","RF1F","RF2F","LF2") )
bc.GetModulationByName("B").Waveform = ws["B"]
bc.GetModulationByName("DB").Waveform = ws["DB"]
bc.GetModulationByName("PI").Waveform = ws["PI"]
bc.GetModulationByName("RF1A").Waveform = ws["RF1A"]
bc.GetModulationByName("RF2A").Waveform = ws["RF2A"]
bc.GetModulationByName("RF1F").Waveform = ws["RF1F"]
bc.GetModulationByName("RF2F").Waveform = ws["RF2F"]
bc.GetModulationByName("LF2").Waveform = ws["LF2"]
# change the inversions of the static codes E and LF1
bc.GetModulationByName("E").Waveform.Inverted = WaveformSetGenerator.RandomBool()
bc.GetModulationByName("LF1").Waveform.Inverted = WaveformSetGenerator.RandomBool()
# print the waveform codes
# printWaveformCode(bc, "E")
# printWaveformCode(bc, "B")
# printWaveformCode(bc, "DB")
# printWaveformCode(bc, "PI")
# printWaveformCode(bc, "RF1A")
# printWaveformCode(bc, "RF2A")
# printWaveformCode(bc, "RF1F")
# printWaveformCode(bc, "RF2F")
# printWaveformCode(bc, "LF1")
# printWaveformCode(bc, "LF2")
# store e-switch info in block config
print("Storing E switch parameters ...")
bc.Settings["eRampDownTime"] = hc.ERampDownTime
bc.Settings["eRampDownDelay"] = hc.ERampDownDelay
bc.Settings["eBleedTime"] = hc.EBleedTime
bc.Settings["eSwitchTime"] = hc.ESwitchTime
bc.Settings["eRampUpTime"] = hc.ERampUpTime
bc.Settings["eRampUpDelay"] = hc.ERampUpDelay
# this is for legacy analysis compatibility
bc.Settings["eDischargeTime"] = hc.ERampDownTime + hc.ERampDownDelay
bc.Settings["eChargeTime"] = hc.ERampUpTime + hc.ERampUpDelay
# store the E switch asymmetry in the block
bc.Settings["E0PlusBoost"] = hc.E0PlusBoost
return bc
# lock gains
# microamps of current per volt of control input
kSteppingBiasCurrentPerVolt = 2453.06
# max change in the b-bias voltage per block
kBMaxChange = 0.05
# volts of rf*a input required per cal's worth of offset
kRFAVoltsPerCal = 3.2
kRFAMaxChange = 0.1
# volts of rf*f input required per cal's worth of offset
kRFFVoltsPerCal = 8
kRFFMaxChange = 0.1
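# Worked example of how these gains are applied below (numbers illustrative
# only): with hc.CalStepCurrent = 200 and a measured B/DB ratio of 0.05, the
# bias correction is (1.0/10.0) * 200 * 0.05 / kSteppingBiasCurrentPerVolt
# ~= 4.1e-4 V, which is then clamped to +/- kBMaxChange before being applied.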
def updateLocks(bState):
pmtChannelValues = bh.DBlock.ChannelValues[0]
# note the weird python syntax for a one element list
sigValue = pmtChannelValues.GetValue(("SIG",))
bValue = pmtChannelValues.GetValue(("B",))
dbValue = pmtChannelValues.GetValue(("DB",))
rf1aValue = pmtChannelValues.GetValue(("RF1A","DB"))
rf2aValue = pmtChannelValues.GetValue(("RF2A","DB"))
rf1fValue = pmtChannelValues.GetValue(("RF1F","DB"))
rf2fValue = pmtChannelValues.GetValue(("RF2F","DB"))
lf1Value = pmtChannelValues.GetValue(("LF1",))
lf1dbValue = pmtChannelValues.GetValue(("LF1","DB"))
print "SIG: " + str(sigValue)
print "B: " + str(bValue) + " DB: " + str(dbValue)
print "RF1A: " + str(rf1aValue) + " RF2A: " + str(rf2aValue)
print "RF1F: " + str(rf1fValue) + " RF2F: " + str(rf2fValue)
print "LF1: " + str(lf1Value) + " LF1.DB: " + str(lf1dbValue)
# B bias lock
# the sign of the feedback depends on the b-state
if bState:
feedbackSign = 1
else:
feedbackSign = -1
deltaBias = - (1.0/10.0) * feedbackSign * (hc.CalStepCurrent * (bValue / dbValue)) / kSteppingBiasCurrentPerVolt
deltaBias = windowValue(deltaBias, -kBMaxChange, kBMaxChange)
print "Attempting to change stepping B bias by " + str(deltaBias) + " V."
newBiasVoltage = windowValue( hc.SteppingBiasVoltage - deltaBias, -5, 5)
hc.SetSteppingBBiasVoltage( newBiasVoltage )
# RFA locks
deltaRF1A = - (6.0/3.0) * (rf1aValue / dbValue) * kRFAVoltsPerCal
deltaRF1A = windowValue(deltaRF1A, -kRFAMaxChange, kRFAMaxChange)
print "Attempting to change RF1A by " + str(deltaRF1A) + " V."
newRF1A = windowValue( hc.RF1AttCentre - deltaRF1A, hc.RF1AttStep, 5 - hc.RF1AttStep)
hc.SetRF1AttCentre( newRF1A )
#
deltaRF2A = - (6.0/3.0) * (rf2aValue / dbValue) * kRFAVoltsPerCal
deltaRF2A = windowValue(deltaRF2A, -kRFAMaxChange, kRFAMaxChange)
print "Attempting to change RF2A by " + str(deltaRF2A) + " V."
newRF2A = windowValue( hc.RF2AttCentre - deltaRF2A, hc.RF2AttStep, 5 - hc.RF2AttStep )
hc.SetRF2AttCentre( newRF2A )
# RFF locks
deltaRF1F = - (10.0/4.0) * (rf1fValue / dbValue) * kRFFVoltsPerCal
deltaRF1F = windowValue(deltaRF1F, -kRFFMaxChange, kRFFMaxChange)
print "Attempting to change RF1F by " + str(deltaRF1F) + " V."
newRF1F = windowValue( hc.RF1FMCentre - deltaRF1F, hc.RF1FMStep, 5 - hc.RF1FMStep)
hc.SetRF1FMCentre( newRF1F )
#
deltaRF2F = - (10.0/4.0) * (rf2fValue / dbValue) * kRFFVoltsPerCal
deltaRF2F = windowValue(deltaRF2F, -kRFFMaxChange, kRFFMaxChange)
print "Attempting to change RF2F by " + str(deltaRF2F) + " V."
newRF2F = windowValue( hc.RF2FMCentre - deltaRF2F, hc.RF2FMStep, 5 - hc.RF2FMStep )
hc.SetRF2FMCentre( newRF2F )
deltaLF1 = -1.25 * (lf1Value / dbValue)
deltaLF1 = windowValue(deltaLF1, -0.1, 0.1)
print "Attempting to change LF1 by " + str(deltaLF1) + " V."
newLF1 = windowValue( hc.FLPZTVoltage - deltaLF1, hc.FLPZTStep, 5 - hc.FLPZTStep )
hc.SetFLPZTVoltage( newLF1 )
def updateLocksNL(bState):
pmtChannelValues = bh.DBlock.ChannelValues[0]
normedpmtChannelValues = bh.DBlock.ChannelValues[8]
rf1ampReftChannelValues = bh.DBlock.ChannelValues[6]
rf2ampReftChannelValues = bh.DBlock.ChannelValues[7]
# note the weird python syntax for a one element list
sigValue = pmtChannelValues.GetValue(("SIG",))
bValue = pmtChannelValues.GetValue(("B",))
dbValue = pmtChannelValues.GetValue(("DB",))
bDBValue = normedpmtChannelValues.GetSpecialValue("BDB")
rf1aValue = pmtChannelValues.GetValue(("RF1A",))
rf1adbdbValue = normedpmtChannelValues.GetSpecialValue("RF1ADBDB")
rf2aValue = pmtChannelValues.GetValue(("RF2A",))
rf2adbdbValue = normedpmtChannelValues.GetSpecialValue("RF2ADBDB")
rf1fValue = pmtChannelValues.GetValue(("RF1F",))
rf1fdbdbValue = normedpmtChannelValues.GetSpecialValue("RF1FDBDB")
rf2fValue = pmtChannelValues.GetValue(("RF2F",))
rf2fdbdbValue = normedpmtChannelValues.GetSpecialValue("RF2FDBDB")
lf1Value = pmtChannelValues.GetValue(("LF1",))
lf1dbdbValue = normedpmtChannelValues.GetSpecialValue("LF1DBDB")
lf1dbValue = normedpmtChannelValues.GetSpecialValue("LF1DB")
lf2Value = pmtChannelValues.GetValue(("LF2",))
lf2dbdbValue = pmtChannelValues.GetSpecialValue("LF2DBDB")
rf1ampRefSig = rf1ampReftChannelValues.GetValue(("SIG",))
rf2ampRefSig = rf2ampReftChannelValues.GetValue(("SIG",))
rf1ampRefE = rf1ampReftChannelValues.GetValue(("E",))
rf2ampRefE = rf2ampReftChannelValues.GetValue(("E",))
rf1ampRefEErr = rf1ampReftChannelValues.GetError(("E",))
rf2ampRefEErr = rf2ampReftChannelValues.GetError(("E",))
print "SIG: " + str(sigValue)
print "B: " + str(bValue) + " DB: " + str(dbValue)
print "B/DB" + str(bDBValue)
print "RF1A: " + str(rf1aValue) + " RF2A: " + str(rf2aValue)
print "RF1A.DB/DB: " + str(rf1adbdbValue) + " RF2A.DB/DB: " + str(rf2adbdbValue)
print "RF1F: " + str(rf1fValue) + " RF2F: " + str(rf2fValue)
print "LF1: " + str(lf1Value) + " LF1.DB/DB: " + str(lf1dbdbValue)
print "LF2: " + str(lf2Value) + " LF2.DB/DB: " + str(lf2dbdbValue)
print "RF1 Reflected: " + str(rf1ampRefSig) + " RF2 Reflected: " + str(rf2ampRefSig)
print "{E}_RF1 Reflected: {" + str(rf1ampRefE) + " , " + str(rf1ampRefEErr) + " }"
print "{E}_RF2 Reflected: {" + str(rf2ampRefE) + " , " + str(rf2ampRefEErr) + " }"
# B bias lock
# the sign of the feedback depends on the b-state
if bState:
feedbackSign = 1
else:
feedbackSign = -1
deltaBias = - (1.0/10.0) * feedbackSign * (hc.CalStepCurrent * bDBValue) / kSteppingBiasCurrentPerVolt
deltaBias = windowValue(deltaBias, -kBMaxChange, kBMaxChange)
#deltaBias = 0
print "Attempting to change stepping B bias by " + str(deltaBias) + " V."
newBiasVoltage = windowValue( hc.SteppingBiasVoltage - deltaBias, -5, 5)
hc.SetSteppingBBiasVoltage( newBiasVoltage )
# RFA locks
deltaRF1A = - (1.0/2.0) * rf1adbdbValue * kRFAVoltsPerCal
deltaRF1A = windowValue(deltaRF1A, -kRFAMaxChange, kRFAMaxChange)
#deltaRF1A = 0
newRF1A = windowValue( hc.RF1AttCentre - deltaRF1A, hc.RF1AttStep, 5 - hc.RF1AttStep)
if (newRF1A == 4.9):
newSynthAmp = hc.GreenSynthOnAmplitude + 1
print "RF1A pinned, increasing synth to " + str(newSynthAmp) + " dBm."
print "Setting RF1A to 4.5 V."
newRF1A = 4.5
hc.SetRF1AttCentre( newRF1A )
hc.SetGreenSynthAmp(newSynthAmp)
else:
print "Attempting to change RF1A by " + str(deltaRF1A) + " V."
hc.SetRF1AttCentre( newRF1A )
#
deltaRF2A = - (1.0/2.0) * rf2adbdbValue * kRFAVoltsPerCal
deltaRF2A = windowValue(deltaRF2A, -kRFAMaxChange, kRFAMaxChange)
#deltaRF2A = 0
newRF2A = windowValue( hc.RF2AttCentre - deltaRF2A, hc.RF2AttStep, 5 - hc.RF2AttStep )
if (newRF2A == 4.9):
newSynthAmp = hc.GreenSynthOnAmplitude + 1
print "RF2A pinned, increasing synth to " + str(newSynthAmp) + " dBm."
print "Setting RF2A to 4.5 V."
newRF2A = 4.5
hc.SetRF2AttCentre( newRF2A )
hc.SetGreenSynthAmp(newSynthAmp)
else:
print "Attempting to change RF2A by " + str(deltaRF2A) + " V."
hc.SetRF2AttCentre( newRF2A )
# RFF locks
deltaRF1F = - (1.0/2.0) * rf1fdbdbValue * kRFFVoltsPerCal
deltaRF1F = windowValue(deltaRF1F, -kRFFMaxChange, kRFFMaxChange)
#deltaRF1F = 0
print "Attempting to change RF1F by " + str(deltaRF1F) + " V."
newRF1F = windowValue( hc.RF1FMCentre - deltaRF1F, hc.RF1FMStep, 1.1 - hc.RF1FMStep)
hc.SetRF1FMCentre( newRF1F )
#
deltaRF2F = - (1.0/2.0) * rf2fdbdbValue * kRFFVoltsPerCal
deltaRF2F = windowValue(deltaRF2F, -kRFFMaxChange, kRFFMaxChange)
#deltaRF2F = 0
print "Attempting to change RF2F by " + str(deltaRF2F) + " V."
newRF2F = windowValue( hc.RF2FMCentre - deltaRF2F, hc.RF2FMStep, 1.1 - hc.RF2FMStep )
hc.SetRF2FMCentre( newRF2F )
# Laser frequency lock (-ve multiplier in f0 mode and +ve in f1)
deltaLF1 = -2.5* ( lf1dbdbValue)
deltaLF1 = windowValue(deltaLF1, -0.1, 0.1)
#deltaLF1 = 0
print "Attempting to change LF1 by " + str(deltaLF1) + " V."
newLF1 = windowValue( hc.probeAOMVoltage - deltaLF1, hc.probeAOMStep, 10 - hc.probeAOMStep )
hc.SetprobeAOMVoltage( newLF1 )
# Laser frequency lock (-ve multiplier in f0 mode and +ve in f1)
deltaLF2 = - 2.5 * lf2dbdbValue
deltaLF2 = windowValue(deltaLF2, -0.1, 0.1)
#deltaLF2 = 0
print "Attempting to change LF2 by " + str(deltaLF2) + " V."
newLF2 = windowValue( hc.PumpAOMVoltage - deltaLF2, hc.PumpAOMStep, 10 - hc.PumpAOMStep )
hc.SetPumpAOMVoltage( newLF2 )
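# windowValue clamps a proposed correction to [minValue, maxValue] so that a
# single noisy shot cannot step any of the lock set-points by more than the
# configured maximum change.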
def windowValue(value, minValue, maxValue):
    if ( (value < maxValue) and (value > minValue) ):
return value
else:
if (value < minValue):
return minValue
else:
return maxValue
kTargetRotationPeriod = 10
kReZeroLeakageMonitorsPeriod = 10
r = Random()
def EDMGo():
# Setup
f = None
fileSystem = Environs.FileSystem
dataPath = fileSystem.GetDataDirectory(fileSystem.Paths["edmDataPath"])
settingsPath = fileSystem.Paths["settingsPath"] + "\\BlockHead\\"
print("Data directory is : " + dataPath)
print("")
suggestedClusterName = fileSystem.GenerateNextDataFileName()
sm.SelectProfile("Scan B")
# User inputs data
cluster = prompt("Cluster name [" + suggestedClusterName +"]: ")
if cluster == "":
cluster = suggestedClusterName
print("Using cluster " + suggestedClusterName)
measProbePwr = prompt("Measured probe power (mV_3): ")
measPumpPwr = prompt("Measured pump power (mV_3): ")
nightBool = prompt("Night run (Y/N)? ")
eState = hc.EManualState
print("E-state: " + str(eState))
bState = hc.BManualState
print("B-state: " + str(bState))
rfState = hc.RFManualState
print("rf-state: " + str(rfState))
# this is to make sure the B current monitor is in a sensible state
hc.UpdateBCurrentMonitor()
# randomise Ramsey phase
scramblerV = 0.97156 * r.NextDouble()
hc.SetScramblerVoltage(scramblerV)
# randomise polarizations
#hc.SetRandomProbePosition()
#hc.SetRandomPumpPosition()
# calibrate leakage monitors
print("calibrating leakage monitors..")
print("E-field off")
hc.EnableGreenSynth( False )
hc.EnableEField( False )
System.Threading.Thread.Sleep(10000)
hc.EnableBleed( True )
System.Threading.Thread.Sleep(5000)
hc.CalibrateIMonitors()
hc.EnableBleed( False )
System.Threading.Thread.Sleep(500)
print("E-field on")
hc.EnableEField( True )
hc.EnableGreenSynth( True )
print("leakage monitors calibrated")
#print("Waiting For Polarizers (maybe)")
bc = measureParametersAndMakeBC(cluster, eState, bState, rfState, scramblerV, measProbePwr, measPumpPwr)
# loop and take data
blockIndex = 0
maxBlockIndex = 10000
dbValueList = []
Emag1List =[]
Emini1List=[]
Emini2List=[]
Emini3List=[]
while blockIndex < maxBlockIndex:
print("Acquiring block " + str(blockIndex) + " ...")
# save the block config and load into blockhead
print("Saving temp config.")
bc.Settings["clusterIndex"] = blockIndex
tempConfigFile ='%(p)stemp%(c)s_%(i)s.xml' % {'p': settingsPath, 'c': cluster, 'i': blockIndex}
saveBlockConfig(tempConfigFile, bc)
System.Threading.Thread.Sleep(500)
print("Loading temp config.")
bh.LoadConfig(tempConfigFile)
# take the block and save it
print("Running ...")
bh.AcquireAndWait()
print("Done.")
blockPath = '%(p)s%(c)s_%(i)s.zip' % {'p': dataPath, 'c': cluster, 'i': blockIndex}
bh.SaveBlock(blockPath)
print("Saved block "+ str(blockIndex) + ".")
# give mma a chance to analyse the block
print("Notifying Mathematica and waiting ...")
writeLatestBlockNotificationFile(cluster, blockIndex)
System.Threading.Thread.Sleep(5000)
print("Done.")
# increment and loop
File.Delete(tempConfigFile)
checkYAGAndFix()
blockIndex = blockIndex + 1
updateLocksNL(bState)
# randomise Ramsey phase
scramblerV = 0.97156 * r.NextDouble()
hc.SetScramblerVoltage(scramblerV)
# randomise polarizations
#hc.SetRandomProbePosition()
#hc.SetRandomPumpPosition()
bc = measureParametersAndMakeBC(cluster, eState, bState, rfState, scramblerV, measProbePwr, measPumpPwr)
pmtChannelValues = bh.DBlock.ChannelValues[0]
magChannelValues = bh.DBlock.ChannelValues[2]
mini1ChannelValues = bh.DBlock.ChannelValues[9]
mini2ChannelValues = bh.DBlock.ChannelValues[10]
mini3ChannelValues = bh.DBlock.ChannelValues[11]
dbValue = pmtChannelValues.GetValue(("DB",))
magEValue = magChannelValues.GetValue(("E",))
mini1EValue = mini1ChannelValues.GetValue(("E",))
mini2EValue = mini2ChannelValues.GetValue(("E",))
mini3EValue = mini3ChannelValues.GetValue(("E",))
# some code to stop EDMLoop if the laser unlocks.
# This averages the last 3 db values and stops the loop if the average is below 1
dbValueList.append(dbValue)
if (len(dbValueList) == 4):
del dbValueList[0]
print "DB values for last 3 blocks " + str(dbValueList).strip('[]')
runningdbMean =float(sum(dbValueList)) / len(dbValueList)
            if ( runningdbMean < 1 and nightBool == "Y" ):
hc.EnableEField( False )
hc.SetArgonShutter( True )
break
Emag1List.append(magEValue)
if (len(Emag1List) == 11):
del Emag1List[0]
print "E_{Mag} for the last 10 blocks " + str(Emag1List).strip('[]')
runningEmag1Mean =float(sum(Emag1List)) / len(Emag1List)
print "Average E_{Mag} for the last 10 blocks " + str(runningEmag1Mean)
if (dbValue < 8):
print("Dodgy spot target rotation.")
for i in range(3):
hc.StepTarget(2)
System.Threading.Thread.Sleep(500)
if ((blockIndex % kReZeroLeakageMonitorsPeriod) == 0):
print("Recalibrating leakage monitors.")
# calibrate leakage monitors
print("calibrating leakage monitors..")
print("E-field off")
hc.EnableEField( False )
System.Threading.Thread.Sleep(10000)
hc.EnableBleed( True )
System.Threading.Thread.Sleep(5000)
hc.CalibrateIMonitors()
hc.EnableBleed( False )
System.Threading.Thread.Sleep(500)
print("E-field on")
hc.EnableEField( True )
print("leakage monitors calibrated")
bh.StopPattern()
def run_script():
EDMGo()
| mit | -8,754,331,894,581,574,000 | 39.628131 | 132 | 0.716408 | false |
Fokko/incubator-airflow | airflow/operators/cassandra_to_gcs.py | 1 | 14378 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains operator for copying
data from Cassandra to Google cloud storage in JSON format.
"""
import json
import warnings
from base64 import b64encode
from datetime import datetime
from decimal import Decimal
from tempfile import NamedTemporaryFile
from typing import Optional
from uuid import UUID
from cassandra.util import Date, OrderedMapSerializedKey, SortedSet, Time
from airflow.exceptions import AirflowException
from airflow.gcp.hooks.gcs import GoogleCloudStorageHook
from airflow.models import BaseOperator
from airflow.providers.apache.cassandra.hooks.cassandra import CassandraHook
from airflow.utils.decorators import apply_defaults
class CassandraToGoogleCloudStorageOperator(BaseOperator):
"""
Copy data from Cassandra to Google cloud storage in JSON format
Note: Arrays of arrays are not supported.
:param cql: The CQL to execute on the Cassandra table.
:type cql: str
:param bucket: The bucket to upload to.
:type bucket: str
:param filename: The filename to use as the object name when uploading
to Google cloud storage. A {} should be specified in the filename
to allow the operator to inject file numbers in cases where the
file is split due to size.
:type filename: str
:param schema_filename: If set, the filename to use as the object name
when uploading a .json file containing the BigQuery schema fields
        for the table that was dumped from Cassandra.
:type schema_filename: str
:param approx_max_file_size_bytes: This operator supports the ability
to split large table dumps into multiple files (see notes in the
filename param docs above). This param allows developers to specify the
file size of the splits. Check https://cloud.google.com/storage/quotas
to see the maximum allowed file size for a single object.
:type approx_max_file_size_bytes: long
:param cassandra_conn_id: Reference to a specific Cassandra hook.
:type cassandra_conn_id: str
:param gzip: Option to compress file for upload
:type gzip: bool
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:param google_cloud_storage_conn_id: (Deprecated) The connection ID used to connect to Google Cloud
Platform. This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
:type google_cloud_storage_conn_id: str
:param delegate_to: The account to impersonate, if any. For this to
work, the service account making the request must have domain-wide
delegation enabled.
:type delegate_to: str
"""
template_fields = ('cql', 'bucket', 'filename', 'schema_filename',)
template_ext = ('.cql',)
ui_color = '#a0e08c'
@apply_defaults
def __init__(self,
cql: str,
bucket: str,
filename: str,
schema_filename: Optional[str] = None,
approx_max_file_size_bytes: int = 1900000000,
gzip: bool = False,
cassandra_conn_id: str = 'cassandra_default',
gcp_conn_id: str = 'google_cloud_default',
google_cloud_storage_conn_id: Optional[str] = None,
delegate_to: Optional[str] = None,
*args,
**kwargs) -> None:
super().__init__(*args, **kwargs)
if google_cloud_storage_conn_id:
warnings.warn(
"The google_cloud_storage_conn_id parameter has been deprecated. You should pass "
"the gcp_conn_id parameter.", DeprecationWarning, stacklevel=3)
gcp_conn_id = google_cloud_storage_conn_id
self.cql = cql
self.bucket = bucket
self.filename = filename
self.schema_filename = schema_filename
self.approx_max_file_size_bytes = approx_max_file_size_bytes
self.cassandra_conn_id = cassandra_conn_id
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.gzip = gzip
self.hook = None
# Default Cassandra to BigQuery type mapping
CQL_TYPE_MAP = {
'BytesType': 'BYTES',
'DecimalType': 'FLOAT',
'UUIDType': 'BYTES',
'BooleanType': 'BOOL',
'ByteType': 'INTEGER',
'AsciiType': 'STRING',
'FloatType': 'FLOAT',
'DoubleType': 'FLOAT',
'LongType': 'INTEGER',
'Int32Type': 'INTEGER',
'IntegerType': 'INTEGER',
'InetAddressType': 'STRING',
'CounterColumnType': 'INTEGER',
'DateType': 'TIMESTAMP',
'SimpleDateType': 'DATE',
'TimestampType': 'TIMESTAMP',
'TimeUUIDType': 'BYTES',
'ShortType': 'INTEGER',
'TimeType': 'TIME',
'DurationType': 'INTEGER',
'UTF8Type': 'STRING',
'VarcharType': 'STRING',
}
def execute(self, context):
cursor = self._query_cassandra()
files_to_upload = self._write_local_data_files(cursor)
# If a schema is set, create a BQ schema JSON file.
if self.schema_filename:
files_to_upload.update(self._write_local_schema_file(cursor))
# Flush all files before uploading
for file_handle in files_to_upload.values():
file_handle.flush()
self._upload_to_gcs(files_to_upload)
# Close all temp file handles.
for file_handle in files_to_upload.values():
file_handle.close()
# Close all sessions and connection associated with this Cassandra cluster
self.hook.shutdown_cluster()
def _query_cassandra(self):
"""
Queries cassandra and returns a cursor to the results.
"""
self.hook = CassandraHook(cassandra_conn_id=self.cassandra_conn_id)
session = self.hook.get_conn()
cursor = session.execute(self.cql)
return cursor
def _write_local_data_files(self, cursor):
"""
Takes a cursor, and writes results to a local file.
:return: A dictionary where keys are filenames to be used as object
names in GCS, and values are file handles to local files that
contain the data for the GCS objects.
"""
file_no = 0
tmp_file_handle = NamedTemporaryFile(delete=True)
tmp_file_handles = {self.filename.format(file_no): tmp_file_handle}
for row in cursor:
row_dict = self.generate_data_dict(row._fields, row)
s = json.dumps(row_dict).encode('utf-8')
tmp_file_handle.write(s)
# Append newline to make dumps BigQuery compatible.
tmp_file_handle.write(b'\n')
if tmp_file_handle.tell() >= self.approx_max_file_size_bytes:
file_no += 1
tmp_file_handle = NamedTemporaryFile(delete=True)
tmp_file_handles[self.filename.format(file_no)] = tmp_file_handle
return tmp_file_handles
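    # Each file above holds newline-delimited JSON, one row per line, which is
    # the layout BigQuery expects, e.g. (values illustrative):
    #   {"id": 1, "name": "alice"}
    #   {"id": 2, "name": "bob"}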
def _write_local_schema_file(self, cursor):
"""
Takes a cursor, and writes the BigQuery schema for the results to a
local file system.
:return: A dictionary where key is a filename to be used as an object
name in GCS, and values are file handles to local files that
contains the BigQuery schema fields in .json format.
"""
schema = []
tmp_schema_file_handle = NamedTemporaryFile(delete=True)
for name, type in zip(cursor.column_names, cursor.column_types):
schema.append(self.generate_schema_dict(name, type))
json_serialized_schema = json.dumps(schema).encode('utf-8')
tmp_schema_file_handle.write(json_serialized_schema)
return {self.schema_filename: tmp_schema_file_handle}
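    # The schema file holds a JSON list of BigQuery field descriptors built by
    # generate_schema_dict, e.g. (illustrative):
    #   [{"name": "id", "type": "INTEGER", "mode": "NULLABLE"},
    #    {"name": "tags", "type": "STRING", "mode": "REPEATED"}]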
def _upload_to_gcs(self, files_to_upload):
hook = GoogleCloudStorageHook(
google_cloud_storage_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to)
        for object_name, tmp_file_handle in files_to_upload.items():
            hook.upload(self.bucket, object_name, tmp_file_handle.name, 'application/json', self.gzip)
@classmethod
def generate_data_dict(cls, names, values):
row_dict = {}
for name, value in zip(names, values):
row_dict.update({name: cls.convert_value(name, value)})
return row_dict
@classmethod
def convert_value(cls, name, value):
if not value:
return value
elif isinstance(value, (str, int, float, bool, dict)):
return value
elif isinstance(value, bytes):
return b64encode(value).decode('ascii')
elif isinstance(value, UUID):
return b64encode(value.bytes).decode('ascii')
elif isinstance(value, (datetime, Date)):
return str(value)
elif isinstance(value, Decimal):
return float(value)
elif isinstance(value, Time):
return str(value).split('.')[0]
elif isinstance(value, (list, SortedSet)):
return cls.convert_array_types(name, value)
elif hasattr(value, '_fields'):
return cls.convert_user_type(name, value)
elif isinstance(value, tuple):
return cls.convert_tuple_type(name, value)
elif isinstance(value, OrderedMapSerializedKey):
return cls.convert_map_type(name, value)
else:
raise AirflowException('unexpected value: ' + str(value))
@classmethod
def convert_array_types(cls, name, value):
return [cls.convert_value(name, nested_value) for nested_value in value]
@classmethod
def convert_user_type(cls, name, value):
"""
Converts a user type to RECORD that contains n fields, where n is the
number of attributes. Each element in the user type class will be converted to its
corresponding data type in BQ.
"""
names = value._fields
values = [cls.convert_value(name, getattr(value, name)) for name in names]
return cls.generate_data_dict(names, values)
@classmethod
def convert_tuple_type(cls, name, value):
"""
Converts a tuple to RECORD that contains n fields, each will be converted
to its corresponding data type in bq and will be named 'field_<index>', where
index is determined by the order of the tuple elements defined in cassandra.
"""
names = ['field_' + str(i) for i in range(len(value))]
values = [cls.convert_value(name, value) for name, value in zip(names, value)]
return cls.generate_data_dict(names, values)
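    # e.g. a Cassandra tuple (1, 'a') comes out as {"field_0": 1, "field_1": "a"}.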
@classmethod
def convert_map_type(cls, name, value):
"""
Converts a map to a repeated RECORD that contains two fields: 'key' and 'value',
each will be converted to its corresponding data type in BQ.
"""
converted_map = []
for k, v in zip(value.keys(), value.values()):
converted_map.append({
'key': cls.convert_value('key', k),
'value': cls.convert_value('value', v)
})
return converted_map
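    # e.g. a Cassandra map {'k': 1} comes out as [{"key": "k", "value": 1}].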
@classmethod
def generate_schema_dict(cls, name, type):
field_schema = dict()
field_schema.update({'name': name})
field_schema.update({'type': cls.get_bq_type(type)})
field_schema.update({'mode': cls.get_bq_mode(type)})
fields = cls.get_bq_fields(name, type)
if fields:
field_schema.update({'fields': fields})
return field_schema
@classmethod
def get_bq_fields(cls, name, type):
fields = []
if not cls.is_simple_type(type):
names, types = [], []
if cls.is_array_type(type) and cls.is_record_type(type.subtypes[0]):
names = type.subtypes[0].fieldnames
types = type.subtypes[0].subtypes
elif cls.is_record_type(type):
names = type.fieldnames
types = type.subtypes
if types and not names and type.cassname == 'TupleType':
names = ['field_' + str(i) for i in range(len(types))]
elif types and not names and type.cassname == 'MapType':
names = ['key', 'value']
for name, type in zip(names, types):
field = cls.generate_schema_dict(name, type)
fields.append(field)
return fields
@classmethod
def is_simple_type(cls, type):
return type.cassname in CassandraToGoogleCloudStorageOperator.CQL_TYPE_MAP
@classmethod
def is_array_type(cls, type):
return type.cassname in ['ListType', 'SetType']
@classmethod
def is_record_type(cls, type):
return type.cassname in ['UserType', 'TupleType', 'MapType']
@classmethod
def get_bq_type(cls, type):
if cls.is_simple_type(type):
return CassandraToGoogleCloudStorageOperator.CQL_TYPE_MAP[type.cassname]
elif cls.is_record_type(type):
return 'RECORD'
elif cls.is_array_type(type):
return cls.get_bq_type(type.subtypes[0])
else:
raise AirflowException('Not a supported type: ' + type.cassname)
@classmethod
def get_bq_mode(cls, type):
if cls.is_array_type(type) or type.cassname == 'MapType':
return 'REPEATED'
elif cls.is_record_type(type) or cls.is_simple_type(type):
return 'NULLABLE'
else:
raise AirflowException('Not a supported type: ' + type.cassname)
| apache-2.0 | -4,027,919,065,744,805,000 | 38.284153 | 104 | 0.627973 | false |
nshearer/etl | src/etl/EtlJoinProcessor.py | 1 | 5333 | '''
Created on Dec 28, 2012
@author: nshearer
'''
from abc import ABCMeta, abstractmethod
from EtlProcessor import EtlProcessor
class EtlJoinProcessor(EtlProcessor):
'''Join one set of records to another'''
def __init__(self):
super(EtlJoinProcessor, self).__init__()
self.__match_keys = dict() # Match Key -> (input_set, Record Key)
self.__lookup_inputs_processed = False
def list_inputs(self):
for p_input in self.list_lookup_inputs():
yield p_input
for p_input in self.list_subject_inputs():
yield p_input
# -- Override these -------------------------------------------------------
@abstractmethod
def list_lookup_inputs(self):
'''List inputs that contain the records to ref against
These record sets must be indexed
'''
@abstractmethod
def list_subject_inputs(self):
'''List inputs that contain the records to find refs for'''
@abstractmethod
def build_lookup_record_key(self, lookup_record):
'''Build a key to be used for matching subject records to'''
@abstractmethod
def build_lookup_key(self, record):
'''Build a key to use to find a lookup record'''
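    # A concrete join keys both sides on the same natural key, e.g. (sketch
    # only -- the class and field names below are illustrative):
    #
    # class InvoicePersonJoin(EtlJoinProcessor):
    #     def build_lookup_record_key(self, lookup_record):
    #         return lookup_record['ssn']
    #     def build_lookup_key(self, record):
    #         return record['ssn']
    #
    # list_lookup_inputs() / list_subject_inputs() would return the data ports
    # holding the reference records and the records to be matched.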
# -- Common join logic ----------------------------------------------------
def gen_output(self, name, inputs, record_set):
'''Generate named output data.
Dynamically calls 'gen_<name>_output' method
@param name: Name of the output to generate
@param inputs: Dictionary of connected input datasets
@param record_set: Container to populate with records
'''
if not self.__lookup_inputs_processed:
# Generate keys for lookup records
for data_port in self.list_lookup_inputs():
for input_set in inputs[data_port.name]:
for record in input_set.all_records():
# Build a Match key for this lookup record
match_key = self.build_lookup_record_key(record)
if match_key is None:
msg = "Did not build a match key for this record"
msg = record.create_msg(msg)
raise Exception(msg)
# Determine associated index
rec_index = record.index
if rec_index is None:
msg = "Record in lookup input has no index."
msg = record.create_msg(msg)
raise Exception(msg)
# Make sure match key is unique
if self.__match_keys.has_key(match_key):
handle = self._handle_duplicate_lookup_match_key
handle(match_key, record)
# Store
else:
store_rec = self._store_lookup_record
store_rec(match_key, input_set, rec_index)
self.__lookup_inputs_processed = True
# Call Parent to process subject records
super(EtlJoinProcessor, self).gen_output(name, inputs, record_set)
#def gen_invoices_output(self, inputs, output_set):
# for record_set in inputs['invoices']:
# for record in record_set.all_records():
# ref_record = self.lookup(record)
# if ref_record is not None:
# # Get values from subject
# values = record.values
#
# # Copy in values from lookup record
# for name in ['pidm', 'name', 'ssn']:
# values[name] = ref_record[name]
#
# # Output record
# output_set.add_record(values)
def lookup(self, record):
'''Find record in lookup sets for this record'''
# Build a Match key for this lookup record
match_key = self.build_lookup_key(record)
if match_key is None:
msg = "Did not build a match key for this record"
msg = record.create_msg(msg)
raise Exception(msg)
# Find match
if self.__match_keys.has_key(match_key):
input_set, lookup_index = self.__match_keys[match_key]
return input_set.get_record(lookup_index)
return None
def _handle_duplicate_lookup_match_key(self, match_key, record):
msg = "Duplicated match key '%s'" % (match_key)
msg = record.create_msg(msg)
raise Exception(msg)
def _store_lookup_record(self, match_key, lookup_set, index):
        self.__match_keys[match_key] = (lookup_set, index)
| bsd-3-clause | -5,025,042,689,156,766,000 | 36.381295 | 79 | 0.475342 | false |
mattduan/proof | util/UniqueList.py | 1 | 4847 | """
List with unique entries. UniqueList does not allow null nor duplicates.
"""
__version__= '$Revision: 11 $'[11:-2]
__author__ = "Duan Guoqiang ([email protected])"
class UniqueList(list):
def __init__(self, initlist=[]):
# call super class
list.__init__(self)
# add initlist
if initlist:
self.extend(initlist)
def __getslice__(self, i, j):
# return a UniqueList object
i = max(i, 0); j = max(j, 0)
return self.__class__(list.__getslice__(self, i, j))
def __setslice__(self, i, j, other):
i = max(i, 0); j = max(j, 0)
# remove duplicates
uniques = []
try:
for o in other:
if o not in self:
uniques.append(o)
except TypeError:
raise TypeError( "UniqueList.__setslice__() argument %s must be iterable" % (other) )
# call super class
list.__setslice__(self, i, j, uniques)
def __add(self, l, flag=None):
""" A convenient method for all add call.
"""
if type(l) == type([]) or \
isinstance(l, UniqueList):
if flag == "r":
new_list = UniqueList()
new_list.extend(l)
new_list.extend(self)
return new_list
elif flag == "i":
self.extend(l)
return self
else:
new_list = UniqueList()
new_list.extend(self)
new_list.extend(l)
return new_list
else:
raise TypeError( """can only concatenate list/List/UniqueList (not "%s")""" % \
type(l) )
def __add__(self, l):
return self.__add(l)
def __radd__(self, l):
return self.__add(l, "r")
def __iadd__(self, l):
return self.__add(l, "i")
def __mul__(self, n):
return self
__rmul__ = __mul__
__imul__ = __mul__
def append(self, item):
""" Append an Item to the list.
@param item the Item to append
"""
if item != None and item not in self:
list.append(self, item)
def insert(self, i, item):
""" Insert an item to the list.
@param i the index to insert
@param item the item to insert
"""
if item != None and item not in self:
list.insert(self, i, item)
def extend(self, l):
""" Extend another list into this list.
@param l the list to extend
"""
try:
for i in l:
self.append(i)
except TypeError, msg:
raise TypeError("UniqueList.extend() argument must be iterable")
def clear(self):
""" Remove all items in the list.
"""
list.__init__(self, [])
# only used for test
if __name__ == '__main__':
print
print "UniqueList Test"
print
print "testing constructor"
ul1 = UniqueList()
print "ul1 (UniqueList()) => %s" % (ul1)
ul2 = UniqueList('123')
print "ul2 (UniqueList('123')) => %s" % (ul2)
ul3 = UniqueList([1,1,2,3])
print "ul3 (UniqueList([1,1,2,3])) => %s" % (ul3)
print
print 'testing type'
print "ul1 type => %s" % (type(ul1))
print "ul1 is subclass list => %s" % (issubclass(ul1.__class__, list))
print "testing append"
ul1.append(2)
print "ul1.append(2) => %s" % (ul1)
ul1.append(2)
print "ul1.append(2) => %s" % (ul1)
ul2.append(2)
print "ul2.append(2) => %s" % (ul2)
ul3.append(2)
print "ul3.append(2) => %s" % (ul3)
print
print "testing insert"
ul1.insert(1, 1)
print "ul1.insert(1, 1) => %s" % (ul1)
ul1.insert(1, 1)
print "ul1.insert(1, 1) => %s" % (ul1)
ul3.insert(3, 3)
print "ul3.insert(3, 3) => %s" % (ul3)
print
print "testing extend"
ul1.extend('123')
print "ul1.extend('123') => %s" % (ul1)
ul1.extend([1,2,3])
print "ul1.extend([1,2,3]) => %s" % (ul1)
print
print "testing +"
print "ul1 = %s" % (ul1)
print "ul2 = %s" % (ul2)
print "ul3 = %s" % (ul3)
ul4 = ul1 + ul2 + ul3
print "ul1 + ul2 + ul3 => %s" % (ul4)
print "type(ul1 + ul2 + ul3) => %s" % (type(ul4))
print "ul1 + [2,4,5] => %s" % (ul1 + [2,4,5])
print "type(ul1 + [2,4,5]) => %s" % (type(ul1 + [2,4,5]))
print
print "testing slice"
print "ul1[2:5] => %s" % (ul1[2:5])
ul1[2:5] = [1,2,3]
print "ul1[2:5] = [1,2,3]"
print "ul1 => %s" % (ul1)
print "type(ul1) => %s" % (type(ul1))
print
print "testing mul"
print "ul1 * 3 => %s" % (ul1*3)
print
print "testing clear"
ul1.clear()
print "ul1.clear() => %s" % (ul1)
print
print "done."
print
| bsd-3-clause | -1,884,556,780,845,086,200 | 25.2 | 97 | 0.482773 | false |
codelv/enaml-native | src/enamlnative/core/eventloop/interface.py | 1 | 2341 | #!/usr/bin/env python
#
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Interfaces for platform-specific functionality.
This module exists primarily for documentation purposes and as base classes
for other tornado.platform modules. Most code should import the appropriate
implementation from `tornado.platform.auto`.
"""
from __future__ import division, print_function
from atom.api import Atom, Value
def set_close_exec(fd):
"""Sets the close-on-exec bit (``FD_CLOEXEC``)for a file descriptor."""
raise NotImplementedError()
class Waker(Atom):
"""A socket-like object that can wake another thread from ``select()``.
The `~tornado.ioloop.IOLoop` will add the Waker's `fileno()` to
its ``select`` (or ``epoll`` or ``kqueue``) calls. When another
thread wants to wake up the loop, it calls `wake`. Once it has woken
up, it will call `consume` to do any necessary per-wake cleanup. When
the ``IOLoop`` is closed, it closes its waker too.
"""
reader = Value()
writer = Value()
def fileno(self):
"""Returns the read file descriptor for this waker.
Must be suitable for use with ``select()`` or equivalent on the
local platform.
"""
raise NotImplementedError()
def write_fileno(self):
"""Returns the write file descriptor for this waker."""
raise NotImplementedError()
def wake(self):
"""Triggers activity on the waker's file descriptor."""
raise NotImplementedError()
def consume(self):
"""Called after the listen has woken up to do any necessary cleanup."""
raise NotImplementedError()
def close(self):
"""Closes the waker's file descriptor(s)."""
raise NotImplementedError()
def monotonic_time():
    raise NotImplementedError()
| mit | 415,035,762,076,911,400 | 31.527778 | 79 | 0.691158 | false |
flopezag/fiware-backlog | kernel/DataBoard.py | 1 | 5593 | from kconfig import settings
from kconfig import enablersBook, helpdeskCompBook
from kconfig import trackersBook, workGroupBook
from kernel.DataFactory import DataFactory
__author__ = "Manuel Escriche <[email protected]>"
class Data:
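    """Static helpers that build JQL queries and fetch tracker, component and
    enabler data through DataFactory."""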
@staticmethod
def getUrgentDeskUpcoming():
trackers = ','.join(trackersBook[tracker].keystone for tracker in trackersBook)
jql = "duedate >= 0d AND duedate <= 7d AND status != Closed AND project in ({})".format(trackers)
return DataFactory(settings.storeHome).getQueryData('urgent.upcoming', jql)
@staticmethod
def getUrgentDeskOverdue():
trackers = ','.join(trackersBook[tracker].keystone for tracker in trackersBook)
jql = "duedate < now() AND status != Closed AND project in ({})".format(trackers)
return DataFactory(settings.storeHome).getQueryData('urgent.upcoming', jql)
@staticmethod
def getHelpDesk():
return DataFactory(settings.storeHome).getTrackerData('HELP')
@staticmethod
def getHelpDeskTechChannel():
techChannel = helpdeskCompBook['Tech']
return DataFactory(settings.storeHome).getComponentData(techChannel.key)
@staticmethod
def getDesk(desk):
return DataFactory(settings.storeHome).getTrackerData(desk.tracker)
@staticmethod
def getFocusedDesk(desk):
jql = "project = {} AND (resolution = Unresolved OR resolutiondate <= 60d)".format(desk.tracker)
return DataFactory(settings.storeHome).getQueryData('{}.focused'.format(desk.tracker), jql)
@staticmethod
def getChannel(channel):
return DataFactory(settings.storeHome).getComponentData(channel.id)
@staticmethod
def getFocusedChannel(channel):
jql = "component = {} AND (resolution = Unresolved OR resolutiondate <= 60d)".format(channel.key)
return DataFactory(settings.storeHome).getQueryData('{}.focused'.format(channel.key), jql)
@staticmethod
def getEnabler(enablername):
cmp_id = enablersBook[enablername]
return DataFactory(settings.storeHome).getComponentData(cmp_id.key)
@staticmethod
def getAccountDeskRequests():
jql = "project = FLUA AND issuetype = UpgradeAccount"
return DataFactory(settings.storeHome).getQueryData('account.requests', jql)
@staticmethod
def getFocusedAccountDeskRequest():
jql = "project = FLUA AND issuetype = UpgradeAccount AND (resolution = Unresolved OR resolutiondate <= 60d)"
return DataFactory(settings.storeHome).getQueryData('account.focusedRequests', jql)
@staticmethod
def getAccountChannelRequests(channnel):
jql = "component = {} AND issuetype = UpgradeAccount".format(channnel.key)
return DataFactory(settings.storeHome).getQueryData('account.requests', jql)
@staticmethod
def getFocusedAccountChannelRequest(channel):
jql = "component = {} AND issuetype = UpgradeAccount AND (resolution = Unresolved OR resolutiondate <= 60d)".format(channel.key)
return DataFactory(settings.storeHome).getQueryData('account.focusedRequests', jql)
@staticmethod
def getAccountDeskProvisioning():
jql = "project = FLUA AND issuetype = AccountUpgradeByNode"
return DataFactory(settings.storeHome).getQueryData('account.provisioning', jql)
@staticmethod
def getFocusedAccountDeskProvisioning():
jql = "project = FLUA AND issuetype = AccountUpgradeByNode AND (resolution = Unresolved OR resolutiondate <= 60d)"
return DataFactory(settings.storeHome).getQueryData('account.focusedProvisioning', jql)
@staticmethod
def getEnablerHelpDesk(enablername):
enabler = enablersBook[enablername]
jql = "project = HELP AND issuetype in (Monitor, extRequest) AND HD-Enabler = '{}'".format(enablername)
return DataFactory(settings.storeHome).getQueryData('helpdesk.enabler-{}'.format(enabler.backlogKeyword), jql)
@staticmethod
def getChapterHelpDesk(chaptername):
jql = "project = HELP AND issuetype in (Monitor, extRequest) AND HD-Chapter = '{}'".format(chaptername)
return DataFactory(settings.storeHome).getQueryData('helpdesk.chapter-{}'.format(chaptername), jql)
@staticmethod
def getNodeHelpDesk(nodename):
jql = "project = HELP AND issuetype in (Monitor, extRequest) AND HD-Node = '{}'".format(nodename)
return DataFactory(settings.storeHome).getQueryData('helpdesk.node-{}'.format(nodename), jql)
@staticmethod
def getGlobalComponent(key):
return DataFactory(settings.storeHome).getComponentData(key)
@staticmethod
def getChannel(key):
return DataFactory(settings.storeHome).getComponentData(key)
@staticmethod
def getWorkGroups():
trackers = ','.join([workGroupBook[item].tracker for item in workGroupBook])
jql = 'project in ({})'.format(trackers)
return DataFactory(settings.storeHome).getQueryData('workgroups',jql)
@staticmethod
def getWorkGroup(key):
return DataFactory(settings.storeHome).getTrackerData(key)
@staticmethod
def getWorkGroupComponent(key):
return DataFactory(settings.storeHome).getComponentData(key)
@staticmethod
def getWorkGroupNoComponent(key):
return DataFactory(settings.storeHome).getTrackerNoComponentData(key)
@staticmethod
def getLab():
return DataFactory(settings.storeHome).getTrackerData('LAB')
@staticmethod
def getLabComponent(cmp):
return DataFactory(settings.storeHome).getComponentData(cmp.key)
if __name__ == "__main__":
pass
| apache-2.0 | 5,845,164,835,955,047,000 | 39.824818 | 136 | 0.714107 | false |
george-hopkins/frn-py | frn/common/protocol.py | 1 | 2718 | from twisted.protocols import basic
from frn.utils import parse_dict
class InvalidServerResponse(Exception):
pass
class InvalidClientRequest(Exception):
pass
class LineReceiver(basic.LineReceiver):
def decodedLineReceived(self, line):
"""Override this for when each line is received."""
raise NotImplementedError
def lineReceived(self, line):
"""Decode a received line."""
line = line.decode('iso-8859-1').encode('utf8')
self.decodedLineReceived(line)
def sendLine(self, line):
"""Send a line to the other end of the connection."""
line = str(line).decode('utf8').encode('iso-8859-1')
basic.LineReceiver.sendLine(self, line)
class CommandClient(LineReceiver):
def __init__(self):
self.commandQueue = []
def sendCommand(self, command, before, handler):
wasEmpty = not self.commandQueue
self.commandQueue.append((command, before, handler))
if wasEmpty:
self.__sendNextCommand()
def __sendNextCommand(self):
if self.commandQueue:
command, before, handler = self.commandQueue[0]
if before:
before()
if command:
self.sendLine(command)
else:
self.__finishCommand()
def __finishCommand(self):
if self.commandQueue:
self.commandQueue.pop(0)
self.__sendNextCommand()
def decodedLineReceived(self, line):
if self.commandQueue:
if self.commandQueue[0][2](line) is not True:
self.__finishCommand()
else:
            raise InvalidServerResponse('Unexpected line received.')
def finish(self):
self.sendCommand(None, self.transport.loseConnection, None)
class CommandServer(LineReceiver):
def __init__(self):
self.commandHandlers = {}
def registerCommand(self, name, handler, allowedArgs=False):
self.commandHandlers[name] = (handler, allowedArgs)
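    # Typical usage (sketch; the command and argument names are illustrative):
    #   self.registerCommand('LOGIN', self.handleLogin, ['user', 'pass'])
    # Incoming lines are split on the first ':'; the remainder is parsed into a
    # dict and, when allowedArgs is given, filtered down to those keys before
    # the handler is called.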
def deregisterCommands(self):
self.commandHandlers = {}
def decodedLineReceived(self, line):
parts = line.split(':', 1)
command = parts[0]
if len(parts) == 1:
args = {}
elif parts[1] and parts[1][0] == '<':
args = parse_dict(parts[1])
else:
args = {'_': parts[1]}
if command in self.commandHandlers:
handler, allowedArgs = self.commandHandlers[command]
if allowedArgs is False:
handler(args)
else:
handler({key: args[key] for key in allowedArgs})
else:
raise InvalidClientRequest('Unknown command "%s".' % command)
| lgpl-3.0 | -7,237,886,593,561,025,000 | 28.868132 | 73 | 0.598234 | false |
eoconsulting/django-zoook | django_zoook/tag/views.py | 1 | 4455 | # -*- coding: utf-8 -*-
############################################################################################
#
# Zoook. OpenERP e-sale, e-commerce Open Source Management Solution
# Copyright (C) 2011 Zikzakmedia S.L. (<http://www.zikzakmedia.com>). All Rights Reserved
#
# Module Created: 03/05/2012
# Author: Mariano Ruiz <[email protected]>,
# Enterprise Objects Consulting (<http://www.eoconsulting.com.ar>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
############################################################################################
from django.shortcuts import render_to_response
from django.http import Http404
from django.template import RequestContext
from django.utils.translation import ugettext as _
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.utils.translation import get_language
from django.db.models import Q
from django_zoook.settings import *
from django_zoook.catalog.models import *
from django_zoook.tools.paginator import *
def keyword(request,tag):
"""All Products filtered by keyword"""
q = tag
values = []
if q:
kwargs_eq = {
'product_tmpl__metakeyword_'+get_language(): u'%s' % q,
}
kwargs_start = {
'product_tmpl__metakeyword_'+get_language()+'__istartswith': u'%s,' % q,
}
kwargs_md = {
'product_tmpl__metakeyword_'+get_language()+'__icontains': u',%s,' % q,
}
kwargs_end = {
'product_tmpl__metakeyword_'+get_language()+'__iendswith': u',%s' % q,
}
product_products = ProductProduct.objects.filter(
#Q(product_tmpl__status=True), Q(active=True),
Q(product_tmpl__visibility='all') | Q(product_tmpl__visibility='search') | Q(product_tmpl__visibility='catalog'),
Q(**kwargs_eq) | Q(**kwargs_start) | Q(**kwargs_md) | Q(**kwargs_end))
# Pagination options
set_paginator_options(request, 'price')
total = product_products.count()
paginator = Paginator(product_products, request.session['paginator'])
num_pages = get_num_pages(product_products, request.session['paginator'])
page = int(request.GET.get('page', '1'))
# If page request (9999) is out of range, deliver last page of results.
try:
product_products = paginator.page(page)
except (EmptyPage, InvalidPage):
product_products = paginator.page(paginator.num_pages)
# == template values ==
title = _(u"'%(tag)s' - Page %(page)s of %(total)s") % {'tag': q, 'page': product_products.number, 'total': num_pages}
metadescription = _(u"'%(tag)s' - Page %(page)s of %(total)s") % {'tag': q, 'page': product_products.number, 'total': num_pages}
category_values = {
'title': title,
'query': u'“%s”' % q,
'tag': q,
'metadescription': metadescription,
'product_products': product_products,
'paginator_option': request.session['paginator'],
'mode_option': request.session['mode'],
'order_option': request.session['order'],
'order_by_option': request.session['order_by'],
'paginator_items': PAGINATOR_ITEMS,
'catalog_orders': CATALOG_ORDERS,
'total': total,
'currency': DEFAULT_CURRENCY,
'compare_on': COMPARE_ON,
'update_price': UPDATE_PRICE,
'currency_position': CURRENCY_LABEL_POSITION,
}
return render_to_response("tag/tag.html", category_values, context_instance=RequestContext(request))
else:
        raise Http404(_('This query is not available because you navigated here from a bookmark or a search engine. Please use the navigation menu.'))
| agpl-3.0 | -6,514,768,064,720,154,000 | 42.213592 | 136 | 0.602112 | false |
MathieuDuponchelle/gobject-introspection | giscanner/girparser.py | 1 | 26691 | # -*- Mode: Python -*-
# GObject-Introspection - a framework for introspecting GObject libraries
# Copyright (C) 2008 Johan Dahlin
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
import os
from xml.etree.cElementTree import parse
from . import ast
from .girwriter import COMPATIBLE_GIR_VERSION
from .collections import OrderedDict
CORE_NS = "http://www.gtk.org/introspection/core/1.0"
C_NS = "http://www.gtk.org/introspection/c/1.0"
GLIB_NS = "http://www.gtk.org/introspection/glib/1.0"
def _corens(tag):
return '{%s}%s' % (CORE_NS, tag)
def _glibns(tag):
return '{%s}%s' % (GLIB_NS, tag)
def _cns(tag):
return '{%s}%s' % (C_NS, tag)
class GIRParser(object):
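    """Parses a GIR XML file into the giscanner ast model.
    Call parse(filename) or parse_tree(tree), then retrieve the resulting
    ast.Namespace with get_namespace().
    """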
def __init__(self, types_only=False):
self._types_only = types_only
self._namespace = None
self._filename_stack = []
# Public API
def parse(self, filename):
filename = os.path.abspath(filename)
self._filename_stack.append(filename)
tree = parse(filename)
self.parse_tree(tree)
self._filename_stack.pop()
def parse_tree(self, tree):
self._namespace = None
self._pkgconfig_packages = set()
self._includes = set()
self._c_includes = set()
self._c_prefix = None
self._parse_api(tree.getroot())
def get_namespace(self):
return self._namespace
# Private
def _find_first_child(self, node, name_or_names):
if isinstance(name_or_names, str):
for child in node.getchildren():
if child.tag == name_or_names:
return child
else:
for child in node.getchildren():
if child.tag in name_or_names:
return child
return None
def _find_children(self, node, name):
return [child for child in node.getchildren() if child.tag == name]
def _get_current_file(self):
if not self._filename_stack:
return None
cwd = os.getcwd() + os.sep
curfile = self._filename_stack[-1]
if curfile.startswith(cwd):
return curfile[len(cwd):]
return curfile
def _parse_api(self, root):
assert root.tag == _corens('repository')
version = root.attrib['version']
if version != COMPATIBLE_GIR_VERSION:
raise SystemExit("%s: Incompatible version %s (supported: %s)" %
(self._get_current_file(), version, COMPATIBLE_GIR_VERSION))
for node in root.getchildren():
if node.tag == _corens('include'):
self._parse_include(node)
elif node.tag == _corens('package'):
self._parse_pkgconfig_package(node)
elif node.tag == _cns('include'):
self._parse_c_include(node)
ns = root.find(_corens('namespace'))
assert ns is not None
identifier_prefixes = ns.attrib.get(_cns('identifier-prefixes'))
if identifier_prefixes:
identifier_prefixes = identifier_prefixes.split(',')
symbol_prefixes = ns.attrib.get(_cns('symbol-prefixes'))
if symbol_prefixes:
symbol_prefixes = symbol_prefixes.split(',')
self._namespace = ast.Namespace(ns.attrib['name'],
ns.attrib['version'],
identifier_prefixes=identifier_prefixes,
symbol_prefixes=symbol_prefixes)
if 'shared-library' in ns.attrib:
self._namespace.shared_libraries = ns.attrib['shared-library'].split(',')
self._namespace.includes = self._includes
self._namespace.c_includes = self._c_includes
self._namespace.exported_packages = self._pkgconfig_packages
parser_methods = {
_corens('alias'): self._parse_alias,
_corens('bitfield'): self._parse_enumeration_bitfield,
_corens('callback'): self._parse_callback,
_corens('class'): self._parse_object_interface,
_corens('enumeration'): self._parse_enumeration_bitfield,
_corens('interface'): self._parse_object_interface,
_corens('record'): self._parse_record,
_corens('union'): self._parse_union,
_corens('docsection'): self._parse_doc_section,
_glibns('boxed'): self._parse_boxed}
if not self._types_only:
parser_methods[_corens('constant')] = self._parse_constant
parser_methods[_corens('function')] = self._parse_function
for node in ns.getchildren():
method = parser_methods.get(node.tag)
if method is not None:
method(node)
def _parse_doc_section(self, node):
docsection = ast.DocSection(node.attrib["name"])
self._parse_generic_attribs(node, docsection)
self._namespace.append(docsection)
def _parse_include(self, node):
include = ast.Include(node.attrib['name'], node.attrib['version'])
self._includes.add(include)
def _parse_pkgconfig_package(self, node):
self._pkgconfig_packages.add(node.attrib['name'])
def _parse_c_include(self, node):
self._c_includes.add(node.attrib['name'])
def _parse_alias(self, node):
typeval = self._parse_type(node)
alias = ast.Alias(node.attrib['name'], typeval, node.attrib.get(_cns('type')))
self._parse_generic_attribs(node, alias)
self._namespace.append(alias)
def _parse_generic_attribs(self, node, obj):
assert isinstance(obj, ast.Annotated)
skip = node.attrib.get('skip')
if skip:
try:
obj.skip = int(skip) > 0
except ValueError:
obj.skip = False
introspectable = node.attrib.get('introspectable')
if introspectable:
try:
obj.introspectable = int(introspectable) > 0
except ValueError:
obj.introspectable = False
if self._types_only:
return
doc = node.find(_corens('doc'))
if doc is not None:
if doc.text:
obj.doc = doc.text
version = node.attrib.get('version')
if version:
obj.version = version
version_doc = node.find(_corens('doc-version'))
if version_doc is not None:
if version_doc.text:
obj.version_doc = version_doc.text
deprecated = node.attrib.get('deprecated-version')
if deprecated:
obj.deprecated = deprecated
deprecated_doc = node.find(_corens('doc-deprecated'))
if deprecated_doc is not None:
if deprecated_doc.text:
obj.deprecated_doc = deprecated_doc.text
stability = node.attrib.get('stability')
if stability:
obj.stability = stability
stability_doc = node.find(_corens('doc-stability'))
if stability_doc is not None:
if stability_doc.text:
obj.stability_doc = stability_doc.text
attributes = node.findall(_corens('attribute'))
if attributes:
attributes_ = OrderedDict()
for attribute in attributes:
name = attribute.attrib.get('name')
value = attribute.attrib.get('value')
attributes_[name] = value
obj.attributes = attributes_
def _parse_object_interface(self, node):
parent = node.attrib.get('parent')
if parent:
parent_type = self._namespace.type_from_name(parent)
else:
parent_type = None
ctor_kwargs = {'name': node.attrib['name'],
'parent_type': parent_type,
'gtype_name': node.attrib[_glibns('type-name')],
'get_type': node.attrib[_glibns('get-type')],
'c_symbol_prefix': node.attrib.get(_cns('symbol-prefix')),
'ctype': node.attrib.get(_cns('type'))}
if node.tag == _corens('interface'):
klass = ast.Interface
elif node.tag == _corens('class'):
klass = ast.Class
is_abstract = node.attrib.get('abstract')
is_abstract = is_abstract and is_abstract != '0'
ctor_kwargs['is_abstract'] = is_abstract
else:
raise AssertionError(node)
obj = klass(**ctor_kwargs)
self._parse_generic_attribs(node, obj)
type_struct = node.attrib.get(_glibns('type-struct'))
if type_struct:
obj.glib_type_struct = self._namespace.type_from_name(type_struct)
if klass == ast.Class:
is_fundamental = node.attrib.get(_glibns('fundamental'))
if is_fundamental and is_fundamental != '0':
obj.fundamental = True
for func_id in ['ref-func', 'unref-func',
'set-value-func', 'get-value-func']:
func_name = node.attrib.get(_glibns(func_id))
obj.__dict__[func_id.replace('-', '_')] = func_name
if self._types_only:
self._namespace.append(obj)
return
for iface in self._find_children(node, _corens('implements')):
obj.interfaces.append(self._namespace.type_from_name(iface.attrib['name']))
for iface in self._find_children(node, _corens('prerequisite')):
obj.prerequisites.append(self._namespace.type_from_name(iface.attrib['name']))
for func_node in self._find_children(node, _corens('function')):
func = self._parse_function_common(func_node, ast.Function, obj)
obj.static_methods.append(func)
for method in self._find_children(node, _corens('method')):
func = self._parse_function_common(method, ast.Function, obj)
func.is_method = True
obj.methods.append(func)
for method in self._find_children(node, _corens('virtual-method')):
func = self._parse_function_common(method, ast.VFunction, obj)
self._parse_generic_attribs(method, func)
func.is_method = True
func.invoker = method.get('invoker')
obj.virtual_methods.append(func)
for ctor in self._find_children(node, _corens('constructor')):
func = self._parse_function_common(ctor, ast.Function, obj)
func.is_constructor = True
obj.constructors.append(func)
obj.fields.extend(self._parse_fields(node, obj))
for prop in self._find_children(node, _corens('property')):
obj.properties.append(self._parse_property(prop, obj))
for signal in self._find_children(node, _glibns('signal')):
obj.signals.append(self._parse_function_common(signal, ast.Signal, obj))
self._namespace.append(obj)
def _parse_callback(self, node):
callback = self._parse_function_common(node, ast.Callback)
self._namespace.append(callback)
def _parse_function(self, node):
function = self._parse_function_common(node, ast.Function)
self._namespace.append(function)
def _parse_parameter(self, node):
typeval = self._parse_type(node)
param = ast.Parameter(node.attrib.get('name'),
typeval,
node.attrib.get('direction') or ast.PARAM_DIRECTION_IN,
node.attrib.get('transfer-ownership'),
node.attrib.get('nullable') == '1',
node.attrib.get('optional') == '1',
node.attrib.get('allow-none') == '1',
node.attrib.get('scope'),
node.attrib.get('caller-allocates') == '1')
self._parse_generic_attribs(node, param)
return param
def _parse_function_common(self, node, klass, parent=None):
name = node.attrib['name']
returnnode = node.find(_corens('return-value'))
        if returnnode is None:
raise ValueError('node %r has no return-value' % (name, ))
transfer = returnnode.attrib.get('transfer-ownership')
nullable = returnnode.attrib.get('nullable') == '1'
retval = ast.Return(self._parse_type(returnnode), nullable, transfer)
self._parse_generic_attribs(returnnode, retval)
parameters = []
throws = (node.attrib.get('throws') == '1')
if klass is ast.Callback:
func = klass(name, retval, parameters, throws,
node.attrib.get(_cns('type')))
elif klass is ast.Function:
identifier = node.attrib.get(_cns('identifier'))
func = klass(name, retval, parameters, throws, identifier)
elif klass is ast.VFunction:
func = klass(name, retval, parameters, throws)
elif klass is ast.Signal:
func = klass(name, retval, parameters,
when=node.attrib.get('when'),
no_recurse=node.attrib.get('no-recurse', '0') == '1',
detailed=node.attrib.get('detailed', '0') == '1',
action=node.attrib.get('action', '0') == '1',
no_hooks=node.attrib.get('no-hooks', '0') == '1')
else:
assert False
func.shadows = node.attrib.get('shadows', None)
func.shadowed_by = node.attrib.get('shadowed-by', None)
func.moved_to = node.attrib.get('moved-to', None)
func.parent = parent
parameters_node = node.find(_corens('parameters'))
if (parameters_node is not None):
paramnode = self._find_first_child(parameters_node, _corens('instance-parameter'))
if paramnode:
func.instance_parameter = self._parse_parameter(paramnode)
for paramnode in self._find_children(parameters_node, _corens('parameter')):
parameters.append(self._parse_parameter(paramnode))
for i, paramnode in enumerate(self._find_children(parameters_node,
_corens('parameter'))):
param = parameters[i]
self._parse_type_array_length(parameters, paramnode, param.type)
closure = paramnode.attrib.get('closure')
if closure:
idx = int(closure)
assert idx < len(parameters), "%d >= %d" % (idx, len(parameters))
param.closure_name = parameters[idx].argname
destroy = paramnode.attrib.get('destroy')
if destroy:
idx = int(destroy)
assert idx < len(parameters), "%d >= %d" % (idx, len(parameters))
param.destroy_name = parameters[idx].argname
self._parse_type_array_length(parameters, returnnode, retval.type)
self._parse_generic_attribs(node, func)
self._namespace.track(func)
return func
def _parse_fields(self, node, obj):
res = []
names = (_corens('field'), _corens('record'), _corens('union'), _corens('callback'))
for child in node.getchildren():
if child.tag in names:
fieldobj = self._parse_field(child, obj)
res.append(fieldobj)
return res
def _parse_compound(self, cls, node):
compound = cls(node.attrib.get('name'),
ctype=node.attrib.get(_cns('type')),
disguised=node.attrib.get('disguised') == '1',
gtype_name=node.attrib.get(_glibns('type-name')),
get_type=node.attrib.get(_glibns('get-type')),
c_symbol_prefix=node.attrib.get(_cns('symbol-prefix')))
if node.attrib.get('foreign') == '1':
compound.foreign = True
self._parse_generic_attribs(node, compound)
if not self._types_only:
compound.fields.extend(self._parse_fields(node, compound))
for method in self._find_children(node, _corens('method')):
func = self._parse_function_common(method, ast.Function, compound)
func.is_method = True
compound.methods.append(func)
for i, fieldnode in enumerate(self._find_children(node, _corens('field'))):
field = compound.fields[i]
self._parse_type_array_length(compound.fields, fieldnode, field.type)
for func in self._find_children(node, _corens('function')):
compound.static_methods.append(
self._parse_function_common(func, ast.Function, compound))
for ctor in self._find_children(node, _corens('constructor')):
func = self._parse_function_common(ctor, ast.Function, compound)
func.is_constructor = True
compound.constructors.append(func)
return compound
def _parse_record(self, node, anonymous=False):
struct = self._parse_compound(ast.Record, node)
is_gtype_struct_for = node.attrib.get(_glibns('is-gtype-struct-for'))
if is_gtype_struct_for is not None:
struct.is_gtype_struct_for = self._namespace.type_from_name(is_gtype_struct_for)
if not anonymous:
self._namespace.append(struct)
return struct
def _parse_union(self, node, anonymous=False):
union = self._parse_compound(ast.Union, node)
if not anonymous:
self._namespace.append(union)
return union
def _parse_type_simple(self, typenode):
# ast.Fields can contain inline callbacks
if typenode.tag == _corens('callback'):
typeval = self._namespace.type_from_name(typenode.attrib['name'])
typeval.ctype = typenode.attrib.get(_cns('type'))
return typeval
# ast.Arrays have their own toplevel XML
elif typenode.tag == _corens('array'):
array_type = typenode.attrib.get('name')
element_type = self._parse_type(typenode)
array_ctype = typenode.attrib.get(_cns('type'))
ret = ast.Array(array_type, element_type, ctype=array_ctype)
# zero-terminated defaults to true...
zero = typenode.attrib.get('zero-terminated')
if zero and zero == '0':
ret.zeroterminated = False
fixed_size = typenode.attrib.get('fixed-size')
if fixed_size:
ret.size = int(fixed_size)
return ret
elif typenode.tag == _corens('varargs'):
return ast.Varargs()
elif typenode.tag == _corens('type'):
name = typenode.attrib.get('name')
ctype = typenode.attrib.get(_cns('type'))
if name is None:
if ctype is None:
return ast.TypeUnknown()
return ast.Type(ctype=ctype)
elif name in ['GLib.List', 'GLib.SList']:
subchild = self._find_first_child(typenode,
map(_corens, ('callback', 'array',
'varargs', 'type')))
if subchild is not None:
element_type = self._parse_type(typenode)
else:
element_type = ast.TYPE_ANY
return ast.List(name, element_type, ctype=ctype)
elif name == 'GLib.HashTable':
subchildren = self._find_children(typenode, _corens('type'))
subchildren_types = map(self._parse_type_simple, subchildren)
while len(subchildren_types) < 2:
subchildren_types.append(ast.TYPE_ANY)
return ast.Map(subchildren_types[0], subchildren_types[1], ctype=ctype)
else:
return self._namespace.type_from_name(name, ctype)
else:
assert False, "Failed to parse inner type"
def _parse_type(self, node):
for name in map(_corens, ('callback', 'array', 'varargs', 'type')):
typenode = node.find(name)
if typenode is not None:
return self._parse_type_simple(typenode)
assert False, "Failed to parse toplevel type"
def _parse_type_array_length(self, siblings, node, typeval):
"""A hack necessary to handle the integer parameter/field indexes on
array types."""
typenode = node.find(_corens('array'))
if typenode is None:
return
lenidx = typenode.attrib.get('length')
if lenidx is not None:
idx = int(lenidx)
            assert idx < len(siblings), "%r %d >= %d" % (node, idx, len(siblings))
if isinstance(siblings[idx], ast.Field):
typeval.length_param_name = siblings[idx].name
else:
typeval.length_param_name = siblings[idx].argname
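    # e.g. for GIR XML like (illustrative)
    #   <array length="1"><type name="guint8"/></array>
    # the "length" attribute is the index of the sibling parameter/field that
    # carries the array length; its name is recorded on the array type here.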
def _parse_boxed(self, node):
obj = ast.Boxed(node.attrib[_glibns('name')],
gtype_name=node.attrib[_glibns('type-name')],
get_type=node.attrib[_glibns('get-type')],
c_symbol_prefix=node.attrib.get(_cns('symbol-prefix')))
self._parse_generic_attribs(node, obj)
if self._types_only:
self._namespace.append(obj)
return
for method in self._find_children(node, _corens('method')):
func = self._parse_function_common(method, ast.Function, obj)
func.is_method = True
obj.methods.append(func)
for ctor in self._find_children(node, _corens('constructor')):
obj.constructors.append(
self._parse_function_common(ctor, ast.Function, obj))
for callback in self._find_children(node, _corens('callback')):
obj.fields.append(
self._parse_function_common(callback, ast.Callback, obj))
self._namespace.append(obj)
def _parse_field(self, node, parent):
type_node = None
anonymous_node = None
if node.tag in map(_corens, ('record', 'union')):
anonymous_elt = node
else:
anonymous_elt = self._find_first_child(node, _corens('callback'))
if anonymous_elt is not None:
if anonymous_elt.tag == _corens('callback'):
anonymous_node = self._parse_function_common(anonymous_elt, ast.Callback)
elif anonymous_elt.tag == _corens('record'):
anonymous_node = self._parse_record(anonymous_elt, anonymous=True)
elif anonymous_elt.tag == _corens('union'):
anonymous_node = self._parse_union(anonymous_elt, anonymous=True)
else:
assert False, anonymous_elt.tag
else:
assert node.tag == _corens('field'), node.tag
type_node = self._parse_type(node)
field = ast.Field(node.attrib.get('name'),
type_node,
node.attrib.get('readable') != '0',
node.attrib.get('writable') == '1',
node.attrib.get('bits'),
anonymous_node=anonymous_node)
field.private = node.attrib.get('private') == '1'
field.parent = parent
self._parse_generic_attribs(node, field)
return field
def _parse_property(self, node, parent):
prop = ast.Property(node.attrib['name'],
self._parse_type(node),
node.attrib.get('readable') != '0',
node.attrib.get('writable') == '1',
node.attrib.get('construct') == '1',
node.attrib.get('construct-only') == '1',
node.attrib.get('transfer-ownership'))
self._parse_generic_attribs(node, prop)
prop.parent = parent
return prop
def _parse_member(self, node):
member = ast.Member(node.attrib['name'],
node.attrib['value'],
node.attrib.get(_cns('identifier')),
node.attrib.get(_glibns('nick')))
self._parse_generic_attribs(node, member)
return member
def _parse_constant(self, node):
type_node = self._parse_type(node)
constant = ast.Constant(node.attrib['name'],
type_node,
node.attrib['value'],
node.attrib.get(_cns('type')))
self._parse_generic_attribs(node, constant)
self._namespace.append(constant)
def _parse_enumeration_bitfield(self, node):
name = node.attrib.get('name')
ctype = node.attrib.get(_cns('type'))
get_type = node.attrib.get(_glibns('get-type'))
type_name = node.attrib.get(_glibns('type-name'))
glib_error_domain = node.attrib.get(_glibns('error-domain'))
if node.tag == _corens('bitfield'):
klass = ast.Bitfield
else:
klass = ast.Enum
members = []
obj = klass(name, ctype,
members=members,
gtype_name=type_name,
get_type=get_type)
obj.error_domain = glib_error_domain
obj.ctype = ctype
self._parse_generic_attribs(node, obj)
if self._types_only:
self._namespace.append(obj)
return
for member_node in self._find_children(node, _corens('member')):
member = self._parse_member(member_node)
member.parent = obj
members.append(member)
for func_node in self._find_children(node, _corens('function')):
func = self._parse_function_common(func_node, ast.Function)
func.parent = obj
obj.static_methods.append(func)
self._namespace.append(obj)
| gpl-2.0 | -4,067,458,910,382,923,000 | 42.259319 | 94 | 0.558878 | false |
philanthropy-u/edx-platform | lms/djangoapps/courseware/views/index.py | 1 | 25792 | """
View for Courseware Index
"""
# pylint: disable=attribute-defined-outside-init
import logging
import urllib
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.views import redirect_to_login
from django.urls import reverse
from django.http import Http404
from django.template.context_processors import csrf
from django.utils.decorators import method_decorator
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _
from django.views.decorators.cache import cache_control
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.generic import View
from edx_django_utils.monitoring import set_custom_metrics_for_course_key
from opaque_keys.edx.keys import CourseKey
from web_fragments.fragment import Fragment
from edxmako.shortcuts import render_to_response, render_to_string
from lms.djangoapps.courseware.courses import allow_public_access
from lms.djangoapps.courseware.exceptions import CourseAccessRedirect
from lms.djangoapps.experiments.utils import get_experiment_user_metadata_context
from lms.djangoapps.gating.api import get_entrance_exam_score_ratio, get_entrance_exam_usage_key
from lms.djangoapps.grades.course_grade_factory import CourseGradeFactory
from openedx.core.djangoapps.crawlers.models import CrawlersConfig
from openedx.core.djangoapps.lang_pref import LANGUAGE_KEY
from openedx.core.djangoapps.user_api.preferences.api import get_user_preference
from openedx.core.djangoapps.util.user_messages import PageLevelMessages
from openedx.core.djangoapps.waffle_utils import WaffleSwitchNamespace
from openedx.core.djangolib.markup import HTML, Text
from openedx.features.course_experience import (
COURSE_OUTLINE_PAGE_FLAG, default_course_url_name, COURSE_ENABLE_UNENROLLED_ACCESS_FLAG
)
from openedx.features.course_experience.views.course_sock import CourseSockFragmentView
from openedx.features.enterprise_support.api import data_sharing_consent_required
from shoppingcart.models import CourseRegistrationCode
from student.views import is_course_blocked
from util.views import ensure_valid_course_key
from xmodule.modulestore.django import modulestore
from xmodule.course_module import COURSE_VISIBILITY_PUBLIC
from xmodule.x_module import PUBLIC_VIEW, STUDENT_VIEW
from .views import CourseTabView
from ..access import has_access
from ..courses import check_course_access, get_course_with_access, get_current_child, get_studio_url
from ..entrance_exams import (
course_has_entrance_exam,
get_entrance_exam_content,
user_can_skip_entrance_exam,
user_has_passed_entrance_exam
)
from ..masquerade import (
setup_masquerade,
check_content_start_date_for_masquerade_user
)
from ..model_data import FieldDataCache
from ..module_render import get_module_for_descriptor, toc_for_course
log = logging.getLogger("edx.courseware.views.index")
TEMPLATE_IMPORTS = {'urllib': urllib}
CONTENT_DEPTH = 2
class CoursewareIndex(View):
"""
View class for the Courseware page.
"""
@cached_property
def enable_unenrolled_access(self):
return COURSE_ENABLE_UNENROLLED_ACCESS_FLAG.is_enabled(self.course_key)
@method_decorator(ensure_csrf_cookie)
@method_decorator(cache_control(no_cache=True, no_store=True, must_revalidate=True))
@method_decorator(ensure_valid_course_key)
@method_decorator(data_sharing_consent_required)
def get(self, request, course_id, chapter=None, section=None, position=None):
"""
Displays courseware accordion and associated content. If course, chapter,
and section are all specified, renders the page, or returns an error if they
are invalid.
If section is not specified, displays the accordion opened to the right
chapter.
        If neither chapter nor section is specified, displays the user's most
recent chapter, or the first chapter if this is the user's first visit.
Arguments:
request: HTTP request
course_id (unicode): course id
chapter (unicode): chapter url_name
section (unicode): section url_name
            position (unicode): position in module, e.g. of a <sequential> module
"""
self.course_key = CourseKey.from_string(course_id)
if not (request.user.is_authenticated or self.enable_unenrolled_access):
return redirect_to_login(request.get_full_path())
self.original_chapter_url_name = chapter
self.original_section_url_name = section
self.chapter_url_name = chapter
self.section_url_name = section
self.position = position
self.chapter, self.section = None, None
self.course = None
self.url = request.path
try:
set_custom_metrics_for_course_key(self.course_key)
self._clean_position()
with modulestore().bulk_operations(self.course_key):
self.view = STUDENT_VIEW
# Do the enrollment check if enable_unenrolled_access is not enabled.
self.course = get_course_with_access(
request.user, 'load', self.course_key,
depth=CONTENT_DEPTH,
check_if_enrolled=not self.enable_unenrolled_access,
)
if self.enable_unenrolled_access:
# Check if the user is considered enrolled (i.e. is an enrolled learner or staff).
try:
check_course_access(
self.course, request.user, 'load', check_if_enrolled=True,
)
except CourseAccessRedirect as exception:
# If the user is not considered enrolled:
if self.course.course_visibility == COURSE_VISIBILITY_PUBLIC:
# If course visibility is public show the XBlock public_view.
self.view = PUBLIC_VIEW
else:
# Otherwise deny them access.
raise exception
else:
# If the user is considered enrolled show the default XBlock student_view.
pass
self.is_staff = has_access(request.user, 'staff', self.course)
self._setup_masquerade_for_effective_user()
return self.render(request)
except Exception as exception: # pylint: disable=broad-except
return CourseTabView.handle_exceptions(request, self.course, exception)
def _setup_masquerade_for_effective_user(self):
"""
Setup the masquerade information to allow the request to
be processed for the requested effective user.
"""
self.real_user = self.request.user
self.masquerade, self.effective_user = setup_masquerade(
self.request,
self.course_key,
self.is_staff,
reset_masquerade_data=True
)
# Set the user in the request to the effective user.
self.request.user = self.effective_user
def render(self, request):
"""
Render the index page.
"""
self._redirect_if_needed_to_pay_for_course()
self._prefetch_and_bind_course(request)
if self.course.has_children_at_depth(CONTENT_DEPTH):
self._reset_section_to_exam_if_required()
self.chapter = self._find_chapter()
self.section = self._find_section()
if self.chapter and self.section:
self._redirect_if_not_requested_section()
self._save_positions()
self._prefetch_and_bind_section()
check_content_start_date_for_masquerade_user(self.course_key, self.effective_user, request,
self.course.start, self.chapter.start, self.section.start)
if not request.user.is_authenticated:
qs = urllib.urlencode({
'course_id': self.course_key,
'enrollment_action': 'enroll',
'email_opt_in': False,
})
allow_anonymous = allow_public_access(self.course, [COURSE_VISIBILITY_PUBLIC])
if not allow_anonymous:
PageLevelMessages.register_warning_message(
request,
Text(_("You are not signed in. To see additional course content, {sign_in_link} or "
"{register_link}, and enroll in this course.")).format(
sign_in_link=HTML('<a href="{url}">{sign_in_label}</a>').format(
sign_in_label=_('sign in'),
url='{}?{}'.format(reverse('signin_user'), qs),
),
register_link=HTML('<a href="/{url}">{register_label}</a>').format(
register_label=_('register'),
url='{}?{}'.format(reverse('register_user'), qs),
),
)
)
return render_to_response('courseware/courseware.html', self._create_courseware_context(request))
def _redirect_if_not_requested_section(self):
"""
If the resulting section and chapter are different from what was initially
requested, redirect back to the index page, but with an updated URL that includes
the correct section and chapter values. We do this so that our analytics events
and error logs have the appropriate URLs.
"""
if (
self.chapter.url_name != self.original_chapter_url_name or
(self.original_section_url_name and self.section.url_name != self.original_section_url_name)
):
raise CourseAccessRedirect(
reverse(
'courseware_section',
kwargs={
'course_id': unicode(self.course_key),
'chapter': self.chapter.url_name,
'section': self.section.url_name,
},
)
)
def _clean_position(self):
"""
Verify that the given position is an integer. If it is not positive, set it to 1.
"""
if self.position is not None:
try:
self.position = max(int(self.position), 1)
except ValueError:
raise Http404(u"Position {} is not an integer!".format(self.position))
def _redirect_if_needed_to_pay_for_course(self):
"""
Redirect to dashboard if the course is blocked due to non-payment.
"""
redeemed_registration_codes = []
if self.request.user.is_authenticated:
self.real_user = User.objects.prefetch_related("groups").get(id=self.real_user.id)
redeemed_registration_codes = CourseRegistrationCode.objects.filter(
course_id=self.course_key,
registrationcoderedemption__redeemed_by=self.real_user
)
if is_course_blocked(self.request, redeemed_registration_codes, self.course_key):
# registration codes may be generated via Bulk Purchase Scenario
# we have to check only for the invoice generated registration codes
# that their invoice is valid or not
# TODO Update message to account for the fact that the user is not authenticated.
log.warning(
u'User %s cannot access the course %s because payment has not yet been received',
self.real_user,
unicode(self.course_key),
)
raise CourseAccessRedirect(reverse('dashboard'))
def _reset_section_to_exam_if_required(self):
"""
Check to see if an Entrance Exam is required for the user.
"""
if not user_can_skip_entrance_exam(self.effective_user, self.course):
exam_chapter = get_entrance_exam_content(self.effective_user, self.course)
if exam_chapter and exam_chapter.get_children():
exam_section = exam_chapter.get_children()[0]
if exam_section:
self.chapter_url_name = exam_chapter.url_name
self.section_url_name = exam_section.url_name
def _get_language_preference(self):
"""
Returns the preferred language for the actual user making the request.
"""
language_preference = settings.LANGUAGE_CODE
if self.request.user.is_authenticated:
language_preference = get_user_preference(self.real_user, LANGUAGE_KEY)
return language_preference
def _is_masquerading_as_student(self):
"""
Returns whether the current request is masquerading as a student.
"""
return self.masquerade and self.masquerade.role == 'student'
def _is_masquerading_as_specific_student(self):
"""
        Returns whether the current request is masquerading as a specific student.
"""
return self._is_masquerading_as_student() and self.masquerade.user_name
def _find_block(self, parent, url_name, block_type, min_depth=None):
"""
Finds the block in the parent with the specified url_name.
If not found, calls get_current_child on the parent.
"""
child = None
if url_name:
child = parent.get_child_by(lambda m: m.location.block_id == url_name)
if not child:
# User may be trying to access a child that isn't live yet
if not self._is_masquerading_as_student():
raise Http404('No {block_type} found with name {url_name}'.format(
block_type=block_type,
url_name=url_name,
))
elif min_depth and not child.has_children_at_depth(min_depth - 1):
child = None
if not child:
child = get_current_child(parent, min_depth=min_depth, requested_child=self.request.GET.get("child"))
return child
def _find_chapter(self):
"""
Finds the requested chapter.
"""
return self._find_block(self.course, self.chapter_url_name, 'chapter', CONTENT_DEPTH - 1)
def _find_section(self):
"""
Finds the requested section.
"""
if self.chapter:
return self._find_block(self.chapter, self.section_url_name, 'section')
def _prefetch_and_bind_course(self, request):
"""
Prefetches all descendant data for the requested section and
sets up the runtime, which binds the request user to the section.
"""
self.field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
self.course_key,
self.effective_user,
self.course,
depth=CONTENT_DEPTH,
read_only=CrawlersConfig.is_crawler(request),
)
self.course = get_module_for_descriptor(
self.effective_user,
self.request,
self.course,
self.field_data_cache,
self.course_key,
course=self.course,
)
def _prefetch_and_bind_section(self):
"""
Prefetches all descendant data for the requested section and
sets up the runtime, which binds the request user to the section.
"""
# Pre-fetch all descendant data
self.section = modulestore().get_item(self.section.location, depth=None, lazy=False)
self.field_data_cache.add_descriptor_descendents(self.section, depth=None)
# Bind section to user
self.section = get_module_for_descriptor(
self.effective_user,
self.request,
self.section,
self.field_data_cache,
self.course_key,
self.position,
course=self.course,
)
def _save_positions(self):
"""
Save where we are in the course and chapter.
"""
save_child_position(self.course, self.chapter_url_name)
save_child_position(self.chapter, self.section_url_name)
    # TODO: move this method to the philu app
def can_view_score(self):
"""
Check if user is allowed to view score
:return: Boolean
"""
from lms.djangoapps.philu_api.helpers import get_course_custom_settings
from courseware.access import get_user_role
course_custom_settings = get_course_custom_settings(self.course.course_id)
current_user_role = get_user_role(self.request.user, self.course.course_id)
return course_custom_settings.show_grades or current_user_role in ["staff", 'instructor']
def _create_courseware_context(self, request):
"""
Returns and creates the rendering context for the courseware.
Also returns the table of contents for the courseware.
"""
course_url_name = default_course_url_name(self.course.id)
course_url = reverse(course_url_name, kwargs={'course_id': unicode(self.course.id)})
show_grades = self.can_view_score()
courseware_context = {
'show_grades': show_grades,
'csrf': csrf(self.request)['csrf_token'],
'course': self.course,
'course_url': course_url,
'chapter': self.chapter,
'section': self.section,
'init': '',
'fragment': Fragment(),
'staff_access': self.is_staff,
'masquerade': self.masquerade,
'supports_preview_menu': True,
'studio_url': get_studio_url(self.course, 'course'),
'xqa_server': settings.FEATURES.get('XQA_SERVER', "http://your_xqa_server.com"),
'bookmarks_api_url': reverse('bookmarks'),
'language_preference': self._get_language_preference(),
'disable_optimizely': not WaffleSwitchNamespace('RET').is_enabled('enable_optimizely_in_courseware'),
'section_title': None,
'sequence_title': None,
'disable_accordion': COURSE_OUTLINE_PAGE_FLAG.is_enabled(self.course.id),
}
courseware_context.update(
get_experiment_user_metadata_context(
self.course,
self.effective_user,
)
)
table_of_contents = toc_for_course(
self.effective_user,
self.request,
self.course,
self.chapter_url_name,
self.section_url_name,
self.field_data_cache,
)
# TODO: Move this section out as we are changing built in edx code
default_chapter = ''
if self.chapter:
default_chapter = self.chapter.display_name
if self.section:
default_chapter = "%s-%s" % (default_chapter, self.section.display_name)
active_tab = self.request.GET.get('active_tab', default_chapter)
courseware_context['toc'] = table_of_contents
courseware_context['active_tab'] = active_tab
courseware_context['accordion'] = render_accordion(
self.request,
self.course,
table_of_contents['chapters'],
)
courseware_context['course_sock_fragment'] = CourseSockFragmentView().render_to_fragment(
request, course=self.course)
# entrance exam data
self._add_entrance_exam_to_context(courseware_context)
if self.section:
# chromeless data
if self.section.chrome:
chrome = [s.strip() for s in self.section.chrome.lower().split(",")]
if 'accordion' not in chrome:
courseware_context['disable_accordion'] = True
if 'tabs' not in chrome:
courseware_context['disable_tabs'] = True
# default tab
if self.section.default_tab:
courseware_context['default_tab'] = self.section.default_tab
# section data
courseware_context['section_title'] = self.section.display_name_with_default
section_context = self._create_section_context(
table_of_contents['previous_of_active_section'],
table_of_contents['next_of_active_section'],
)
courseware_context['fragment'] = self.section.render(self.view, section_context)
if self.section.position and self.section.has_children:
self._add_sequence_title_to_context(courseware_context)
return courseware_context
def _add_sequence_title_to_context(self, courseware_context):
"""
Adds sequence title to the given context.
If we're rendering a section with some display items, but position
exceeds the length of the displayable items, default the position
to the first element.
"""
display_items = self.section.get_display_items()
if not display_items:
return
if self.section.position > len(display_items):
self.section.position = 1
courseware_context['sequence_title'] = display_items[self.section.position - 1].display_name_with_default
def _add_entrance_exam_to_context(self, courseware_context):
"""
Adds entrance exam related information to the given context.
"""
if course_has_entrance_exam(self.course) and getattr(self.chapter, 'is_entrance_exam', False):
courseware_context['entrance_exam_passed'] = user_has_passed_entrance_exam(self.effective_user, self.course)
courseware_context['entrance_exam_current_score'] = get_entrance_exam_score_ratio(
CourseGradeFactory().read(self.effective_user, self.course),
get_entrance_exam_usage_key(self.course),
)
def _create_section_context(self, previous_of_active_section, next_of_active_section):
"""
Returns and creates the rendering context for the section.
"""
def _compute_section_url(section_info, requested_child):
"""
Returns the section URL for the given section_info with the given child parameter.
"""
return "{url}?child={requested_child}".format(
url=reverse(
'courseware_section',
args=[unicode(self.course_key), section_info['chapter_url_name'], section_info['url_name']],
),
requested_child=requested_child,
)
# NOTE (CCB): Pull the position from the URL for un-authenticated users. Otherwise, pull the saved
# state from the data store.
position = None if self.request.user.is_authenticated else self.position
section_context = {
'activate_block_id': self.request.GET.get('activate_block_id'),
'requested_child': self.request.GET.get("child"),
'progress_url': reverse('progress', kwargs={'course_id': unicode(self.course_key)}),
'user_authenticated': self.request.user.is_authenticated,
'position': position,
}
if previous_of_active_section:
section_context['prev_url'] = _compute_section_url(previous_of_active_section, 'last')
if next_of_active_section:
section_context['next_url'] = _compute_section_url(next_of_active_section, 'first')
# sections can hide data that masquerading staff should see when debugging issues with specific students
section_context['specific_masquerade'] = self._is_masquerading_as_specific_student()
return section_context
def render_accordion(request, course, table_of_contents):
"""
Returns the HTML that renders the navigation for the given course.
Expects the table_of_contents to have data on each chapter and section,
including which ones are active.
"""
context = dict(
[
('toc', table_of_contents),
('course_id', unicode(course.id)),
('csrf', csrf(request)['csrf_token']),
('due_date_display_format', course.due_date_display_format),
] + TEMPLATE_IMPORTS.items()
)
return render_to_string('courseware/accordion.html', context)
def save_child_position(seq_module, child_name):
"""
child_name: url_name of the child
"""
for position, child in enumerate(seq_module.get_display_items(), start=1):
if child.location.block_id == child_name:
# Only save if position changed
if position != seq_module.position:
seq_module.position = position
# Save this new position to the underlying KeyValueStore
seq_module.save()
def save_positions_recursively_up(user, request, field_data_cache, xmodule, course=None):
"""
Recurses up the course tree starting from a leaf
Saving the position property based on the previous node as it goes
"""
current_module = xmodule
while current_module:
parent_location = modulestore().get_parent_location(current_module.location)
parent = None
if parent_location:
parent_descriptor = modulestore().get_item(parent_location)
parent = get_module_for_descriptor(
user,
request,
parent_descriptor,
field_data_cache,
current_module.location.course_key,
course=course
)
if parent and hasattr(parent, 'position'):
save_child_position(parent, current_module.location.block_id)
current_module = parent
| agpl-3.0 | 928,075,862,110,087,700 | 40.734628 | 120 | 0.61426 | false |
ljwolf/spvcm | spvcm/both_levels/se_se/sample.py | 1 | 1359 | import numpy as np
import scipy.linalg as scla
from ...utils import splogdet
from pysal.spreg.utils import spdot
def logp_rho_prec(state, val):
"""
This computes the logp of the spatial parameter using the precision, rather than the covariance. This results in fewer matrix operations in the case of a SE formulation, but not in an SMA formulation.
"""
st = state
    # must truncate in logp, otherwise sampling gets unstable
if (val < st.Rho_min) or (val > st.Rho_max):
return np.array([-np.inf])
PsiRhoi = st.Psi_1i(val, st.W, sparse=True)
logdet = splogdet(PsiRhoi)
eta = st.Y - st.XBetas - st.DeltaAlphas
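    # Gaussian kernel eta' Psi^{-1} eta / sigma^2, with Psi^{-1} the SE-error precision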
kernel = spdot(spdot(eta.T, PsiRhoi), eta) / st.Sigma2
    return .5*logdet - .5*kernel + st.Log_Rho0(val)  # since precision, no negative sign on the logdet
def logp_lambda_prec(state, val):
"""
The logp for upper level spatial parameters in this case has the same form
as a multivariate normal distribution, sampled over the variance matrix,
rather than over Y.
"""
st = state
#must truncate
if (val < st.Lambda_min) or (val > st.Lambda_max):
return np.array([-np.inf])
PsiLambdai = st.Psi_2i(val, st.M)
logdet = splogdet(PsiLambdai)
kernel = spdot(spdot(st.Alphas.T, PsiLambdai), st.Alphas) / st.Tau2
return .5*logdet - .5*kernel + st.Log_Lambda0(val)
| mit | 5,092,001,588,908,868,000 | 31.357143 | 204 | 0.671082 | false |
funkring/fdoo | addons-funkring/at_stock/__openerp__.py | 1 | 1542 | # -*- coding: utf-8 -*-
# -*- encoding: utf-8 -*-
#############################################################################
#
# Copyright (c) 2007 Martin Reisenhofer <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "oerp.at Stock",
"description":"""
oerp.at Stock Base Module
=========================
* additional functions
* access rights for invoice creation
""",
"version" : "1.1",
"author" : "funkring.net",
"website": "http://www.funkring.net",
"category" : "Warehouse",
"depends" : ["at_base", "stock", "delivery", "stock_account"],
"data" : ["security.xml",
"report/stock_picking_report.xml",
"view/picking_view.xml",
"wizard/check_avail_wizard.xml"],
"auto_install": False,
"installable": True
}
| agpl-3.0 | -4,827,440,454,047,546,000 | 34.045455 | 78 | 0.57393 | false |
sanal-cem/heritago | heritago/heritages/tests/tests_models.py | 1 | 2021 | from django.test import TestCase
from heritages.models import Tag, Heritage, Multimedia, UserProfile, User
import os
testfile = "testfile.txt"
testfile2 = "testfile2.txt"
class ModelsTest(TestCase):
@classmethod
def setUpClass(cls):
Tag.objects.create(name="TAG_ancient")
title = "Test Mosque"
Heritage.objects.create(title=title)
file = open(testfile, "w")
file.close()
file = open(testfile2, "w")
file.close()
Multimedia.objects.create(url="B", file=testfile, heritage=Heritage.objects.get(title="Test Mosque"))
Heritage.objects.create(title="Selimiye Mosque")
Multimedia.objects.create(url="A", file=testfile2, heritage=Heritage.objects.get(title="Selimiye Mosque"))
@classmethod
def tearDownClass(cls):
try:
os.remove(testfile)
except OSError:
pass
try:
os.remove(testfile2)
except OSError:
pass
def test_tag_get(self):
ancient_tag = Tag.objects.get(name="TAG_ancient")
self.assertEqual(ancient_tag.name, "TAG_ancient")
def test_heritage_get(self):
test_mosque = Heritage.objects.get(title="Test Mosque")
self.assertEqual(test_mosque.title, "Test Mosque")
def test_heritage_delete(self):
Heritage.objects.get(title="Test Mosque").delete()
with self.assertRaises(Heritage.DoesNotExist):
Heritage.objects.get(title="Test Mosque")
def test_multimedia_delete(self):
Multimedia.objects.get(url="A").delete()
with self.assertRaises(Multimedia.DoesNotExist):
Multimedia.objects.get(url="A")
def test_userprofile(self):
user = User.objects.create(username="testuser")
user_profile = UserProfile.objects.create(user=user, email="[email protected]", note="Test Note")
self.assertEqual("testuser", str(user_profile)
, "__unicode__ fails, replace with __str__ then you'll pass this test")
| mit | 3,023,365,666,309,742,000 | 33.844828 | 114 | 0.641267 | false |
alejandro-perez/pyikev2 | ikesacontroller.py | 1 | 9354 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This module defines the classes for the protocol handling.
"""
import json
import logging
import os
import socket
from ipaddress import ip_address, ip_network
from select import select
import xfrm
from ikesa import IkeSa
from message import (Message, TrafficSelector)
__author__ = 'Alejandro Perez-Mendez <[email protected]>'
class IkeSaController:
def __init__(self, my_addrs, configuration):
self.ike_sas = []
self.configuration = configuration
self.my_addrs = my_addrs
self.cookie_threshold = 10
self.cookie_secret = os.urandom(8)
# establish policies
xfrm.Xfrm.flush_policies()
xfrm.Xfrm.flush_sas()
for ike_conf in self.configuration.ike_configurations:
xfrm.Xfrm.create_policies(ike_conf)
def _get_ike_sa_by_spi(self, spi):
return next(x for x in self.ike_sas if x.my_spi == spi)
def _get_ike_sa_by_peer_addr(self, peer_addr):
return next(x for x in self.ike_sas if x.peer_addr == peer_addr)
def _get_ike_sa_by_child_sa_spi(self, spi):
for ike_sa in self.ike_sas:
for child_sa in ike_sa.child_sas:
if child_sa.inbound_spi == spi or child_sa.outbound_spi == spi:
return ike_sa
return None
def dispatch_message(self, data, my_addr, peer_addr):
header = Message.parse(data, header_only=True)
# if IKE_SA_INIT request, then a new IkeSa must be created
if header.exchange_type == Message.Exchange.IKE_SA_INIT and header.is_request:
# look for matching configuration
ike_conf = self.configuration.get_ike_configuration(ip_address(peer_addr))
ike_sa = IkeSa(is_initiator=False, peer_spi=header.spi_i, configuration=ike_conf,
my_addr=ip_address(my_addr), peer_addr=ip_address(peer_addr))
self.ike_sas.append(ike_sa)
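            # Too many half-open IKE_SAs: require stateless cookies (RFC 7296, section 2.6)
            # so that spoofed IKE_SA_INIT floods cannot exhaust local state.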
if sum(1 for x in self.ike_sas if x.state < IkeSa.State.ESTABLISHED) > self.cookie_threshold:
ike_sa.cookie_secret = self.cookie_secret
logging.info(f'Starting the creation of IKE SA={ike_sa}. Count={len(self.ike_sas)}')
# else, look for the IkeSa in the dict
else:
my_spi = header.spi_r if header.is_initiator else header.spi_i
try:
ike_sa = self._get_ike_sa_by_spi(my_spi)
except StopIteration:
logging.warning(f'Received message for unknown SPI={my_spi.hex()}. Omitting.')
return None
# generate the reply (if any)
reply = ike_sa.process_message(data)
# if rekeyed, add the new IkeSa
if ike_sa.state in (IkeSa.State.REKEYED, IkeSa.State.DEL_AFTER_REKEY_IKE_SA_REQ_SENT):
self.ike_sas.append(ike_sa.new_ike_sa)
logging.info(f'IKE SA={ike_sa.new_ike_sa} created by rekey. Count={len(self.ike_sas)}')
# if the IKE_SA needs to be closed
if ike_sa.state == IkeSa.State.DELETED:
ike_sa.delete_child_sas()
self.ike_sas.remove(ike_sa)
logging.info(f'Deleted IKE_SA={ike_sa}. Count={len(self.ike_sas)}')
return reply
def process_acquire(self, xfrm_acquire, attributes):
family = attributes[xfrm.XFRMA_TMPL].family
peer_addr = xfrm_acquire.id.daddr.to_ipaddr(family)
logging.debug('Received acquire for {}'.format(peer_addr))
# look for an active IKE_SA with the peer
try:
ike_sa = self._get_ike_sa_by_peer_addr(peer_addr)
except StopIteration:
my_addr = xfrm_acquire.saddr.to_ipaddr(family)
ike_conf = self.configuration.get_ike_configuration(peer_addr)
# create new IKE_SA (for now)
ike_sa = IkeSa(is_initiator=True, peer_spi=b'\0'*8, configuration=ike_conf, my_addr=my_addr,
peer_addr=peer_addr)
self.ike_sas.append(ike_sa)
logging.info(f'Starting the creation of IKE SA={ike_sa}. Count={len(self.ike_sas)}')
sel_family = xfrm_acquire.sel.family
small_tsi = TrafficSelector.from_network(ip_network(xfrm_acquire.sel.saddr.to_ipaddr(sel_family)),
xfrm_acquire.sel.sport, xfrm_acquire.sel.proto)
small_tsr = TrafficSelector.from_network(ip_network(xfrm_acquire.sel.daddr.to_ipaddr(sel_family)),
xfrm_acquire.sel.dport, xfrm_acquire.sel.proto)
request = ike_sa.process_acquire(small_tsi, small_tsr, xfrm_acquire.policy.index >> 3)
# look for ipsec configuration
return request, ike_sa.my_addr, ike_sa.peer_addr
def process_expire(self, xfrm_expire):
spi = bytes(xfrm_expire.state.id.spi)
hard = xfrm_expire.hard
logging.debug(f'Received EXPIRE for CHILD_SA SPI={spi.hex()}. Hard={hard}')
ike_sa = self._get_ike_sa_by_child_sa_spi(spi)
if ike_sa:
request = ike_sa.process_expire(spi, hard)
return request, ike_sa.my_addr, ike_sa.peer_addr
return None, None, None
def main_loop(self):
# create network sockets
udp_sockets = {}
port = 500
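        # UDP/500 is the standard IKE port; this loop binds one socket per local address.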
for addr in self.my_addrs:
udp_sockets[addr] = socket.socket(socket.AF_INET6 if addr.version == 6 else socket.AF_INET, socket.SOCK_DGRAM)
udp_sockets[addr].bind((str(addr), port))
logging.info(f'Listening from [{addr}]:{port}')
self.control_socket = control_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.control_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
control_addr = ("127.0.0.1", 9999)
control_socket.bind(control_addr)
control_socket.listen()
logging.info(f'Listening control events on [{control_addr[0]}]:{control_addr[1]}')
# create XFRM socket
xfrm_socket = xfrm.Xfrm.get_socket()
logging.info('Listening XFRM events.')
allsockets = list(udp_sockets.values()) + [xfrm_socket, control_socket]
# do server
while True:
try:
readable = select(allsockets, [], [], 1)[0]
for my_addr, sock in udp_sockets.items():
if sock in readable:
data, peer_addr = sock.recvfrom(4096)
data = self.dispatch_message(data, my_addr, peer_addr[0])
if data:
sock.sendto(data, peer_addr)
if xfrm_socket in readable:
data = xfrm_socket.recv(4096)
header, msg, attributes = xfrm.Xfrm.parse_message(data)
reply_data, my_addr, peer_addr = None, None, None
if header.type == xfrm.XFRM_MSG_ACQUIRE:
reply_data, my_addr, peer_addr = self.process_acquire(msg, attributes)
elif header.type == xfrm.XFRM_MSG_EXPIRE:
reply_data, my_addr, peer_addr = self.process_expire(msg)
if reply_data:
dst_addr = (str(peer_addr), 500)
udp_sockets[my_addr].sendto(reply_data, dst_addr)
if control_socket in readable:
conn, addr = control_socket.accept()
data = conn.recv(4096)
result = []
for ikesa in self.ike_sas:
result.append(ikesa.to_dict())
conn.sendall(json.dumps(result).encode())
conn.close()
# check retransmissions
for ikesa in self.ike_sas:
request_data = ikesa.check_retransmission_timer()
if request_data:
dst_addr = (str(ikesa.peer_addr), 500)
udp_sockets[ikesa.my_addr].sendto(request_data, dst_addr)
if ikesa.state == IkeSa.State.DELETED:
ikesa.delete_child_sas()
self.ike_sas.remove(ikesa)
logging.info('Deleted IKE_SA {}. Count={}'.format(ikesa, len(self.ike_sas)))
# start DPD
for ikesa in self.ike_sas:
request_data = ikesa.check_dead_peer_detection_timer()
if request_data:
dst_addr = (str(ikesa.peer_addr), 500)
udp_sockets[ikesa.my_addr].sendto(request_data, dst_addr)
# start IKE_SA rekeyings
for ikesa in self.ike_sas:
request_data = ikesa.check_rekey_ike_sa_timer()
if request_data:
dst_addr = (str(ikesa.peer_addr), 500)
udp_sockets[ikesa.my_addr].sendto(request_data, dst_addr)
except socket.gaierror as ex:
logging.error(f'Problem sending message: {ex}')
except KeyError as ex:
logging.error(f'Could not find socket with the appropriate source address: {str(ex)}')
def close(self):
xfrm.Xfrm.flush_policies()
xfrm.Xfrm.flush_sas()
logging.info('Closing IKE_SA controller')
self.control_socket.close()
| gpl-3.0 | -879,382,703,224,452,000 | 44.188406 | 122 | 0.569168 | false |
evgenybf/pyXLWriter | tests/test_Worksheet.py | 1 | 3073 | #!/usr/bin/env python
__revision__ = """$Id: test_Worksheet.py,v 1.12 2004/08/17 07:22:38 fufff Exp $"""
import os, os.path
import unittest
import testsupport
from testsupport import read_file
from pyXLWriter.Worksheet import Worksheet
from pyXLWriter.Format import Format
class WorksheetTest(unittest.TestCase):
def setUp(self):
self.ws = Worksheet("test", None, 0)
self.format = Format(color="green")
def tearDown(self):
self.ws = None
def test_methods_no_error(self):
self.ws.write([0, 1], None)
self.ws.write((0, 2), "Hello")
self.ws.write((0, 3), 888)
self.ws.write([0, 4], 888L)
self.ws.write_row((0, 0), [])
self.ws.write_row((0, 0), ["one", "two", "three"])
self.ws.write_row((0, 0), [1, 2, 3])
self.ws.write_col((0, 0), [])
self.ws.write_col((0, 0), ["one", "two", "three"])
self.ws.write_col((0, 0), [1, 2, 3])
self.ws.write_blank((0, 0), [])
def test_store_dimensions(self):
self.ws._store_dimensions()
datasize = self.ws._datasize
self.assertEqual(14, datasize)
def test_store_window2(self):
self.ws._store_window2()
datasize = self.ws._datasize
self.assertEqual(14, datasize)
def test_store_selection(self):
self.ws._store_selection(0, 0, 0, 0)
datasize = self.ws._datasize
self.assertEqual(19, datasize)
def test_store_colinfo_output(self):
self.ws._store_colinfo()
datasize = self.ws._datasize
self.assertEqual(15, datasize)
def test_format_row(self):
self.ws.set_row(3, None)
self.ws.set_row(2, 10)
self.ws.set_row(1, 25, self.format)
self.ws.set_row(4, None, self.format) # [4, 6]
def test_format_column(self):
self.ws.set_column(3, None)
self.ws.set_column(2, 10)
self.ws.set_column(1, 25, self.format)
self.ws.set_column([4, 6], None, self.format)
def test_process_cell(self):
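        # _process_cell maps A1-style references to zero-based (row, col) tuples.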
rc = self.ws._process_cell("C80")
self.assertEqual((79, 2), rc)
rc = self.ws._process_cell([63, 84])
self.assertEqual((63, 84), rc)
def test_process_cellrange(self):
rc = self.ws._process_cellrange("C80")
self.assertEqual((79, 2, 79, 2), rc)
rc = self.ws._process_cellrange((7, 6))
self.assertEqual((7, 6, 7, 6), rc)
def test_process_rowrange(self):
rc = self.ws._process_rowrange(6)
self.assertEqual((6, 0, 6, -1), rc)
rc = self.ws._process_rowrange((5, 6))
self.assertEqual((5, 0, 6, -1), rc)
rc = self.ws._process_rowrange("4:6")
self.assertEqual((3, 0, 5, -1), rc)
def test_process_colrange(self):
rc = self.ws._process_colrange(5)
self.assertEqual((0, 5, -1, 5), rc)
rc = self.ws._process_colrange((5, 6))
self.assertEqual((0, 5, -1, 6), rc)
rc = self.ws._process_colrange("D:E")
self.assertEqual((0, 3, -1, 4), rc)
if __name__ == "__main__":
unittest.main()
| lgpl-2.1 | 2,182,459,475,558,707,500 | 30.680412 | 82 | 0.567524 | false |
mjhouse/scripts | hex/env.py | 1 | 2609 | import re, shlex
import math
hextag = re.compile('{{(.*?)}}',flags=re.MULTILINE|re.DOTALL)
class HexExpression:
delimiters = (' ', '=','-','+','*','(',')')
quotes = ('\"','\'')
def __init__( self, tokens ):
self.operations = {
'=':self.assign,
'-':self.minus,
'+':self.plus,
'*':self.multi,
}
if isinstance(tokens,str):
self.tokens = [ tok for tok in self.tokenize( tokens ) if tok != '(' and tok !=')' ]
else:
self.tokens = [ tok for tok in tokens if tok != '(' and tok !=')' ]
self.value = self.tokens[0]
    def execute( self ):
        # Evaluate the flat prefix token list: an operator consumes the next
        # two operands, which may themselves be nested operator expressions.
        result, remainder = self._reduce( self.tokens )
        return result
    def _reduce( self, tokens ):
        head, rest = tokens[0], tokens[1:]
        if head in self.operations:
            left, rest = self._reduce( rest )
            right, rest = self._reduce( rest )
            return self.operations[head]( left, right ), rest
        return head, rest
def assign( self, left, right ):
print ('assign: ' + str(left) + ' ' + str(right))
return {str(left):right}
def minus( self, left, right ):
print ('minus: ' + str(left) + ' ' + str(right))
return left - right
def plus( self, left, right ):
print ('plus: ' + str(left) + ' ' + str(right))
return left + right
def multi( self, left, right ):
print ('multi: ' + str(left) + ' ' + str(right))
return left*right
def tokenize( self, string ):
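        # Split on delimiters while respecting quoted strings, then coerce each
        # token with evaluate(), e.g. "(+ 2 3)" -> ['(', '+', 2, 3, ')'].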
acc, word, inside = [], '', False
for char in string:
if char in self.delimiters and not inside:
if word.strip(): acc.append(word)
if char.strip(): acc.append(char)
word = ''
elif char in self.quotes:
inside = not inside
word += char
else:
word += char
if word.strip(): acc.append(word)
return [ self.evaluate(tok) for tok in acc ]
def evaluate( self, token ):
token = token.strip('. ')
if token.replace('.','',1).isdigit():
if '.' in token:
return float(token)
else:
return int(token)
elif token.lower() == 'true':
return True
elif token.lower() == 'false':
return False
else:
return token
def collapse( self, tokens ):
pass
if __name__ == '__main__':
exp = HexExpression('( = this (+ (+ 2 3) (- 4 3))')
print(exp.tokens)
result = exp.execute()
print(result)
| gpl-2.0 | -2,340,866,839,609,065,500 | 29.337209 | 96 | 0.48716 | false |
thejevans/pointSourceAnalysis | convertH5_GFU.py | 1 | 1276 | #!/usr/bin/env python
'''
Template to convert from HDF5 files to NPY numpy array Files. This implementation uses
parseGFU.py to parse the data
'''
# Imports
from __future__ import print_function
from optparse import OptionParser
import tables
import numpy as np
import parseGFU
# Command parsing
usage = '%prog [options] --infile <hdf5 file> --outdir <output directory>'
parser = OptionParser(usage = usage, description=__doc__)
parser.add_option('-i', '--infile', type = 'string', default = None,
help = 'HDF5 file to be parsed')
parser.add_option('-o', '--outdir', type = 'string', default = './',
help = 'NPY file output path')
(options, args) = parser.parse_args()
inFile = options.infile
outDir = options.outdir
# If no input file given, ask for one
if inFile is None:
inFile = raw_input('Select input HDF5 file: ')
# If output directory does not end with a /, add one
if outDir.rfind('/') != len(outDir)-1:
outDir = ''.join([outDir, '/'])
# Set output file name based on input file name
outFile = ''.join([outDir, inFile[inFile.rfind('/')+1:inFile.rfind('.')], '.npy'])
# Read in .h5 file
hdf = tables.openFile(inFile)
# Convert to numpy array
arr = parseGFU.convert(hdf)
# Write out .npy file
np.save(outFile, arr)
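# Example invocation (hypothetical file names):
#   ./convertH5_GFU.py --infile run_2015.h5 --outdir /data/npy/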
| gpl-3.0 | -6,438,877,276,518,161,000 | 27.355556 | 86 | 0.670063 | false |
flgiordano/netcash | +/google-cloud-sdk/lib/googlecloudsdk/core/resource/resource_projection_spec.py | 1 | 10147 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class that creates resource projection specification."""
import sys
from googlecloudsdk.third_party.py27 import py27_copy as copy
PROJECTION_ARG_DOC = ' projection: The parent ProjectionSpec.'
ALIGN_DEFAULT = 'left'
ALIGNMENTS = {'left': lambda s, w: s.ljust(w),
'center': lambda s, w: s.center(w),
'right': lambda s, w: s.rjust(w)}
class ProjectionSpec(object):
"""Creates a resource projection specification.
A resource projection is an expression string that contains a list of resource
keys with optional attributes. A projector is a method that takes a projection
specification and a resource object as input and produces a new
JSON-serializable object containing only the values corresponding to the keys
in the projection specification.
Optional projection key attributes may transform the values in the output
JSON-serializable object. Cloud SDK projection attributes are used for output
formatting.
A default or empty projection expression still produces a projector that
converts a resource to a JSON-serializable object.
This class is used by the resource projection expression parser to create a
resource projection specification from a projection expression string.
Attributes:
aliases: The short key name alias dictionary.
_active: The transform active level. Incremented each time Defaults() is
called. Used to determine active transforms.
attributes: Projection attributes dict indexed by attribute name.
_columns: A list of (key,_Attribute) tuples used to project a resource to
a list of columns.
_compiler: The projection compiler method for nested projections.
_empty: An empty projection _Tree used by Projector().
_name: The projection name from the expression string.
_tree: The projection _Tree root, used by
resource_projector.Evaluate() to efficiently project each resource.
symbols: Default and caller-defined transform function dict indexed by
function name.
"""
DEFAULT = 0 # _Attribute default node flag.
INNER = 1 # _Attribute inner node flag.
PROJECT = 2 # _Attribute project node flag.
class _Column(object):
"""Column key and transform attribute for self._columns.
Attributes:
key: The column key.
attribute: The column key _Attribute.
"""
def __init__(self, key, attribute):
self.key = key
self.attribute = attribute
def __init__(self, defaults=None, symbols=None, compiler=None):
"""Initializes a projection.
Args:
defaults: resource_projection_spec.ProjectionSpec defaults.
symbols: Transform function symbol table dict indexed by function name.
compiler: The projection compiler method for nested projections.
"""
self.aliases = {}
self.attributes = {}
self._columns = []
self._compiler = compiler
self._empty = None
self._name = None
self._snake_headings = {}
self._snake_re = None
if defaults:
self._active = defaults.active
self._tree = copy.deepcopy(defaults.GetRoot())
self.Defaults()
if defaults.symbols:
self.symbols = copy.deepcopy(defaults.symbols)
if symbols:
self.symbols.update(symbols)
else:
self.symbols = symbols if symbols else {}
self.aliases.update(defaults.aliases)
else:
self._active = 0
self._tree = None
self.symbols = symbols
@property
def active(self):
"""Gets the transform active level."""
return self._active
@property
def compiler(self):
"""Returns the projection compiler method for nested projections."""
return self._compiler
def _Defaults(self, projection):
"""Defaults() helper -- converts a projection to a default projection.
Args:
projection: A node in the original projection _Tree.
"""
projection.attribute.flag = self.DEFAULT
for node in projection.tree.values():
self._Defaults(node)
def _Print(self, projection, out, level):
"""Print() helper -- prints projection node p and its children.
Args:
projection: A _Tree node in the original projection.
out: The output stream.
level: The nesting level counting from 1 at the root.
"""
for key in projection.tree:
out.write('{indent} {key} : {attribute}\n'.format(
indent=' ' * level,
key=key,
attribute=projection.tree[key].attribute))
self._Print(projection.tree[key], out, level + 1)
def AddAttribute(self, name, value):
"""Adds name=value to the attributes.
Args:
name: The attribute name.
value: The attribute value
"""
self.attributes[name] = value
def DelAttribute(self, name):
"""Deletes name from the attributes if it is in the attributes.
Args:
name: The attribute name.
"""
if name in self.attributes:
del self.attributes[name]
def AddAlias(self, name, key):
"""Adds name as an alias for key to the projection.
Args:
name: The short (no dots) alias name for key.
key: The parsed key to add.
"""
self.aliases[name] = key
def AddKey(self, key, attribute):
"""Adds key and attribute to the projection.
Args:
key: The parsed key to add.
attribute: Parsed _Attribute to add.
"""
self._columns.append(self._Column(key, attribute))
def SetName(self, name):
"""Sets the projection name.
The projection name is the rightmost of the names in the expression.
Args:
name: The projection name.
"""
if self._name:
# Reset the name-specific attributes.
self.attributes = {}
self._name = name
def GetRoot(self):
"""Returns the projection root node.
Returns:
The resource_projector_parser._Tree root node.
"""
return self._tree
def SetRoot(self, root):
"""Sets the projection root node.
Args:
root: The resource_projector_parser._Tree root node.
"""
self._tree = root
def GetEmpty(self):
"""Returns the projector resource_projector_parser._Tree empty node.
Returns:
The projector resource_projector_parser._Tree empty node.
"""
return self._empty
def SetEmpty(self, node):
"""Sets the projector resource_projector_parser._Tree empty node.
The empty node is used by to apply [] empty slice projections.
Args:
node: The projector resource_projector_parser._Tree empty node.
"""
self._empty = node
def Columns(self):
"""Returns the projection columns.
Returns:
The columns in the projection, None if the entire resource is projected.
"""
return self._columns
def ColumnCount(self):
"""Returns the number of columns in the projection.
Returns:
The number of columns in the projection, 0 if the entire resource is
projected.
"""
return len(self._columns)
def Defaults(self):
"""Converts the projection to a default projection.
A default projection provides defaults for attribute values and function
symbols. An explicit non-default projection value always overrides the
corresponding default value.
"""
if self._tree:
self._Defaults(self._tree)
self._columns = []
self._active += 1
def Aliases(self):
"""Returns the short key name alias dictionary.
This dictionary maps short (no dots) names to parsed keys.
Returns:
The short key name alias dictionary.
"""
return self.aliases
def Attributes(self):
"""Returns the projection _Attribute dictionary.
Returns:
The projection _Attribute dictionary.
"""
return self.attributes
def Alignments(self):
"""Returns the projection column justfication list.
Returns:
The ordered list of alignment functions, where each function is one of
ljust [default], center, or rjust.
"""
return [ALIGNMENTS[col.attribute.align] for col in self._columns]
def Labels(self):
"""Returns the ordered list of projection labels.
Returns:
The ordered list of projection label strings, None if all labels are
empty.
"""
labels = [col.attribute.label or '' for col in self._columns]
return labels if any(labels) else None
def Name(self):
"""Returns the projection name.
The projection name is the rightmost of the names in the expression.
Returns:
The projection name, None if none was specified.
"""
return self._name
def Order(self):
"""Returns the projection sort key order suitable for use by sorted().
Example:
projection = resource_projector.Compile('...')
order = projection.Order()
if order:
rows = sorted(rows, key=itemgetter(*order))
Returns:
The list of (sort-key-index, reverse), [] if projection is None
or if all sort order indices in the projection are None (unordered).
"""
ordering = []
for i, col in enumerate(self._columns):
if col.attribute.order or col.attribute.reverse:
ordering.append(
(col.attribute.order or sys.maxint, i, col.attribute.reverse))
return [(i, reverse) for _, i, reverse in sorted(ordering)]
def Print(self, out=sys.stdout):
"""Prints the projection with indented nesting.
Args:
out: The output stream, sys.stdout if None.
"""
if self._tree:
self._Print(self._tree, out, 1)
def Tree(self):
"""Returns the projection tree root.
Returns:
The projection tree root.
"""
return self._tree
| bsd-3-clause | 4,696,202,010,263,796,000 | 28.669591 | 80 | 0.673401 | false |
r-kitaev/lucid-python-tinydav | test/TestTinyDAV.py | 1 | 39023 | # Unittests for tinydav lib.
# coding: utf-8
# Copyright (C) 2009 Manuel Hermann <[email protected]>
#
# This file is part of tinydav.
#
# tinydav is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Unittests for tinydav lib."""
from __future__ import with_statement
from cookielib import CookieJar
from StringIO import StringIO
from xml.etree.ElementTree import ElementTree
import hashlib
import httplib
import urllib
import socket
import sys
import tinydav
import unittest
from tinydav import HTTPError, HTTPUserError, HTTPServerError
from tinydav import HTTPClient
from tinydav import HTTPResponse
from tinydav import CoreWebDAVClient
from tinydav import ExtendedWebDAVClient
from tinydav import WebDAVResponse
from tinydav import WebDAVLockResponse
from tinydav import MultiStatusResponse
from Mock import injected, replaced
import Mock
PYTHONVERSION = sys.version_info[:2] # (2, 5) or (2, 6)
if PYTHONVERSION >= (2, 7):
from xml.etree.ElementTree import ParseError
else:
from xml.parsers.expat import ExpatError as ParseError
MULTISTATUS = """\
<?xml version="1.0" encoding="utf-8"?>
<D:multistatus xmlns:D="DAV:" xmlns:dc="DC:">
<D:response>
<D:href>/3/38/38f/38fa476aa97a4b2baeb41a481fdca00b</D:href>
<D:propstat>
<D:prop>
<D:getetag>6ca7-364-475e65375ce80</D:getetag>
<dc:created/>
<dc:resource/>
<dc:author/>
</D:prop>
<D:status>HTTP/1.1 200 OK</D:status>
</D:propstat>
</D:response>
</D:multistatus>
"""
# unbound prefix
MULTISTATUS_BROKEN = """\
<?xml version="1.0" encoding="utf-8"?>
<D:multistatus xmlns:D="DAV:">
<D:response>
<D:href>/3/38/38f/38fa476aa97a4b2baeb41a481fdca00b</D:href>
<D:propstat>
<D:prop>
<D:getetag>6ca7-364-475e65375ce80</D:getetag>
<dc:created/>
<dc:resource/>
<dc:author/>
</D:prop>
<D:status>HTTP/1.1 200 OK</D:status>
</D:propstat>
</D:response>
</D:multistatus>
"""
REPORT = """\
<?xml version="1.0" encoding="utf-8" ?>
<D:multistatus xmlns:D="DAV:">
<D:response>
<D:href>/his/23/ver/V1</D:href>
<D:propstat>
<D:prop>
<D:version-name>V1</D:version-name>
<D:creator-displayname>Fred</D:creator-displayname>
<D:successor-set>
<D:href>/his/23/ver/V2</D:href>
</D:successor-set>
</D:prop>
<D:status>HTTP/1.1 200 OK</D:status>
</D:propstat>
</D:response>
<D:response>
<D:href>/his/23/ver/V2</D:href>
<D:propstat>
<D:prop>
<D:version-name>V2</D:version-name>
<D:creator-displayname>Fred</D:creator-displayname>
<D:successor-set/>
</D:prop>
<D:status>HTTP/1.1 200 OK</D:status>
</D:propstat>
</D:response>
</D:multistatus>
"""
RESPONSE = """\
<?xml version="1.0" encoding="utf-8"?>
<D:response xmlns:D="DAV:" xmlns:dc="DC:">
<D:href>/3/38/38f/38fa476aa97a4b2baeb41a481fdca00b</D:href>
<D:propstat>
<D:prop>
<D:getetag>6ca7-364-475e65375ce80</D:getetag>
<dc:created/>
<dc:resource/>
<dc:author>Me</dc:author>
</D:prop>
<D:status>HTTP/1.1 200 OK</D:status>
</D:propstat>
</D:response>
"""
LOCKDISCOVERY = """\
<?xml version="1.0" encoding="utf-8" ?>
<D:prop xmlns:D="DAV:">
<D:lockdiscovery>
<D:activelock>
<D:locktype><D:write/></D:locktype>
<D:lockscope><D:exclusive/></D:lockscope>
<D:depth>Infinity</D:depth>
<D:owner>
<D:href>
http://localhost/me.html
</D:href>
</D:owner>
<D:timeout>Second-604800</D:timeout>
<D:locktoken>
<D:href>
opaquelocktoken:e71d4fae-5dec-22d6-fea5-00a0c91e6be4
</D:href>
</D:locktoken>
</D:activelock>
</D:lockdiscovery>
</D:prop>
"""
class HTTPClientTestCase(unittest.TestCase):
"""Test the HTTPClient class."""
def setUp(self):
"""Setup the client."""
self.http = HTTPClient("127.0.0.1", 80)
self.con = Mock.HTTPConnection()
self.http._getconnection = lambda: self.con
def test_init(self):
"""Test initializing the HTTPClient."""
self.assertEqual(self.http.host, "127.0.0.1")
self.assertEqual(self.http.port, 80)
def test_getconnection(self):
"""Test HTTPClient._getconnection."""
# http
http = HTTPClient("127.0.0.1", 80)
con = http._getconnection()
self.assertTrue(isinstance(con, httplib.HTTPConnection))
# https
http = HTTPClient("127.0.0.1", 80, protocol="https")
con = http._getconnection()
self.assertTrue(isinstance(con, httplib.HTTPSConnection))
http = HTTPClient("127.0.0.1", timeout=300, source_address="here.loc")
# Python2.5
mockhttplib = Mock.Omnivore(HTTPConnection=[None])
context = dict(
PYTHON2_6=False,
PYTHON2_7=False,
httplib=mockhttplib,
)
with injected(http._getconnection, **context):
http._getconnection()
call_log = mockhttplib.called["HTTPConnection"][0][1]
self.assertFalse(call_log["strict"])
self.assertEqual(call_log.get("timeout"), None)
self.assertEqual(call_log.get("source_address"), None)
# Python2.6
mockhttplib = Mock.Omnivore(HTTPConnection=[None])
context = dict(
PYTHON2_6=True,
PYTHON2_7=False,
httplib=mockhttplib,
)
with injected(http._getconnection, **context):
http._getconnection()
call_log = mockhttplib.called["HTTPConnection"][0][1]
self.assertFalse(call_log["strict"])
self.assertEqual(call_log["timeout"], 300)
self.assertEqual(call_log.get("source_address"), None)
# Python2.7
mockhttplib = Mock.Omnivore(HTTPConnection=[None])
context = dict(
PYTHON2_6=True,
PYTHON2_7=True,
httplib=mockhttplib,
)
with injected(http._getconnection, **context):
http._getconnection()
call_log = mockhttplib.called["HTTPConnection"][0][1]
self.assertFalse(call_log["strict"])
self.assertEqual(call_log["timeout"], 300)
self.assertEqual(call_log.get("source_address"), "here.loc")
def test_request(self):
"""Test HTTPClient._request."""
headers = {"X-Test": "Hello"}
resp = self.http._request("POST", "/foo", "my content", headers)
self.assertEqual(resp, 200)
# relative path to absolute path
resp = self.http._request("POST", "foo", "my content", headers)
self.assertTrue(self.con.path.startswith("/"))
self.assertEqual(resp, 200)
# cookies
self.http.cookie = Mock.Omnivore()
resp = self.http._request("POST", "/foo", "my content", headers)
self.assertTrue("add_cookie_header" in self.http.cookie.called)
# errors
self.con.response.status = 400
self.assertRaises(HTTPUserError, self.http._request, "POST", "/foo")
self.con.response.status = 500
self.assertRaises(HTTPServerError, self.http._request, "POST", "/foo")
def test_setcookie(self):
"""Test HTTPClient.setcookie."""
self.http.setcookie(CookieJar())
self.assertTrue(isinstance(self.http.cookie, CookieJar))
def test_setssl(self):
"""Test HTTPClient.setssl."""
# set nothing
self.http.setssl(None, None)
self.assertEqual(self.http.protocol, "http")
self.assertEqual(self.http.key_file, None)
self.assertEqual(self.http.cert_file, None)
# set key file only
self.http.setssl("Foo", None)
self.assertEqual(self.http.protocol, "https")
self.assertEqual(self.http.key_file, "Foo")
self.assertEqual(self.http.cert_file, None)
self.http.protocol = "http"
self.http.key_file = None
# set cert file only
self.http.setssl(None, "Foo")
self.assertEqual(self.http.protocol, "https")
self.assertEqual(self.http.key_file, None)
self.assertEqual(self.http.cert_file, "Foo")
self.http.protocol = "http"
self.http.key_file = None
# set key file and cert file
self.http.setssl("Foo", "Bar")
self.assertEqual(self.http.protocol, "https")
self.assertEqual(self.http.key_file, "Foo")
self.assertEqual(self.http.cert_file, "Bar")
def test_prepare(self):
"""Test HTTPClient._prepare."""
headers = {"X-Test": "Hello", "X-Test-2": "Umlaut ä"}
query = {"foo": "bär"}
http = HTTPClient("127.0.0.1", 80)
http.setbasicauth("me", "secret")
(uri, headers) = http._prepare("/foo bar/baz", headers, query)
self.assertEqual(uri, "/foo%20bar/baz?foo=b%C3%A4r")
expect = {
'Authorization': 'Basic bWU6c2VjcmV0',
'X-Test': 'Hello',
'X-Test-2': '=?utf-8?b?VW1sYXV0IMOk?=',
}
self.assertEqual(headers, expect)
def test_get(self):
"""Test HTTPClient.get."""
# prepare mock connection
self.con.response.status = 200
query = {"path": "/foo/bar"}
self.assertEqual(self.http.get("/index", None, query=query), 200)
self.assertEqual(self.con.method, "GET")
self.assertEqual(self.con.path, "/index?path=%2Ffoo%2Fbar")
self.assertTrue(self.con.closed)
def test_post(self):
"""Test HTTPClient.post."""
data = StringIO("Test data")
# prepare mock connection
self.con.response.status = 200
query = {"path": "/foo/bar"}
self.assertEqual(self.http.post("/index", None, query=query), 200)
self.assertEqual(self.con.method, "POST")
self.assertEqual(self.con.path, "/index?path=%2Ffoo%2Fbar")
self.assertTrue(self.con.closed)
def test_post_py25(self):
"""Test HTTPClient.post with Python 2.5."""
data = StringIO("Test data")
# prepare mock connection
self.con.response.status = 200
query = {"path": "/foo/bar"}
with injected(self.http.post, PYTHON2_6=False):
self.assertEqual(self.http.post("/index", data), 200)
self.assertEqual(self.con.method, "POST")
self.assertEqual(self.con.path, "/index")
self.assertTrue(self.con.closed)
def test_post_content_none(self):
"""Test HTTPClient.post with None as content."""
# prepare mock connection
self.con.response.status = 200
query = {"path": "/foo/bar"}
self.assertEqual(self.http.post("/index", None, query=query), 200)
self.assertEqual(self.con.method, "POST")
self.assertEqual(self.con.path, "/index?path=%2Ffoo%2Fbar")
self.assertTrue(self.con.closed)
def test_post_no_query(self):
"""Test HTTPClient.post without query string."""
data = StringIO("Test data")
# prepare mock connection
self.con.response.status = 200
self.assertEqual(self.http.post("/index", data), 200)
self.assertEqual(self.con.method, "POST")
self.assertEqual(self.con.path, "/index")
self.assertTrue(self.con.closed)
def test_post_form_data(self):
"""Test HTTPClient.post form-data."""
data = dict(a="foo", b="bar")
def urlencode(data):
urlencode.count += 1
return urllib.urlencode(data)
urlencode.count = 0
# prepare mock connection
mockurllib = Mock.Omnivore()
mockurllib.quote = urllib.quote
mockurllib.urlencode = urlencode
context = dict(
urllib_quote=mockurllib.quote,
urllib_urlencode=mockurllib.urlencode,
)
with injected(self.http.post, **context):
resp = self.http.post("/index", data)
self.assertEqual(urlencode.count, 1)
self.assertEqual(resp, 200)
def test_post_multipart(self):
"""Test HTTPClient.post multipart/form-data."""
data = dict(a="foo", b="bar")
resp = self.http.post("/index", data, as_multipart=True)
self.assertEqual(resp, 200)
self.assertEqual(self.con.method, "POST")
self.assertEqual(self.con.path, "/index")
self.assertTrue(self.con.closed)
def test_options(self):
"""Test HTTPClient.options."""
self.con.response.status = 200
self.assertEqual(self.http.options("/index"), 200)
self.assertEqual(self.con.method, "OPTIONS")
self.assertEqual(self.con.path, "/index")
self.assertTrue(self.con.closed)
def test_head(self):
"""Test HTTPClient.head."""
self.con.response.status = 200
self.assertEqual(self.http.head("/index"), 200)
self.assertEqual(self.con.method, "HEAD")
self.assertEqual(self.con.path, "/index")
self.assertTrue(self.con.closed)
def test_delete(self):
"""Test HTTPClient.delete."""
self.con.response.status = 200
self.assertEqual(self.http.delete("/index"), 200)
self.assertEqual(self.con.method, "DELETE")
self.assertEqual(self.con.path, "/index")
self.assertTrue(self.con.closed)
def test_trace(self):
"""Test HTTPClient.trace."""
self.con.response.status = 200
self.assertEqual(self.http.trace("/index"), 200)
self.assertEqual(self.con.method, "TRACE")
self.assertEqual(self.con.path, "/index")
self.assertTrue(self.con.closed)
def test_trace_maxforwards_via(self):
"""Test HTTPClient.trace with given maxforwards and via."""
self.con.response.status = 200
self.assertEqual(self.http.trace("/index", 5, ["a", "b"]), 200)
self.assertEqual(self.con.method, "TRACE")
self.assertEqual(self.con.path, "/index")
self.assertEqual(self.con.headers.get("Max-Forwards"), "5")
self.assertEqual(self.con.headers.get("Via"), "a, b")
self.assertTrue(self.con.closed)
def test_connect(self):
"""Test HTTPClient.connect."""
self.con.response.status = 200
self.assertEqual(self.http.connect("/"), 200)
self.assertEqual(self.con.method, "CONNECT")
self.assertEqual(self.con.path, "/")
self.assertTrue(self.con.closed)
class CoreWebDAVClientTestCase(unittest.TestCase):
"""Test the CoreWebDAVClient class."""
def setUp(self):
"""Setup the client."""
self.dav = CoreWebDAVClient("127.0.0.1", 80)
self.dav.setbasicauth("test", "passwd")
self.con = Mock.HTTPConnection()
self.dav._getconnection = lambda: self.con
response = Mock.Response()
response.content = LOCKDISCOVERY
response.status = 200
self.lock = WebDAVLockResponse(self.dav, "/", response)
def test_preparecopymove(self):
"""Test CoreWebDAVClient._preparecopymove."""
source = "/foo bar/baz"
dest = "/dest/in/ation"
headers = {"X-Test": "Hello", "X-Test-2": "Umlaut ä"}
query = {"foo": "bär"}
http = CoreWebDAVClient("127.0.0.1", 80)
http.setbasicauth("me", "secret")
(source, headers) = http._preparecopymove(source, dest, 0,
False, headers)
self.assertEqual(source, "/foo%20bar/baz")
exp_headers = {
"Destination": "http://127.0.0.1:80/dest/in/ation",
"Overwrite": "F",
"Authorization": "Basic bWU6c2VjcmV0",
"X-Test": "Hello",
"X-Test-2": "=?utf-8?b?VW1sYXV0IMOk?=",
}
self.assertEqual(headers, exp_headers)
def test_preparecopymove_col(self):
"""Test CoreWebDAVClient._preparecopymove with collection as source."""
source = "/foo bar/baz/"
dest = "/dest/in/ation"
headers = {"X-Test": "Hello", "X-Test-2": "Umlaut ä"}
query = {"foo": "bär"}
http = CoreWebDAVClient("127.0.0.1", 80)
http.setbasicauth("me", "secret")
(source, headers) = http._preparecopymove(source, dest, 0,
True, headers)
self.assertEqual(source, "/foo%20bar/baz/")
exp_headers = {
"Destination": "http://127.0.0.1:80/dest/in/ation",
"Depth": "0",
"Overwrite": "T",
"Authorization": "Basic bWU6c2VjcmV0",
"X-Test": "Hello",
"X-Test-2": "=?utf-8?b?VW1sYXV0IMOk?=",
}
self.assertEqual(headers, exp_headers)
def test_preparecopymove_illegal_depth(self):
"""Test CoreWebDAVClient._preparecopymove with illegal depth value."""
source = "/foo bar/baz"
dest = "/dest/in/ation"
headers = {"X-Test": "Hello"}
query = {"foo": "bär"}
http = CoreWebDAVClient("127.0.0.1", 80)
http.setbasicauth("me", "secret")
self.assertRaises(
ValueError,
http._preparecopymove,
source, dest, "1", False, headers
)
def test_mkcol(self):
"""Test CoreWebDAVClient.mkcol."""
# prepare mock connection
self.con.response.status = 201
self.assertEqual(self.dav.mkcol("/foobar"), 201)
self.assertEqual(self.con.method, "MKCOL")
self.assertEqual(self.con.path, "/foobar")
self.assertTrue(self.con.closed)
self.assertTrue("Authorization" in self.con.headers)
def test_propfind(self):
"""Test CoreWebDAVClient.propfind."""
# prepare mock connection
self.con.response.status = 207
self.con.response.content = MULTISTATUS
self.assertEqual(self.dav.propfind("/foobar"), 207)
self.assertEqual(self.con.method, "PROPFIND")
self.assertEqual(self.con.path, "/foobar")
self.assertEqual(self.con.headers["Depth"], "0")
self.assertTrue(self.con.closed)
self.assertTrue("Authorization" in self.con.headers)
def test_propfind_depth_1(self):
"""Test CoreWebDAVClient.propfind with depth 1."""
# prepare mock connection
self.con.response.status = 207
self.con.response.content = MULTISTATUS
self.assertEqual(self.dav.propfind("/foobar", "1"), 207)
self.assertEqual(self.con.method, "PROPFIND")
self.assertEqual(self.con.path, "/foobar")
self.assertEqual(self.con.headers["Depth"], "1")
self.assertTrue(self.con.closed)
self.assertTrue("Authorization" in self.con.headers)
def test_propfind_illegal_depth(self):
"""Test CoreWebDAVClient.propfind with illegal depth."""
# prepare mock connection
self.assertRaises(ValueError, self.dav.propfind, "/foobar", "ABC")
def test_propfind_illegal_args(self):
"""Test CoreWebDAVClient.propfind with illegal args."""
# prepare mock connection
self.assertRaises(ValueError,
self.dav.propfind, "/foobar", 1,
properties=["foo"], include=["bar"])
def test_put(self):
"""Test CoreWebDAVClient.put."""
# prepare mock connection
self.con.response.status = 201
self.con.response.content = "Test content."
self.assertEqual(self.dav.put("/foobar", self.con.response), 201)
self.assertEqual(self.con.method, "PUT")
self.assertEqual(self.con.path, "/foobar")
if PYTHONVERSION == (2, 5):
self.assertEqual(self.con.body, "Test content.")
else:
self.assertEqual(self.con.body, self.con.response)
self.assertTrue(self.con.closed)
self.assertTrue("Authorization" in self.con.headers)
def test_proppatch(self):
"""Test CoreWebDAVClient.proppatch."""
self.con.response.status = 207
self.con.response.content = MULTISTATUS
props = {"CADN:author": "me", "CADN:created": "2009-09-09 13:31"}
ns = {"CADN": "CADN:"}
self.assertEqual(207, self.dav.proppatch("/foobar", props, None, ns))
def test_proppatch_noprops(self):
"""Test CoreWebDAVClient.proppatch with no defined properties."""
ns = {"CADN": "CADN:"}
self.assertRaises(ValueError,
self.dav.proppatch, "/foobar", None, None, ns)
def test_delete(self):
"""Test CoreWebDAVClient.delete."""
self.con.response.status = 200
self.assertEqual(200, self.dav.delete("/foobar", None))
def test_delete_collection(self):
"""Test CoreWebDAVClient.delete on collection."""
self.con.response.status = 200
self.assertEqual(200, self.dav.delete("/foobar/", None))
def test_copy(self):
"""Test CoreWebDAVClient.copy."""
self.con.response.status = 200
source = "/foo bar/baz"
dest = "/dest/in/ation"
headers = {"X-Test": "Hello"}
resp = self.dav.copy(source, dest, 0, False, headers)
self.assertEqual(resp, 200)
def test_move(self):
"""Test CoreWebDAVClient.move."""
self.con.response.status = 200
source = "/foo bar/baz"
dest = "/dest/in/ation"
headers = {"X-Test": "Hello"}
resp = self.dav.move(source, dest, 0, False, headers)
self.assertEqual(resp, 200)
def test_move_collection_illegal_depth(self):
"""Test CoreWebDAVClient.move on collections with illegal depth."""
self.con.response.status = 200
source = "/foo bar/baz/"
dest = "/dest/in/ation"
headers = {"X-Test": "Hello"}
self.assertRaises(
ValueError,
self.dav.move,
source, dest, 0
)
def test_lock(self):
"""Test CoreWebDAVClient.lock."""
self.con.response.status = 200
resp = self.dav.lock("/foo")
self.assertTrue(isinstance(resp, WebDAVLockResponse))
self.assertEqual(resp, 200)
def test_lock_timeout(self):
"""Test CoreWebDAVClient.lock with timeout."""
self.con.response.status = 200
resp = self.dav.lock("/foo", timeout=12345)
self.assertEqual(resp, 200)
def test_lock_timeout_inf(self):
"""Test CoreWebDAVClient.lock with infinite timeout."""
self.con.response.status = 200
resp = self.dav.lock("/foo", timeout="infinite")
self.assertEqual(resp, 200)
def test_lock_timeout_toolong(self):
"""Test CoreWebDAVClient.lock with too long timeout."""
self.assertRaises(
ValueError,
self.dav.lock,
"/foo",
timeout=4294967296
)
def test_lock_timeout_err(self):
"""Test CoreWebDAVClient.lock with wrong timeout."""
self.assertRaises(
ValueError,
self.dav.lock,
"/foo",
timeout="abc"
)
def test_lock_depth(self):
"""Test CoreWebDAVClient.lock with given depth."""
self.con.response.status = 200
resp = self.dav.lock("/foo", depth=0)
self.assertEqual(resp, 200)
self.assertEqual(self.con.headers["Depth"], "0")
def test_lock_illegaldepth(self):
"""Test CoreWebDAVClient.lock with given illegal depth."""
self.assertRaises(
ValueError,
self.dav.lock,
"/foo",
depth=1
)
def test_unlock_lock(self):
"""Test CoreWebDAVClient.unlock with lock object."""
self.dav.locks[self.lock._tag] = self.lock
self.con.response.status = 204
resp = self.dav.unlock(self.lock)
self.assertEqual(self.con.method, "UNLOCK")
self.assertEqual(self.con.headers["Lock-Token"],
"<%s>" % self.lock.locktokens[0])
self.assertTrue(self.lock._tag not in self.dav.locks)
def test_unlock_uri(self):
"""Test CoreWebDAVClient.unlock with uri."""
self.dav.locks[self.lock._tag] = self.lock
self.con.response.status = 204
resp = self.dav.unlock("/")
self.assertEqual(self.con.method, "UNLOCK")
self.assertEqual(self.con.headers["Lock-Token"],
"<%s>" % self.lock.locktokens[0])
self.assertTrue(self.lock._tag not in self.dav.locks)
def test_unlock_uri_no_token(self):
"""Test CoreWebDAVClient.unlock with uri."""
self.con.response.status = 204
self.assertRaises(ValueError, self.dav.unlock, "/")
def test_unlock_lock_no_token(self):
"""Test CoreWebDAVClient.unlock with lock object and no token."""
self.con.response.status = 204
resp = self.dav.unlock(self.lock)
self.assertEqual(self.con.method, "UNLOCK")
self.assertEqual(self.con.headers["Lock-Token"],
"<%s>" % self.lock.locktokens[0])
self.assertTrue(self.lock._tag not in self.dav.locks)
class ExtendedWebDAVClientTestCase(unittest.TestCase):
"""Test the ExtendedWebDAVClient class."""
def setUp(self):
"""Setup the client."""
self.dav = ExtendedWebDAVClient("127.0.0.1", 80)
self.dav.setbasicauth("test", "passwd")
self.con = Mock.HTTPConnection()
self.dav._getconnection = lambda: self.con
def test_report(self):
"""Test ExtendedWebDAVClient.report."""
self.con.response.status = 207
self.con.response.content = REPORT
props = ["version-name", "creator-displayname", "successor-set"]
response = self.dav.report("/foo.html", properties=props)
self.assertEqual(response, 207)
self.assertEqual(self.con.method, "REPORT")
self.assertEqual(self.con.path, "/foo.html")
self.assertEqual(self.con.headers["Depth"], "0")
self.assertTrue(self.con.closed)
self.assertTrue("Authorization" in self.con.headers)
def test_report_depth_1(self):
"""Test ExtendedWebDAVClient.report with depth 1."""
self.con.response.status = 207
self.con.response.content = REPORT
props = ["version-name", "creator-displayname", "successor-set"]
response = self.dav.report("/foo.html", "1", props)
self.assertEqual(response, 207)
self.assertEqual(self.con.method, "REPORT")
self.assertEqual(self.con.path, "/foo.html")
self.assertEqual(self.con.headers["Depth"], "1")
self.assertTrue(self.con.closed)
self.assertTrue("Authorization" in self.con.headers)
def test_report_illegal_depth(self):
"""Test ExtendedWebDAVClient.report with illegal depth."""
# prepare mock connection
self.assertRaises(ValueError, self.dav.report, "/foo.html", "ABC")
class HTTPResponseTestCase(unittest.TestCase):
"""Test HTTPResponse class."""
def setUp(self):
"""Initialize the tests."""
self.response = Mock.Response()
self.response.status = 207
self.response.content = MULTISTATUS
self.httpresponse = HTTPResponse(self.response)
# 401
self.response = Mock.Response()
digest = 'Digest realm="restricted" domain="foo.de" nonce="abcd1234"'\
'opaque="qwer4321" stale=false algorithm="MD5"'
self.response.headers["www-authenticate"] = digest
self.response.status = 401
self.response.content = ""
self.httpresponse401 = HTTPResponse(self.response)
def test_init(self):
"""Test Initializing the HTTPResponse."""
self.assertEqual(self.httpresponse.content, MULTISTATUS)
self.assertEqual(self.httpresponse.statusline,
"HTTP/1.1 207 The reason")
self.assertEqual(self.httpresponse401.content, "")
self.assertEqual(self.httpresponse401.statusline,
"HTTP/1.1 401 The reason")
self.assertEqual(self.httpresponse401.schema, "Digest")
self.assertEqual(self.httpresponse401.realm, "restricted")
self.assertEqual(self.httpresponse401.domain, "foo.de")
self.assertEqual(self.httpresponse401.nonce, "abcd1234")
self.assertEqual(self.httpresponse401.opaque, "qwer4321")
self.assertFalse(self.httpresponse401.stale)
self.assertEqual(self.httpresponse401.algorithm, hashlib.md5)
def test_str(self):
"""Test HTTPResponse.__str__."""
self.assertEqual(str(self.httpresponse), "HTTP/1.1 207 The reason")
self.assertEqual(str(self.httpresponse401), "HTTP/1.1 401 The reason")
def test_repr(self):
"""Test HTTPResponse.__repr__."""
self.assertEqual(repr(self.httpresponse), "<HTTPResponse: 207>")
self.assertEqual(repr(self.httpresponse401), "<HTTPResponse: 401>")
def test_status(self):
"""Test HTTPResponse.status property."""
self.assertEqual(self.httpresponse, 207)
self.assertEqual(self.httpresponse401, 401)
class WebDAVResponseTestCase(unittest.TestCase):
"""Test the WebDAVResponse class."""
def test_init(self):
"""Test initializing the WebDAVResponse."""
response = Mock.Response()
response.content = MULTISTATUS
# no parsing
response.status = 200
davresponse = WebDAVResponse(response)
self.assertFalse(bool(davresponse._etree.getroot()))
# parsing
response.status = 207
davresponse = WebDAVResponse(response)
self.assertTrue(bool(davresponse._etree.getroot()))
# broken xml
response.status = 207
response.content = MULTISTATUS_BROKEN
davresponse = WebDAVResponse(response)
self.assertTrue(bool(davresponse._etree.getroot()))
self.assertTrue(isinstance(davresponse.parse_error, ParseError))
def test_len(self):
"""Test WebDAVResponse.__len__."""
response = Mock.Response()
response.content = MULTISTATUS
response.status = 200
davresponse = WebDAVResponse(response)
self.assertEqual(len(davresponse), 1)
def test_len_207(self):
"""Test WebDAVResponse.__len__ in Multi-Status."""
response = Mock.Response()
response.content = MULTISTATUS
response.status = 207
davresponse = WebDAVResponse(response)
self.assertEqual(len(davresponse), 1)
def test_iter(self):
"""Test WebDAVResponse.__iter__."""
response = Mock.Response()
response.content = MULTISTATUS
response.status = 200
davresponse = WebDAVResponse(response)
self.assertTrue(isinstance(list(davresponse)[0], WebDAVResponse))
def test_iter_207(self):
"""Test WebDAVResponse.__iter__ in Multi-Status."""
response = Mock.Response()
response.content = MULTISTATUS
response.status = 207
davresponse = WebDAVResponse(response)
self.assertEqual(list(davresponse)[0], 200)
def test_parse_xml_content(self):
"""Test WebDAVResponse._parse_xml_content."""
response = Mock.Response()
response.content = MULTISTATUS
response.status = 207
with replaced(WebDAVResponse, _parse_xml_content=Mock.omnivore_func()):
davresponse = WebDAVResponse(response)
davresponse._parse_xml_content()
href = davresponse._etree.findtext("/{DAV:}response/{DAV:}href")
self.assertEquals(href, "/3/38/38f/38fa476aa97a4b2baeb41a481fdca00b")
def test_parse_xml_content_broken(self):
"""Test WebDAVResponse._parse_xml_content with broken XML."""
response = Mock.Response()
response.content = MULTISTATUS_BROKEN
response.status = 207
with replaced(WebDAVResponse, _parse_xml_content=Mock.omnivore_func()):
davresponse = WebDAVResponse(response)
davresponse._parse_xml_content()
empty = davresponse._etree.getroot().getchildren()[0]
self.assertEquals(empty.tag, "empty")
def test_set_multistatus(self):
"""Test WebDAVResponse._set_multistatus."""
response = Mock.Response()
response.content = MULTISTATUS
response.status = 200
davresponse = WebDAVResponse(response)
mockparser = Mock.Omnivore()
with replaced(davresponse, _parse_xml_content=mockparser):
self.assertFalse(davresponse.is_multistatus)
self.assertEquals(len(mockparser.called["__call__"]), 0)
davresponse._set_multistatus()
self.assertTrue(davresponse.is_multistatus)
self.assertEquals(len(mockparser.called["__call__"]), 1)
class WebDAVLockResponseTestCase(unittest.TestCase):
"""Test the WebDAVLockResponse class."""
def setUp(self):
"""Setup the tests"""
self.client = CoreWebDAVClient("localhost")
response = Mock.Response()
response.content = LOCKDISCOVERY
response.status = 200
self.lock = WebDAVLockResponse(self.client, "/", response)
def test_init_200(self):
"""Test WebDAVLockResponse.__init__ with 200 status."""
lock = self.lock
self.assertEqual(lock.lockscope.tag, "{DAV:}exclusive")
self.assertEqual(lock.locktype.tag, "{DAV:}write")
self.assertEqual(lock.depth, "Infinity")
href = "http://localhost/me.html"
self.assertEqual(lock.owner.findtext("{DAV:}href").strip(), href)
self.assertEqual(lock.timeout, "Second-604800")
token = "opaquelocktoken:e71d4fae-5dec-22d6-fea5-00a0c91e6be4"
self.assertEqual(lock.locktokens[0], token)
def test_init_409(self):
"""Test WebDAVLockResponse.__init__ with 409 status."""
client = CoreWebDAVClient("localhost")
response = Mock.Response()
response.content = MULTISTATUS
response.status = 409
lock = WebDAVLockResponse(client, "/", response)
self.assertTrue(lock._etree.find("/{DAV:}response") is not None)
self.assertTrue(lock.is_multistatus)
def test_repr(self):
"""Test WebDAVLockResponse.__repr__."""
lrepr = "<WebDAVLockResponse: <%s> 200>" % self.lock._tag
self.assertEqual(repr(self.lock), lrepr)
def test_call(self):
"""Test WebDAVLockResponse.__call__."""
self.assertTrue(self.lock._tagged)
self.lock(False)
self.assertFalse(self.lock._tagged)
self.lock()
self.assertTrue(self.lock._tagged)
self.lock(False)
self.lock(True)
self.assertTrue(self.lock._tagged)
def test_contextmanager(self):
"""Test contextmanager on WebDAVLockResponse."""
self.client.headers["If"] = "My previous if"
# tagged
with self.lock:
expect = "<http://localhost:80/> "\
"(<opaquelocktoken:e71d4fae-5dec-22d6-fea5-00a0c91e6be4>)"
if_header = self.client.headers["If"]
self.assertEqual(expect, if_header)
self.assertEqual("My previous if", self.client.headers["If"])
# untagged
with self.lock(False):
expect = "(<opaquelocktoken:e71d4fae-5dec-22d6-fea5-00a0c91e6be4>)"
if_header = self.client.headers["If"]
self.assertEqual(expect, if_header)
self.assertEqual("My previous if", self.client.headers["If"])
# untagged, no previous if header
del self.client.headers["If"]
with self.lock(False):
expect = "(<opaquelocktoken:e71d4fae-5dec-22d6-fea5-00a0c91e6be4>)"
if_header = self.client.headers["If"]
self.assertEqual(expect, if_header)
self.assertTrue("If" not in self.client.headers)
class MultiStatusResponseTestCase(unittest.TestCase):
"""Test the MultiStatusResponse class."""
def setUp(self):
self.etree = ElementTree()
self.etree.parse(StringIO(RESPONSE))
self.msr = MultiStatusResponse(self.etree.getroot())
def test_init(self):
"""Test initializing the MultiStatusResponse."""
self.assertEqual(self.msr, 200)
def test_repr(self):
"""Test MultiStatusResponse.__repr__."""
self.assertEqual(repr(self.msr), "<MultiStatusResponse: 200>")
def test_getitem(self):
"""Test MultiStatusResponse.__getitem__."""
self.assertEqual(self.msr["getetag"].text, "6ca7-364-475e65375ce80")
self.assertEqual(self.msr["{DC:}author"].text, "Me")
self.assertRaises(KeyError, lambda: self.msr['non-existant'])
def test_keys(self):
"""Test MultiStatusResponse.keys."""
expect = ['getetag', '{DC:}created', '{DC:}resource', '{DC:}author']
expect.sort()
keys = self.msr.keys()
keys.sort()
self.assertEqual(keys, expect)
def test_iter(self):
"""Test MultiStatusResponse.__iter__."""
expect = ['getetag', '{DC:}created', '{DC:}resource', '{DC:}author']
expect.sort()
keys = list(self.msr)
keys.sort()
self.assertEqual(keys, expect)
def test_iterkeys(self):
"""Test MultiStatusResponse.iterkeys."""
expect = ['getetag', '{DC:}created', '{DC:}resource', '{DC:}author']
expect.sort()
keys = list(self.msr.iterkeys())
keys.sort()
self.assertEqual(keys, expect)
def test_items(self):
"""Test MultiStatusResponse.items."""
expect = [('getetag', '6ca7-364-475e65375ce80'),
('{DC:}created', None),
('{DC:}resource', None),
('{DC:}author', 'Me')]
expect.sort()
items = list((k, v.text) for (k, v) in self.msr.items())
items.sort()
self.assertEqual(items, expect)
def test_iteritems(self):
"""Test MultiStatusResponse.iteritems."""
expect = [('getetag', '6ca7-364-475e65375ce80'),
('{DC:}created', None),
('{DC:}resource', None),
('{DC:}author', 'Me')]
expect.sort()
items = list((k, v.text) for (k, v) in self.msr.iteritems())
items.sort()
self.assertEqual(items, expect)
def test_get(self):
"""Test MultiStatusResponse.get."""
self.assertEqual(self.msr.get("{DC:}author").text, "Me")
self.assertEqual(self.msr.get("author", namespace="DC:").text, "Me")
self.assertEqual(self.msr.get("non-existant", "You"), "You")
def test_statusline(self):
"""Test MultiStatusResponse.statusline property."""
self.assertEqual(self.msr.statusline, "HTTP/1.1 200 OK")
def test_href(self):
"""Test MultiStatusResponse.href property."""
self.assertEqual(self.msr.href,
"/3/38/38f/38fa476aa97a4b2baeb41a481fdca00b")
def test_namespaces(self):
"""Test MultiStatusResponse.namespaces property."""
expect = set(["DC:", "DAV:"])
self.msr.iterkeys = lambda b: ["foo", "bar", "{DC:}x", "{DAV:}y"]
self.assertEqual(self.msr.namespaces, expect)
| gpl-3.0 | -433,869,584,700,382,140 | 36.733075 | 79 | 0.614517 | false |
cmr/automatafl | old_python_prototype/server.py | 1 | 6501 | """
Automatafl HTTP game server.
There is a central store of all currently-running games, with the board and
game state. Each game is identified by a UUIDv4. When creating a game, the
client has an option to make the game public or private. Public games will be
listed publicly. A player may join any game which they have a URL to, if
there are any empty player slots. Clients are expected to open a websocket
connection to receive notifications of moves and state changes for the games
they are participating in and/or watching.
Sessions store identifiers, also UUIDv4's, which are used to lookup and
track the games a given client is participating in.
The server keeps a log of committed transactions (sets of moves) for a
not-yet-implemented replay feature.
"""
from contextlib import contextmanager
import os
import uuid
from flask.json import jsonify
from flask import Flask, session, abort
from flask_socketio import SocketIO, join_room, leave_room, send, emit
import model
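# Illustrative sketch (not used by the server itself): how a client might drive
# this API over HTTP. The base URL and the `requests` dependency are assumptions
# made for the example, not requirements of this module.
def _example_client_flow(base_url="http://localhost:5000"):
    """Create a public game and join it as a named player (example only)."""
    import requests
    http = requests.Session()
    # Create a game and remember its UUID.
    created = http.post(base_url + "/game", json={"name": "demo game"}).json()
    game_uuid = created["uuid"]
    # The session cookie set by the server now identifies this player.
    http.post(base_url + "/game/%s/join" % game_uuid, json={"name": "player 1"})
    # A websocket client would next emit "subscribe_to_game" with this UUID to
    # receive "resolved" events, and "submit_move" to play.
    return game_uuid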
class FEPleb(model.Plebeian):
def __init__(self, id, name, uuid):
self.id = id
self.name = name
self.uuid = uuid
class FEGame(object):
def __init__(self, name):
self.name = name
self.private = False
self.max_plebid = 0
self.sess_plebs = {}
self.uuid_plebs = {}
self.uuid = uuid.uuid4()
self.committed_moves = []
self.setup = None
self.mgame = None
def add_pleb(self, name, sessid):
pleb = FEPleb(self.max_plebid, name, uuid.uuid4())
self.sess_plebs[sessid] = pleb
self.uuid_plebs[pleb.uuid] = pleb
self.max_plebid += 1
        return pleb
def create_game_model(self):
        self.mgame = model.Game(plebs=list(self.sess_plebs.values()), setup=self.setup)
def pleb_uuids(self):
        return [pleb.uuid for pleb in self.sess_plebs.values()]
def pleb_from_sess(self, sess):
return self.sess_plebs.get(sess)
def pleb_from_uuid(self, uuid):
return self.uuid_plebs.get(uuid)
def serialize(self):
return {
'board': None if self.mgame is None else self.mgame.Serialize(),
'name': self.name,
'players': len(self.sess_plebs),
}
# TODO: Setup configuration (choosing initial board, etc.)
def is_coord(thing):
# Coordinates are a [y, x] pair; JSON should deserialize them as a list.
    return isinstance(thing, list) and len(thing) == 2 and all(isinstance(c, int) for c in thing)
# Map from UUID to dict.
client_session_states = {}
# Map from UUID to FEGame
current_games = {}
app = Flask(__name__)
app.secret_key = os.urandom(32)
socketio = SocketIO(app)
def sess_uuid():
if "sess" not in session:
session["sess"] = uuid.uuid4()
return session["sess"]
def client_sess_state():
uid = sess_uuid()
if uid not in client_session_states:
d = {}
client_session_states[uid] = d
d["in_games"] = []
return client_session_states.get(uid)
@app.route("/game", methods=["GET"])
def list_games():
return jsonify([
feg.serialize()
for feg in current_games.values()
if not feg.private or feg.pleb_from_sess(sess_uuid())
])
@app.route("/game/<uuid:gameid>", methods=["GET"])
def get_game(gameid):
feg = current_games.get(gameid, None)
if feg is None:
abort(404)
return jsonify({"status": "ok", "game": feg.serialize()})
@app.route("/game/<uuid:gameid>/join", methods=["POST"])
def join_game(gameid):
feg = current_games.get(gameid, None)
if feg is None:
abort(404)
if hasattr(feg, "mgame"):
abort(403)
j = request.get_json()
if "name" not in j:
abort(400)
feg.add_pleb(j["name"], sess_uuid())
return jsonify({"status": "ok"})
@app.route("/game", methods=["POST"])
def make_game():
j = request.get_json()
if "name" not in j:
abort(400)
feg = FEGame(j["name"])
current_games[feg.uuid] = feg
return jsonify({"status": "ok", "uuid": feg.uuid})
@socketio.on("subscribe_to_game")
def subscribe_to_game(msg):
if "reqid" not in msg:
return {"status": "error", "reqid": 0, "error": "NO_REQID"}
elif "sessid" not in msg:
return {"status": "error", "reqid": msg["reqid"], "error": "NO_SESSID"}
elif "game_id" not in msg:
return {"status": "error", "reqid": msg["reqid"], "error": "NEED_GAME_ID"}
elif msg["game_id"] not in current_games:
return {"status": "error", "reqid": msg["reqid"], "error": "GAME_NOT_EXIST"}
elif msg["game_id"] not in client_sess_state()["in_games"]:
return {"status": "error", "reqid": msg["reqid"], "error": "NOT_IN_GAME"}
else:
join_room(msg["game_id"])
return {"status": "ok", "reqid": msg["reqid"]}
@socketio.on("unsubscribe_from_game")
def unsubscribe_from_game(msg):
if "reqid" not in msg:
return {"status": "error", "reqid": 0, "error": "NO_REQID"}
elif "game_id" not in msg:
return {"status": "error", "reqid": msg["reqid"], "error": "NEED_GAME_ID"}
else:
leave_room(msg["game_id"])
return {"status": "ok", "reqid": msg["reqid"]}
@socketio.on("submit_move")
def submit_move(msg):
s = client_sess_state()
if "reqid" not in msg:
return {"status": "error", "reqid": 0, "error": "NO_REQID"}
elif "game_id" not in msg:
return {"status": "error", "reqid": msg["reqid"], "error": "NEED_GAME_ID"}
elif msg["game_id"] not in s["in_games"]:
return {"status": "error", "reqid": msg["reqid"], "error": "NOT_IN_GAME"}
elif msg["game_id"] not in current_games:
return {"status": "error", "reqid": msg["reqid"], "error": "GAME_NOT_EXIST"}
elif not is_coord(msg.get("from")) or not is_coord(msg.get("to")):
return {"status": "error", "reqid": msg["reqid"], "error": "NEED_COORDS"}
else:
feg = current_games[msg["game_id"]]
plebid = feg.pleb_from_sess(sess_uuid())
iev = model.Move(plebid, msg["from"], msg["to"])
oev = feg.mgame.Handle(iev)
if len(feg.mgame.pending_moves) == len(feg.mgame.plebeians):
conflicts = feg.mgame.Resolve()
emit("resolved", {"status": "ok", "event": [c.serialize() for c in
conflicts]}, broadcast=True, room=feg.uuid)
return {"status": "ok", "reqid": msg["reqid"], "event": oev.serialize()}
@app.route("/")
def index():
return app.send_static_file("index.html")
if __name__ == "__main__":
socketio.run(app)
| apache-2.0 | -5,343,330,481,925,989,000 | 31.505 | 84 | 0.610983 | false |
matematik7/CSSQC | tests/test_colonFormat.py | 1 | 4046 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------
# test_colonFormat.py
#
# test for colonFormat rule
# ----------------------------------------------------------------
# copyright (c) 2014 - Domen Ipavec
# Distributed under The MIT License, see LICENSE
# ----------------------------------------------------------------
import unittest
from cssqc.parser import CSSQC
from cssqc.qualityWarning import QualityWarning
class Test_colonFormat(unittest.TestCase):
def setUp(self):
self.sample = '''div {
width: 100%;
}
.class1 {
height : 100%;
}
.class2 {
width :50%;
}
.class3 {
margin:0;
}
.class4 {
-webkit-border-image : @url @iwidth repeat;
-moz-border-image : @url @iwidth repeat;
border-image : @url @iwidth repeat;
border-width : @bwidth;
background-clip : padding-box;
border-style : solid;
}'''
def parseBefore(self, data):
c = CSSQC({"colonFormat": 'before'})
c.parse(data)
return c
def parseAfter(self, data):
c = CSSQC({"colonFormat": 'after'})
c.parse(data)
return c
def parseBoth(self, data):
c = CSSQC({"colonFormat": 'both'})
c.parse(data)
return c
def parseNone(self, data):
c = CSSQC({"colonFormat": 'none'})
c.parse(data)
return c
def parseAlign(self, data):
c = CSSQC({"colonFormat": 'align'})
c.parse(data)
return c
def test_cf_before(self):
c_before = self.parseBefore(self.sample)
self.assertEqual(c_before.warnings, [
QualityWarning('colonFormat', 2),
QualityWarning('colonFormat', 5),
QualityWarning('colonFormat', 11),
QualityWarning('colonFormat', 14),
QualityWarning('colonFormat', 15),
QualityWarning('colonFormat', 16),
QualityWarning('colonFormat', 17),
QualityWarning('colonFormat', 18),
QualityWarning('colonFormat', 19)
])
def test_cf_after(self):
c_after = self.parseAfter(self.sample)
self.assertEqual(c_after.warnings, [
QualityWarning('colonFormat', 5),
QualityWarning('colonFormat', 8),
QualityWarning('colonFormat', 11),
QualityWarning('colonFormat', 14),
QualityWarning('colonFormat', 15),
QualityWarning('colonFormat', 16),
QualityWarning('colonFormat', 17),
QualityWarning('colonFormat', 18),
QualityWarning('colonFormat', 19)
])
def test_cf_both(self):
c_both = self.parseBoth(self.sample)
self.assertEqual(c_both.warnings, [
QualityWarning('colonFormat', 2),
QualityWarning('colonFormat', 8),
QualityWarning('colonFormat', 11),
QualityWarning('colonFormat', 15),
QualityWarning('colonFormat', 16),
QualityWarning('colonFormat', 17),
QualityWarning('colonFormat', 18),
QualityWarning('colonFormat', 19)
])
def test_cf_none(self):
c_none = self.parseNone(self.sample)
self.assertEqual(c_none.warnings, [
QualityWarning('colonFormat', 2),
QualityWarning('colonFormat', 5),
QualityWarning('colonFormat', 8),
QualityWarning('colonFormat', 14),
QualityWarning('colonFormat', 15),
QualityWarning('colonFormat', 16),
QualityWarning('colonFormat', 17),
QualityWarning('colonFormat', 18),
QualityWarning('colonFormat', 19)
])
def test_cf_align(self):
c_align = self.parseAlign(self.sample)
self.assertEqual(c_align.warnings, [
QualityWarning('colonFormat', 2),
QualityWarning('colonFormat', 8),
QualityWarning('colonFormat', 11),
QualityWarning('colonFormat', 19)
])
| mit | -3,760,912,884,172,937,000 | 31.111111 | 66 | 0.542017 | false |
ppy/angle | scripts/generate_android_bp.py | 1 | 19660 | # Copyright The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Generates an Android.bp file from the json output of a 'gn desc' command.
# Example usage:
# gn desc out/Android --format=json "*" > desc.json
# python scripts/generate_android_bp.py desc.json > Android.bp
import json
import sys
import re
import os
import argparse
root_targets = [
"//:libGLESv2",
"//:libGLESv1_CM",
"//:libEGL",
"//:libfeature_support",
]
sdk_version = '28'
stl = 'libc++_static'
abi_arm = 'arm'
abi_arm64 = 'arm64'
abi_x86 = 'x86'
abi_x64 = 'x86_64'
abi_targets = [abi_arm, abi_arm64, abi_x86, abi_x64]
def tabs(indent):
return ' ' * (indent * 4)
def has_child_values(value):
# Elements of the blueprint can be pruned if they are empty lists or dictionaries of empty
# lists
if isinstance(value, list):
return len(value) > 0
if isinstance(value, dict):
for (item, item_value) in value.items():
if has_child_values(item_value):
return True
return False
# This is a value leaf node
return True
def write_blueprint_key_value(output, name, value, indent=1):
if not has_child_values(value):
return
if isinstance(value, set) or isinstance(value, list):
value = list(sorted(set(value)))
if isinstance(value, list):
output.append(tabs(indent) + '%s: [' % name)
for item in value:
output.append(tabs(indent + 1) + '"%s",' % item)
output.append(tabs(indent) + '],')
return
if isinstance(value, dict):
if not value:
return
output.append(tabs(indent) + '%s: {' % name)
for (item, item_value) in value.items():
write_blueprint_key_value(output, item, item_value, indent + 1)
output.append(tabs(indent) + '},')
return
if isinstance(value, bool):
output.append(tabs(indent) + '%s: %s,' % (name, 'true' if value else 'false'))
return
output.append(tabs(indent) + '%s: "%s",' % (name, value))
def write_blueprint(output, target_type, values):
output.append('%s {' % target_type)
for (key, value) in values.items():
write_blueprint_key_value(output, key, value)
output.append('}')
def gn_target_to_blueprint_target(target, target_info):
if 'output_name' in target_info:
return target_info['output_name']
# Split the gn target name (in the form of //gn_file_path:target_name) into gn_file_path and
# target_name
target_regex = re.compile(r"^//([a-zA-Z0-9\-_/]*):([a-zA-Z0-9\-_.]+)$")
match = re.match(target_regex, target)
assert match is not None
gn_file_path = match.group(1)
target_name = match.group(2)
assert len(target_name) > 0
# Clean up the gn file path to be a valid blueprint target name.
gn_file_path = gn_file_path.replace("/", "_").replace(".", "_").replace("-", "_")
# Generate a blueprint target name by merging the gn path and target so each target is unique.
# Prepend the 'angle' prefix to all targets in the root path (empty gn_file_path).
# Skip this step if the target name already starts with 'angle' to avoid target names such as 'angle_angle_common'.
root_prefix = "angle"
if len(gn_file_path) == 0 and not target_name.startswith(root_prefix):
gn_file_path = root_prefix
# Avoid names such as _angle_common if the gn_file_path is empty.
if len(gn_file_path) > 0:
gn_file_path += "_"
return gn_file_path + target_name
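# Illustrative examples of the mapping above (assuming the targets define no
# explicit 'output_name'):
#   "//:libGLESv2"          -> "angle_libGLESv2"
#   "//src/common:some_lib" -> "src_common_some_lib"   (hypothetical target)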
def remap_gn_path(path):
# TODO: pass the gn gen folder as an arg so it is future proof. b/150457277
remap_folders = [
('out/Android/gen/angle/', ''),
('out/Android/gen/', ''),
]
remapped_path = path
for (remap_source, remap_dest) in remap_folders:
remapped_path = remapped_path.replace(remap_source, remap_dest)
return remapped_path
def gn_path_to_blueprint_path(source):
# gn uses '//' to indicate the root directory, blueprint uses the .bp file's location
return remap_gn_path(re.sub(r'^//?', '', source))
def gn_paths_to_blueprint_paths(paths):
rebased_paths = []
for path in paths:
rebased_paths.append(gn_path_to_blueprint_path(path))
return rebased_paths
def gn_sources_to_blueprint_sources(sources):
    # Blueprints only list source files in the sources list. Headers are only referenced through
# include paths.
file_extension_allowlist = [
'.c',
'.cc',
'.cpp',
]
rebased_sources = []
for source in sources:
if os.path.splitext(source)[1] in file_extension_allowlist:
rebased_sources.append(gn_path_to_blueprint_path(source))
return rebased_sources
target_blockist = [
'//build/config:shared_library_deps',
'//third_party/vulkan-validation-layers/src:vulkan_clean_old_validation_layer_objects',
]
include_blocklist = [
'//out/Android/gen/third_party/vulkan-deps/glslang/src/include/',
]
def gn_deps_to_blueprint_deps(target_info, build_info):
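    """Map a GN target's dependencies onto blueprint dependency lists.

    Returns a (static_libs, shared_libs, defaults, generated_headers,
    header_libs) tuple. Static libraries and generated headers of transitive
    dependencies are folded in, because blueprints do not chain them the way
    GN does.
    """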
static_libs = []
shared_libs = []
defaults = []
generated_headers = []
header_libs = []
    if 'deps' not in target_info:
        return static_libs, shared_libs, defaults, generated_headers, header_libs
for dep in target_info['deps']:
if dep not in target_blockist:
dep_info = build_info[dep]
blueprint_dep_name = gn_target_to_blueprint_target(dep, dep_info)
# Depending on the dep type, blueprints reference it differently.
gn_dep_type = dep_info['type']
if gn_dep_type == 'static_library':
static_libs.append(blueprint_dep_name)
elif gn_dep_type == 'shared_library':
shared_libs.append(blueprint_dep_name)
elif gn_dep_type == 'source_set' or gn_dep_type == 'group':
defaults.append(blueprint_dep_name)
elif gn_dep_type == 'action':
generated_headers.append(blueprint_dep_name)
# Blueprints do not chain linking of static libraries.
(child_static_libs, _, _, child_generated_headers, _) = gn_deps_to_blueprint_deps(
dep_info, build_info)
# Each target needs to link all child static library dependencies.
static_libs += child_static_libs
            # Unlike GN, each blueprint target runs genrules in its own output directory. If a
            # target depends on another target's genrule, it won't find the outputs. Propagate
            # generated headers up the dependency stack.
generated_headers += child_generated_headers
return static_libs, shared_libs, defaults, generated_headers, header_libs
def gn_libs_to_blueprint_shared_libraries(target_info):
lib_blockist = [
'android_support',
'unwind',
]
result = []
if 'libs' in target_info:
for lib in target_info['libs']:
if lib not in lib_blockist:
android_lib = lib if '@' in lib else 'lib' + lib
result.append(android_lib)
return result
def gn_include_dirs_to_blueprint_include_dirs(target_info):
result = []
if 'include_dirs' in target_info:
for include_dir in target_info['include_dirs']:
if len(include_dir) > 0 and include_dir not in include_blocklist:
result.append(gn_path_to_blueprint_path(include_dir))
return result
def escape_quotes(string):
return string.replace("\"", "\\\"").replace("\'", "\\\'")
angle_cpu_bits_define = r'^ANGLE_IS_[0-9]+_BIT_CPU$'
def gn_cflags_to_blueprint_cflags(target_info):
result = []
# regexs of allowlisted cflags
cflag_allowlist = [
r'^-Wno-.*$', # forward cflags that disable warnings
r'-mpclmul' # forward "-mpclmul" (used by zlib)
]
for cflag_type in ['cflags', 'cflags_c', 'cflags_cc']:
if cflag_type in target_info:
for cflag in target_info[cflag_type]:
for allowlisted_cflag in cflag_allowlist:
if re.search(allowlisted_cflag, cflag):
result.append(cflag)
    # Chrome and Android use different versions of Clang which support different warning options.
# Ignore errors about unrecognized warning flags.
result.append('-Wno-unknown-warning-option')
if 'defines' in target_info:
for define in target_info['defines']:
            # Don't emit ANGLE's CPU-bits define here; it will be added as part of the
            # arch-specific information later.
            if not re.search(angle_cpu_bits_define, define):
                result.append('-D%s' % escape_quotes(define))
return result
blueprint_library_target_types = {
"static_library": "cc_library_static",
"shared_library": "cc_library_shared",
"source_set": "cc_defaults",
"group": "cc_defaults",
}
def merge_bps(bps_for_abis):
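    """Merge the per-ABI blueprint dicts into a single blueprint dict.

    List values shared by every ABI are hoisted to the top level; values that
    only apply to some ABIs are nested under an 'arch' entry keyed by ABI.
    Non-list values are assumed to be identical across ABIs.
    """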
common_bp = {}
for abi in abi_targets:
for key in bps_for_abis[abi]:
if isinstance(bps_for_abis[abi][key], list):
# Find list values that are common to all ABIs
for value in bps_for_abis[abi][key]:
value_in_all_abis = True
for abi2 in abi_targets:
if key == 'defaults':
# arch-specific defaults are not supported
break
value_in_all_abis = value_in_all_abis and (key in bps_for_abis[abi2].keys(
)) and (value in bps_for_abis[abi2][key])
if value_in_all_abis:
if key in common_bp.keys():
common_bp[key].append(value)
else:
common_bp[key] = [value]
else:
if 'arch' not in common_bp.keys():
# Make sure there is an 'arch' entry to hold ABI-specific values
common_bp['arch'] = {}
for abi3 in abi_targets:
common_bp['arch'][abi3] = {}
if key in common_bp['arch'][abi].keys():
common_bp['arch'][abi][key].append(value)
else:
common_bp['arch'][abi][key] = [value]
else:
# Assume everything that's not a list is common to all ABIs
common_bp[key] = bps_for_abis[abi][key]
return common_bp
def library_target_to_blueprint(target, build_info):
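    """Convert a GN library/source_set/group target into a blueprint module.

    Builds one blueprint dict per ABI and then merges them, so ABI-specific
    values end up under 'arch' while shared values stay at the top level.
    """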
bps_for_abis = {}
blueprint_type = ""
for abi in abi_targets:
if target not in build_info[abi].keys():
bps_for_abis[abi] = {}
continue
target_info = build_info[abi][target]
blueprint_type = blueprint_library_target_types[target_info['type']]
bp = {'name': gn_target_to_blueprint_target(target, target_info)}
if 'sources' in target_info:
bp['srcs'] = gn_sources_to_blueprint_sources(target_info['sources'])
(bp['static_libs'], bp['shared_libs'], bp['defaults'], bp['generated_headers'],
bp['header_libs']) = gn_deps_to_blueprint_deps(target_info, build_info[abi])
bp['shared_libs'] += gn_libs_to_blueprint_shared_libraries(target_info)
bp['local_include_dirs'] = gn_include_dirs_to_blueprint_include_dirs(target_info)
bp['cflags'] = gn_cflags_to_blueprint_cflags(target_info)
bp['sdk_version'] = sdk_version
bp['stl'] = stl
if target in root_targets:
bp['vendor'] = True
bp['target'] = {'android': {'relative_install_path': 'egl'}}
bps_for_abis[abi] = bp
common_bp = merge_bps(bps_for_abis)
return blueprint_type, common_bp
def gn_action_args_to_blueprint_args(blueprint_inputs, blueprint_outputs, args):
# TODO: pass the gn gen folder as an arg so we know how to get from the gen path to the root
# path. b/150457277
remap_folders = [
# Specific special-cases first, since the other will strip the prefixes.
('gen/third_party/vulkan-deps/glslang/src/include/glslang/build_info.h',
'glslang/build_info.h'),
('third_party/vulkan-deps/glslang/src',
'external/angle/third_party/vulkan-deps/glslang/src'),
('../../', ''),
('gen/', ''),
]
result_args = []
for arg in args:
# Attempt to find if this arg is a path to one of the inputs. If it is, use the blueprint
# $(location <path>) argument instead so the path gets remapped properly to the location
# that the script is run from
remapped_path_arg = arg
for (remap_source, remap_dest) in remap_folders:
remapped_path_arg = remapped_path_arg.replace(remap_source, remap_dest)
if remapped_path_arg in blueprint_inputs or remapped_path_arg in blueprint_outputs:
result_args.append('$(location %s)' % remapped_path_arg)
elif os.path.basename(remapped_path_arg) in blueprint_outputs:
result_args.append('$(location %s)' % os.path.basename(remapped_path_arg))
else:
result_args.append(remapped_path_arg)
return result_args
blueprint_gen_types = {
"action": "cc_genrule",
}
inputs_blocklist = [
'//.git/HEAD',
]
outputs_remap = {
'build_info.h': 'glslang/build_info.h',
}
def is_input_in_tool_files(tool_files, input):
return input in tool_files
def action_target_to_blueprint(target, build_info):
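    """Convert a GN action target into a cc_genrule blueprint module."""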
target_info = build_info[target]
blueprint_type = blueprint_gen_types[target_info['type']]
bp = {'name': gn_target_to_blueprint_target(target, target_info)}
# Blueprints use only one 'srcs', merge all gn inputs into one list.
gn_inputs = []
if 'inputs' in target_info:
for input in target_info['inputs']:
if input not in inputs_blocklist:
gn_inputs.append(input)
if 'sources' in target_info:
gn_inputs += target_info['sources']
# Filter out the 'script' entry since Android.bp doesn't like the duplicate entries
if 'script' in target_info:
gn_inputs = [
input for input in gn_inputs
if not is_input_in_tool_files(target_info['script'], input)
]
bp_srcs = gn_paths_to_blueprint_paths(gn_inputs)
bp['srcs'] = bp_srcs
# genrules generate the output right into the 'root' directory. Strip any path before the
# file name.
bp_outputs = []
for gn_output in target_info['outputs']:
output = os.path.basename(gn_output)
if output in outputs_remap.keys():
output = outputs_remap[output]
bp_outputs.append(output)
bp['out'] = bp_outputs
bp['tool_files'] = [gn_path_to_blueprint_path(target_info['script'])]
# Generate the full command, $(location) refers to tool_files[0], the script
cmd = ['$(location)'] + gn_action_args_to_blueprint_args(bp_srcs, bp_outputs,
target_info['args'])
bp['cmd'] = ' '.join(cmd)
bp['sdk_version'] = sdk_version
return blueprint_type, bp
def gn_target_to_blueprint(target, build_info):
for abi in abi_targets:
gn_type = build_info[abi][target]['type']
if gn_type in blueprint_library_target_types:
return library_target_to_blueprint(target, build_info)
elif gn_type in blueprint_gen_types:
return action_target_to_blueprint(target, build_info[abi])
else:
# Target is not used by this ABI
continue
def get_gn_target_dependencies(output_dependencies, build_info, target):
if target not in output_dependencies:
output_dependencies.insert(0, target)
for dep in build_info[target]['deps']:
if dep in target_blockist:
# Blocklisted dep
continue
if dep not in build_info:
# No info for this dep, skip it
continue
# Recurse
get_gn_target_dependencies(output_dependencies, build_info, dep)
def main():
parser = argparse.ArgumentParser(
description='Generate Android blueprints from gn descriptions.')
for abi in abi_targets:
fixed_abi = abi
if abi == abi_x64:
fixed_abi = 'x64' # gn uses x64, rather than x86_64
parser.add_argument(
'gn_json_' + fixed_abi,
help=fixed_abi +
'gn desc in json format. Generated with \'gn desc <out_dir> --format=json "*"\'.')
args = vars(parser.parse_args())
build_info = {}
for abi in abi_targets:
fixed_abi = abi
if abi == abi_x64:
fixed_abi = 'x64' # gn uses x64, rather than x86_64
with open(args['gn_json_' + fixed_abi], 'r') as f:
build_info[abi] = json.load(f)
targets_to_write = []
for abi in abi_targets:
for root_target in root_targets:
get_gn_target_dependencies(targets_to_write, build_info[abi], root_target)
blueprint_targets = []
for target in targets_to_write:
blueprint_targets.append(gn_target_to_blueprint(target, build_info))
# Add APKs with all of the root libraries
blueprint_targets.append(('filegroup', {
'name': 'ANGLE_srcs',
'srcs': ['src/**/*.java',],
}))
blueprint_targets.append((
'java_defaults',
{
'name': 'ANGLE_java_defaults',
'sdk_version': 'system_current',
'min_sdk_version': sdk_version,
'compile_multilib': 'both',
'use_embedded_native_libs': True,
'jni_libs': [
# hack: assume abi_arm
gn_target_to_blueprint_target(target, build_info[abi_arm][target])
for target in root_targets
],
'aaptflags': [
# Don't compress *.json files
'-0 .json',
# Give com.android.angle.common Java files access to the R class
'--extra-packages com.android.angle.common',
],
'srcs': [':ANGLE_srcs'],
'plugins': ['java_api_finder',],
'privileged': True,
'owner': 'google',
}))
blueprint_targets.append((
'android_library',
{
'name': 'ANGLE_library',
'sdk_version': 'system_current',
'min_sdk_version': sdk_version,
'resource_dirs': ['src/android_system_settings/res',],
'asset_dirs': ['src/android_system_settings/assets',],
'aaptflags': [
# Don't compress *.json files
'-0 .json',
],
'manifest': 'src/android_system_settings/src/com/android/angle/AndroidManifest.xml',
'static_libs': ['androidx.preference_preference',],
}))
blueprint_targets.append(('android_app', {
'name': 'ANGLE',
'defaults': ['ANGLE_java_defaults'],
'static_libs': ['ANGLE_library'],
'manifest': 'src/android_system_settings/src/com/android/angle/AndroidManifest.xml',
}))
output = [
"""// GENERATED FILE - DO NOT EDIT.
// Generated by %s
//
// Copyright 2020 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
""" % sys.argv[0]
]
for (blueprint_type, blueprint_data) in blueprint_targets:
write_blueprint(output, blueprint_type, blueprint_data)
print('\n'.join(output))
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | -6,153,206,636,641,283,000 | 33.013841 | 119 | 0.588708 | false |
biothings/biothings_explorer | tests/test_apis/test_cordmolecularactivity.py | 1 | 4943 | import unittest
from biothings_explorer.registry import Registry
from biothings_explorer.user_query_dispatcher import SingleEdgeQueryDispatcher
from .utils import get_apis
reg = Registry()
class TestSingleHopQuery(unittest.TestCase):
def test_ma2protein(self):
"""Test gene-protein"""
seqd = SingleEdgeQueryDispatcher(
output_cls="Protein",
input_cls="MolecularActivity",
input_id="GO",
pred="related_to",
values="GO:0050626",
)
seqd.query()
self.assertTrue("PR:000015198" in seqd.G)
edges = seqd.G["GO:GO:0050626"]["PR:000015198"]
self.assertTrue("CORD Molecular Activity API" in get_apis(edges))
def test_ma2genomicentity(self):
"""Test gene-protein"""
seqd = SingleEdgeQueryDispatcher(
output_cls="GenomicEntity",
input_cls="MolecularActivity",
pred="related_to",
input_id="GO",
output_id="SO",
values="GO:0050626",
)
seqd.query()
self.assertTrue("SO:0000121" in seqd.G)
self.assertTrue("SO:0000167" in seqd.G)
def test_ma2chemicalsubstance(self):
"""Test gene-genomic entity"""
seqd = SingleEdgeQueryDispatcher(
output_cls="ChemicalSubstance",
input_cls="MolecularActivity",
input_id="GO",
output_id="GO",
values="GO:0050626",
)
seqd.query()
self.assertTrue("CHEBI:50526" in seqd.G)
edges = seqd.G["GO:GO:0050626"]["CHEBI:50526"]
self.assertTrue("CORD Molecular Activity API" in get_apis(edges))
def test_ma2gene(self):
"""Test gene-gene"""
seqd = SingleEdgeQueryDispatcher(
output_cls="Gene",
input_cls="MolecularActivity",
input_id="GO",
values="GO:0050626",
)
seqd.query()
self.assertTrue("CD55" in seqd.G)
self.assertTrue("AKT1" in seqd.G)
edges = seqd.G["GO:GO:0050626"]["CD55"]
self.assertTrue("CORD Molecular Activity API" in get_apis(edges))
def test_ma2anatomy(self):
"""Test gene-anatomy"""
seqd = SingleEdgeQueryDispatcher(
output_cls="AnatomicalEntity",
input_cls="MolecularActivity",
input_id="GO",
output_id="UBERON",
values="GO:0050626",
)
seqd.query()
self.assertTrue("UBERON:0000062" in seqd.G)
edges = seqd.G["GO:GO:0050626"]["UBERON:0000062"]
self.assertTrue("CORD Molecular Activity API" in get_apis(edges))
def test_ma2ma(self):
"""Test gene-molecular_activity"""
seqd = SingleEdgeQueryDispatcher(
output_cls="MolecularActivity",
input_cls="MolecularActivity",
input_id="GO",
output_id="MOP",
values="GO:0050626",
)
seqd.query()
self.assertTrue("MOP:0000797" in seqd.G)
edges = seqd.G["GO:GO:0050626"]["MOP:0000797"]
self.assertTrue("CORD Molecular Activity API" in get_apis(edges))
def test_ma2bp(self):
"""Test gene-biological_process"""
seqd = SingleEdgeQueryDispatcher(
output_cls="BiologicalProcess",
input_cls="MolecularActivity",
input_id="GO",
output_id="GO",
values="GO:0050626",
)
seqd.query()
self.assertTrue("GO:0006935" in seqd.G)
edges = seqd.G["GO:GO:0050626"]["GO:0006935"]
self.assertTrue("CORD Molecular Activity API" in get_apis(edges))
def test_ma2cc(self):
"""Test gene-cellular_component"""
seqd = SingleEdgeQueryDispatcher(
output_cls="CellularComponent",
input_cls="MolecularActivity",
input_id="GO",
output_id="GO",
values="GO:0050626",
)
seqd.query()
self.assertTrue("GO:0005790" in seqd.G)
edges = seqd.G["GO:GO:0050626"]["GO:0005790"]
self.assertTrue("CORD Molecular Activity API" in get_apis(edges))
def test_ma2cell(self):
"""Test gene-cell"""
seqd = SingleEdgeQueryDispatcher(
output_cls="Cell",
input_cls="MolecularActivity",
input_id="GO",
output_id="CL",
values="GO:0050626",
)
seqd.query()
self.assertTrue("CL:0000097" in seqd.G)
def test_ma2disease(self):
"""Test gene-disease"""
seqd = SingleEdgeQueryDispatcher(
output_cls="Disease",
input_cls="MolecularActivity",
input_id="GO",
output_id="DOID",
values="GO:0050626",
)
seqd.query()
self.assertTrue("DOID:1883" in seqd.G)
edges = seqd.G["GO:GO:0050626"]["DOID:1883"]
self.assertTrue("CORD Molecular Activity API" in get_apis(edges))
| apache-2.0 | 8,533,805,272,853,411,000 | 32.856164 | 78 | 0.564839 | false |
ESOedX/edx-platform | openedx/core/lib/blockstore_api/methods.py | 1 | 12782 | """
API Client methods for working with Blockstore bundles and drafts
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import base64
from uuid import UUID
import dateutil.parser
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
import requests
import six
from .models import (
Bundle,
Collection,
Draft,
BundleFile,
DraftFile,
LinkDetails,
LinkReference,
DraftLinkDetails,
)
from .exceptions import (
NotFound,
CollectionNotFound,
BundleNotFound,
DraftNotFound,
BundleFileNotFound,
)
def api_url(*path_parts):
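    """Build a Blockstore API URL from path components, e.g. api_url('bundles', str(bundle_uuid))."""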
if not settings.BLOCKSTORE_API_URL or not settings.BLOCKSTORE_API_URL.endswith('/api/v1/'):
raise ImproperlyConfigured('BLOCKSTORE_API_URL must be set and should end with /api/v1/')
return settings.BLOCKSTORE_API_URL + '/'.join(path_parts)
def api_request(method, url, **kwargs):
"""
Helper method for making a request to the Blockstore REST API
"""
if not settings.BLOCKSTORE_API_AUTH_TOKEN:
raise ImproperlyConfigured("Cannot use Blockstore unless BLOCKSTORE_API_AUTH_TOKEN is set.")
kwargs.setdefault('headers', {})['Authorization'] = "Token {}".format(settings.BLOCKSTORE_API_AUTH_TOKEN)
response = requests.request(method, url, **kwargs)
if response.status_code == 404:
raise NotFound
response.raise_for_status()
if response.status_code == 204:
return None # No content
return response.json()
def _collection_from_response(data):
"""
Given data about a Collection returned by any blockstore REST API, convert it to
a Collection instance.
"""
return Collection(uuid=UUID(data['uuid']), title=data['title'])
def _bundle_from_response(data):
"""
Given data about a Bundle returned by any blockstore REST API, convert it to
a Bundle instance.
"""
return Bundle(
uuid=UUID(data['uuid']),
title=data['title'],
description=data['description'],
slug=data['slug'],
# drafts: Convert from a dict of URLs to a dict of UUIDs:
drafts={draft_name: UUID(url.split('/')[-1]) for (draft_name, url) in data['drafts'].items()},
# versions field: take the last one and convert it from URL to an int
# i.e.: [..., 'https://blockstore/api/v1/bundle_versions/bundle_uuid,15'] -> 15
latest_version=int(data['versions'][-1].split(',')[-1]) if data['versions'] else 0,
)
def _draft_from_response(data):
"""
Given data about a Draft returned by any blockstore REST API, convert it to
a Draft instance.
"""
return Draft(
uuid=UUID(data['uuid']),
bundle_uuid=UUID(data['bundle_uuid']),
name=data['name'],
updated_at=dateutil.parser.parse(data['staged_draft']['updated_at']),
files={
path: DraftFile(path=path, **file)
for path, file in data['staged_draft']['files'].items()
},
links={
name: DraftLinkDetails(
name=name,
direct=LinkReference(**link["direct"]),
indirect=[LinkReference(**ind) for ind in link["indirect"]],
modified=link["modified"],
)
for name, link in data['staged_draft']['links'].items()
}
)
def get_collection(collection_uuid):
"""
Retrieve metadata about the specified collection
Raises CollectionNotFound if the collection does not exist
"""
assert isinstance(collection_uuid, UUID)
try:
data = api_request('get', api_url('collections', str(collection_uuid)))
except NotFound:
raise CollectionNotFound("Collection {} does not exist.".format(collection_uuid))
return _collection_from_response(data)
def create_collection(title):
"""
Create a new collection.
"""
result = api_request('post', api_url('collections'), json={"title": title})
return _collection_from_response(result)
def update_collection(collection_uuid, title):
"""
Update a collection's title
"""
assert isinstance(collection_uuid, UUID)
data = {"title": title}
result = api_request('patch', api_url('collections', str(collection_uuid)), json=data)
return _collection_from_response(result)
def delete_collection(collection_uuid):
"""
Delete a collection
"""
assert isinstance(collection_uuid, UUID)
api_request('delete', api_url('collections', str(collection_uuid)))
def get_bundle(bundle_uuid):
"""
Retrieve metadata about the specified bundle
Raises BundleNotFound if the bundle does not exist
"""
assert isinstance(bundle_uuid, UUID)
try:
data = api_request('get', api_url('bundles', str(bundle_uuid)))
except NotFound:
raise BundleNotFound("Bundle {} does not exist.".format(bundle_uuid))
return _bundle_from_response(data)
def create_bundle(collection_uuid, slug, title="New Bundle", description=""):
"""
Create a new bundle.
Note that description is currently required.
"""
result = api_request('post', api_url('bundles'), json={
"collection_uuid": str(collection_uuid),
"slug": slug,
"title": title,
"description": description,
})
return _bundle_from_response(result)
def update_bundle(bundle_uuid, **fields):
"""
Update a bundle's title, description, slug, or collection.
"""
assert isinstance(bundle_uuid, UUID)
data = {}
# Most validation will be done by Blockstore, so we don't worry too much about data validation
for str_field in ("title", "description", "slug"):
if str_field in fields:
data[str_field] = fields.pop(str_field)
if "collection_uuid" in fields:
data["collection_uuid"] = str(fields.pop("collection_uuid"))
if fields:
raise ValueError("Unexpected extra fields passed to update_bundle: {}".format(fields.keys()))
result = api_request('patch', api_url('bundles', str(bundle_uuid)), json=data)
return _bundle_from_response(result)
def delete_bundle(bundle_uuid):
"""
Delete a bundle
"""
assert isinstance(bundle_uuid, UUID)
api_request('delete', api_url('bundles', str(bundle_uuid)))
def get_draft(draft_uuid):
"""
Retrieve metadata about the specified draft.
If you don't know the draft's UUID, look it up using get_bundle()
"""
assert isinstance(draft_uuid, UUID)
try:
data = api_request('get', api_url('drafts', str(draft_uuid)))
except NotFound:
raise DraftNotFound("Draft does not exist: {}".format(draft_uuid))
return _draft_from_response(data)
def get_or_create_bundle_draft(bundle_uuid, draft_name):
"""
    Get the draft with the given name for the specified bundle, creating it if it does not yet exist.
"""
bundle = get_bundle(bundle_uuid)
try:
return get_draft(bundle.drafts[draft_name]) # pylint: disable=unsubscriptable-object
except KeyError:
# The draft doesn't exist yet, so create it:
response = api_request('post', api_url('drafts'), json={
"bundle_uuid": str(bundle_uuid),
"name": draft_name,
})
# The result of creating a draft doesn't include all the fields we want, so retrieve it now:
return get_draft(UUID(response["uuid"]))
def commit_draft(draft_uuid):
"""
Commit all of the pending changes in the draft, creating a new version of
the associated bundle.
Does not return any value.
"""
api_request('post', api_url('drafts', str(draft_uuid), 'commit'))
def delete_draft(draft_uuid):
"""
Delete the specified draft, removing any staged changes/files/deletes.
Does not return any value.
"""
api_request('delete', api_url('drafts', str(draft_uuid)))
def get_bundle_version_files(bundle_uuid, version_number):
"""
Get a list of the files in the specified bundle version
"""
if version_number == 0:
return []
version_url = api_url('bundle_versions', str(bundle_uuid) + ',' + str(version_number))
version_info = api_request('get', version_url)
return [BundleFile(path=path, **file_metadata) for path, file_metadata in version_info["snapshot"]["files"].items()]
def get_bundle_version_links(bundle_uuid, version_number):
"""
Get a dictionary of the links in the specified bundle version
"""
if version_number == 0:
        return {}
version_url = api_url('bundle_versions', str(bundle_uuid) + ',' + str(version_number))
version_info = api_request('get', version_url)
return {
name: LinkDetails(
name=name,
direct=LinkReference(**link["direct"]),
indirect=[LinkReference(**ind) for ind in link["indirect"]],
)
for name, link in version_info['snapshot']['links'].items()
}
def get_bundle_files_dict(bundle_uuid, use_draft=None):
"""
Get a dict of all the files in the specified bundle.
Returns a dict where the keys are the paths (strings) and the values are
BundleFile or DraftFile tuples.
"""
bundle = get_bundle(bundle_uuid)
if use_draft and use_draft in bundle.drafts: # pylint: disable=unsupported-membership-test
draft_uuid = bundle.drafts[use_draft] # pylint: disable=unsubscriptable-object
return get_draft(draft_uuid).files
elif not bundle.latest_version:
# This bundle has no versions so definitely does not contain any files
return {}
else:
return {file_meta.path: file_meta for file_meta in get_bundle_version_files(bundle_uuid, bundle.latest_version)}
def get_bundle_files(bundle_uuid, use_draft=None):
"""
Get a flat list of all the files in the specified bundle or draft.
"""
return get_bundle_files_dict(bundle_uuid, use_draft).values()
def get_bundle_links(bundle_uuid, use_draft=None):
"""
Get a dict of all the links in the specified bundle.
Returns a dict where the keys are the link names (strings) and the values
are LinkDetails or DraftLinkDetails tuples.
"""
bundle = get_bundle(bundle_uuid)
if use_draft and use_draft in bundle.drafts: # pylint: disable=unsupported-membership-test
draft_uuid = bundle.drafts[use_draft] # pylint: disable=unsubscriptable-object
return get_draft(draft_uuid).links
elif not bundle.latest_version:
# This bundle has no versions so definitely does not contain any links
return {}
else:
return get_bundle_version_links(bundle_uuid, bundle.latest_version)
def get_bundle_file_metadata(bundle_uuid, path, use_draft=None):
"""
Get the metadata of the specified file.
"""
assert isinstance(bundle_uuid, UUID)
files_dict = get_bundle_files_dict(bundle_uuid, use_draft=use_draft)
try:
return files_dict[path]
except KeyError:
raise BundleFileNotFound(
"Bundle {} (draft: {}) does not contain a file {}".format(bundle_uuid, use_draft, path)
)
def get_bundle_file_data(bundle_uuid, path, use_draft=None):
"""
Read all the data in the given bundle file and return it as a
binary string.
Do not use this for large files!
"""
metadata = get_bundle_file_metadata(bundle_uuid, path, use_draft)
with requests.get(metadata.url, stream=True) as r:
return r.content
def write_draft_file(draft_uuid, path, contents):
"""
Create or overwrite the file at 'path' in the specified draft with the given
contents. To delete a file, pass contents=None.
If you don't know the draft's UUID, look it up using
get_or_create_bundle_draft()
Does not return anything.
"""
api_request('patch', api_url('drafts', str(draft_uuid)), json={
'files': {
path: encode_str_for_draft(contents) if contents is not None else None,
},
})
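# Illustrative sketch (an addition, not part of the original API): the usual
# edit cycle chains the helpers above -- look up or create a named draft,
# stage a file into it, then commit to cut a new bundle version.
def _example_stage_and_commit(bundle_uuid, draft_name, path, contents):
    """Hypothetical helper showing a common write workflow."""
    draft = get_or_create_bundle_draft(bundle_uuid, draft_name)
    write_draft_file(draft.uuid, path, contents)
    commit_draft(draft.uuid)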
def set_draft_link(draft_uuid, link_name, bundle_uuid, version):
"""
Create or replace the link with the given name in the specified draft so
that it points to the specified bundle version. To delete a link, pass
bundle_uuid=None, version=None.
If you don't know the draft's UUID, look it up using
get_or_create_bundle_draft()
Does not return anything.
"""
api_request('patch', api_url('drafts', str(draft_uuid)), json={
'links': {
link_name: {"bundle_uuid": str(bundle_uuid), "version": version} if bundle_uuid is not None else None,
},
})
def encode_str_for_draft(input_str):
"""
Given a string, return UTF-8 representation that is then base64 encoded.
"""
if isinstance(input_str, six.text_type):
binary = input_str.encode('utf8')
else:
binary = input_str
return base64.b64encode(binary)
| agpl-3.0 | 7,530,887,622,473,606,000 | 31.607143 | 120 | 0.649664 | false |
ahmadia/binder | web/app.py | 1 | 5636 | import Queue
import json
import signal
import time
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.web import Application, RequestHandler
from tornado.httpserver import HTTPServer
from binder.service import Service
from binder.app import App
from binder.cluster import ClusterManager
from .builder import Builder
# TODO move settings into a config file
PORT = 8080
NUM_WORKERS = 10
PRELOAD = True
QUEUE_SIZE = 50
ALLOW_ORIGIN = True
build_queue = Queue.Queue(QUEUE_SIZE)
class BinderHandler(RequestHandler):
def get(self):
if ALLOW_ORIGIN:
self.set_header('Access-Control-Allow-Origin', '*')
def post(self):
if ALLOW_ORIGIN:
self.set_header('Access-Control-Allow-Origin', '*')
class BuildHandler(BinderHandler):
def _is_malformed(self, spec):
# by default, there aren't any required fields in an app specification
pass
def _write_build_state(self, app):
if app.build_state == App.BuildState.BUILDING:
self.write({"build_status": "building"})
elif app.build_state == App.BuildState.FAILED:
self.write({"build_status": "failed"})
elif app.build_state == App.BuildState.COMPLETED:
self.write({"build_status": "completed"})
else:
self.write({"build_status": "unknown"})
class GithubHandler(BuildHandler):
def _is_malformed(self, spec):
# in the GithubHandler, the repo field is inferred from organization/repo
return "repo" in spec
def _make_app_name(self, organization, repo):
return (organization + "-" + repo).lower()
class GithubStatusHandler(GithubHandler):
def get(self, organization, repo):
super(GithubStatusHandler, self).get()
app_name = self._make_app_name(organization, repo)
app = App.get_app(app_name)
if not app:
self.set_status(404)
self.write({"error": "app does not exist"})
else:
self._write_build_state(app)
class GithubBuildHandler(GithubHandler):
@gen.coroutine
def get(self, organization, repo):
# if the app is still building, return an error. If the app is built, deploy it and return
# the redirect url
        super(GithubBuildHandler, self).get()
app_name = self._make_app_name(organization, repo)
app = App.get_app(app_name)
if app and app.build_state == App.BuildState.COMPLETED:
redirect_url = app.deploy("single-node")
self.write({"redirect_url": redirect_url})
else:
self.set_status(404)
self.write({"error": "no app available to deploy"})
def post(self, organization, repo):
# if the spec is properly formed, create/build the app
super(GithubBuildHandler, self).post()
print("request.body: {}".format(self.request.body))
spec = json.loads(self.request.body)
if self._is_malformed(spec):
self.set_status(400)
self.write({"error": "malformed app specification"})
else:
try:
spec["name"] = self._make_app_name(organization, repo).lower()
spec["repo"] = "https://www.github.com/{0}/{1}".format(organization, repo)
build_queue.put(spec)
self.write({"success": "app submitted to build queue"})
except Queue.Full:
self.write({"error": "build queue full"})
class OtherSourceHandler(BuildHandler):
def get(self, app_id):
pass
def post(self, app_id):
pass
class ServicesHandler(BinderHandler):
def get(self):
super(ServicesHandler, self).get()
services = Service.get_service()
self.write({"services": [service.full_name for service in services]})
class AppsHandler(BinderHandler):
def get(self):
super(AppsHandler, self).get()
apps = App.get_app()
self.write({"apps": [app.name for app in apps]})
class CapacityHandler(BinderHandler):
POLL_PERIOD = 3600
cached_capacity = None
last_poll = None
def get(self):
super(CapacityHandler, self).get()
cm = ClusterManager.get_instance()
# don't count the default and kube-system namespaces
running = len(cm.get_running_apps()) - 3
if not self.last_poll or not self.cached_capacity or\
time.time() - self.last_poll > CapacityHandler.POLL_PERIOD:
capacity = cm.get_total_capacity()
CapacityHandler.cached_capacity = capacity
CapacityHandler.last_poll = time.time()
self.write({"capacity": self.cached_capacity, "running": running})
def sig_handler(sig, frame):
IOLoop.instance().add_callback(shutdown)
def shutdown():
print("Shutting down...")
IOLoop.instance().stop()
builder.stop()
def main():
application = Application([
(r"/apps/(?P<organization>.+)/(?P<repo>.+)/status", GithubStatusHandler),
(r"/apps/(?P<organization>.+)/(?P<repo>.+)", GithubBuildHandler),
(r"/apps/(?P<app_id>.+)", OtherSourceHandler),
(r"/services", ServicesHandler),
(r"/apps", AppsHandler),
(r"/capacity", CapacityHandler)
], debug=True)
signal.signal(signal.SIGTERM, sig_handler)
signal.signal(signal.SIGINT, sig_handler)
global builder
builder = Builder(build_queue, PRELOAD)
builder.start()
http_server = HTTPServer(application)
http_server.listen(PORT)
print("Binder API server running on port {}".format(PORT))
IOLoop.current().start()
if __name__ == "__main__":
main()
| apache-2.0 | -671,810,061,599,228,800 | 29.464865 | 98 | 0.625798 | false |
madsmpedersen/MMPE | datastructures/unittests/test_caching.py | 1 | 1854 | '''
Created on 08/11/2013
@author: mmpe
'''
import multiprocessing
import time
import unittest
from mmpe.functions.timing import get_time
from mmpe.datastructures.caching import cache_function, set_cache_property
class Example(object):
def __init__(self, *args, **kwargs):
object.__init__(self, *args, **kwargs)
set_cache_property(self, "test", self.slow_function)
set_cache_property(self, 'pool', lambda : multiprocessing.Pool(20))
def slow_function(self):
time.sleep(1)
return 1
@cache_function
def test_cache_function(self):
return self.slow_function()
@get_time
def prop(self, prop):
return getattr(self, prop)
def f(x):
return x ** 2
class TestCacheProperty(unittest.TestCase):
def setUp(self):
pass
def testcache_property_test(self):
e = Example()
self.assertGreaterEqual(e.prop("test")[1], 1) # run test
self.assertAlmostEqual(e.prop("test")[1], 0, 2) # return cache result
def testcache_property_pool(self):
e = Example()
e.prop("pool") # loads pool
self.assertAlmostEqual(e.prop("pool")[1], 0, places=2) # cache result
#print (get_time(e.pool.map)(f, range(10)))
def test_cache_function(self):
e = Example()
self.assertAlmostEqual(get_time(e.test_cache_function)()[1], 1, places=2)
self.assertAlmostEqual(get_time(e.test_cache_function)()[1], 0, places=2)
self.assertAlmostEqual(get_time(e.test_cache_function)(reload=True)[1], 1, places=2)
self.assertAlmostEqual(get_time(e.test_cache_function)()[1], 0, places=2)
e.clear_cache()
self.assertAlmostEqual(get_time(e.test_cache_function)()[1], 1, places=2)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| apache-2.0 | -2,271,402,412,449,235,500 | 25.112676 | 92 | 0.62945 | false |
mithro/HDMI2USB-litex-firmware | targets/neso/base.py | 1 | 5595 | # Support for the Numato Neso Artix 7 100T Board
from migen import *
from migen.genlib.resetsync import AsyncResetSynchronizer
from litex.soc.integration.soc_core import mem_decoder
from litex.soc.integration.soc_sdram import *
from litex.soc.integration.builder import *
from litedram.modules import MT41K128M16
from litedram.phy import a7ddrphy
from litedram.core import ControllerSettings
from gateware import info
from gateware import spi_flash
from targets.utils import csr_map_update, period_ns
class _CRG(Module):
def __init__(self, platform):
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_sys4x = ClockDomain(reset_less=True)
self.clock_domains.cd_sys4x_dqs = ClockDomain(reset_less=True)
self.clock_domains.cd_clk200 = ClockDomain()
self.clock_domains.cd_clk50 = ClockDomain()
clk100 = platform.request("clk100")
pll_locked = Signal()
pll_fb = Signal()
self.pll_sys = Signal()
pll_sys4x = Signal()
pll_sys4x_dqs = Signal()
pll_clk200 = Signal()
pll_clk50 = Signal()
self.specials += [
Instance("PLLE2_BASE",
p_STARTUP_WAIT="FALSE", o_LOCKED=pll_locked,
# VCO @ 1600 MHz
p_REF_JITTER1=0.01, p_CLKIN1_PERIOD=10.0,
p_CLKFBOUT_MULT=16, p_DIVCLK_DIVIDE=1,
i_CLKIN1=clk100, i_CLKFBIN=pll_fb, o_CLKFBOUT=pll_fb,
# 100 MHz
p_CLKOUT0_DIVIDE=16, p_CLKOUT0_PHASE=0.0,
o_CLKOUT0=self.pll_sys,
# 400 MHz
p_CLKOUT1_DIVIDE=4, p_CLKOUT1_PHASE=0.0,
o_CLKOUT1=pll_sys4x,
# 400 MHz dqs
p_CLKOUT2_DIVIDE=4, p_CLKOUT2_PHASE=90.0,
o_CLKOUT2=pll_sys4x_dqs,
# 200 MHz
p_CLKOUT3_DIVIDE=8, p_CLKOUT3_PHASE=0.0,
o_CLKOUT3=pll_clk200,
# 50MHz
p_CLKOUT4_DIVIDE=32, p_CLKOUT4_PHASE=0.0,
o_CLKOUT4=pll_clk50
),
Instance("BUFG", i_I=self.pll_sys, o_O=self.cd_sys.clk),
Instance("BUFG", i_I=pll_sys4x, o_O=self.cd_sys4x.clk),
Instance("BUFG", i_I=pll_sys4x_dqs, o_O=self.cd_sys4x_dqs.clk),
Instance("BUFG", i_I=pll_clk200, o_O=self.cd_clk200.clk),
Instance("BUFG", i_I=pll_clk50, o_O=self.cd_clk50.clk),
AsyncResetSynchronizer(self.cd_sys, ~pll_locked),
AsyncResetSynchronizer(self.cd_clk200, ~pll_locked),
AsyncResetSynchronizer(self.cd_clk50, ~pll_locked),
]
reset_counter = Signal(4, reset=15)
ic_reset = Signal(reset=1)
self.sync.clk200 += \
If(reset_counter != 0,
reset_counter.eq(reset_counter - 1)
).Else(
ic_reset.eq(0)
)
self.specials += Instance("IDELAYCTRL", i_REFCLK=ClockSignal("clk200"), i_RST=ic_reset)
class BaseSoC(SoCSDRAM):
csr_peripherals = (
"spiflash",
"ddrphy",
"info",
)
csr_map_update(SoCSDRAM.csr_map, csr_peripherals)
mem_map = {
"spiflash": 0x20000000, # (default shadow @0xa0000000)
}
mem_map.update(SoCSDRAM.mem_map)
def __init__(self, platform, spiflash="spiflash_1x", **kwargs):
if 'integrated_rom_size' not in kwargs:
kwargs['integrated_rom_size']=0x8000
if 'integrated_sram_size' not in kwargs:
kwargs['integrated_sram_size']=0x8000
clk_freq = int(100e6)
SoCSDRAM.__init__(self, platform, clk_freq, **kwargs)
self.submodules.crg = _CRG(platform)
self.crg.cd_sys.clk.attr.add("keep")
self.platform.add_period_constraint(self.crg.cd_sys.clk, period_ns(clk_freq))
# Basic peripherals
self.submodules.info = info.Info(platform, self.__class__.__name__)
# spi flash
spiflash_pads = platform.request(spiflash)
spiflash_pads.clk = Signal()
self.specials += Instance("STARTUPE2",
i_CLK=0, i_GSR=0, i_GTS=0, i_KEYCLEARB=0, i_PACK=0,
i_USRCCLKO=spiflash_pads.clk, i_USRCCLKTS=0, i_USRDONEO=1, i_USRDONETS=1)
spiflash_dummy = {
"spiflash_1x": 9,
"spiflash_4x": 11,
}
self.submodules.spiflash = spi_flash.SpiFlash(
spiflash_pads,
dummy=spiflash_dummy[spiflash],
div=2)
self.add_constant("SPIFLASH_PAGE_SIZE", 256)
self.add_constant("SPIFLASH_SECTOR_SIZE", 0x10000)
self.add_wb_slave(mem_decoder(self.mem_map["spiflash"]), self.spiflash.bus)
self.add_memory_region(
"spiflash", self.mem_map["spiflash"] | self.shadow_base, 16*1024*1024)
# sdram
sdram_module = MT41K128M16(self.clk_freq, "1:4")
self.submodules.ddrphy = a7ddrphy.A7DDRPHY(
platform.request("ddram"))
self.add_constant("READ_LEVELING_BITSLIP", 3)
self.add_constant("READ_LEVELING_DELAY", 14)
controller_settings = ControllerSettings(
with_bandwidth=True,
cmd_buffer_depth=8,
with_refresh=True)
self.register_sdram(self.ddrphy,
sdram_module.geom_settings,
sdram_module.timing_settings,
controller_settings=controller_settings)
SoC = BaseSoC
| bsd-2-clause | 329,493,176,064,576,200 | 35.809211 | 107 | 0.562645 | false |
ColtonProvias/sqlalchemy-jsonapi | sqlalchemy_jsonapi/unittests/declarative_tests/test_serialize.py | 1 | 20770 | """Tests for declarative JSONAPISerializer serialize method."""
import unittest
import datetime
from sqlalchemy import (
create_engine, Column, String, Integer, ForeignKey, Boolean, DateTime)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import backref, relationship, sessionmaker
from sqlalchemy_jsonapi.declarative import serializer
class SerializeResourcesWithoutRelatedModels(unittest.TestCase):
"""Tests for serializing a resource that has no related models."""
def setUp(self):
"""Configure sqlalchemy and session."""
self.engine = create_engine('sqlite://')
Session = sessionmaker(bind=self.engine)
self.session = Session()
self.Base = declarative_base()
class User(self.Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
first_name = Column(String(50), nullable=False)
age = Column(Integer, nullable=False)
username = Column(String(50), unique=True, nullable=False)
is_admin = Column(Boolean, default=False)
date_joined = Column(DateTime)
self.User = User
self.Base.metadata.create_all(self.engine)
def tearDown(self):
"""Reset the sqlalchemy engine."""
self.Base.metadata.drop_all(self.engine)
def test_serialize_single_resource_with_only_id_field(self):
"""Serialize a resource with only an 'id' field.
If attributes, other than 'id', are not specified in fields,
then the attributes remain an empty object.
"""
class UserSerializer(serializer.JSONAPISerializer):
"""Declarative serializer for User."""
fields = ['id']
model = self.User
dasherize = True
user = self.User(
first_name='Sally', age=27, is_admin=True,
username='SallySmith1', date_joined=datetime.date(2017, 12, 5))
self.session.add(user)
self.session.commit()
user = self.session.query(self.User).get(user.id)
user_serializer = UserSerializer()
serialized_data = user_serializer.serialize(user)
expected_data = {
'data': {
'id': str(user.id),
'type': user.__tablename__,
'attributes': {},
'relationships': {}
},
'meta': {
'sqlalchemy_jsonapi_version': '4.0.9'
},
'jsonapi': {
'version': '1.0'
}
}
self.assertEqual(expected_data, serialized_data)
def test_serialize_single_resource_with_dasherize_true(self):
"""Serialize a resource where attributes are dasherized.
Attribute keys contain dashes instead of underscores.
"""
class UserSerializer(serializer.JSONAPISerializer):
"""Declarative serializer for User."""
fields = [
'id', 'first_name', 'username',
'age', 'date_joined', 'is_admin']
model = self.User
dasherize = True
user = self.User(
first_name='Sally', age=27, is_admin=True,
username='SallySmith1', date_joined=datetime.date(2017, 12, 5))
self.session.add(user)
self.session.commit()
user = self.session.query(self.User).get(user.id)
user_serializer = UserSerializer()
serialized_data = user_serializer.serialize(user)
expected_data = {
'data': {
'id': str(user.id),
'type': u'{}'.format(user.__tablename__),
'attributes': {
'date-joined': user.date_joined.isoformat(),
'username': u'{}'.format(user.username),
'age': user.age,
'first-name': u'{}'.format(user.first_name),
'is-admin': user.is_admin
},
'relationships': {}
},
'meta': {
'sqlalchemy_jsonapi_version': '4.0.9'
},
'jsonapi': {
'version': '1.0'
}
}
self.assertEqual(expected_data, serialized_data)
def test_serialize_single_resource_with_dasherize_false(self):
"""Serialize a resource where attributes are not dasherized.
Attribute keys are underscored like in serializer model.
"""
class UserSerializer(serializer.JSONAPISerializer):
"""Declarative serializer for User."""
fields = [
'id', 'first_name', 'username',
'age', 'date_joined', 'is_admin']
model = self.User
dasherize = False
user = self.User(
first_name='Sally', age=27, is_admin=True,
username='SallySmith1', date_joined=datetime.date(2017, 12, 5))
self.session.add(user)
self.session.commit()
user = self.session.query(self.User).get(user.id)
user_serializer = UserSerializer()
serialized_data = user_serializer.serialize(user)
expected_data = {
'data': {
'id': str(user.id),
'type': u'{}'.format(user.__tablename__),
'attributes': {
'date_joined': user.date_joined.isoformat(),
'username': u'{}'.format(user.username),
'age': user.age,
'first_name': u'{}'.format(user.first_name),
'is_admin': user.is_admin
},
'relationships': {}
},
'meta': {
'sqlalchemy_jsonapi_version': '4.0.9'
},
'jsonapi': {
'version': '1.0'
}
}
self.assertEqual(expected_data, serialized_data)
def test_serialize_collection_of_resources(self):
"""Serialize a collection of resources returns a list of objects."""
class UserSerializer(serializer.JSONAPISerializer):
"""Declarative serializer for User."""
fields = ['id']
model = self.User
dasherize = True
user = self.User(
first_name='Sally', age=27, is_admin=True,
username='SallySmith1', date_joined=datetime.date(2017, 12, 5))
self.session.add(user)
self.session.commit()
users = self.session.query(self.User)
user_serializer = UserSerializer()
serialized_data = user_serializer.serialize(users)
expected_data = {
'data': [{
'id': str(user.id),
'type': 'users',
'attributes': {},
'relationships': {}
}],
'meta': {
'sqlalchemy_jsonapi_version': '4.0.9'
},
'jsonapi': {
'version': '1.0'
}
}
        self.assertEqual(expected_data, serialized_data)
def test_serialize_empty_collection(self):
"""Serialize a collection that is empty returns an empty list."""
class UserSerializer(serializer.JSONAPISerializer):
"""Declarative serializer for User."""
fields = ['id']
model = self.User
dasherize = True
users = self.session.query(self.User)
user_serializer = UserSerializer()
serialized_data = user_serializer.serialize(users)
expected_data = {
'data': [],
'meta': {
'sqlalchemy_jsonapi_version': '4.0.9'
},
'jsonapi': {
'version': '1.0'
}
}
        self.assertEqual(expected_data, serialized_data)
def test_serialize_resource_not_found(self):
"""Serialize a resource that does not exist returns None."""
class UserSerializer(serializer.JSONAPISerializer):
"""Declarative serializer for User."""
fields = ['id']
model = self.User
dasherize = True
# Nonexistant user
user = self.session.query(self.User).get(99999999)
user_serializer = UserSerializer()
serialized_data = user_serializer.serialize(user)
expected_data = {
'data': None,
'meta': {
'sqlalchemy_jsonapi_version': '4.0.9'
},
'jsonapi': {
'version': '1.0'
}
}
self.assertEqual(expected_data, serialized_data)
class SerializeResourceWithRelatedModels(unittest.TestCase):
"""Tests for serializing a resource that has related models."""
def setUp(self):
"""Configure sqlalchemy and session."""
self.engine = create_engine('sqlite://')
Session = sessionmaker(bind=self.engine)
self.session = Session()
self.Base = declarative_base()
class User(self.Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
first_name = Column(String(50), nullable=False)
class Post(self.Base):
__tablename__ = 'posts'
id = Column(Integer, primary_key=True)
title = Column(String(100), nullable=False)
author_id = Column(Integer, ForeignKey('users.id',
ondelete='CASCADE'))
blog_author = relationship('User',
lazy='joined',
backref=backref('posts',
lazy='dynamic',
cascade='all,delete'))
self.User = User
self.Post = Post
self.Base.metadata.create_all(self.engine)
def tearDown(self):
"""Reset the sqlalchemy engine."""
self.Base.metadata.drop_all(self.engine)
def test_serialize_resource_with_to_many_relationship_success(self):
"""Serailize a resource with a to-many relationship."""
class UserSerializer(serializer.JSONAPISerializer):
"""Declarative serializer for User."""
fields = ['id', 'first_name']
model = self.User
user = self.User(first_name='Sally')
self.session.add(user)
self.session.commit()
user = self.session.query(self.User).get(user.id)
user_serializer = UserSerializer()
serialized_data = user_serializer.serialize(user)
expected_data = {
'data': {
'id': str(user.id),
'type': user.__tablename__,
'attributes': {
'first-name': u'{}'.format(user.first_name)
},
'relationships': {
'posts': {
'links': {
'self': '/users/1/relationships/posts',
'related': '/users/1/posts'
}
}
}
},
'meta': {
'sqlalchemy_jsonapi_version': '4.0.9'
},
'jsonapi': {
'version': '1.0'
}
}
self.assertEqual(expected_data, serialized_data)
def test_serialize_resource_with_to_one_relationship_success(self):
"""Serialize a resource with a to-one relationship."""
class PostSerializer(serializer.JSONAPISerializer):
"""Declarative serializer for Post."""
fields = ['id', 'title']
model = self.Post
blog_post = self.Post(title='Foo')
self.session.add(blog_post)
self.session.commit()
post = self.session.query(self.Post).get(blog_post.id)
blog_post_serializer = PostSerializer()
serialized_data = blog_post_serializer.serialize(post)
expected_data = {
'data': {
'id': str(blog_post.id),
'type': blog_post.__tablename__,
'attributes': {
'title': u'{}'.format(blog_post.title)
},
'relationships': {
'blog-author': {
'links': {
'self': '/posts/1/relationships/blog-author',
'related': '/posts/1/blog-author'
}
}
}
},
'meta': {
'sqlalchemy_jsonapi_version': '4.0.9'
},
'jsonapi': {
'version': '1.0'
}
}
self.assertEqual(expected_data, serialized_data)
def test_serialize_resource_with_relationship_given_dasherize_false(self):
"""Serialize a resource with to-one relationship given dasherize false.
Relationship keys are underscored like in model.
"""
class PostSerializer(serializer.JSONAPISerializer):
"""Declarative serializer for Post."""
fields = ['id', 'title']
model = self.Post
dasherize = False
blog_post = self.Post(title='Foo')
self.session.add(blog_post)
self.session.commit()
post = self.session.query(self.Post).get(blog_post.id)
blog_post_serializer = PostSerializer()
serialized_data = blog_post_serializer.serialize(post)
expected_data = {
'data': {
'id': str(blog_post.id),
'type': blog_post.__tablename__,
'attributes': {
'title': u'{}'.format(blog_post.title)
},
'relationships': {
'blog_author': {
'links': {
'self': '/posts/1/relationships/blog_author',
'related': '/posts/1/blog_author'
}
}
}
},
'meta': {
'sqlalchemy_jsonapi_version': '4.0.9'
},
'jsonapi': {
'version': '1.0'
}
}
self.assertEqual(expected_data, serialized_data)
class TestSerializeErrors(unittest.TestCase):
"""Tests for errors raised in serialize method."""
def setUp(self):
"""Configure sqlalchemy and session."""
self.engine = create_engine('sqlite://')
Session = sessionmaker(bind=self.engine)
self.session = Session()
self.Base = declarative_base()
class User(self.Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
first_name = Column(String(50), nullable=False)
class Post(self.Base):
__tablename__ = 'posts'
id = Column(Integer, primary_key=True)
title = Column(String(100), nullable=False)
author_id = Column(Integer, ForeignKey('users.id',
ondelete='CASCADE'))
blog_author = relationship('User',
lazy='joined',
backref=backref('posts',
lazy='dynamic',
cascade='all,delete'))
self.User = User
self.Post = Post
self.Base.metadata.create_all(self.engine)
def tearDown(self):
"""Reset the sqlalchemy engine."""
self.Base.metadata.drop_all(self.engine)
def test_serialize_resource_with_mismatched_model(self):
"""A serializers model type much match the resource it serializes."""
class UserSerializer(serializer.JSONAPISerializer):
"""Declarative serializer for User."""
fields = ['id']
model = self.Post
user = self.User(first_name='Sally')
self.session.add(user)
self.session.commit()
user = self.session.query(self.User).get(user.id)
user_serializer = UserSerializer()
with self.assertRaises(TypeError):
user_serializer.serialize(user)
def test_serialize_resource_with_unknown_attribute_in_fields(self):
"""Cannot serialize attributes that are unknown to resource."""
class UserSerializer(serializer.JSONAPISerializer):
"""Declarative serializer for User."""
fields = ['id', 'firsts_names_unknown']
model = self.User
user = self.User(first_name='Sally')
self.session.add(user)
self.session.commit()
user = self.session.query(self.User).get(user.id)
user_serializer = UserSerializer()
with self.assertRaises(AttributeError):
user_serializer.serialize(user)
def test_serialize_resource_with_related_model_in_fields(self):
"""Model serializer fields cannot contain related models.
It is against json-api spec to serialize related models as attributes.
"""
class UserSerializer(serializer.JSONAPISerializer):
"""Declarative serializer for User."""
fields = ['id', 'posts']
model = self.User
user = self.User(first_name='Sally')
self.session.add(user)
self.session.commit()
user = self.session.query(self.User).get(user.id)
user_serializer = UserSerializer()
with self.assertRaises(AttributeError):
user_serializer.serialize(user)
def test_serialize_resource_with_foreign_key_in_fields(self):
"""Model serializer fields cannot contain foreign keys.
It is against json-api spec to serialize foreign keys as attributes.
"""
class PostSerializer(serializer.JSONAPISerializer):
"""Declarative serializer for Post."""
fields = ['id', 'author_id']
model = self.Post
blog_post = self.Post(title='Foo')
self.session.add(blog_post)
self.session.commit()
post = self.session.query(self.Post).get(blog_post.id)
blog_post_serializer = PostSerializer()
with self.assertRaises(AttributeError):
blog_post_serializer.serialize(post)
def test_serialize_resource_with_invalid_primary_key(self):
"""Resource cannot have unknown primary key.
The primary key must be an attribute on the resource.
"""
class UserSerializer(serializer.JSONAPISerializer):
"""Declarative serializer for Post."""
fields = ['unknown_primary_key', 'first_name']
primary_key = 'unknown_primary_key'
model = self.User
user = self.User(first_name='Sally')
self.session.add(user)
self.session.commit()
user = self.session.query(self.User).get(user.id)
user_serializer = UserSerializer()
with self.assertRaises(AttributeError):
user_serializer.serialize(user)
class TestSerializerInstantiationErrors(unittest.TestCase):
"""Test exceptions raised in instantiation of serializer."""
def setUp(self):
"""Configure sqlalchemy and session."""
self.engine = create_engine('sqlite://')
Session = sessionmaker(bind=self.engine)
self.session = Session()
self.Base = declarative_base()
class User(self.Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
first_name = Column(String(50), nullable=False)
self.User = User
self.Base.metadata.create_all(self.engine)
def tearDown(self):
"""Reset the sqlalchemy engine."""
self.Base.metadata.drop_all(self.engine)
def test_serializer_with_no_defined_model(self):
"""Serializer requires model member."""
class UserSerializer(serializer.JSONAPISerializer):
"""Declarative serializer for User."""
fields = ['id']
with self.assertRaises(TypeError):
UserSerializer()
def test_serializer_with_no_defined_fields(self):
"""At minimum fields must exist."""
class UserSerializer(serializer.JSONAPISerializer):
"""Declarative serializer for User."""
model = self.User
with self.assertRaises(ValueError):
UserSerializer()
def test_serializer_with_missing_id_field(self):
"""An 'id' is required in serializer fields."""
class UserSerializer(serializer.JSONAPISerializer):
"""Declarative serializer for User."""
fields = ['first_name']
model = self.User
with self.assertRaises(ValueError):
UserSerializer()
| mit | -1,487,117,044,349,505,500 | 33.387417 | 79 | 0.53428 | false |
SergeyStaroletov/Patterns17 | CourseWorkReports/Курсовой проект Киреков ПИ-42/Исходный код/Model/Classifiers/ImageClassifier.py | 1 | 1143 | import keras
import numpy as np
from keras.preprocessing import image
from .IClassifier import IClassifier
class ImageClassifier(IClassifier):
""" классификатор изображений """
def __init__(self):
self.__input_shape = None
self.__aliases = None
self.__model = None
def init_classifier(self, h5file_path, input_shape, aliases):
self.__input_shape = input_shape
self.__aliases = aliases
self.__model = keras.models.load_model(h5file_path)
def classify(self, img_path):
try:
img = image.load_img(img_path, target_size=self.__input_shape)
img = image.img_to_array(img)
x = np.expand_dims(img, axis=0) / 255
a = self.__model.predict(x)[0]
        except Exception:
            return {'ok': False, 'message': 'No image was provided as input'}
res = []
for i in range(len(a)):
            res.append('With probability {0}% this is {1}'.format(a[i] * 100, self.__aliases[i]))
return {'ok': True, 'result': res}
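# Illustrative usage sketch (added for clarity; the file names, input shape and
# aliases below are made-up values, not part of the original project):
#
#     clf = ImageClassifier()
#     clf.init_classifier('model.h5', input_shape=(224, 224), aliases=['cat', 'dog'])
#     print(clf.classify('photo.jpg'))  # {'ok': True, 'result': [...]} on success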
| mit | -2,960,119,143,002,614,000 | 31.5625 | 91 | 0.568901 | false |
gmr/mikkoo | mikkoo/statsd.py | 1 | 5519 | """
Statsd Client that takes configuration first from the rejected configuration
file, falling back to environment variables, and finally default values.
Environment Variables:
- STATSD_HOST
- STATSD_PORT
- STATSD_PREFIX
"""
import logging
import os
import socket
from tornado import iostream
LOGGER = logging.getLogger(__name__)
class Client(object):
"""A simple statsd client that buffers counters to emit fewer UDP packets
than once per incr.
"""
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 8125
DEFAULT_PREFIX = 'mikkoo'
PAYLOAD_HOSTNAME = '{}.{}.{}.{}:{}|{}\n'
PAYLOAD_NO_HOSTNAME = '{}.{}.{}:{}|{}\n'
def __init__(self, name, settings, failure_callback):
"""
:param str name: The name of the worker for this client
:param dict settings: statsd Settings
"""
self._connected = False
self._failure_callback = failure_callback
self._hostname = socket.gethostname().split('.')[0]
self._name = name
self._settings_in = settings
self._settings = {}
self._address = (self._setting('host', self.DEFAULT_HOST),
int(self._setting('port', self.DEFAULT_PORT)))
self._prefix = self._setting('prefix', self.DEFAULT_PREFIX)
self._tcp_sock, self._udp_sock = None, None
if self._setting('tcp', False):
self._tcp_sock = self._tcp_socket()
else:
self._udp_sock = self._udp_socket()
def add_timing(self, key, value=0):
"""Add a timer value to statsd for the specified key
:param str key: The key to add the timing to
:param int or float value: The value of the timing in seconds
"""
return self._send(key, value * 1000, 'ms')
def incr(self, key, value=1):
"""Increment the counter value in statsd
:param str key: The key to increment
:param int value: The value to increment by, defaults to 1
"""
return self._send(key, value, 'c')
def set_gauge(self, key, value):
"""Set a gauge value in statsd
:param str key: The key to set the value for
:param int or float value: The value to set
"""
return self._send(key, value, 'g')
def stop(self):
"""Close the socket if connected via TCP."""
if self._tcp_sock:
self._tcp_sock.close()
def _build_payload(self, key, value, metric_type):
"""Return the """
if self._setting('include_hostname', True):
return self.PAYLOAD_HOSTNAME.format(
self._prefix, self._hostname, self._name, key, value,
metric_type)
return self.PAYLOAD_NO_HOSTNAME.format(
self._prefix, self._name, key, value, metric_type)
def _send(self, key, value, metric_type):
"""Send the specified value to the statsd daemon via UDP without a
direct socket connection.
:param str key: The key name to send
:param int or float value: The value for the key
"""
payload = self._build_payload(key, value, metric_type)
LOGGER.debug('Sending statsd payload: %r', payload)
try:
if self._tcp_sock:
return self._tcp_sock.write(payload.encode('utf-8'))
else:
self._udp_sock.sendto(payload.encode('utf-8'), self._address)
except (OSError, socket.error) as error: # pragma: nocover
if self._connected:
LOGGER.exception('Error sending statsd metric: %s', error)
self._connected = False
self._failure_callback()
def _setting(self, key, default):
"""Return the setting, checking config, then the appropriate
environment variable, falling back to the default, caching the
results.
:param str key: The key to get
:param any default: The default value if not set
:return: str
"""
if key not in self._settings:
value = self._settings_in.get(
key, os.environ.get('STATSD_{}'.format(key).upper(), default))
self._settings[key] = value
return self._settings[key]
def _tcp_on_closed(self):
"""Invoked when the socket is closed."""
LOGGER.warning('Disconnected from statsd, reconnecting')
self._connected = False
self._tcp_sock = self._tcp_socket()
def _tcp_on_connected(self):
"""Invoked when the IOStream is connected"""
LOGGER.debug('Connected to statsd at %s via TCP', self._address)
self._connected = True
def _tcp_socket(self):
"""Connect to statsd via TCP and return the IOStream handle.
:rtype: iostream.IOStream
"""
sock = iostream.IOStream(socket.socket(
socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP))
try:
sock.connect(self._address, self._tcp_on_connected)
except (OSError, socket.error) as error:
LOGGER.error('Failed to connect via TCP, triggering shutdown: %s',
error)
self._failure_callback()
else:
self._connected = True
sock.set_close_callback(self._tcp_on_closed)
return sock
@staticmethod
def _udp_socket():
"""Return the UDP socket handle
:rtype: socket.socket
"""
return socket.socket(
socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
| bsd-3-clause | -7,954,871,945,239,064,000 | 31.464706 | 78 | 0.586157 | false |
f3at/feat | src/feat/configure/uninstalled.py | 1 | 1393 | # F3AT - Flumotion Asynchronous Autonomous Agent Toolkit
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# See "LICENSE.GPL" in the source distribution for more information.
# Headers in this file shall remain intact.
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
import os
import feat
_basedir = os.path.abspath(os.path.join(feat.__path__[0], '..', '..'))
bindir = os.path.join(_basedir, 'bin')
logdir = os.path.join(_basedir, 'log')
rundir = os.path.join(_basedir, 'run')
lockdir = os.path.join(_basedir, 'run')
confdir = os.path.join(_basedir, 'conf')
gatewaydir = os.path.join(_basedir, 'gateway', 'static')
socketdir = os.path.join(_basedir, 'run')
| gpl-2.0 | -509,342,342,318,453,400 | 38.8 | 73 | 0.730079 | false |
bibarz/bibarz.github.io | dabble/ab/auth_algorithms.py | 1 | 17145 | # Import any required libraries or modules.
import numpy as np
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
import csv
import sys
class MetaParams:
n_lda_ensemble = 101
lda_ensemble_feature_fraction = 0.4
mode = 'lda_ensemble'
# The following is a hacky container for Statistics computed from the
# whole training set; we don't want to have to recompute them again at every call
# to build_template (it becomes slow for parameter searches with cross validation),
# so we preserve it here between calls. The proper place to
# do this would be in main.py, but we don't want to touch that.
Global = lambda: None
Global.ready = False
def pca_converter(data, feature_discriminabilities, explained_variance):
'''
PCA conversion of the data. The PCA is based on the complete dataset, but each feature
is normalized to a std dev proportional to the given discriminability.
:param data: n_samples x n_features matrix with all data to do PCA on
:param feature_discriminabilities: n_features length vector
:param explained_variance: ratio of explained variance (between 0 and 1) that will
determine how many components are kept
:return: function transforming data into pca components, and covariance matrix
of transformed data
'''
mu = np.mean(data, axis=0)
std = np.std(data, axis=0) / feature_discriminabilities
normalized_data = (data - mu) / std
u, s, vt = np.linalg.svd(normalized_data)
cut_idx = np.argmin(np.abs(np.cumsum(s * s) / np.sum(s * s) - explained_variance))
vt = vt[:cut_idx + 1]
return (lambda x, mu=mu, std=std, vt=vt: np.dot((x - mu) / std, vt.T)),\
np.diag(s[:cut_idx + 1] ** 2 / (len(data) - 1))
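# Illustrative sketch (added; `raw_data` stands for any n_samples x n_features
# array): pca_converter() returns a projection function plus the covariance of
# the projected training data, which later feeds the LDA step.
#
#     to_pca, pca_cov = pca_converter(raw_data, feature_discriminabilities,
#                                     explained_variance=0.98)
#     projected = to_pca(raw_data)  # n_samples x n_kept_components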
def preprocess_data(data):
'''
Turn raw data into an array of hand-picked features useful for classification
:param data: n_samples x n_raw_features numpy array
:return: n_samples x n_processed_features array
'''
keypress_dt = data[:, 8::10] - data[:, 3::10] # duration of each keystroke
key_to_key_dt = data[:, 13::10] - data[:, 3:-10:10] # interval between keystrokes
x_down = data[:, 4::10].astype(np.float) / data[:, 1][:, None].astype(np.float) # x relative to screen width
y_down = data[:, 5::10].astype(np.float) / data[:, 0][:, None].astype(np.float) # y relative to screen height
x_up = data[:, 9::10].astype(np.float) / data[:, 1][:, None].astype(np.float) # x relative to screen width
y_up = data[:, 10::10].astype(np.float) / data[:, 0][:, None].astype(np.float) # y relative to screen height
size_down = data[:, 6::10]
size_up = data[:, 11::10]
pressure_down = data[:, 7::10]
pressure_up = data[:, 12::10]
assert np.all((x_down >= 0) & (x_down <= 1) & (y_down >= 0) & (y_down <= 1))
assert np.all((x_up >= 0) & (x_up <= 1) & (y_up >= 0) & (y_up <= 1))
touch_d = np.hypot(x_down - x_up, y_down - y_up)
collected_data = np.hstack((keypress_dt, key_to_key_dt,
np.diff(x_down, axis=1), np.diff(y_down, axis=1),
touch_d,
size_down, size_up, pressure_down, pressure_up,
))
return collected_data
def get_random_feature_selector(n_all_features, feature_fraction, seed):
'''
Return a selector of random features from a data array
:param n_all_features: total number of features
:param feature_fraction: desired fraction of selected features
:param seed: random seed for repeatable experiments
:return: a function taking in full data and returning only the random features from it
'''
n_features = int(np.round(feature_fraction * n_all_features))
rng = np.random.RandomState(seed)
p = rng.permutation(n_all_features)[:n_features]
return lambda x, p=p: x[..., p]
def simple_gaussian(user_pca):
# template will consist of mean and std dev of each feature in pca space
mean_pca = np.mean(user_pca, axis=0)
std_pca = np.std(user_pca, axis=0)
return mean_pca, std_pca
def scikit_classifier(user, training_dataset, generator=lambda:KNeighborsClassifier(5)):
'''
Train a given classifier on user vs others
:param generator: a function creating a scikit classifier with fit and predict functions
:return: the trained classifier
'''
all_users = training_dataset.keys()
others_raw = np.vstack([training_dataset[u] for u in all_users if u != user])
others_pca = Global.pca(preprocess_data(others_raw))
user_raw = training_dataset[user]
user_pca = Global.pca(preprocess_data(user_raw))
clf = generator()
clf.fit(np.vstack((user_pca, others_pca)),
np.hstack((np.zeros(len(user_pca)), np.ones(len(others_pca)))))
return clf
def lda(user_pca, all_pca_cov, n_all):
'''
Compute the Fisher discriminant vector and threshold to classify user vs others.
:param user_pca: n_samples x n_pca_features array of user instances
:param all_pca_cov: covariance matrix of the complete dataset; it is assumed that
the user data was part of the dataset, and that the mean of the whole dataset
is 0 for every feature
:param n_all: number of samples that formed the complete dataset
:return: Fisher discriminant vector, threshold
'''
n_user = len(user_pca)
assert n_user < n_all - 1 # make sure the complete dataset has more than just the current user
# We compute mean and variance for the user data directly, and infer the mean
# and variance of the rest of the dataset from the covariance of the complete set
# (and its mean, which is assumed zero)
user_mu = np.mean(user_pca, axis=0)
others_mu = - n_user * user_mu / (n_all - n_user)
user_sigma = np.cov(user_pca.T)
def sq_(x):
return x[:, None] * x[None, :]
others_sigma = ((n_all - 1) * all_pca_cov - (n_user - 1) * user_sigma\
- n_user * sq_(user_mu) - (n_all - n_user) * sq_(others_mu)) / (n_all - n_user - 1)
ld_vector = np.dot(np.linalg.inv(user_sigma + others_sigma), user_mu - others_mu) # order determines sign of criterion
ld_vector /= np.linalg.norm(ld_vector)
# find the threshold for equal false positives and false negatives
user_proj_mu = np.dot(user_mu, ld_vector)
others_proj_mu = np.dot(others_mu, ld_vector)
user_proj_std = np.sqrt(np.dot(ld_vector, np.dot(user_sigma, ld_vector)))
others_proj_std = np.sqrt(np.dot(ld_vector, np.dot(others_sigma, ld_vector)))
ld_threshold = (others_proj_std * user_proj_mu + user_proj_std * others_proj_mu) / (user_proj_std + others_proj_std)
return ld_vector, ld_threshold
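# Illustrative note (added): the pair returned by lda() defines the decision
# rule applied in authenticate() below -- a PCA-space sample x is attributed
# to the user when its projection clears the threshold:
#
#     accept = np.dot(x, ld_vector) - ld_threshold > 0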
def compute_feature_discriminabilities(each_preprocessed):
'''
Return a vector of discriminability for each feature
:param each_preprocessed: list with one n_samples x n_features data matrix for each user
:return: vector of discriminabilities (sqrt of the square of the difference of means divided by
the sum of variances) for each feature
'''
n_users = len(each_preprocessed)
each_mu = np.array([np.mean(m, axis=0) for m in each_preprocessed]) # n_users x n_features
each_var = np.array([np.var(m, axis=0) for m in each_preprocessed]) # n_users x n_features
# compute discriminability for each feature and pair of users
    pairwise_discriminability = (each_mu[:, None, :] - each_mu[None, :, :]) ** 2 / (1e-6 + each_var[:, None, :] + each_var[None, :, :])
# compute discriminability of each feature as the average over pairs of users
return np.sqrt(np.sum(pairwise_discriminability, axis=(0, 1)) / (n_users * (n_users - 1)))
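# In formula form (added note): for users u, v and feature f,
#     d_f = sqrt( sum_{u != v} (mu_{u,f} - mu_{v,f})**2 / (var_{u,f} + var_{v,f})
#                 / (n_users * (n_users - 1)) )
# i.e. the root of the average pairwise "squared difference of means over the
# sum of variances"; the 1e-6 term above only guards against zero variance.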
def _prepare_global(training_dataset):
'''
Processing of the complete dataset, to be reused for each user
- feature preprocessing
- pca converter
- selection of features and computation of covariances for ensemble lda
:param training_dataset: the complete dataset
:return: None. The Global container is initialized with all necessary data
'''
each_preprocessed = [preprocess_data(training_dataset[u]) for u in training_dataset]
Global.feature_discriminabilities = compute_feature_discriminabilities(each_preprocessed)
all_preprocessed = np.vstack(each_preprocessed)
Global.n_all = len(all_preprocessed)
Global.pca, Global.all_pca_cov = pca_converter(all_preprocessed, Global.feature_discriminabilities, explained_variance=0.98)
if MetaParams.mode == 'lda_ensemble':
Global.lda_ensemble = []
for i in range(MetaParams.n_lda_ensemble):
seed = np.random.randint(200000)
feature_selector = get_random_feature_selector(all_preprocessed.shape[1],
feature_fraction=MetaParams.lda_ensemble_feature_fraction, seed=seed)
selected_pca, selected_pca_cov = pca_converter(feature_selector(all_preprocessed),
feature_selector(Global.feature_discriminabilities),
explained_variance=0.99)
Global.lda_ensemble.append({'selector': feature_selector, 'pca': selected_pca, 'pca_cov': selected_pca_cov})
Global.ready = True
# Implement template building here. Feel free to write any helper classes or functions required.
# Return the generated template for that user.
def build_template(user, training_dataset):
if not Global.ready:
_prepare_global(training_dataset)
user_raw = training_dataset[user]
user_preprocessed = preprocess_data(user_raw)
template = {}
if MetaParams.mode in ['lda', 'simple', 'combined']:
user_pca = Global.pca(user_preprocessed)
template['mean_pca'], template['std_pca'] = simple_gaussian(user_pca)
template['ld_vector'], template['ld_threshold'] =\
lda(user_pca, all_pca_cov=Global.all_pca_cov, n_all=Global.n_all)
if MetaParams.mode == 'lda_ensemble':
lda_ensemble = []
for lda_item in Global.lda_ensemble:
user_selected_pca = lda_item['pca'](lda_item['selector'](user_preprocessed))
ld_vector, ld_threshold = lda(user_selected_pca, n_all=Global.n_all, all_pca_cov=lda_item['pca_cov'])
lda_ensemble.append({'ld_vector': ld_vector, 'ld_threshold': ld_threshold})
template['lda_ensemble'] = lda_ensemble
if MetaParams.mode in ['nonlinear', 'combined']:
template['clf_1'] = scikit_classifier(user, training_dataset, generator=lambda: KNeighborsClassifier(5))
template['clf_2'] = scikit_classifier(user, training_dataset, generator=lambda: svm.LinearSVC(C=0.05, class_weight='balanced'))
return template
# Implement authentication method here. Feel free to write any helper classes or functions required.
# Return the authenttication score and threshold above which you consider it being a correct user.
def authenticate(instance, user, templates):
mode = MetaParams.mode
assert mode in ['lda', 'combined', 'lda_ensemble', 'nonlinear', 'simple'], ("Unrecognized mode: %s" % mode)
t = templates[user]
batch_mode = instance.ndim > 1
if not batch_mode:
instance = instance[None, :]
preprocessed_instance = preprocess_data(instance)
if mode in ['lda', 'combined']:
user_pca = Global.pca(preprocessed_instance)
user_lda_proj = np.dot(user_pca, t['ld_vector'])
lda_score, lda_thr = user_lda_proj - t['ld_threshold'], np.zeros(len(user_lda_proj))
if mode in ['nonlinear', 'combined']:
user_pca = Global.pca(preprocessed_instance)
clf_score_1, clf_thr_1 = (t['clf_1'].predict(user_pca) == 0).astype(np.float), 0.5 * np.ones(len(user_pca))
clf_score_2, clf_thr_2 = (t['clf_2'].predict(user_pca) == 0).astype(np.float), 0.5 * np.ones(len(user_pca))
if mode == 'simple':
user_pca = Global.pca(preprocessed_instance)
z = (user_pca - t['mean_pca']) / t['std_pca']
distance = np.mean(np.abs(z) ** 2, axis=1) ** 0.5
score, thr = distance, 1.2 * np.ones(len(distance))
if mode == 'lda_ensemble':
ensemble_scores = np.empty((len(preprocessed_instance), len(t['lda_ensemble'])))
for i, sub_t in enumerate(t['lda_ensemble']):
g_item = Global.lda_ensemble[i]
user_selected_pca = g_item['pca'](g_item['selector'](preprocessed_instance))
user_thinned_lda_proj = np.dot(user_selected_pca, sub_t['ld_vector'])
ensemble_scores[:, i] = user_thinned_lda_proj - sub_t['ld_threshold']
score = np.mean(ensemble_scores > 0, axis=1)
thr = 0.5 * np.ones(len(score))
if mode == 'lda':
score, thr = lda_score, lda_thr
elif mode == 'nonlinear':
score, thr = clf_score_1, clf_thr_1
elif mode == 'combined':
score = np.mean(np.vstack((lda_score > lda_thr, clf_score_1 > clf_thr_1, clf_score_2 > clf_thr_2)), axis=0)
thr = 0.5 * np.ones(len(score))
if not batch_mode:
assert score.shape == (1, )
assert thr.shape == (1, )
score, thr = score[0], thr[0]
return score, thr
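# Illustrative sketch (added; `training_dataset` and `validation_dataset` are
# dicts of raw per-user arrays, exactly as used by cross_validate() below):
def _example_verify_user(user, training_dataset, validation_dataset):
    """Hypothetical helper: build all templates, then score one user's samples."""
    templates = {u: build_template(u, training_dataset) for u in training_dataset}
    score, threshold = authenticate(validation_dataset[user], user, templates)
    return score > threshold  # boolean array, True where the claimed user is accepted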
def cross_validate(full_dataset, print_results=False):
'''
n-fold cross-validation of given dataset
:param full_dataset: dictionary of raw data for each user
:param print_results: if True, print progress messages and results
:return: (percentage of false rejects, percentage of false accepts)
'''
n_folds = 5 # for cross-validation
all_false_accept = 0
all_false_reject = 0
all_true_accept = 0
all_true_reject = 0
for i in range(n_folds):
# split full dataset into training and validation
training_dataset = dict()
validation_dataset = dict()
for u in full_dataset.keys():
n = len(full_dataset[u])
idx = np.round(float(n) / n_folds * np.arange(n_folds + 1)).astype(np.int)
n_validation = np.diff(idx)
rolled_set = np.roll(full_dataset[u], -idx[i], axis=0)
training_dataset[u] = rolled_set[n_validation[i]:, :]
validation_dataset[u] = rolled_set[:n_validation[i], :]
# reset global data
Global.ready = False
templates = {u: build_template(u, training_dataset) for u in training_dataset}
# For each user test authentication.
true_accept = 0
false_reject = 0
true_reject = 0
false_accept = 0
for u in training_dataset:
# Test false rejections.
(score, threshold) = authenticate(validation_dataset[u], u, templates)
true_accept += np.sum(score > threshold)
false_reject += np.sum(score <= threshold)
# Test false acceptance.
for u_attacker in validation_dataset:
if u == u_attacker:
continue
(score, threshold) = authenticate(validation_dataset[u_attacker], u, templates)
false_accept += np.sum(score > threshold)
true_reject += np.sum(score <= threshold)
if print_results:
print "fold %i: false reject rate: %.1f%%, false accept rate: %.1f%%" %\
(i, 100. * float(false_reject) / (false_reject + true_accept),
100. * float(false_accept) / (false_accept + true_reject))
all_false_accept += false_accept
all_false_reject += false_reject
all_true_accept += true_accept
all_true_reject += true_reject
false_reject_percent = 100. * float(all_false_reject) / (all_false_reject + all_true_accept)
false_accept_percent = 100. * float(all_false_accept) / (all_false_accept + all_true_reject)
if print_results:
print "Total: false reject rate: %.1f%%, false accept rate: %.1f%%" % (false_reject_percent, false_accept_percent)
return false_reject_percent, false_accept_percent
if __name__ == "__main__":
# Reading the data into the training dataset separated by user.
data_training_file = open('dataset_training.csv', 'rb')
csv_training_reader = csv.reader(data_training_file, delimiter=',', quotechar='"')
csv_training_reader.next()
full_dataset = dict()
for row in csv_training_reader:
if row[0] not in full_dataset:
full_dataset[row[0]] = np.array([]).reshape((0, len(row[1:])))
full_dataset[row[0]] = np.vstack([full_dataset[row[0]], np.array(row[1:]).astype(float)])
for feature_fraction in [0.4]:
for n_lda_ensemble in [51]:
n_trials = 10
tot_rej = 0
tot_acc = 0
for _ in range(n_trials):
MetaParams.feature_fraction = feature_fraction
MetaParams.n_lda_ensemble = n_lda_ensemble
rej, acc = cross_validate(full_dataset)
tot_rej += rej
tot_acc += acc
print "feature fraction=%.2f, ensemble size=%i, false_rej=%.2f%%, false_acc=%.2f%%" % (feature_fraction, n_lda_ensemble, tot_rej / n_trials, tot_acc / n_trials)
| mit | 2,837,573,524,067,170,300 | 45.972603 | 172 | 0.633829 | false |
maferelo/saleor | saleor/site/migrations/0017_auto_20180803_0528.py | 3 | 1357 | # Generated by Django 2.0.3 on 2018-08-03 10:28
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("site", "0016_auto_20180719_0520")]
operations = [
migrations.CreateModel(
name="SiteSettingsTranslation",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("language_code", models.CharField(max_length=10)),
("header_text", models.CharField(blank=True, max_length=200)),
("description", models.CharField(blank=True, max_length=500)),
(
"site_settings",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="translations",
to="site.SiteSettings",
),
),
],
),
migrations.AlterUniqueTogether(
name="sitesettingstranslation",
unique_together={("language_code", "site_settings")},
),
]
| bsd-3-clause | -1,561,289,987,221,625,000 | 32.097561 | 78 | 0.46647 | false |
Azure/azure-sdk-for-python | sdk/servicebus/azure-servicebus/tests/perf_tests/receive_message_batch.py | 1 | 1324 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import asyncio
from ._test_base import _ReceiveTest
class ReceiveMessageBatchTest(_ReceiveTest):
def run_sync(self):
count = 0
while count < self.args.num_messages:
batch = self.receiver.receive_messages(
max_message_count=self.args.num_messages - count,
max_wait_time=self.args.max_wait_time or None)
if self.args.peeklock:
for msg in batch:
self.receiver.complete_message(msg)
count += len(batch)
async def run_async(self):
count = 0
while count < self.args.num_messages:
batch = await self.async_receiver.receive_messages(
max_message_count=self.args.num_messages - count,
max_wait_time=self.args.max_wait_time or None)
if self.args.peeklock:
await asyncio.gather(*[self.async_receiver.complete_message(m) for m in batch])
count += len(batch)
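# Note (illustrative, not part of the original test): when self.args.peeklock is
# set, received messages stay locked on the Service Bus entity until
# complete_message() settles them, which is why both loops above settle every
# message in the batch before counting it; without peek-lock the receiver is
# assumed to run in receive-and-delete mode, so messages are gone once received.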
| mit | -8,110,725,861,099,683,000 | 41.709677 | 95 | 0.527946 | false |
slogan621/tscharts | tscharts/views.py | 1 | 9381 | #(C) Copyright Syd Logan 2016-2021
#(C) Copyright Thousand Smiles Foundation 2016-2021
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.exceptions import APIException, NotFound
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.http import JsonResponse
from rest_framework.authtoken.models import Token
from django.http import HttpResponse, HttpResponseForbidden, HttpResponseBadRequest, HttpResponseServerError, HttpResponseNotFound
import json
from common.decorators import *
from pin.models import PIN
import traceback
import logging
LOG = logging.getLogger("tscharts")
class LoginView(APIView):
authentication_classes = ()
permission_classes = ()
@log_request
def post(self, request, format=None):
badRequest = False
forbidden = False
data = json.loads(request.body)
if not "username" in data:
badRequest = True
if not "password" in data and not "pin" in data:
badRequest = True
if not badRequest:
username = data['username']
if "password" in data:
password = data['password']
user = authenticate(username=username, password=password)
else:
LOG.error("traceback 0 {}".format(traceback.print_stack()))
user = User.objects.get(username=username)
if user:
LOG.error("traceback 1 {}".format(traceback.print_stack()))
pin = PIN.objects.get(user=user.id)
if pin:
LOG.error("traceback 2 {}".format(traceback.print_stack()))
if not pin.user == user:
LOG.error("traceback 3 {}".format(traceback.print_stack()))
user = None
elif not pin.pin == data["pin"]:
LOG.error("traceback 4 {}".format(traceback.print_stack()))
user = None
else:
LOG.error("traceback 5 {}".format(traceback.print_stack()))
user = None
if user:
LOG.error("traceback 6 {}".format(traceback.print_stack()))
if user.is_active:
# XXX hack
try:
if not user.backend:
user.backend = 'django.contrib.auth.backends.ModelBackend'
except:
user.backend = 'django.contrib.auth.backends.ModelBackend'
login(request, user)
else:
LOG.error("traceback 7 {}".format(traceback.print_stack()))
forbidden = True
else:
LOG.error("traceback 8 {}".format(traceback.print_stack()))
forbidden = True
if not forbidden and not badRequest:
token = Token.objects.get_or_create(user=user)[0]
return JsonResponse({"token": "{}".format(token.key),
"id": "{}".format(user.id)})
elif forbidden:
return HttpResponseForbidden()
else:
return HttpResponseBadRequest()
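# Illustrative request/response sketch for LoginView above (not part of the
# original file); the field names come from the handler, the values are made up:
#   POST body: {"username": "someuser", "password": "secret"}
#          or: {"username": "someuser", "pin": "1234"}
#   success:   {"token": "<token key>", "id": "<user id>"}
#   errors:    403 for bad credentials, 400 for a malformed body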
class LogoutView(APIView):
authentication_classes = ()
permission_classes = ()
@log_request
def post(self, request, format=None):
logout(request)
return HttpResponse()
class CreateUserView(APIView):
authentication_classes = ()
permission_classes = ()
@log_request
def post(self, request, format=None):
badRequest = False
duplicateUser = False
implError = False
data = json.loads(request.body)
if not "first" in data:
badRequest = True
if not "last" in data:
badRequest = True
if not "password" in data:
badRequest = True
if not "email" in data:
badRequest = True
if not "pin" in data:
badRequest = True
if not badRequest:
first = data['first']
last = data['last']
password = data['password']
email = data['email']
try:
user = User.objects.get(username=email)
except:
user = None
if user:
badRequest = True
duplicateUser = True
if not badRequest:
try:
user = User.objects.create_user(email, email, password)
user.is_active = True
user.first_name = first
user.last_name = last
user.save()
except:
user = None
if user:
kwargs = {}
kwargs["pin"] = data['pin']
kwargs["user"] = user
try:
pin = PIN(**kwargs)
if pin:
pin.save()
except:
pin = None
if not pin:
implMsg = "Unable to create PIN"
implError = True
else:
implMsg = "Unable to create user"
implError = True
if badRequest:
if duplicateUser:
                r = HttpResponse(status=status.HTTP_409_CONFLICT, reason="User ({0}) already exists".format(user.id))
return r
else:
return HttpResponseBadRequest()
elif implError:
return HttpResponseServerError(implMsg)
else:
return Response({'id': user.id})
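# Illustrative payload for CreateUserView above (not part of the original
# file); the keys mirror the checks in post(), the values are made up:
#   {"first": "Jane", "last": "Doe", "password": "secret",
#    "email": "jane@example.com", "pin": "1234"}
# A duplicate email returns 409, a missing key 400, success {"id": <user id>}.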
class UpdatePINView(APIView):
'''
XXX in insecure deployments, like the web, the following lines should
be commented out so that only an authenticated user can modify a PIN
authentication_classes = ()
permission_classes = ()
'''
authentication_classes = ()
permission_classes = ()
@log_request
def put(self, request, format=None):
badRequest = False
notFound = False
implError = False
data = json.loads(request.body)
if not "username" in data:
badRequest = True
if not "pin" in data:
badRequest = True
if not badRequest:
pin = data['pin']
username = data['username']
try:
user = User.objects.get(username=username)
except:
user = None
if not user:
notFound = True
if not badRequest and not notFound:
try:
pinobj = PIN.objects.get_or_create(user=user)[0]
except:
pinobj = None
if pinobj:
pinobj.pin = pin
pinobj.save()
else:
implError = True
implMsg = "Unable to update PIN"
if badRequest:
return HttpResponseBadRequest()
elif implError:
return HttpResponseServerError(implMsg)
elif notFound:
return HttpResponseNotFound()
else:
return Response({})
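# Illustrative payload for UpdatePINView above (not part of the original file):
#   PUT body: {"username": "someuser", "pin": "5678"}
# A missing key returns 400, an unknown username 404, success an empty {}.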
class UpdatePasswordView(APIView):
'''
XXX in insecure deployments, like the web, the following lines should
be commented out so that only an authenticated user can modify a password
authentication_classes = ()
permission_classes = ()
'''
authentication_classes = ()
permission_classes = ()
@log_request
def put(self, request, format=None):
badRequest = False
notFound = False
implError = False
data = json.loads(request.body)
if not "username" in data:
badRequest = True
if not "password" in data:
badRequest = True
if not badRequest:
password = data['password']
username = data['username']
try:
user = User.objects.get(username=username)
except:
user = None
if not user:
notFound = True
if not badRequest and not notFound:
try:
user.set_password(password)
user.save()
except:
implError = True
implMsg = "Unable to update password"
if badRequest:
return HttpResponseBadRequest()
elif implError:
return HttpResponseServerError(implMsg)
elif notFound:
return HttpResponseNotFound()
else:
return Response({})
| apache-2.0 | 1,100,830,941,482,948,100 | 30.166113 | 130 | 0.538109 | false |
wanglei828/apollo | modules/tools/plot_planning/speed_dsteering_data.py | 1 | 3396 | #!/usr/bin/env python
###############################################################################
# Copyright 2019 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import sys
from record_reader import RecordItemReader
import matplotlib.pyplot as plt
from cyber_py.record import RecordReader
from modules.canbus.proto import chassis_pb2
class SpeedDsteeringData:
def __init__(self):
self.last_steering_percentage = None
self.last_speed_mps = None
self.last_timestamp_sec = None
self.speed_data = []
self.d_steering_data = []
def add(self, chassis):
steering_percentage = chassis.steering_percentage
speed_mps = chassis.speed_mps
timestamp_sec = chassis.header.timestamp_sec
if self.last_timestamp_sec is None:
self.last_steering_percentage = steering_percentage
self.last_speed_mps = speed_mps
self.last_timestamp_sec = timestamp_sec
return
if (timestamp_sec - self.last_timestamp_sec) > 0.02:
d_steering = (steering_percentage - self.last_steering_percentage) \
/ (timestamp_sec - self.last_timestamp_sec)
self.speed_data.append(speed_mps)
self.d_steering_data.append(d_steering)
self.last_steering_percentage = steering_percentage
self.last_speed_mps = speed_mps
self.last_timestamp_sec = timestamp_sec
def get_speed_dsteering(self):
return self.speed_data, self.d_steering_data
if __name__ == "__main__":
import sys
import matplotlib.pyplot as plt
from os import listdir
from os.path import isfile, join
folders = sys.argv[1:]
fig, ax = plt.subplots()
colors = ["g", "b", "r", "m", "y"]
markers = ["o", "o", "o", "o"]
for i in range(len(folders)):
folder = folders[i]
color = colors[i % len(colors)]
marker = markers[i % len(markers)]
fns = [f for f in listdir(folder) if isfile(join(folder, f))]
for fn in fns:
reader = RecordItemReader(folder+"/"+fn)
processor = SpeedDsteeringData()
last_pose_data = None
last_chassis_data = None
topics = ["/apollo/localization/pose", "/apollo/canbus/chassis"]
for data in reader.read(topics):
if "chassis" in data:
last_chassis_data = data["chassis"]
if last_chassis_data is not None:
processor.add(last_chassis_data)
#last_pose_data = None
#last_chassis_data = None
data_x, data_y = processor.get_speed_dsteering()
ax.scatter(data_x, data_y, c=color, marker=marker, alpha=0.2)
plt.show()
| apache-2.0 | -6,538,834,778,381,319,000 | 37.157303 | 80 | 0.592756 | false |
PanDAWMS/panda-server | pandaserver/daemons/scripts/copyArchive.py | 1 | 71078 | import os
import re
import sys
import time
import fcntl
import shelve
import datetime
import traceback
import requests
from urllib3.exceptions import InsecureRequestWarning
import pandaserver.userinterface.Client as Client
from pandaserver.taskbuffer import EventServiceUtils
from pandacommon.pandalogger.PandaLogger import PandaLogger
from pandaserver.jobdispatcher.Watcher import Watcher
from pandaserver.brokerage.SiteMapper import SiteMapper
# from pandaserver.dataservice.MailUtils import MailUtils
from pandaserver.srvcore.CoreUtils import commands_get_status_output
from pandaserver.config import panda_config
# logger
_logger = PandaLogger().getLogger('copyArchive')
# main
def main(argv=tuple(), tbuf=None, **kwargs):
# password
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
_logger.debug("===================== start =====================")
# memory checker
def _memoryCheck(str):
try:
proc_status = '/proc/%d/status' % os.getpid()
procfile = open(proc_status)
name = ""
vmSize = ""
vmRSS = ""
# extract Name,VmSize,VmRSS
for line in procfile:
if line.startswith("Name:"):
name = line.split()[-1]
continue
if line.startswith("VmSize:"):
vmSize = ""
for item in line.split()[1:]:
vmSize += item
continue
if line.startswith("VmRSS:"):
vmRSS = ""
for item in line.split()[1:]:
vmRSS += item
continue
procfile.close()
_logger.debug('MemCheck - %s Name=%s VSZ=%s RSS=%s : %s' % (os.getpid(),name,vmSize,vmRSS,str))
except Exception:
type, value, traceBack = sys.exc_info()
_logger.error("memoryCheck() : %s %s" % (type,value))
_logger.debug('MemCheck - %s unknown : %s' % (os.getpid(),str))
return
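    # For reference (illustrative values only), the /proc/<pid>/status lines
    # parsed above look like:
    #   Name:   python
    #   VmSize:   123456 kB
    #   VmRSS:     65432 kB
    # so vmSize/vmRSS end up concatenated as strings such as "123456kB".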
_memoryCheck("start")
# # kill old dq2 process
# try:
# # time limit
# timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=2)
# # get process list
# scriptName = sys.argv[0]
# out = commands_get_status_output(
# 'ps axo user,pid,lstart,args | grep dq2.clientapi | grep -v PYTHONPATH | grep -v grep')[-1]
# for line in out.split('\n'):
# if line == '':
# continue
# items = line.split()
# # owned process
# if items[0] not in ['sm','atlpan','pansrv','root']: # ['os.getlogin()']: doesn't work in cron
# continue
# # look for python
# if re.search('python',line) is None:
# continue
# # PID
# pid = items[1]
# # start time
# timeM = re.search('(\S+\s+\d+ \d+:\d+:\d+ \d+)',line)
# startTime = datetime.datetime(*time.strptime(timeM.group(1),'%b %d %H:%M:%S %Y')[:6])
# # kill old process
# if startTime < timeLimit:
# _logger.debug("old dq2 process : %s %s" % (pid,startTime))
# _logger.debug(line)
# commands_get_status_output('kill -9 %s' % pid)
# except Exception:
# type, value, traceBack = sys.exc_info()
# _logger.error("kill dq2 process : %s %s" % (type,value))
#
#
# # kill old process
# try:
# # time limit
# timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=7)
# # get process list
# scriptName = sys.argv[0]
# out = commands_get_status_output('ps axo user,pid,lstart,args | grep %s' % scriptName)[-1]
# for line in out.split('\n'):
# items = line.split()
# # owned process
# if items[0] not in ['sm','atlpan','pansrv','root']: # ['os.getlogin()']: doesn't work in cron
# continue
# # look for python
# if re.search('python',line) is None:
# continue
# # PID
# pid = items[1]
# # start time
# timeM = re.search('(\S+\s+\d+ \d+:\d+:\d+ \d+)',line)
# startTime = datetime.datetime(*time.strptime(timeM.group(1),'%b %d %H:%M:%S %Y')[:6])
# # kill old process
# if startTime < timeLimit:
# _logger.debug("old process : %s %s" % (pid,startTime))
# _logger.debug(line)
# commands_get_status_output('kill -9 %s' % pid)
# except Exception:
# type, value, traceBack = sys.exc_info()
# _logger.error("kill process : %s %s" % (type,value))
# instantiate TB
# if tbuf is None:
from pandaserver.taskbuffer.TaskBuffer import taskBuffer
taskBuffer.init(panda_config.dbhost,panda_config.dbpasswd,nDBConnection=1)
# else:
# taskBuffer = tbuf
# instantiate sitemapper
siteMapper = SiteMapper(taskBuffer)
# send email for access requests
_logger.debug("Site Access")
try:
# get contact
contactAddr = {}
siteContactAddr = {}
sql = "SELECT name,email FROM ATLAS_PANDAMETA.cloudconfig"
status,res = taskBuffer.querySQLS(sql,{})
for cloudName,cloudEmail in res:
contactAddr[cloudName] = cloudEmail
# get requests
sql = "SELECT pandaSite,status,dn FROM ATLAS_PANDAMETA.siteaccess WHERE status IN (:status1,:status2,:status3) "
sql += "ORDER BY pandaSite,status "
varMap = {}
varMap[':status1'] = 'requested'
varMap[':status2'] = 'tobeapproved'
varMap[':status3'] = 'toberejected'
status,res = taskBuffer.querySQLS(sql,varMap)
requestsInCloud = {}
# mailUtils = MailUtils()
# loop over all requests
for pandaSite,reqStatus,userName in res:
cloud = siteMapper.getSite(pandaSite).cloud
_logger.debug("request : '%s' site=%s status=%s cloud=%s" % (userName,pandaSite,reqStatus,cloud))
# send emails to user
if reqStatus in ['tobeapproved','toberejected']:
# set status
if reqStatus == 'tobeapproved':
newStatus = 'approved'
else:
newStatus = 'rejected'
# get mail address for user
userMailAddr = ''
sqlUM = "SELECT email FROM ATLAS_PANDAMETA.users WHERE name=:userName"
varMap = {}
varMap[':userName'] = userName
stUM,resUM = taskBuffer.querySQLS(sqlUM,varMap)
if resUM is None or len(resUM) == 0:
_logger.error("email address is unavailable for '%s'" % userName)
else:
userMailAddr = resUM[0][0]
# send
# if userMailAddr not in ['',None,'None','notsend']:
# _logger.debug("send update to %s" % userMailAddr)
# retMail = mailUtils.sendSiteAccessUpdate(userMailAddr,newStatus,pandaSite)
# _logger.debug(retMail)
# update database
sqlUp = "UPDATE ATLAS_PANDAMETA.siteaccess SET status=:newStatus "
sqlUp += "WHERE pandaSite=:pandaSite AND dn=:userName"
varMap = {}
varMap[':userName'] = userName
varMap[':newStatus'] = newStatus
varMap[':pandaSite'] = pandaSite
stUp,resUp = taskBuffer.querySQLS(sqlUp,varMap)
else:
# append cloud
requestsInCloud.setdefault(cloud, {})
# append site
requestsInCloud[cloud].setdefault(pandaSite, [])
# append user
requestsInCloud[cloud][pandaSite].append(userName)
# send requests to the cloud responsible
for cloud in requestsInCloud:
requestsMap = requestsInCloud[cloud]
_logger.debug("requests for approval : cloud=%s" % cloud)
# send
if cloud in contactAddr and contactAddr[cloud] not in ['',None,'None']:
# get site contact
for pandaSite in requestsMap:
userNames = requestsMap[pandaSite]
if pandaSite not in siteContactAddr:
varMap = {}
varMap[':siteid'] = pandaSite
sqlSite = "SELECT email FROM ATLAS_PANDAMETA.schedconfig WHERE siteid=:siteid AND rownum<=1"
status,res = taskBuffer.querySQLS(sqlSite,varMap)
siteContactAddr[pandaSite] = res[0][0]
# append
if siteContactAddr[pandaSite] not in ['',None,'None']:
contactAddr[cloud] += ',%s' % siteContactAddr[pandaSite]
else:
_logger.error("contact email address is unavailable for %s" % cloud)
except Exception:
type, value, traceBack = sys.exc_info()
_logger.error("Failed with %s %s" % (type,value))
_logger.debug("Site Access : done")
# finalize failed jobs
_logger.debug("AnalFinalizer session")
try:
# get min PandaID for failed jobs in Active table
sql = "SELECT MIN(PandaID),prodUserName,jobDefinitionID,jediTaskID,computingSite FROM ATLAS_PANDA.jobsActive4 "
sql += "WHERE prodSourceLabel=:prodSourceLabel AND jobStatus=:jobStatus "
sql += "GROUP BY prodUserName,jobDefinitionID,jediTaskID,computingSite "
varMap = {}
varMap[':jobStatus'] = 'failed'
varMap[':prodSourceLabel'] = 'user'
status,res = taskBuffer.querySQLS(sql,varMap)
if res is not None:
# loop over all user/jobdefID
for pandaID,prodUserName,jobDefinitionID,jediTaskID,computingSite in res:
# check
_logger.debug("check finalization for %s task=%s jobdefID=%s site=%s" % (prodUserName,jediTaskID,
jobDefinitionID,
computingSite))
sqlC = "SELECT COUNT(*) FROM ("
sqlC += "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 "
sqlC += "WHERE prodSourceLabel=:prodSourceLabel AND prodUserName=:prodUserName "
sqlC += "AND jediTaskID=:jediTaskID "
sqlC += "AND computingSite=:computingSite "
sqlC += "AND jobDefinitionID=:jobDefinitionID "
sqlC += "AND NOT jobStatus IN (:jobStatus1,:jobStatus2) "
sqlC += "UNION "
sqlC += "SELECT PandaID FROM ATLAS_PANDA.jobsDefined4 "
sqlC += "WHERE prodSourceLabel=:prodSourceLabel AND prodUserName=:prodUserName "
sqlC += "AND jediTaskID=:jediTaskID "
sqlC += "AND computingSite=:computingSite "
sqlC += "AND jobDefinitionID=:jobDefinitionID "
sqlC += "AND NOT jobStatus IN (:jobStatus1,:jobStatus2) "
sqlC += ") "
varMap = {}
varMap[':jobStatus1'] = 'failed'
varMap[':jobStatus2'] = 'merging'
varMap[':prodSourceLabel'] = 'user'
varMap[':jediTaskID'] = jediTaskID
varMap[':computingSite'] = computingSite
varMap[':prodUserName'] = prodUserName
varMap[':jobDefinitionID'] = jobDefinitionID
statC,resC = taskBuffer.querySQLS(sqlC,varMap)
# finalize if there is no non-failed jobs
if resC is not None:
_logger.debug("n of non-failed jobs : %s" % resC[0][0])
if resC[0][0] == 0:
jobSpecs = taskBuffer.peekJobs([pandaID],fromDefined=False,fromArchived=False,fromWaiting=False)
jobSpec = jobSpecs[0]
if jobSpec is None:
_logger.debug("skip PandaID={0} not found in jobsActive".format(pandaID))
continue
_logger.debug("finalize %s %s" % (prodUserName,jobDefinitionID))
finalizedFlag = taskBuffer.finalizePendingJobs(prodUserName,jobDefinitionID)
_logger.debug("finalized with %s" % finalizedFlag)
if finalizedFlag and jobSpec.produceUnMerge():
# collect sub datasets
subDsNames = set()
subDsList = []
for tmpFileSpec in jobSpec.Files:
if tmpFileSpec.type in ['log','output'] and \
re.search('_sub\d+$',tmpFileSpec.destinationDBlock) is not None:
if tmpFileSpec.destinationDBlock in subDsNames:
continue
subDsNames.add(tmpFileSpec.destinationDBlock)
datasetSpec = taskBuffer.queryDatasetWithMap({'name':tmpFileSpec.destinationDBlock})
subDsList.append(datasetSpec)
_logger.debug("update unmerged datasets")
taskBuffer.updateUnmergedDatasets(jobSpec,subDsList)
else:
_logger.debug("n of non-failed jobs : None")
except Exception:
errType,errValue = sys.exc_info()[:2]
_logger.error("AnalFinalizer failed with %s %s" % (errType,errValue))
# finalize failed jobs
_logger.debug("check stuck mergeing jobs")
try:
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=2)
# get PandaIDs
varMap = {}
varMap[':prodSourceLabel'] = 'managed'
varMap[':jobStatus'] = 'merging'
varMap[':timeLimit'] = timeLimit
sql = "SELECT distinct jediTaskID FROM ATLAS_PANDA.jobsActive4 "
sql += "WHERE prodSourceLabel=:prodSourceLabel AND jobStatus=:jobStatus and modificationTime<:timeLimit "
tmp,res = taskBuffer.querySQLS(sql,varMap)
checkedDS = set()
for jediTaskID, in res:
varMap = {}
varMap[':jediTaskID'] = jediTaskID
varMap[':dsType'] = 'trn_log'
sql = "SELECT datasetID FROM ATLAS_PANDA.JEDI_Datasets WHERE jediTaskID=:jediTaskID AND type=:dsType AND nFilesUsed=nFilesTobeUsed "
tmpP,resD = taskBuffer.querySQLS(sql,varMap)
for datasetID, in resD:
varMap = {}
varMap[':jediTaskID'] = jediTaskID
varMap[':fileStatus'] = 'ready'
varMap[':datasetID'] = datasetID
sql = "SELECT PandaID FROM ATLAS_PANDA.JEDI_Dataset_Contents "
sql += "WHERE jediTaskID=:jediTaskID AND datasetid=:datasetID AND status=:fileStatus AND PandaID=OutPandaID AND rownum<=1 "
tmpP,resP = taskBuffer.querySQLS(sql,varMap)
if resP == []:
continue
PandaID = resP[0][0]
varMap = {}
varMap[':PandaID'] = PandaID
varMap[':fileType'] = 'log'
sql = "SELECT d.status FROM ATLAS_PANDA.filesTable4 f,ATLAS_PANDA.datasets d WHERE PandaID=:PandaID AND f.type=:fileType AND d.name=f.destinationDBlock "
tmpS,resS = taskBuffer.querySQLS(sql,varMap)
if resS is not None:
subStatus, = resS[0]
if subStatus in ['completed']:
jobSpecs = taskBuffer.peekJobs([PandaID],fromDefined=False,fromArchived=False,fromWaiting=False)
jobSpec = jobSpecs[0]
subDsNames = set()
subDsList = []
for tmpFileSpec in jobSpec.Files:
if tmpFileSpec.type in ['log','output'] and \
re.search('_sub\d+$',tmpFileSpec.destinationDBlock) is not None:
if tmpFileSpec.destinationDBlock in subDsNames:
continue
subDsNames.add(tmpFileSpec.destinationDBlock)
datasetSpec = taskBuffer.queryDatasetWithMap({'name':tmpFileSpec.destinationDBlock})
subDsList.append(datasetSpec)
_logger.debug("update unmerged datasets for jediTaskID={0} PandaID={1}".format(jediTaskID,PandaID))
taskBuffer.updateUnmergedDatasets(jobSpec,subDsList,updateCompleted=True)
except Exception:
errType,errValue = sys.exc_info()[:2]
_logger.error("check for stuck merging jobs failed with %s %s" % (errType,errValue))
# get sites to skip various timeout
varMap = {}
varMap[':status'] = 'paused'
sql = "SELECT siteid FROM ATLAS_PANDAMETA.schedconfig WHERE status=:status "
sitesToSkipTO = set()
status,res = taskBuffer.querySQLS(sql,varMap)
for siteid, in res:
sitesToSkipTO.add(siteid)
_logger.debug("PQs to skip timeout : {0}".format(','.join(sitesToSkipTO)))
sitesToDisableReassign = set()
# get sites to disable reassign
for siteName in siteMapper.siteSpecList:
siteSpec = siteMapper.siteSpecList[siteName]
if siteSpec.capability == 'ucore' and not siteSpec.is_unified:
continue
if siteSpec.disable_reassign():
sitesToDisableReassign.add(siteName)
_logger.debug("PQs to disable reassign : {0}".format(','.join(sitesToDisableReassign)))
_memoryCheck("watcher")
_logger.debug("Watcher session")
# get the list of workflows
sql = "SELECT DISTINCT workflow FROM ATLAS_PANDAMETA.schedconfig WHERE status='online' "
status, res = taskBuffer.querySQLS(sql, {})
workflow_timeout_map = {}
for workflow, in res + [('production',), ('analysis',)]:
timeout = taskBuffer.getConfigValue('watcher', 'HEARTBEAT_TIMEOUT_{0}'.format(workflow), 'pandaserver', 'atlas')
if timeout is not None:
workflow_timeout_map[workflow] = timeout
elif workflow in ['production', 'analysis']:
workflow_timeout_map[workflow] = 2
workflows = list(workflow_timeout_map)
_logger.debug("timeout : {0}".format(str(workflow_timeout_map)))
# check heartbeat for analysis jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=workflow_timeout_map['analysis'])
varMap = {}
varMap[':modificationTime'] = timeLimit
varMap[':prodSourceLabel1'] = 'panda'
varMap[':prodSourceLabel2'] = 'user'
varMap[':jobStatus1'] = 'running'
varMap[':jobStatus2'] = 'starting'
varMap[':jobStatus3'] = 'stagein'
varMap[':jobStatus4'] = 'stageout'
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE (prodSourceLabel=:prodSourceLabel1 OR prodSourceLabel=:prodSourceLabel2) "
sql += "AND jobStatus IN (:jobStatus1,:jobStatus2,:jobStatus3,:jobStatus4) AND modificationTime<:modificationTime"
status,res = taskBuffer.querySQLS(sql,varMap)
if res is None:
_logger.debug("# of Anal Watcher : %s" % res)
else:
_logger.debug("# of Anal Watcher : %s" % len(res))
for (id,) in res:
_logger.debug("Anal Watcher %s" % id)
thr = Watcher(taskBuffer,id,single=True,sleepTime=60,sitemapper=siteMapper)
thr.start()
thr.join()
# check heartbeat for analysis jobs in transferring
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=workflow_timeout_map['analysis'])
varMap = {}
varMap[':modificationTime'] = timeLimit
varMap[':prodSourceLabel1'] = 'panda'
varMap[':prodSourceLabel2'] = 'user'
varMap[':jobStatus1'] = 'transferring'
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE prodSourceLabel IN (:prodSourceLabel1,:prodSourceLabel2) "
sql += "AND jobStatus=:jobStatus1 AND modificationTime<:modificationTime"
status,res = taskBuffer.querySQLS(sql,varMap)
if res is None:
_logger.debug("# of Transferring Anal Watcher : %s" % res)
else:
_logger.debug("# of Transferring Anal Watcher : %s" % len(res))
for (id,) in res:
_logger.debug("Trans Anal Watcher %s" % id)
thr = Watcher(taskBuffer,id,single=True,sleepTime=60,sitemapper=siteMapper)
thr.start()
thr.join()
# check heartbeat for sent jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(minutes=30)
varMap = {}
varMap[':jobStatus'] = 'sent'
varMap[':modificationTime'] = timeLimit
status,res = taskBuffer.querySQLS("SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE jobStatus=:jobStatus AND modificationTime<:modificationTime",
varMap)
if res is None:
_logger.debug("# of Sent Watcher : %s" % res)
else:
_logger.debug("# of Sent Watcher : %s" % len(res))
for (id,) in res:
_logger.debug("Sent Watcher %s" % id)
thr = Watcher(taskBuffer,id,single=True,sleepTime=30,sitemapper=siteMapper)
thr.start()
thr.join()
# check heartbeat for 'holding' analysis/ddm jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=3)
# get XMLs
xmlIDs = set()
# xmlFiles = os.listdir(panda_config.logdir)
# for file in xmlFiles:
# match = re.search('^(\d+)_([^_]+)_.{36}$',file)
# if match is not None:
# id = match.group(1)
# xmlIDs.append(int(id))
job_output_report_list = taskBuffer.listJobOutputReport()
if job_output_report_list is not None:
for panda_id, job_status, attempt_nr, time_stamp in job_output_report_list:
xmlIDs.add(int(panda_id))
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE jobStatus=:jobStatus AND (modificationTime<:modificationTime OR (endTime IS NOT NULL AND endTime<:endTime)) AND (prodSourceLabel=:prodSourceLabel1 OR prodSourceLabel=:prodSourceLabel2 OR prodSourceLabel=:prodSourceLabel3) AND stateChangeTime != modificationTime"
varMap = {}
varMap[':modificationTime'] = timeLimit
varMap[':endTime'] = timeLimit
varMap[':jobStatus'] = 'holding'
varMap[':prodSourceLabel1'] = 'panda'
varMap[':prodSourceLabel2'] = 'user'
varMap[':prodSourceLabel3'] = 'ddm'
status,res = taskBuffer.querySQLS(sql,varMap)
if res is None:
_logger.debug("# of Holding Anal/DDM Watcher : %s" % res)
else:
_logger.debug("# of Holding Anal/DDM Watcher : %s - XMLs : %s" % (len(res),len(xmlIDs)))
for (id,) in res:
_logger.debug("Holding Anal/DDM Watcher %s" % id)
if int(id) in xmlIDs:
_logger.debug(" found XML -> skip %s" % id)
continue
thr = Watcher(taskBuffer,id,single=True,sleepTime=180,sitemapper=siteMapper)
thr.start()
thr.join()
# check heartbeat for high prio production jobs
timeOutVal = 3
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=timeOutVal)
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE jobStatus=:jobStatus AND currentPriority>:pLimit "
sql += "AND (modificationTime<:modificationTime OR (endTime IS NOT NULL AND endTime<:endTime))"
varMap = {}
varMap[':modificationTime'] = timeLimit
varMap[':endTime'] = timeLimit
varMap[':jobStatus'] = 'holding'
varMap[':pLimit'] = 800
status,res = taskBuffer.querySQLS(sql,varMap)
if res is None:
_logger.debug("# of High prio Holding Watcher : %s" % res)
else:
_logger.debug("# of High prio Holding Watcher : %s" % len(res))
for (id,) in res:
_logger.debug("High prio Holding Watcher %s" % id)
thr = Watcher(taskBuffer,id,single=True,sleepTime=60*timeOutVal,sitemapper=siteMapper)
thr.start()
thr.join()
# check heartbeat for production jobs
timeOutVal = 48
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=timeOutVal)
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE jobStatus=:jobStatus AND (modificationTime<:modificationTime OR (endTime IS NOT NULL AND endTime<:endTime))"
varMap = {}
varMap[':modificationTime'] = timeLimit
varMap[':endTime'] = timeLimit
varMap[':jobStatus'] = 'holding'
status,res = taskBuffer.querySQLS(sql,varMap)
if res is None:
_logger.debug("# of Holding Watcher : %s" % res)
else:
_logger.debug("# of Holding Watcher : %s" % len(res))
for (id,) in res:
_logger.debug("Holding Watcher %s" % id)
thr = Watcher(taskBuffer,id,single=True,sleepTime=60*timeOutVal,sitemapper=siteMapper)
thr.start()
thr.join()
# check heartbeat for production jobs with internal stage-out
sql = "SELECT PandaID,jobStatus,jobSubStatus FROM ATLAS_PANDA.jobsActive4 j,ATLAS_PANDAMETA.schedconfig s "
sql += "WHERE j.computingSite=s.siteid AND jobStatus=:jobStatus1 AND jobSubStatus IS NOT NULL AND modificationTime<:modificationTime "
for workflow in workflows:
if workflow == 'analysis':
continue
varMap = {}
varMap[':modificationTime'] = timeLimit
varMap[':jobStatus1'] = 'transferring'
sqlX = sql
if workflow == 'production':
if len(workflows) > 2:
sqlX += "AND (s.workflow IS NULL OR s.workflow NOT IN ("
for ng_workflow in workflows:
if ng_workflow in ['production', 'analysis']:
continue
tmp_key = ':w_{0}'.format(ng_workflow)
varMap[tmp_key] = ng_workflow
sqlX += '{0},'.format(tmp_key)
sqlX = sqlX[:-1]
sqlX += ")) "
else:
tmp_key = ':w_{0}'.format(workflow)
sqlX += "AND s.workflow={0} ".format(tmp_key)
varMap[tmp_key] = workflow
timeOutVal = workflow_timeout_map[workflow]
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=timeOutVal)
varMap[':modificationTime'] = timeLimit
status,res = taskBuffer.querySQLS(sqlX, varMap)
if res is None:
_logger.debug("# of Internal Staging Watcher with workflow={0}: {1}".format(workflow, res))
else:
_logger.debug("# of Internal Staging Watcher with workflow={0}: {1}".format(workflow, len(res)))
for pandaID, jobStatus, jobSubStatus in res:
_logger.debug("Internal Staging Watcher %s %s:%s" % (pandaID, jobStatus, jobSubStatus))
thr = Watcher(taskBuffer,pandaID,single=True,sleepTime=60*timeOutVal,sitemapper=siteMapper)
thr.start()
thr.join()
# check heartbeat for production jobs
sql = "SELECT PandaID,jobStatus,j.computingSite FROM ATLAS_PANDA.jobsActive4 j, ATLAS_PANDAMETA.schedconfig s "
sql += "WHERE j.computingSite=s.siteid AND jobStatus IN (:jobStatus1,:jobStatus2,:jobStatus3,:jobStatus4) AND modificationTime<:modificationTime "
for workflow in workflows:
if workflow == 'analysis':
continue
varMap = {}
varMap[':jobStatus1'] = 'running'
varMap[':jobStatus2'] = 'starting'
varMap[':jobStatus3'] = 'stagein'
varMap[':jobStatus4'] = 'stageout'
sqlX = sql
if workflow == 'production':
if len(workflows) > 2:
sqlX += "AND (s.workflow IS NULL OR s.workflow NOT IN ("
for ng_workflow in workflows:
if ng_workflow in ['production', 'analysis']:
continue
tmp_key = ':w_{0}'.format(ng_workflow)
varMap[tmp_key] = ng_workflow
sqlX += '{0},'.format(tmp_key)
sqlX = sqlX[:-1]
sqlX += ")) "
else:
tmp_key = ':w_{0}'.format(workflow)
sqlX += "AND s.workflow={0} ".format(tmp_key)
varMap[tmp_key] = workflow
timeOutVal = workflow_timeout_map[workflow]
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=timeOutVal)
varMap[':modificationTime'] = timeLimit
status,res = taskBuffer.querySQLS(sqlX, varMap)
if res is None:
_logger.debug("# of General Watcher with workflow={0}: {1}".format(workflow, res))
else:
_logger.debug("# of General Watcher with workflow={0}: {1}".format(workflow, len(res)))
for pandaID,jobStatus,computingSite in res:
if computingSite in sitesToSkipTO:
_logger.debug("skip General Watcher for PandaID={0} at {1} since timeout is disabled for {2}".format(pandaID,computingSite,jobStatus))
continue
_logger.debug("General Watcher %s" % pandaID)
thr = Watcher(taskBuffer,pandaID,single=True,sleepTime=60*timeOutVal,sitemapper=siteMapper)
thr.start()
thr.join()
_memoryCheck("reassign")
# kill long-waiting jobs in defined table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=7)
status,res = taskBuffer.querySQLS("SELECT PandaID,cloud,prodSourceLabel FROM ATLAS_PANDA.jobsDefined4 WHERE creationTime<:creationTime",
{':creationTime':timeLimit})
jobs=[]
dashFileMap = {}
if res is not None:
for pandaID,cloud,prodSourceLabel in res:
# collect PandaIDs
jobs.append(pandaID)
if len(jobs):
_logger.debug("killJobs for Defined (%s)" % str(jobs))
Client.killJobs(jobs,2)
# kill long-waiting jobs in active table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=7)
varMap = {}
varMap[':jobStatus'] = 'activated'
varMap[':creationTime'] = timeLimit
status,res = taskBuffer.querySQLS("SELECT PandaID from ATLAS_PANDA.jobsActive4 WHERE jobStatus=:jobStatus AND creationTime<:creationTime",
varMap)
jobs=[]
if res is not None:
for (id,) in res:
jobs.append(id)
if len(jobs):
_logger.debug("killJobs for Active (%s)" % str(jobs))
Client.killJobs(jobs,2)
# kill long-waiting ddm jobs for dispatch
_logger.debug("kill PandaMovers")
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=12)
sql = "SELECT PandaID from ATLAS_PANDA.jobsActive4 WHERE prodSourceLabel=:prodSourceLabel AND transferType=:transferType AND creationTime<:creationTime"
varMap = {}
varMap[':creationTime'] = timeLimit
varMap[':prodSourceLabel'] = 'ddm'
varMap[':transferType'] = 'dis'
_logger.debug(sql+str(varMap))
status,res = taskBuffer.querySQLS(sql,varMap)
_logger.debug(res)
jobs=[]
if res is not None:
for (id,) in res:
jobs.append(id)
if len(jobs):
_logger.debug("kill DDM Jobs (%s)" % str(jobs))
Client.killJobs(jobs,2)
# reassign activated jobs in inactive sites
inactiveTimeLimitSite = 2
inactiveTimeLimitJob = 4
inactivePrioLimit = 800
timeLimitSite = datetime.datetime.utcnow() - datetime.timedelta(hours=inactiveTimeLimitSite)
timeLimitJob = datetime.datetime.utcnow() - datetime.timedelta(hours=inactiveTimeLimitJob)
# get PandaIDs
sql = 'SELECT distinct computingSite FROM ATLAS_PANDA.jobsActive4 '
sql += 'WHERE prodSourceLabel=:prodSourceLabel '
sql += 'AND ((modificationTime<:timeLimit AND jobStatus=:jobStatus1) '
sql += 'OR (stateChangeTime<:timeLimit AND jobStatus=:jobStatus2)) '
sql += 'AND lockedby=:lockedby AND currentPriority>=:prioLimit '
sql += 'AND NOT processingType IN (:pType1) AND relocationFlag<>:rFlag1 '
varMap = {}
varMap[':prodSourceLabel'] = 'managed'
varMap[':jobStatus1'] = 'activated'
varMap[':jobStatus2'] = 'starting'
varMap[':lockedby'] = 'jedi'
varMap[':timeLimit'] = timeLimitJob
varMap[':prioLimit'] = inactivePrioLimit
varMap[':pType1'] = 'pmerge'
varMap[':rFlag1'] = 2
stDS,resDS = taskBuffer.querySQLS(sql,varMap)
sqlSS = 'SELECT laststart FROM ATLAS_PANDAMETA.siteData '
sqlSS += 'WHERE site=:site AND flag=:flag AND hours=:hours AND laststart<:laststart '
sqlPI = 'SELECT PandaID,eventService,attemptNr FROM ATLAS_PANDA.jobsActive4 '
sqlPI += 'WHERE prodSourceLabel=:prodSourceLabel AND jobStatus IN (:jobStatus1,:jobStatus2) '
sqlPI += 'AND (modificationTime<:timeLimit OR stateChangeTime<:timeLimit) '
sqlPI += 'AND lockedby=:lockedby AND currentPriority>=:prioLimit '
sqlPI += 'AND computingSite=:site AND NOT processingType IN (:pType1) AND relocationFlag<>:rFlag1 '
for tmpSite, in resDS:
if tmpSite in sitesToDisableReassign:
_logger.debug('skip reassignJobs at inactive site %s since reassign is disabled' % (tmpSite))
continue
# check if the site is inactive
varMap = {}
varMap[':site'] = tmpSite
varMap[':flag'] = 'production'
varMap[':hours'] = 3
varMap[':laststart'] = timeLimitSite
stSS,resSS = taskBuffer.querySQLS(sqlSS,varMap)
if stSS is not None and len(resSS) > 0:
# get jobs
varMap = {}
varMap[':prodSourceLabel'] = 'managed'
varMap[':jobStatus1'] = 'activated'
varMap[':jobStatus2'] = 'starting'
varMap[':lockedby'] = 'jedi'
varMap[':timeLimit'] = timeLimitJob
varMap[':prioLimit'] = inactivePrioLimit
varMap[':site'] = tmpSite
varMap[':pType1'] = 'pmerge'
varMap[':rFlag1'] = 2
stPI,resPI = taskBuffer.querySQLS(sqlPI,varMap)
jediJobs = []
# reassign
_logger.debug('reassignJobs for JEDI at inactive site %s laststart=%s' % (tmpSite,resSS[0][0]))
if resPI is not None:
for pandaID, eventService, attemptNr in resPI:
if eventService in [EventServiceUtils.esMergeJobFlagNumber]:
_logger.debug('retrying es merge %s at inactive site %s' % (pandaID,tmpSite))
taskBuffer.retryJob(pandaID,{},getNewPandaID=True,attemptNr=attemptNr,
recoverableEsMerge=True)
jediJobs.append(pandaID)
if len(jediJobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jediJobs):
_logger.debug('reassignJobs for JEDI at inactive site %s (%s)' % (tmpSite,jediJobs[iJob:iJob+nJob]))
Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
iJob += nJob
# reassign defined jobs in defined table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=4)
# get PandaIDs
status,res = taskBuffer.lockJobsForReassign("ATLAS_PANDA.jobsDefined4",timeLimit,['defined'],['managed'],[],[],[],
True)
jobs = []
jediJobs = []
if res is not None:
for (id,lockedby) in res:
if lockedby == 'jedi':
jediJobs.append(id)
else:
jobs.append(id)
# reassign
_logger.debug('reassignJobs for defined jobs -> #%s' % len(jobs))
if len(jobs) > 0:
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug('reassignJobs for defined jobs (%s)' % jobs[iJob:iJob+nJob])
taskBuffer.reassignJobs(jobs[iJob:iJob+nJob],joinThr=True)
iJob += nJob
_logger.debug('reassignJobs for JEDI defined jobs -> #%s' % len(jediJobs))
if len(jediJobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jediJobs):
_logger.debug('reassignJobs for JEDI defined jobs (%s)' % jediJobs[iJob:iJob+nJob])
Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
iJob += nJob
# reassign long-waiting jobs in defined table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=12)
status,res = taskBuffer.lockJobsForReassign("ATLAS_PANDA.jobsDefined4",timeLimit,[],['managed'],[],[],[],
True)
jobs = []
jediJobs = []
if res is not None:
for (id,lockedby) in res:
if lockedby == 'jedi':
jediJobs.append(id)
else:
jobs.append(id)
# reassign
_logger.debug('reassignJobs for long in defined table -> #%s' % len(jobs))
if len(jobs) > 0:
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug('reassignJobs for long in defined table (%s)' % jobs[iJob:iJob+nJob])
taskBuffer.reassignJobs(jobs[iJob:iJob+nJob],joinThr=True)
iJob += nJob
_logger.debug('reassignJobs for long JEDI in defined table -> #%s' % len(jediJobs))
if len(jediJobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jediJobs):
_logger.debug('reassignJobs for long JEDI in defined table (%s)' % jediJobs[iJob:iJob+nJob])
Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
iJob += nJob
# reassign too long-standing evgen/simul jobs with active state at T1
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=6)
for tmpCloud in siteMapper.getCloudList():
# ignore special clouds
if tmpCloud in ['CERN','OSG']:
continue
status,res = taskBuffer.lockJobsForReassign("ATLAS_PANDA.jobsActive4",timeLimit,['activated'],['managed'],
['evgen','simul'],[siteMapper.getCloud(tmpCloud)['tier1']],[],
True,onlyReassignable=True)
jobs = []
jediJobs = []
if res is not None:
for (id,lockedby) in res:
if lockedby == 'jedi':
jediJobs.append(id)
else:
jobs.append(id)
_logger.debug('reassignJobs for Active T1 evgensimul in %s -> #%s' % (tmpCloud,len(jobs)))
if len(jobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug('reassignJobs for Active T1 evgensimul (%s)' % jobs[iJob:iJob+nJob])
taskBuffer.reassignJobs(jobs[iJob:iJob+nJob],joinThr=True)
iJob += nJob
_logger.debug('reassignJobs for Active T1 JEDI evgensimul in %s -> #%s' % (tmpCloud,len(jediJobs)))
if len(jediJobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jediJobs):
_logger.debug('reassignJobs for Active T1 JEDI evgensimul (%s)' % jediJobs[iJob:iJob+nJob])
Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
iJob += nJob
# reassign too long-standing evgen/simul jobs with active state at T2
try:
_logger.debug('looking for stuck T2s to reassign evgensimul')
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=6)
varMap = {}
varMap[':jobStatus1'] = 'activated'
varMap[':jobStatus2'] = 'running'
varMap[':prodSourceLabel'] = 'managed'
varMap[':processingType1'] = 'evgen'
varMap[':processingType2'] = 'simul'
sql = "SELECT cloud,computingSite,jobStatus,COUNT(*) FROM ATLAS_PANDA.jobsActive4 "\
"WHERE jobStatus IN (:jobStatus1,:jobStatus2) AND prodSourceLabel=:prodSourceLabel "\
"AND processingType IN (:processingType1,:processingType2) GROUP BY cloud,computingSite,jobStatus "
status,res = taskBuffer.querySQLS(sql, varMap)
if res is not None:
# get ratio of activated/running
siteStatData = {}
for tmpCloud,tmpComputingSite,tmpJobStatus,tmpCount in res:
# skip T1
if tmpComputingSite == siteMapper.getCloud(tmpCloud)['tier1']:
continue
# skip if reassign is disabled
if tmpComputingSite in sitesToDisableReassign:
continue
# add cloud/site
tmpKey = (tmpCloud,tmpComputingSite)
if tmpKey not in siteStatData:
siteStatData[tmpKey] = {'activated':0,'running':0}
# add the number of jobs
if tmpJobStatus in siteStatData[tmpKey]:
siteStatData[tmpKey][tmpJobStatus] += tmpCount
# look for stuck site
stuckThr = 10
stuckSites = []
for tmpKey in siteStatData:
tmpStatData = siteStatData[tmpKey]
if tmpStatData['running'] == 0 or \
float(tmpStatData['activated'])/float(tmpStatData['running']) > stuckThr:
tmpCloud,tmpComputingSite = tmpKey
_logger.debug(' %s:%s %s/%s > %s' % (tmpCloud,tmpComputingSite,tmpStatData['activated'],tmpStatData['running'],stuckThr))
# get stuck jobs
status,res = taskBuffer.lockJobsForReassign("ATLAS_PANDA.jobsActive4",timeLimit,['activated'],['managed'],
['evgen','simul'],[tmpComputingSite],[tmpCloud],True,
onlyReassignable=True)
jobs = []
jediJobs = []
if res is not None:
for (id,lockedby) in res:
if lockedby == 'jedi':
jediJobs.append(id)
else:
jobs.append(id)
_logger.debug('reassignJobs for Active T2 evgensimul %s:%s -> #%s' % (tmpCloud,tmpComputingSite,len(jobs)))
if len(jobs) > 0:
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug('reassignJobs for Active T2 evgensimul (%s)' % jobs[iJob:iJob+nJob])
taskBuffer.reassignJobs(jobs[iJob:iJob+nJob],joinThr=True)
iJob += nJob
_logger.debug('reassignJobs for Active T2 JEDI evgensimul %s:%s -> #%s' % (tmpCloud,tmpComputingSite,len(jediJobs)))
if len(jediJobs) > 0:
nJob = 100
iJob = 0
while iJob < len(jediJobs):
_logger.debug('reassignJobs for Active T2 JEDI evgensimul (%s)' % jediJobs[iJob:iJob+nJob])
Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
iJob += nJob
except Exception:
errType,errValue = sys.exc_info()[:2]
_logger.error("failed to reassign T2 evgensimul with %s:%s" % (errType,errValue))
# reassign too long activated jobs in active table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=2)
status,res = taskBuffer.lockJobsForReassign("ATLAS_PANDA.jobsActive4",timeLimit,['activated'],['managed'],[],[],[],True,
onlyReassignable=True,getEventService=True)
jobs = []
jediJobs = []
if res is not None:
for pandaID, lockedby, eventService, attemptNr, computingSite in res:
if computingSite in sitesToDisableReassign:
_logger.debug('skip reassignJobs for long activated PandaID={0} since disabled at {1}'.format(pandaID,computingSite))
continue
if lockedby == 'jedi':
if eventService in [EventServiceUtils.esMergeJobFlagNumber]:
                    _logger.debug('retrying {0} in long activated'.format(pandaID))
taskBuffer.retryJob(pandaID,{},getNewPandaID=True,attemptNr=attemptNr,
recoverableEsMerge=True)
jediJobs.append(pandaID)
else:
jobs.append(pandaID)
_logger.debug('reassignJobs for long activated in active table -> #%s' % len(jobs))
if len(jobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug('reassignJobs for long activated in active table (%s)' % jobs[iJob:iJob+nJob])
taskBuffer.reassignJobs(jobs[iJob:iJob+nJob],joinThr=True)
iJob += nJob
_logger.debug('reassignJobs for long activated JEDI in active table -> #%s' % len(jediJobs))
if len(jediJobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jediJobs):
_logger.debug('reassignJobs for long activated JEDI in active table (%s)' % jediJobs[iJob:iJob+nJob])
Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
iJob += nJob
# reassign too long starting jobs in active table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=48)
status,res = taskBuffer.lockJobsForReassign("ATLAS_PANDA.jobsActive4",timeLimit,['starting'],['managed'],[],[],[],True,
onlyReassignable=True,useStateChangeTime=True,getEventService=True)
jobs = []
jediJobs = []
if res is not None:
for pandaID, lockedby, eventService, attemptNr, computingSite in res:
if computingSite in sitesToDisableReassign:
_logger.debug('skip reassignJobs for long starting PandaID={0} since disabled at {1}'.format(pandaID,computingSite))
continue
if lockedby == 'jedi':
jediJobs.append(pandaID)
else:
jobs.append(pandaID)
_logger.debug('reassignJobs for long starting in active table -> #%s' % len(jobs))
if len(jobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug('reassignJobs for long starting in active table (%s)' % jobs[iJob:iJob+nJob])
taskBuffer.reassignJobs(jobs[iJob:iJob+nJob],joinThr=True)
iJob += nJob
_logger.debug('reassignJobs for long starting JEDI in active table -> #%s' % len(jediJobs))
if len(jediJobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jediJobs):
            _logger.debug('reassignJobs for long starting JEDI in active table (%s)' % jediJobs[iJob:iJob+nJob])
Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
iJob += nJob
# kill too long-standing analysis jobs in active table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=7)
varMap = {}
varMap[':prodSourceLabel1'] = 'test'
varMap[':prodSourceLabel2'] = 'panda'
varMap[':prodSourceLabel3'] = 'user'
varMap[':modificationTime'] = timeLimit
status,res = taskBuffer.querySQLS("SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE (prodSourceLabel=:prodSourceLabel1 OR prodSourceLabel=:prodSourceLabel2 OR prodSourceLabel=:prodSourceLabel3) AND modificationTime<:modificationTime ORDER BY PandaID",
varMap)
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
Client.killJobs(jobs,2)
_logger.debug("killJobs for Anal Active (%s)" % str(jobs))
# kill too long pending jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=1)
varMap = {}
varMap[':jobStatus'] = 'pending'
varMap[':creationTime'] = timeLimit
status,res = taskBuffer.querySQLS("SELECT PandaID FROM ATLAS_PANDA.jobsWaiting4 WHERE jobStatus=:jobStatus AND creationTime<:creationTime",
varMap)
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
    if len(jobs):
        nJob = 100
        iJob = 0
        while iJob < len(jobs):
            _logger.debug("killJobs for Pending (%s)" % str(jobs[iJob:iJob+nJob]))
            Client.killJobs(jobs[iJob:iJob+nJob],4)
            iJob += nJob
# kick waiting ES merge jobs which were generated from fake co-jumbo
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(minutes=10)
varMap = {}
varMap[':jobStatus'] = 'waiting'
varMap[':creationTime'] = timeLimit
varMap[':esMerge'] = EventServiceUtils.esMergeJobFlagNumber
sql = "SELECT PandaID,computingSite FROM ATLAS_PANDA.jobsWaiting4 WHERE jobStatus=:jobStatus AND creationTime<:creationTime "
sql += "AND eventService=:esMerge ORDER BY jediTaskID "
status,res = taskBuffer.querySQLS(sql, varMap)
jobsMap = {}
if res is not None:
for id,site in res:
if site not in jobsMap:
jobsMap[site] = []
jobsMap[site].append(id)
# kick
if len(jobsMap):
for site in jobsMap:
jobs = jobsMap[site]
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug("kick waiting ES merge (%s)" % str(jobs[iJob:iJob+nJob]))
Client.reassignJobs(jobs[iJob:iJob+nJob], )
iJob += nJob
# kill too long waiting jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=1)
varMap = {}
varMap[':jobStatus'] = 'waiting'
varMap[':creationTime'] = timeLimit
varMap[':coJumbo'] = EventServiceUtils.coJumboJobFlagNumber
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsWaiting4 WHERE jobStatus=:jobStatus AND creationTime<:creationTime "
sql += "AND (eventService IS NULL OR eventService<>:coJumbo) "
status,res = taskBuffer.querySQLS(sql, varMap)
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
    if len(jobs):
        nJob = 100
        iJob = 0
        while iJob < len(jobs):
            _logger.debug("killJobs for Waiting (%s)" % str(jobs[iJob:iJob+nJob]))
            Client.killJobs(jobs[iJob:iJob+nJob],4)
            iJob += nJob
# kill too long running ES jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=24)
varMap = {}
varMap[':jobStatus1'] = 'running'
varMap[':jobStatus2'] = 'starting'
varMap[':timeLimit'] = timeLimit
varMap[':esJob'] = EventServiceUtils.esJobFlagNumber
varMap[':coJumbo'] = EventServiceUtils.coJumboJobFlagNumber
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE jobStatus IN (:jobStatus1,:jobStatus2) AND stateChangeTime<:timeLimit "
sql += "AND eventService IN (:esJob,:coJumbo) AND currentPriority>=900 "
status,res = taskBuffer.querySQLS(sql, varMap)
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug("killJobs for long running ES jobs (%s)" % str(jobs[iJob:iJob+nJob]))
Client.killJobs(jobs[iJob:iJob+nJob], 2, keepUnmerged=True, jobSubStatus='es_toolong')
iJob += nJob
# kill too long running ES merge jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=24)
varMap = {}
varMap[':jobStatus1'] = 'running'
varMap[':jobStatus2'] = 'starting'
varMap[':timeLimit'] = timeLimit
varMap[':esMergeJob'] = EventServiceUtils.esMergeJobFlagNumber
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE jobStatus IN (:jobStatus1,:jobStatus2) AND stateChangeTime<:timeLimit "
sql += "AND eventService=:esMergeJob "
status,res = taskBuffer.querySQLS(sql, varMap)
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug("killJobs for long running ES merge jobs (%s)" % str(jobs[iJob:iJob+nJob]))
Client.killJobs(jobs[iJob:iJob+nJob], 2)
iJob += nJob
# kill too long waiting jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=7)
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsWaiting4 WHERE ((creationTime<:timeLimit AND (eventService IS NULL OR eventService<>:coJumbo)) "
sql += "OR modificationTime<:timeLimit) "
varMap = {}
varMap[':timeLimit'] = timeLimit
varMap[':coJumbo'] = EventServiceUtils.coJumboJobFlagNumber
status,res = taskBuffer.querySQLS(sql, varMap)
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
Client.killJobs(jobs,4)
_logger.debug("killJobs in jobsWaiting (%s)" % str(jobs))
# rebrokerage
_logger.debug("Rebrokerage start")
# get timeout value
timeoutVal = taskBuffer.getConfigValue('rebroker','ANALY_TIMEOUT')
if timeoutVal is None:
timeoutVal = 12
_logger.debug("timeout value : {0}h".format(timeoutVal))
try:
normalTimeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=timeoutVal)
sortTimeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=3)
sql = "WITH p AS ("\
"SELECT MIN(PandaID) PandaID,jobDefinitionID,prodUserName,prodUserID,computingSite,jediTaskID,processingType "\
"FROM ATLAS_PANDA.jobsActive4 "\
"WHERE prodSourceLabel IN (:prodSourceLabel1,:prodSourceLabel2) "\
"AND jobStatus IN (:jobStatus1,:jobStatus2,:jobStatus3) "\
"AND jobsetID IS NOT NULL AND lockedBy=:lockedBy "\
"GROUP BY jobDefinitionID,prodUserName,prodUserID,computingSite,jediTaskID,processingType "\
") "\
"SELECT /*+ INDEX (s JOBS_STATUSLOG_PANDAID_IDX) */ "\
"p.jobDefinitionID,p.prodUserName,p.prodUserID,p.computingSite,s.modificationTime,p.jediTaskID,p.processingType " \
"FROM p, ATLAS_PANDA.jobs_statuslog s "\
"WHERE s.PandaID=p.PandaID AND s.jobStatus=:s_jobStatus AND s.modificationTime<:modificationTime "
varMap = {}
varMap[':prodSourceLabel1'] = 'user'
varMap[':prodSourceLabel2'] = 'panda'
varMap[':modificationTime'] = sortTimeLimit
varMap[':lockedBy'] = 'jedi'
varMap[':jobStatus1'] = 'activated'
varMap[':jobStatus2'] = 'dummy'
varMap[':jobStatus3'] = 'starting'
varMap[':s_jobStatus'] = 'activated'
# get jobs older than threshold
ret,res = taskBuffer.querySQLS(sql, varMap)
resList = []
keyList = set()
if res is not None:
for tmpItem in res:
jobDefinitionID,prodUserName,prodUserID,computingSite,maxTime,jediTaskID,processingType = tmpItem
tmpKey = (jediTaskID,jobDefinitionID)
keyList.add(tmpKey)
resList.append(tmpItem)
# get stalled assigned job
sqlA = "SELECT jobDefinitionID,prodUserName,prodUserID,computingSite,MAX(creationTime),jediTaskID,processingType "
sqlA += "FROM ATLAS_PANDA.jobsDefined4 "
sqlA += "WHERE prodSourceLabel IN (:prodSourceLabel1,:prodSourceLabel2) AND jobStatus IN (:jobStatus1,:jobStatus2) "
sqlA += "AND creationTime<:modificationTime AND lockedBy=:lockedBy "
sqlA += "GROUP BY jobDefinitionID,prodUserName,prodUserID,computingSite,jediTaskID,processingType "
varMap = {}
varMap[':prodSourceLabel1'] = 'user'
varMap[':prodSourceLabel2'] = 'panda'
varMap[':modificationTime'] = sortTimeLimit
varMap[':lockedBy'] = 'jedi'
varMap[':jobStatus1'] = 'assigned'
varMap[':jobStatus2'] = 'defined'
retA,resA = taskBuffer.querySQLS(sqlA, varMap)
if resA is not None:
for tmpItem in resA:
jobDefinitionID,prodUserName,prodUserID,computingSite,maxTime,jediTaskID,processingType = tmpItem
tmpKey = (jediTaskID,jobDefinitionID)
if tmpKey not in keyList:
keyList.add(tmpKey)
resList.append(tmpItem)
# sql to check recent activity
sql = "SELECT PandaID,stateChangeTime,jobStatus FROM %s "
sql += "WHERE prodUserName=:prodUserName AND jobDefinitionID=:jobDefinitionID "
sql += "AND computingSite=:computingSite AND jediTaskID=:jediTaskID "
sql += "AND jobStatus NOT IN (:jobStatus1,:jobStatus2,:jobStatus3) "
sql += "AND stateChangeTime>:modificationTime "
sql += "AND rownum <= 1"
# sql to get associated jobs with jediTaskID
sqlJJ = "SELECT PandaID FROM %s "
sqlJJ += "WHERE jediTaskID=:jediTaskID AND jobStatus IN (:jobS1,:jobS2,:jobS3,:jobS4,:jobS5) "
sqlJJ += "AND jobDefinitionID=:jobDefID AND computingSite=:computingSite "
if resList != []:
recentRuntimeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=3)
# loop over all user/jobID combinations
iComb = 0
nComb = len(resList)
_logger.debug("total combinations = %s" % nComb)
for jobDefinitionID,prodUserName,prodUserID,computingSite,maxModificationTime,jediTaskID,processingType in resList:
# check if jobs with the jobID have run recently
varMap = {}
varMap[':jediTaskID'] = jediTaskID
varMap[':computingSite'] = computingSite
varMap[':prodUserName'] = prodUserName
varMap[':jobDefinitionID'] = jobDefinitionID
varMap[':modificationTime'] = recentRuntimeLimit
varMap[':jobStatus1'] = 'closed'
varMap[':jobStatus2'] = 'failed'
varMap[':jobStatus3'] = 'starting'
_logger.debug(" rebro:%s/%s:ID=%s:%s jediTaskID=%s site=%s" % (iComb,nComb,jobDefinitionID,
prodUserName,jediTaskID,
computingSite))
iComb += 1
hasRecentJobs = False
# check site
if not siteMapper.checkSite(computingSite):
_logger.debug(" -> skip unknown site=%s" % computingSite)
continue
# check site status
tmpSiteStatus = siteMapper.getSite(computingSite).status
if tmpSiteStatus not in ['offline','test']:
# use normal time limit for normal site status
if maxModificationTime > normalTimeLimit:
_logger.debug(" -> skip wait for normal timelimit=%s<maxModTime=%s" % (normalTimeLimit,maxModificationTime))
continue
for tableName in ['ATLAS_PANDA.jobsActive4','ATLAS_PANDA.jobsArchived4']:
retU,resU = taskBuffer.querySQLS(sql % tableName, varMap)
if resU is None:
# database error
raise RuntimeError("failed to check modTime")
if resU != []:
# found recent jobs
hasRecentJobs = True
_logger.debug(" -> skip due to recent activity %s to %s at %s" % (resU[0][0],
resU[0][2],
resU[0][1]))
break
else:
_logger.debug(" -> immediate rebro due to site status=%s" % tmpSiteStatus)
if hasRecentJobs:
# skip since some jobs have run recently
continue
else:
if jediTaskID is None:
_logger.debug(" -> rebro for normal task : no action")
else:
_logger.debug(" -> rebro for JEDI task")
killJobs = []
varMap = {}
varMap[':jediTaskID'] = jediTaskID
varMap[':jobDefID'] = jobDefinitionID
varMap[':computingSite'] = computingSite
varMap[':jobS1'] = 'defined'
varMap[':jobS2'] = 'assigned'
varMap[':jobS3'] = 'activated'
varMap[':jobS4'] = 'dummy'
varMap[':jobS5'] = 'starting'
for tableName in ['ATLAS_PANDA.jobsDefined4','ATLAS_PANDA.jobsActive4']:
retJJ,resJJ = taskBuffer.querySQLS(sqlJJ % tableName, varMap)
for tmpPandaID, in resJJ:
killJobs.append(tmpPandaID)
# reverse sort to kill buildJob in the end
killJobs.sort()
killJobs.reverse()
# kill to reassign
taskBuffer.killJobs(killJobs,'JEDI','51',True)
except Exception as e:
_logger.error("rebrokerage failed with {0} : {1}".format(str(e), traceback.format_exc()))
# kill too long running jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=21)
status,res = taskBuffer.querySQLS("SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE creationTime<:creationTime",
{':creationTime':timeLimit})
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
nJob = 100
iJob = 0
while iJob < len(jobs):
# set tobekill
_logger.debug('killJobs for Running (%s)' % jobs[iJob:iJob+nJob])
Client.killJobs(jobs[iJob:iJob+nJob],2)
# run watcher
for id in jobs[iJob:iJob+nJob]:
thr = Watcher(taskBuffer,id,single=True,sitemapper=siteMapper,sleepTime=60*24*21)
thr.start()
thr.join()
time.sleep(1)
iJob += nJob
time.sleep(10)
# kill too long waiting ddm jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=5)
varMap = {}
varMap[':prodSourceLabel'] = 'ddm'
varMap[':creationTime'] = timeLimit
status,res = taskBuffer.querySQLS("SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE prodSourceLabel=:prodSourceLabel AND creationTime<:creationTime",
varMap)
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
Client.killJobs(jobs,2)
_logger.debug("killJobs for DDM (%s)" % str(jobs))
# kill too long throttled jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=7)
varMap = {}
varMap[':jobStatus'] = 'throttled'
varMap[':creationTime'] = timeLimit
status,res = taskBuffer.querySQLS("SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE jobStatus=:jobStatus AND creationTime<:creationTime ",
varMap)
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
Client.killJobs(jobs,2)
_logger.debug("killJobs for throttled (%s)" % str(jobs))
# check if merge job is valid
_logger.debug('kill invalid pmerge')
varMap = {}
varMap[':processingType'] = 'pmerge'
varMap[':timeLimit'] = datetime.datetime.utcnow() - datetime.timedelta(minutes=30)
sql = "SELECT PandaID,jediTaskID FROM ATLAS_PANDA.jobsDefined4 WHERE processingType=:processingType AND modificationTime<:timeLimit "
sql += "UNION "
sql += "SELECT PandaID,jediTaskID FROM ATLAS_PANDA.jobsActive4 WHERE processingType=:processingType AND modificationTime<:timeLimit "
status,res = taskBuffer.querySQLS(sql,varMap)
nPmerge = 0
badPmerge = 0
_logger.debug('check {0} pmerge'.format(len(res)))
for pandaID,jediTaskID in res:
nPmerge += 1
isValid,tmpMsg = taskBuffer.isValidMergeJob(pandaID,jediTaskID)
if isValid is False:
_logger.debug("kill pmerge {0} since {1} gone".format(pandaID,tmpMsg))
taskBuffer.killJobs([pandaID],'killed since pre-merge job {0} gone'.format(tmpMsg),
'52',True)
badPmerge += 1
_logger.debug('killed invalid pmerge {0}/{1}'.format(badPmerge,nPmerge))
# cleanup of jumbo jobs
_logger.debug('jumbo job cleanup')
res = taskBuffer.cleanupJumboJobs()
_logger.debug(res)
_memoryCheck("delete XML")
# delete old files in DA cache
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=7)
files = os.listdir(panda_config.cache_dir)
for file in files:
# skip special test file
if file == 'sources.72c48dc5-f055-43e5-a86e-4ae9f8ea3497.tar.gz':
continue
if file == 'sources.090f3f51-fc81-4e80-9749-a5e4b2bd58de.tar.gz':
continue
try:
# get timestamp
timestamp = datetime.datetime.fromtimestamp(os.stat('%s/%s' % (panda_config.cache_dir,file)).st_mtime)
# delete
if timestamp < timeLimit:
_logger.debug("delete %s " % file)
os.remove('%s/%s' % (panda_config.cache_dir,file))
except Exception:
pass
_memoryCheck("delete core")
# delete core
dirName = '%s/..' % panda_config.logdir
for file in os.listdir(dirName):
if file.startswith('core.'):
_logger.debug("delete %s " % file)
try:
os.remove('%s/%s' % (dirName,file))
except Exception:
pass
# update email DB
_memoryCheck("email")
_logger.debug("Update emails")
# lock file
_lockGetMail = open(panda_config.lockfile_getMail, 'w')
# lock email DB
fcntl.flock(_lockGetMail.fileno(), fcntl.LOCK_EX)
# open email DB
pDB = shelve.open(panda_config.emailDB)
# read
mailMap = {}
for name in pDB:
addr = pDB[name]
mailMap[name] = addr
# close DB
pDB.close()
# release file lock
fcntl.flock(_lockGetMail.fileno(), fcntl.LOCK_UN)
# set email address
for name in mailMap:
addr = mailMap[name]
# remove _
name = re.sub('_$','',name)
status,res = taskBuffer.querySQLS("SELECT email FROM ATLAS_PANDAMETA.users WHERE name=:name",{':name':name})
# failed or not found
if status == -1 or len(res) == 0:
_logger.error("%s not found in user DB" % name)
continue
# already set
if res[0][0] not in ['','None',None]:
continue
# update email
_logger.debug("set '%s' to %s" % (name,addr))
status,res = taskBuffer.querySQLS("UPDATE ATLAS_PANDAMETA.users SET email=:addr WHERE name=:name",{':addr':addr,':name':name})
# sandbox
_logger.debug("Touch sandbox")
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=1)
sqlC = "SELECT hostName,fileName,creationTime,userName FROM ATLAS_PANDAMETA.userCacheUsage "\
"WHERE creationTime>:timeLimit AND creationTime>modificationTime "\
"AND (fileName like 'sources%' OR fileName like 'jobO%') "
sqlU = "UPDATE ATLAS_PANDAMETA.userCacheUsage SET modificationTime=CURRENT_DATE "\
"WHERE userName=:userName AND fileName=:fileName "
status, res = taskBuffer.querySQLS(sqlC, {':timeLimit': timeLimit})
if res is None:
_logger.error("failed to get files")
elif len(res) > 0:
_logger.debug("{0} files to touch".format(len(res)))
for hostName, fileName, creationTime, userName in res:
base_url = 'https://{0}:{1}'.format(hostName, panda_config.pserverport)
_logger.debug("touch {0} on {1} created at {2}".format(fileName, hostName, creationTime))
s,o = Client.touchFile(base_url, fileName)
_logger.debug(o)
if o == 'True':
varMap = dict()
varMap[':userName'] = userName
varMap[':fileName'] = fileName
taskBuffer.querySQLS(sqlU, varMap)
_logger.debug("Check sandbox")
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=1)
expireLimit = datetime.datetime.utcnow() - datetime.timedelta(days=30)
sqlD = "DELETE FROM ATLAS_PANDAMETA.userCacheUsage WHERE userName=:userName AND fileName=:fileName "
nRange = 100
    for i in range(nRange):
        res = taskBuffer.getLockSandboxFiles(timeLimit, 1000)
        if res is None:
            _logger.error("failed to get files")
            break
        elif len(res) == 0:
            break
        _logger.debug("{0}/{1} {2} files to check".format(i, nRange, len(res)))
for userName, hostName, fileName, creationTime, modificationTime in res:
url = 'https://{0}:{1}/cache/{2}'.format(hostName, panda_config.pserverport, fileName)
_logger.debug("checking {0} created at {1}".format(url, creationTime))
toDelete = False
try:
x = requests.head(url, verify=False)
_logger.debug("code {0}".format(x.status_code))
if x.status_code == 404:
_logger.debug("delete")
toDelete = True
except Exception as e:
_logger.debug("failed with {0}".format(str(e)))
if creationTime < expireLimit:
toDelete = True
_logger.debug("delete due to creationTime={0}".format(creationTime))
# update or delete
varMap = dict()
varMap[':userName'] = userName
varMap[':fileName'] = fileName
if toDelete:
taskBuffer.querySQLS(sqlD, varMap)
else:
_logger.debug("keep")
_memoryCheck("end")
_logger.debug("===================== end =====================")
# run
if __name__ == '__main__':
main(argv=sys.argv)
| apache-2.0 | 4,297,887,075,583,032,000 | 45.885224 | 323 | 0.571991 | false |
mvendra/mvtools | security/hash_check.py | 1 | 1222 | #!/usr/bin/env python3
import sys
import os
import sha256_wrapper
def puaq():
print("Usage: %s archive-to-check [hash-file]" % os.path.basename(__file__))
sys.exit(1)
def sha256sum_check(archive_file, hash_file):
hash_file_contents = ""
with open(hash_file, "r") as f:
hash_file_contents = f.read()
v, r = sha256_wrapper.hash_sha_256_app_file(archive_file)
if not v:
print("Failed generating hash for file %s" % archive_file)
sys.exit(1)
    # compare the first 64 characters (the hex sha256 digest) against the
    # hash computed for the archive
    return hash_file_contents[0:64] == r
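# Illustrative, commented-out usage sketch (file names are assumptions): given
# "backup.tar.gz" and a sibling "backup.tar.gz.sha256" whose first 64
# characters are the hex sha256 digest (e.g. the output of `sha256sum`), the
# check reduces to:
#
# if sha256sum_check("backup.tar.gz", "backup.tar.gz.sha256"):
#     print("Correct match")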
if __name__ == "__main__":
if len(sys.argv) < 2:
puaq()
archive_file = sys.argv[1]
hash_file = ""
if len(sys.argv) > 2:
hash_file = sys.argv[2]
else:
hash_file = archive_file + ".sha256"
if not os.path.isfile(archive_file):
print("%s does not exist. Aborting." % archive_file)
sys.exit(1)
if not os.path.isfile(hash_file):
print("%s does not exist. Aborting." % hash_file)
sys.exit(1)
if sha256sum_check(archive_file, hash_file):
print("Correct match")
else:
print("Check failed!")
sys.exit(1)
| mit | 7,459,735,787,600,688,000 | 21.62963 | 80 | 0.576923 | false |
Kokemomo/Kokemomo | kokemomo/plugins/engine/model/km_storage/impl/km_rdb_adapter.py | 1 | 4103 | from sqlalchemy import Column, Integer, String, DateTime, Boolean, Text, func
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
from kokemomo.plugins.engine.model.km_storage.km_adapter import BaseAdapter
class BaseModel(object):
id = Column(Integer, primary_key=True)
created_at = Column(DateTime, default=func.now())
updated_at = Column(DateTime, default=func.now(), onupdate=func.now())
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
def __repr__(self):
return '<%s>' % self.__class__.__name__
def validate(self):
pass
def save(self, validate=True):
try:
adapter.add(self)
adapter.commit()
except:
adapter.rollback()
raise
def delete(self):
try:
adapter.delete(self)
adapter.commit()
except:
adapter.rollback()
raise
@classmethod
def all(cls, order=None, commit=True):
res = adapter.session.query(cls).order_by(order).all()
if commit:
adapter.session.commit()
return res
@classmethod
def get(cls, id, commit=True):
res = adapter.session.query(cls).filter(cls.id == id).first()
if commit:
adapter.session.commit()
return res
@classmethod
def delete_by_id(cls, id):
try:
elem = cls.get(id)
adapter.delete(elem)
adapter.commit()
except:
adapter.rollback()
raise
@classmethod
def delete_by_condition(cls, **kwargs):
target_list = cls.find(**kwargs)
try:
for target in target_list:
adapter.session.delete(target)
adapter.commit()
except:
adapter.rollback()
raise
@classmethod
def find(cls, order=None, commit=True, **kwargs):
res = adapter.session.query(cls).order_by(order).filter_by(**kwargs).all()
if commit:
adapter.session.commit()
return res
class KMRDBAdapter(BaseAdapter):
def __init__(self, rdb_path, options):
self.rdb_path = rdb_path
self.options = options
self.Model = declarative_base(cls=BaseModel)
self.fields = [Column, String, Integer, Boolean, Text, DateTime]
for field in self.fields:
setattr(self, field.__name__, field)
@property
def metadata(self):
return self.Model.metadata
    def init(self, rdb_path=None, options=None):
        # avoid sharing a mutable default argument between calls
        options = options if options is not None else {}
        self.session = scoped_session(sessionmaker())
        if rdb_path:
            self.rdb_path = rdb_path
        self.engine = create_engine(self.rdb_path, **options)
self.session.configure(bind=self.engine)
self.metadata.create_all(self.engine)
def drop_all(self):
self.metadata.drop_all(self.engine)
def add(self, *args, **kwargs):
self.session.add(*args, **kwargs)
def delete(self, *args, **kwargs):
self.session.delete(*args, **kwargs)
def commit(self):
self.session.commit()
def set(self, *args, **kwargs):
self.add(*args, **kwargs)
self.commit()
def rollback(self):
self.session.rollback()
def get(self, *args, **kwargs):
pass
def rollback():
adapter.session.rollback()
class Transaction(object):
@classmethod
def begin(cls):
return adapter.session.begin(subtransactions=True)
@classmethod
def add(cls, *args, **kwargs):
adapter.session.add(*args, **kwargs)
@classmethod
def delete(cls, *args, **kwargs):
adapter.session.delete(*args, **kwargs)
@classmethod
def commit(cls):
adapter.session.commit()
@classmethod
def rollback(self):
adapter.session.rollback()
from kokemomo.settings.common import DATA_BASE, DATA_BASE_OPTIONS
adapter = KMRDBAdapter(DATA_BASE, DATA_BASE_OPTIONS)
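# Illustrative, commented-out usage sketch (model name and field values are
# assumptions, not part of this plugin): declare a model on adapter.Model,
# call adapter.init() once to bind the engine and create the tables, then use
# the BaseModel helpers for persistence.
#
# class KMUser(adapter.Model):
#     name = adapter.Column(adapter.String(64))
#
# adapter.init()                      # binds the engine and creates tables
# user = KMUser(name='momo')
# user.save()                         # add + commit, rollback on error
# found = KMUser.find(name='momo')    # list of matching rows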
| mit | 8,601,652,151,066,538,000 | 25.642857 | 82 | 0.60078 | false |
prashanthpai/swift | swift/common/ring/builder.py | 1 | 73566 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import errno
import itertools
import logging
import math
import random
import six.moves.cPickle as pickle
from copy import deepcopy
from contextlib import contextmanager
from array import array
from collections import defaultdict
import six
from six.moves import range
from time import time
from swift.common import exceptions
from swift.common.ring import RingData
from swift.common.ring.utils import tiers_for_dev, build_tier_tree, \
validate_and_normalize_address
# we can't store None's in the replica2part2dev array, so we high-jack
# the max value for magic to represent the part is not currently
# assigned to any device.
NONE_DEV = 2 ** 16 - 1
MAX_BALANCE = 999.99
MAX_BALANCE_GATHER_COUNT = 3
class RingValidationWarning(Warning):
pass
try:
# python 2.7+
from logging import NullHandler
except ImportError:
# python 2.6
class NullHandler(logging.Handler):
def emit(self, *a, **kw):
pass
class RingBuilder(object):
"""
Used to build swift.common.ring.RingData instances to be written to disk
and used with swift.common.ring.Ring instances. See bin/swift-ring-builder
for example usage.
The instance variable devs_changed indicates if the device information has
changed since the last balancing. This can be used by tools to know whether
a rebalance request is an isolated request or due to added, changed, or
removed devices.
:param part_power: number of partitions = 2**part_power.
:param replicas: number of replicas for each partition
:param min_part_hours: minimum number of hours between partition changes
"""
def __init__(self, part_power, replicas, min_part_hours):
if part_power > 32:
raise ValueError("part_power must be at most 32 (was %d)"
% (part_power,))
if replicas < 1:
raise ValueError("replicas must be at least 1 (was %.6f)"
% (replicas,))
if min_part_hours < 0:
raise ValueError("min_part_hours must be non-negative (was %d)"
% (min_part_hours,))
self.part_power = part_power
self.replicas = replicas
self.min_part_hours = min_part_hours
self.parts = 2 ** self.part_power
self.devs = []
self.devs_changed = False
self.version = 0
self.overload = 0.0
# _replica2part2dev maps from replica number to partition number to
# device id. So, for a three replica, 2**23 ring, it's an array of
# three 2**23 arrays of device ids (unsigned shorts). This can work a
# bit faster than the 2**23 array of triplet arrays of device ids in
# many circumstances. Making one big 2**23 * 3 array didn't seem to
# have any speed change; though you're welcome to try it again (it was
# a while ago, code-wise, when I last tried it).
self._replica2part2dev = None
# _last_part_moves is an array of unsigned bytes representing
# the number of hours since a given partition was last moved.
# This is used to guarantee we don't move a partition twice
# within a given number of hours (24 is my usual test). Removing
# a device overrides this behavior as it's assumed that's only
# done because of device failure.
self._last_part_moves = None
# _last_part_moves_epoch indicates the time the offsets in
# _last_part_moves is based on.
self._last_part_moves_epoch = 0
self._last_part_gather_start = 0
self._dispersion_graph = {}
self.dispersion = 0.0
self._remove_devs = []
self._ring = None
self.logger = logging.getLogger("swift.ring.builder")
if not self.logger.handlers:
self.logger.disabled = True
# silence "no handler for X" error messages
self.logger.addHandler(NullHandler())
@contextmanager
def debug(self):
"""
Temporarily enables debug logging, useful in tests, e.g.
with rb.debug():
rb.rebalance()
"""
self.logger.disabled = False
try:
yield
finally:
self.logger.disabled = True
@property
def min_part_seconds_left(self):
"""Get the total seconds until a rebalance can be performed"""
elapsed_seconds = int(time() - self._last_part_moves_epoch)
return max((self.min_part_hours * 3600) - elapsed_seconds, 0)
def weight_of_one_part(self):
"""
Returns the weight of each partition as calculated from the
total weight of all the devices.
"""
try:
return self.parts * self.replicas / \
sum(d['weight'] for d in self._iter_devs())
except ZeroDivisionError:
raise exceptions.EmptyRingError('There are no devices in this '
'ring, or all devices have been '
'deleted')
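    # Commented worked example (numbers are assumptions, not defaults): with
    # part_power=10 (1024 parts), replicas=3 and a total device weight of
    # 100.0, weight_of_one_part() returns 1024 * 3 / 100.0 = 30.72, i.e. one
    # unit of weight corresponds to about 30.72 partition-replicas.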
@classmethod
def from_dict(cls, builder_data):
b = cls(1, 1, 1) # Dummy values
b.copy_from(builder_data)
return b
def copy_from(self, builder):
"""
Reinitializes this RingBuilder instance from data obtained from the
builder dict given. Code example::
b = RingBuilder(1, 1, 1) # Dummy values
b.copy_from(builder)
This is to restore a RingBuilder that has had its b.to_dict()
previously saved.
"""
if hasattr(builder, 'devs'):
self.part_power = builder.part_power
self.replicas = builder.replicas
self.min_part_hours = builder.min_part_hours
self.parts = builder.parts
self.devs = builder.devs
self.devs_changed = builder.devs_changed
self.overload = builder.overload
self.version = builder.version
self._replica2part2dev = builder._replica2part2dev
self._last_part_moves_epoch = builder._last_part_moves_epoch
self._last_part_moves = builder._last_part_moves
self._last_part_gather_start = builder._last_part_gather_start
self._remove_devs = builder._remove_devs
else:
self.part_power = builder['part_power']
self.replicas = builder['replicas']
self.min_part_hours = builder['min_part_hours']
self.parts = builder['parts']
self.devs = builder['devs']
self.devs_changed = builder['devs_changed']
self.overload = builder.get('overload', 0.0)
self.version = builder['version']
self._replica2part2dev = builder['_replica2part2dev']
self._last_part_moves_epoch = builder['_last_part_moves_epoch']
self._last_part_moves = builder['_last_part_moves']
self._last_part_gather_start = builder['_last_part_gather_start']
self._dispersion_graph = builder.get('_dispersion_graph', {})
self.dispersion = builder.get('dispersion')
self._remove_devs = builder['_remove_devs']
self._ring = None
# Old builders may not have a region defined for their devices, in
# which case we default it to 1.
for dev in self._iter_devs():
dev.setdefault("region", 1)
if not self._last_part_moves_epoch:
self._last_part_moves_epoch = 0
def __deepcopy__(self, memo):
return type(self).from_dict(deepcopy(self.to_dict(), memo))
def to_dict(self):
"""
Returns a dict that can be used later with copy_from to
restore a RingBuilder. swift-ring-builder uses this to
pickle.dump the dict to a file and later load that dict into
copy_from.
"""
return {'part_power': self.part_power,
'replicas': self.replicas,
'min_part_hours': self.min_part_hours,
'parts': self.parts,
'devs': self.devs,
'devs_changed': self.devs_changed,
'version': self.version,
'overload': self.overload,
'_replica2part2dev': self._replica2part2dev,
'_last_part_moves_epoch': self._last_part_moves_epoch,
'_last_part_moves': self._last_part_moves,
'_last_part_gather_start': self._last_part_gather_start,
'_dispersion_graph': self._dispersion_graph,
'dispersion': self.dispersion,
'_remove_devs': self._remove_devs}
def change_min_part_hours(self, min_part_hours):
"""
Changes the value used to decide if a given partition can be moved
again. This restriction is to give the overall system enough time to
settle a partition to its new location before moving it to yet another
location. While no data would be lost if a partition is moved several
times quickly, it could make that data unreachable for a short period
of time.
This should be set to at least the average full partition replication
time. Starting it at 24 hours and then lowering it to what the
replicator reports as the longest partition cycle is best.
:param min_part_hours: new value for min_part_hours
"""
self.min_part_hours = min_part_hours
def set_replicas(self, new_replica_count):
"""
Changes the number of replicas in this ring.
If the new replica count is sufficiently different that
self._replica2part2dev will change size, sets
self.devs_changed. This is so tools like
bin/swift-ring-builder can know to write out the new ring
rather than bailing out due to lack of balance change.
"""
old_slots_used = int(self.parts * self.replicas)
new_slots_used = int(self.parts * new_replica_count)
if old_slots_used != new_slots_used:
self.devs_changed = True
self.replicas = new_replica_count
def set_overload(self, overload):
self.overload = overload
def get_ring(self):
"""
Get the ring, or more specifically, the swift.common.ring.RingData.
This ring data is the minimum required for use of the ring. The ring
builder itself keeps additional data such as when partitions were last
moved.
"""
# We cache the self._ring value so multiple requests for it don't build
# it multiple times. Be sure to set self._ring = None whenever the ring
# will need to be rebuilt.
if not self._ring:
# Make devs list (with holes for deleted devices) and not including
# builder-specific extra attributes.
devs = [None] * len(self.devs)
for dev in self._iter_devs():
devs[dev['id']] = dict((k, v) for k, v in dev.items()
if k not in ('parts', 'parts_wanted'))
# Copy over the replica+partition->device assignments, the device
# information, and the part_shift value (the number of bits to
# shift an unsigned int >I right to obtain the partition for the
# int).
if not self._replica2part2dev:
self._ring = RingData([], devs, 32 - self.part_power)
else:
self._ring = \
RingData([array('H', p2d) for p2d in
self._replica2part2dev],
devs, 32 - self.part_power)
return self._ring
def add_dev(self, dev):
"""
Add a device to the ring. This device dict should have a minimum of the
following keys:
====== ===============================================================
id unique integer identifier amongst devices. Defaults to the next
id if the 'id' key is not provided in the dict
weight a float of the relative weight of this device as compared to
others; this indicates how many partitions the builder will try
to assign to this device
region integer indicating which region the device is in
zone integer indicating which zone the device is in; a given
partition will not be assigned to multiple devices within the
same (region, zone) pair if there is any alternative
ip the ip address of the device
port the tcp port of the device
device the device's name on disk (sdb1, for example)
meta general use 'extra' field; for example: the online date, the
hardware description
====== ===============================================================
.. note::
This will not rebalance the ring immediately as you may want to
make multiple changes for a single rebalance.
:param dev: device dict
:returns: id of device (not used in the tree anymore, but unknown
users may depend on it)
"""
if 'id' not in dev:
dev['id'] = 0
if self.devs:
try:
dev['id'] = self.devs.index(None)
except ValueError:
dev['id'] = len(self.devs)
if dev['id'] < len(self.devs) and self.devs[dev['id']] is not None:
raise exceptions.DuplicateDeviceError(
'Duplicate device id: %d' % dev['id'])
# Add holes to self.devs to ensure self.devs[dev['id']] will be the dev
while dev['id'] >= len(self.devs):
self.devs.append(None)
dev['weight'] = float(dev['weight'])
dev['parts'] = 0
self.devs[dev['id']] = dev
self.devs_changed = True
self.version += 1
return dev['id']
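    # Commented usage sketch (values are assumptions): the minimum device dict
    # described above, added before a rebalance.
    #
    # rb.add_dev({'id': 0, 'region': 1, 'zone': 1, 'weight': 100.0,
    #             'ip': '127.0.0.1', 'port': 6010, 'device': 'sdb1',
    #             'meta': ''})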
def set_dev_weight(self, dev_id, weight):
"""
Set the weight of a device. This should be called rather than just
altering the weight key in the device dict directly, as the builder
will need to rebuild some internal state to reflect the change.
.. note::
This will not rebalance the ring immediately as you may want to
make multiple changes for a single rebalance.
:param dev_id: device id
:param weight: new weight for device
"""
if any(dev_id == d['id'] for d in self._remove_devs):
raise ValueError("Can not set weight of dev_id %s because it "
"is marked for removal" % (dev_id,))
self.devs[dev_id]['weight'] = weight
self.devs_changed = True
self.version += 1
def remove_dev(self, dev_id):
"""
Remove a device from the ring.
.. note::
This will not rebalance the ring immediately as you may want to
make multiple changes for a single rebalance.
:param dev_id: device id
"""
dev = self.devs[dev_id]
dev['weight'] = 0
self._remove_devs.append(dev)
self.devs_changed = True
self.version += 1
def rebalance(self, seed=None):
"""
Rebalance the ring.
This is the main work function of the builder, as it will assign and
reassign partitions to devices in the ring based on weights, distinct
zones, recent reassignments, etc.
The process doesn't always perfectly assign partitions (that'd take a
lot more analysis and therefore a lot more time -- I had code that did
that before). Because of this, it keeps rebalancing until the device
skew (number of partitions a device wants compared to what it has) gets
below 1% or doesn't change by more than 1% (only happens with a ring
that can't be balanced no matter what).
:returns: (number_of_partitions_altered, resulting_balance,
number_of_removed_devices)
"""
# count up the devs, and cache some stuff
num_devices = 0
for dev in self._iter_devs():
dev['tiers'] = tiers_for_dev(dev)
if dev['weight'] > 0:
num_devices += 1
if num_devices < self.replicas:
raise exceptions.RingValidationError(
"Replica count of %(replicas)s requires more "
"than %(num_devices)s devices" % {
'replicas': self.replicas,
'num_devices': num_devices,
})
if seed is not None:
random.seed(seed)
self._ring = None
old_replica2part2dev = copy.deepcopy(self._replica2part2dev)
if self._last_part_moves is None:
self.logger.debug("New builder; performing initial balance")
self._last_part_moves = array('B', itertools.repeat(0, self.parts))
self._update_last_part_moves()
replica_plan = self._build_replica_plan()
self._set_parts_wanted(replica_plan)
assign_parts = defaultdict(list)
# gather parts from replica count adjustment
self._adjust_replica2part2dev_size(assign_parts)
# gather parts from failed devices
removed_devs = self._gather_parts_from_failed_devices(assign_parts)
# gather parts for dispersion (N.B. this only picks up parts that
# *must* disperse according to the replica plan)
self._gather_parts_for_dispersion(assign_parts, replica_plan)
        # we'll gather a few times, or until we achieve the plan
for gather_count in range(MAX_BALANCE_GATHER_COUNT):
self._gather_parts_for_balance(assign_parts, replica_plan)
if not assign_parts:
# most likely min part hours
finish_status = 'Unable to finish'
break
assign_parts_list = list(assign_parts.items())
# shuffle the parts to be reassigned, we have no preference on the
# order in which the replica plan is fulfilled.
random.shuffle(assign_parts_list)
# reset assign_parts map for next iteration
assign_parts = defaultdict(list)
num_part_replicas = sum(len(r) for p, r in assign_parts_list)
self.logger.debug("Gathered %d parts", num_part_replicas)
self._reassign_parts(assign_parts_list, replica_plan)
self.logger.debug("Assigned %d parts", num_part_replicas)
if not sum(d['parts_wanted'] < 0 for d in
self._iter_devs()):
finish_status = 'Finished'
break
else:
finish_status = 'Unable to finish'
self.logger.debug('%s rebalance plan after %s attempts' % (
finish_status, gather_count + 1))
self.devs_changed = False
self.version += 1
changed_parts = self._build_dispersion_graph(old_replica2part2dev)
# clean up the cache
for dev in self._iter_devs():
dev.pop('tiers', None)
return changed_parts, self.get_balance(), removed_devs
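    # Commented end-to-end sketch of the typical builder workflow (values are
    # assumptions): create, populate, rebalance, then persist the RingData.
    #
    # rb = RingBuilder(part_power=10, replicas=3, min_part_hours=1)
    # for dev in device_dicts:          # dicts shaped as in add_dev() above
    #     rb.add_dev(dev)
    # parts_moved, balance, removed = rb.rebalance(seed=42)
    # rb.get_ring().save('object.ring.gz')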
def _build_dispersion_graph(self, old_replica2part2dev=None):
"""
Build a dict of all tiers in the cluster to a list of the number of
parts with a replica count at each index. The values of the dict will
be lists of length the maximum whole replica + 1 so that the
graph[tier][3] is the number of parts within the tier with 3 replicas
and graph [tier][0] is the number of parts not assigned in this tier.
i.e.
{
<tier>: [
<number_of_parts_with_0_replicas>,
<number_of_parts_with_1_replicas>,
...
<number_of_parts_with_n_replicas>,
],
...
}
:param old_replica2part2dev: if called from rebalance, the
old_replica2part2dev can be used to count moved parts.
:returns: number of parts with different assignments than
old_replica2part2dev if provided
"""
# Since we're going to loop over every replica of every part we'll
# also count up changed_parts if old_replica2part2dev is passed in
old_replica2part2dev = old_replica2part2dev or []
# Compare the partition allocation before and after the rebalance
# Only changed device ids are taken into account; devices might be
# "touched" during the rebalance, but actually not really moved
changed_parts = 0
int_replicas = int(math.ceil(self.replicas))
max_allowed_replicas = self._build_max_replicas_by_tier()
parts_at_risk = 0
dispersion_graph = {}
# go over all the devices holding each replica part by part
for part_id, dev_ids in enumerate(
six.moves.zip(*self._replica2part2dev)):
# count the number of replicas of this part for each tier of each
# device, some devices may have overlapping tiers!
replicas_at_tier = defaultdict(int)
for rep_id, dev in enumerate(iter(
self.devs[dev_id] for dev_id in dev_ids)):
for tier in (dev.get('tiers') or tiers_for_dev(dev)):
replicas_at_tier[tier] += 1
# IndexErrors will be raised if the replicas are increased or
# decreased, and that actually means the partition has changed
try:
old_device = old_replica2part2dev[rep_id][part_id]
except IndexError:
changed_parts += 1
continue
if old_device != dev['id']:
changed_parts += 1
part_at_risk = False
# update running totals for each tiers' number of parts with a
# given replica count
for tier, replicas in replicas_at_tier.items():
if tier not in dispersion_graph:
dispersion_graph[tier] = [self.parts] + [0] * int_replicas
dispersion_graph[tier][0] -= 1
dispersion_graph[tier][replicas] += 1
if replicas > max_allowed_replicas[tier]:
part_at_risk = True
# this part may be at risk in multiple tiers, but we only count it
# as at_risk once
if part_at_risk:
parts_at_risk += 1
self._dispersion_graph = dispersion_graph
self.dispersion = 100.0 * parts_at_risk / self.parts
return changed_parts
def validate(self, stats=False):
"""
Validate the ring.
This is a safety function to try to catch any bugs in the building
process. It ensures partitions have been assigned to real devices,
aren't doubly assigned, etc. It can also optionally check the even
distribution of partitions across devices.
:param stats: if True, check distribution of partitions across devices
:returns: if stats is True, a tuple of (device_usage, worst_stat), else
(None, None). device_usage[dev_id] will equal the number of
partitions assigned to that device. worst_stat will equal the
number of partitions the worst device is skewed from the
number it should have.
:raises RingValidationError: problem was found with the ring.
"""
# "len" showed up in profiling, so it's just computed once.
dev_len = len(self.devs)
parts_on_devs = sum(d['parts'] for d in self._iter_devs())
if not self._replica2part2dev:
raise exceptions.RingValidationError(
'_replica2part2dev empty; did you forget to rebalance?')
parts_in_map = sum(len(p2d) for p2d in self._replica2part2dev)
if parts_on_devs != parts_in_map:
raise exceptions.RingValidationError(
'All partitions are not double accounted for: %d != %d' %
(parts_on_devs, parts_in_map))
if stats:
# dev_usage[dev_id] will equal the number of partitions assigned to
# that device.
dev_usage = array('I', (0 for _junk in range(dev_len)))
for part2dev in self._replica2part2dev:
for dev_id in part2dev:
dev_usage[dev_id] += 1
for dev in self._iter_devs():
if not isinstance(dev['port'], int):
raise exceptions.RingValidationError(
"Device %d has port %r, which is not an integer." %
(dev['id'], dev['port']))
int_replicas = int(math.ceil(self.replicas))
        # map() returns a one-shot iterator on Python 3, but rep2part_len is
        # re-iterated for every partition below, so materialize it as a list
        rep2part_len = [len(p2d) for p2d in self._replica2part2dev]
# check the assignments of each part's replicas
for part in range(self.parts):
devs_for_part = []
for replica, part_len in enumerate(rep2part_len):
if part_len <= part:
# last replica may be short on parts because of floating
# replica count
if replica + 1 < int_replicas:
raise exceptions.RingValidationError(
"The partition assignments of replica %r were "
"shorter than expected (%s < %s) - this should "
"only happen for the last replica" % (
replica,
len(self._replica2part2dev[replica]),
self.parts,
))
break
dev_id = self._replica2part2dev[replica][part]
if dev_id >= dev_len or not self.devs[dev_id]:
raise exceptions.RingValidationError(
"Partition %d, replica %d was not allocated "
"to a device." %
(part, replica))
devs_for_part.append(dev_id)
if len(devs_for_part) != len(set(devs_for_part)):
raise exceptions.RingValidationError(
"The partition %s has been assigned to "
"duplicate devices %r" % (
part, devs_for_part))
if stats:
weight_of_one_part = self.weight_of_one_part()
worst = 0
for dev in self._iter_devs():
if not dev['weight']:
if dev_usage[dev['id']]:
# If a device has no weight, but has partitions, then
# its overage is considered "infinity" and therefore
# always the worst possible. We show MAX_BALANCE for
# convenience.
worst = MAX_BALANCE
break
continue
skew = abs(100.0 * dev_usage[dev['id']] /
(dev['weight'] * weight_of_one_part) - 100.0)
if skew > worst:
worst = skew
return dev_usage, worst
return None, None
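    # Commented usage sketch: after a rebalance, validate() both sanity-checks
    # the assignments and (with stats=True) reports per-device usage and the
    # worst skew from the ideal partition count.
    #
    # dev_usage, worst = rb.validate(stats=True)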
def _build_balance_per_dev(self):
"""
Build a map of <device_id> => <balance> where <balance> is a float
representing the percentage difference from the desired amount of
partitions a given device wants and the amount it has.
N.B. this method only considers a device's weight and the parts
assigned, not the parts wanted according to the replica plan.
"""
weight_of_one_part = self.weight_of_one_part()
balance_per_dev = {}
for dev in self._iter_devs():
if not dev['weight']:
if dev['parts']:
# If a device has no weight, but has partitions, then its
# overage is considered "infinity" and therefore always the
# worst possible. We show MAX_BALANCE for convenience.
balance = MAX_BALANCE
else:
balance = 0
else:
balance = 100.0 * dev['parts'] / (
dev['weight'] * weight_of_one_part) - 100.0
balance_per_dev[dev['id']] = balance
return balance_per_dev
def get_balance(self):
"""
Get the balance of the ring. The balance value is the highest
percentage of the desired amount of partitions a given device
wants. For instance, if the "worst" device wants (based on its
weight relative to the sum of all the devices' weights) 123
partitions and it has 124 partitions, the balance value would
        be 0.81 (1 extra / 123 wanted * 100 for percentage).
:returns: balance of the ring
"""
balance_per_dev = self._build_balance_per_dev()
return max(abs(b) for b in balance_per_dev.values())
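    # Commented worked example matching the docstring above: the worst device
    # wants 123 partitions but holds 124, so its balance is
    # 100.0 * 124 / 123 - 100.0 ~= 0.81, and get_balance() returns the largest
    # such absolute deviation across all devices.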
def get_required_overload(self, weighted=None, wanted=None):
"""
Returns the minimum overload value required to make the ring maximally
dispersed.
The required overload is the largest percentage change of any single
device from its weighted replicanth to its wanted replicanth (note:
        underweighted devices have a negative percentage change) to achieve
dispersion - that is to say a single device that must be overloaded by
5% is worse than 5 devices in a single tier overloaded by 1%.
"""
weighted = weighted or self._build_weighted_replicas_by_tier()
wanted = wanted or self._build_wanted_replicas_by_tier()
max_overload = 0.0
for dev in self._iter_devs():
tier = (dev['region'], dev['zone'], dev['ip'], dev['id'])
if not dev['weight']:
if tier not in wanted or not wanted[tier]:
continue
raise exceptions.RingValidationError(
'Device %s has zero weight and '
'should not want any replicas' % (tier,))
required = (wanted[tier] - weighted[tier]) / weighted[tier]
            self.logger.debug('%s wants %s and is weighted for %s so '
                              'requires %s overload' % (
tier, wanted[tier], weighted[tier],
required))
if required > max_overload:
max_overload = required
return max_overload
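    # Commented worked example (replicanth values are assumptions): a device
    # weighted for 0.05 replicanths that dispersion forces up to 0.06 wanted
    # replicanths requires (0.06 - 0.05) / 0.05 = 0.2, i.e. 20% overload; the
    # method returns the maximum such requirement over all devices.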
def pretend_min_part_hours_passed(self):
"""
Override min_part_hours by marking all partitions as having been moved
255 hours ago and last move epoch to 'the beginning of time'. This can
be used to force a full rebalance on the next call to rebalance.
"""
self._last_part_moves_epoch = 0
if not self._last_part_moves:
return
for part in range(self.parts):
self._last_part_moves[part] = 0xff
def get_part_devices(self, part):
"""
Get the devices that are responsible for the partition,
filtering out duplicates.
:param part: partition to get devices for
:returns: list of device dicts
"""
devices = []
for dev in self._devs_for_part(part):
if dev not in devices:
devices.append(dev)
return devices
def _iter_devs(self):
"""
        Returns an iterator over all the non-None devices in the ring. Note that
this means list(b._iter_devs())[some_id] may not equal b.devs[some_id];
you will have to check the 'id' key of each device to obtain its
dev_id.
"""
for dev in self.devs:
if dev is not None:
yield dev
def _build_tier2children(self):
"""
        Wrap the build_tier_tree helper so that zero-weight devices are excluded.
"""
return build_tier_tree(d for d in self._iter_devs() if d['weight'])
def _set_parts_wanted(self, replica_plan):
"""
Sets the parts_wanted key for each of the devices to the number of
partitions the device wants based on its relative weight. This key is
used to sort the devices according to "most wanted" during rebalancing
to best distribute partitions. A negative parts_wanted indicates the
device is "overweight" and wishes to give partitions away if possible.
:param replica_plan: a dict of dicts, as returned from
                             _build_replica_plan, that maps
                             each tier to its target replicanths.
"""
tier2children = self._build_tier2children()
parts_by_tier = defaultdict(int)
def place_parts(tier, parts):
parts_by_tier[tier] = parts
sub_tiers = sorted(tier2children[tier])
if not sub_tiers:
return
to_place = defaultdict(int)
for t in sub_tiers:
to_place[t] = int(math.floor(
replica_plan[t]['target'] * self.parts))
parts -= to_place[t]
# if there's some parts left over, just throw 'em about
sub_tier_gen = itertools.cycle(sorted(
sub_tiers, key=lambda t: replica_plan[t]['target']))
while parts:
t = next(sub_tier_gen)
to_place[t] += 1
parts -= 1
for t, p in to_place.items():
place_parts(t, p)
total_parts = int(self.replicas * self.parts)
place_parts((), total_parts)
# belts & suspenders/paranoia - at every level, the sum of
# parts_by_tier should be total_parts for the ring
tiers = ['cluster', 'regions', 'zones', 'servers', 'devices']
for i, tier_name in enumerate(tiers):
parts_at_tier = sum(parts_by_tier[t] for t in parts_by_tier
if len(t) == i)
if parts_at_tier != total_parts:
raise exceptions.RingValidationError(
'%s != %s at tier %s' % (
parts_at_tier, total_parts, tier_name))
for dev in self._iter_devs():
if not dev['weight']:
# With no weight, that means we wish to "drain" the device. So
# we set the parts_wanted to a really large negative number to
# indicate its strong desire to give up everything it has.
dev['parts_wanted'] = -self.parts * self.replicas
else:
tier = (dev['region'], dev['zone'], dev['ip'], dev['id'])
dev['parts_wanted'] = parts_by_tier[tier] - dev['parts']
def _update_last_part_moves(self):
"""
Updates how many hours ago each partition was moved based on the
current time. The builder won't move a partition that has been moved
more recently than min_part_hours.
"""
        # integer division keeps elapsed_hours an int so it can be stored
        # back into the unsigned-byte _last_part_moves array below
        elapsed_hours = int(time() - self._last_part_moves_epoch) // 3600
if elapsed_hours <= 0:
return
for part in range(self.parts):
# The "min(self._last_part_moves[part] + elapsed_hours, 0xff)"
# which was here showed up in profiling, so it got inlined.
last_plus_elapsed = self._last_part_moves[part] + elapsed_hours
if last_plus_elapsed < 0xff:
self._last_part_moves[part] = last_plus_elapsed
else:
self._last_part_moves[part] = 0xff
self._last_part_moves_epoch = int(time())
def _gather_parts_from_failed_devices(self, assign_parts):
"""
Update the map of partition => [replicas] to be reassigned from
removed devices.
"""
# First we gather partitions from removed devices. Since removed
# devices usually indicate device failures, we have no choice but to
# reassign these partitions. However, we mark them as moved so later
# choices will skip other replicas of the same partition if possible.
if self._remove_devs:
dev_ids = [d['id'] for d in self._remove_devs if d['parts']]
if dev_ids:
for part, replica in self._each_part_replica():
dev_id = self._replica2part2dev[replica][part]
if dev_id in dev_ids:
self._replica2part2dev[replica][part] = NONE_DEV
self._last_part_moves[part] = 0
assign_parts[part].append(replica)
self.logger.debug(
"Gathered %d/%d from dev %d [dev removed]",
part, replica, dev_id)
removed_devs = 0
while self._remove_devs:
remove_dev_id = self._remove_devs.pop()['id']
self.logger.debug("Removing dev %d", remove_dev_id)
self.devs[remove_dev_id] = None
removed_devs += 1
return removed_devs
def _adjust_replica2part2dev_size(self, to_assign):
"""
Make sure that the lengths of the arrays in _replica2part2dev
are correct for the current value of self.replicas.
Example:
self.part_power = 8
self.replicas = 2.25
self._replica2part2dev will contain 3 arrays: the first 2 of
length 256 (2**8), and the last of length 64 (0.25 * 2**8).
Update the mapping of partition => [replicas] that need assignment.
"""
fractional_replicas, whole_replicas = math.modf(self.replicas)
whole_replicas = int(whole_replicas)
removed_parts = 0
new_parts = 0
desired_lengths = [self.parts] * whole_replicas
if fractional_replicas:
desired_lengths.append(int(self.parts * fractional_replicas))
if self._replica2part2dev is not None:
# If we crossed an integer threshold (say, 4.1 --> 4),
# we'll have a partial extra replica clinging on here. Clean
# up any such extra stuff.
for part2dev in self._replica2part2dev[len(desired_lengths):]:
for dev_id in part2dev:
dev_losing_part = self.devs[dev_id]
dev_losing_part['parts'] -= 1
removed_parts -= 1
self._replica2part2dev = \
self._replica2part2dev[:len(desired_lengths)]
else:
self._replica2part2dev = []
for replica, desired_length in enumerate(desired_lengths):
if replica < len(self._replica2part2dev):
part2dev = self._replica2part2dev[replica]
if len(part2dev) < desired_length:
# Not long enough: needs to be extended and the
# newly-added pieces assigned to devices.
for part in range(len(part2dev), desired_length):
to_assign[part].append(replica)
part2dev.append(NONE_DEV)
new_parts += 1
elif len(part2dev) > desired_length:
# Too long: truncate this mapping.
for part in range(desired_length, len(part2dev)):
dev_losing_part = self.devs[part2dev[part]]
dev_losing_part['parts'] -= 1
removed_parts -= 1
self._replica2part2dev[replica] = part2dev[:desired_length]
else:
# Mapping not present at all: make one up and assign
# all of it.
for part in range(desired_length):
to_assign[part].append(replica)
new_parts += 1
self._replica2part2dev.append(
array('H', itertools.repeat(NONE_DEV, desired_length)))
self.logger.debug(
"%d new parts and %d removed parts from replica-count change",
new_parts, removed_parts)
def _gather_parts_for_dispersion(self, assign_parts, replica_plan):
"""
Update the map of partition => [replicas] to be reassigned from
insufficiently-far-apart replicas.
"""
# Now we gather partitions that are "at risk" because they aren't
        # currently sufficiently spread out across the cluster.
for part in range(self.parts):
if self._last_part_moves[part] < self.min_part_hours:
continue
# First, add up the count of replicas at each tier for each
# partition.
replicas_at_tier = defaultdict(int)
for dev in self._devs_for_part(part):
for tier in dev['tiers']:
replicas_at_tier[tier] += 1
# Now, look for partitions not yet spread out enough.
undispersed_dev_replicas = []
for replica in self._replicas_for_part(part):
dev_id = self._replica2part2dev[replica][part]
if dev_id == NONE_DEV:
continue
dev = self.devs[dev_id]
if all(replicas_at_tier[tier] <=
replica_plan[tier]['max']
for tier in dev['tiers']):
continue
undispersed_dev_replicas.append((dev, replica))
if not undispersed_dev_replicas:
continue
undispersed_dev_replicas.sort(
key=lambda dr: dr[0]['parts_wanted'])
for dev, replica in undispersed_dev_replicas:
# the min part hour check is ignored if and only if a device
# has more than one replica of a part assigned to it - which
# would have only been possible on rings built with an older
# version of the code
if (self._last_part_moves[part] < self.min_part_hours and
not replicas_at_tier[dev['tiers'][-1]] > 1):
continue
dev['parts_wanted'] += 1
dev['parts'] -= 1
assign_parts[part].append(replica)
self.logger.debug(
"Gathered %d/%d from dev %d [dispersion]",
part, replica, dev['id'])
self._replica2part2dev[replica][part] = NONE_DEV
for tier in dev['tiers']:
replicas_at_tier[tier] -= 1
self._last_part_moves[part] = 0
def _gather_parts_for_balance_can_disperse(self, assign_parts, start,
replica_plan):
"""
Update the map of partition => [replicas] to be reassigned from
overweight drives where the replicas can be better dispersed to
another failure domain.
:param assign_parts: the map of partition => [replica] to update
:param start: offset into self.parts to begin search
:param replica_plan: replicanth targets for tiers
"""
# Last, we gather partitions from devices that are "overweight" because
# they have more partitions than their parts_wanted.
for offset in range(self.parts):
part = (start + offset) % self.parts
if self._last_part_moves[part] < self.min_part_hours:
continue
# For each part we'll look at the devices holding those parts and
# see if any are overweight, keeping track of replicas_at_tier as
# we go
overweight_dev_replica = []
replicas_at_tier = defaultdict(int)
for replica in self._replicas_for_part(part):
dev_id = self._replica2part2dev[replica][part]
if dev_id == NONE_DEV:
continue
dev = self.devs[dev_id]
for tier in dev['tiers']:
replicas_at_tier[tier] += 1
if dev['parts_wanted'] < 0:
overweight_dev_replica.append((dev, replica))
if not overweight_dev_replica:
continue
overweight_dev_replica.sort(
key=lambda dr: dr[0]['parts_wanted'])
for dev, replica in overweight_dev_replica:
if self._last_part_moves[part] < self.min_part_hours:
break
if any(replica_plan[tier]['min'] <=
replicas_at_tier[tier] <
replica_plan[tier]['max']
for tier in dev['tiers']):
continue
# this is the most overweight_device holding a replica
# of this part that can shed it according to the plan
dev['parts_wanted'] += 1
dev['parts'] -= 1
assign_parts[part].append(replica)
self.logger.debug(
"Gathered %d/%d from dev %d [weight disperse]",
part, replica, dev['id'])
self._replica2part2dev[replica][part] = NONE_DEV
for tier in dev['tiers']:
replicas_at_tier[tier] -= 1
self._last_part_moves[part] = 0
def _gather_parts_for_balance(self, assign_parts, replica_plan):
"""
Gather parts that look like they should move for balance reasons.
        A simple gather of parts that look dispersible normally works out;
we'll switch strategies if things don't seem to move.
"""
# pick a random starting point on the other side of the ring
quarter_turn = (self.parts // 4)
        random_half = random.randint(0, self.parts // 2)
start = (self._last_part_gather_start + quarter_turn +
random_half) % self.parts
self.logger.debug('Gather start is %s '
'(Last start was %s)' % (
start, self._last_part_gather_start))
self._last_part_gather_start = start
self._gather_parts_for_balance_can_disperse(
assign_parts, start, replica_plan)
if not assign_parts:
self._gather_parts_for_balance_forced(assign_parts, start)
def _gather_parts_for_balance_forced(self, assign_parts, start, **kwargs):
"""
Update the map of partition => [replicas] to be reassigned from
overweight drives without restriction, parts gathered from this method
may be placed back onto devices that are no better (or worse) than the
device from which they are gathered.
This method allows devices to flop around enough to unlock replicas
that would have otherwise potentially been locked because of
dispersion - it should be used as a last resort.
:param assign_parts: the map of partition => [replica] to update
:param start: offset into self.parts to begin search
"""
for offset in range(self.parts):
part = (start + offset) % self.parts
if self._last_part_moves[part] < self.min_part_hours:
continue
overweight_dev_replica = []
for replica in self._replicas_for_part(part):
dev_id = self._replica2part2dev[replica][part]
if dev_id == NONE_DEV:
continue
dev = self.devs[dev_id]
if dev['parts_wanted'] < 0:
overweight_dev_replica.append((dev, replica))
if not overweight_dev_replica:
continue
overweight_dev_replica.sort(
key=lambda dr: dr[0]['parts_wanted'])
for dev, replica in overweight_dev_replica:
if self._last_part_moves[part] < self.min_part_hours:
break
# this is the most overweight_device holding a replica of this
# part we don't know where it's going to end up - but we'll
# pick it up and hope for the best.
dev['parts_wanted'] += 1
dev['parts'] -= 1
assign_parts[part].append(replica)
self.logger.debug(
"Gathered %d/%d from dev %d [weight forced]",
part, replica, dev['id'])
self._replica2part2dev[replica][part] = NONE_DEV
self._last_part_moves[part] = 0
def _reassign_parts(self, reassign_parts, replica_plan):
"""
For an existing ring data set, partitions are reassigned similar to
the initial assignment.
The devices are ordered by how many partitions they still want and
kept in that order throughout the process.
The gathered partitions are iterated through, assigning them to
devices according to the "most wanted" while keeping the replicas as
"far apart" as possible.
Two different regions are considered the farthest-apart things,
followed by zones, then different ip within a zone; the
least-far-apart things are different devices with the same ip in the
same zone.
:param reassign_parts: An iterable of (part, replicas_to_replace)
pairs. replicas_to_replace is an iterable of the
replica (an int) to replace for that partition.
replicas_to_replace may be shared for multiple
partitions, so be sure you do not modify it.
"""
parts_available_in_tier = defaultdict(int)
for dev in self._iter_devs():
dev['sort_key'] = self._sort_key_for(dev)
# Note: this represents how many partitions may be assigned to a
# given tier (region/zone/server/disk). It does not take into
# account how many partitions a given tier wants to shed.
#
# If we did not do this, we could have a zone where, at some
# point during an assignment, number-of-parts-to-gain equals
# number-of-parts-to-shed. At that point, no further placement
# into that zone would occur since its parts_available_in_tier
# would be 0. This would happen any time a zone had any device
# with partitions to shed, which is any time a device is being
# removed, which is a pretty frequent operation.
wanted = max(dev['parts_wanted'], 0)
for tier in dev['tiers']:
parts_available_in_tier[tier] += wanted
available_devs = \
sorted((d for d in self._iter_devs() if d['weight']),
key=lambda x: x['sort_key'])
tier2devs = defaultdict(list)
tier2sort_key = defaultdict(tuple)
tier2dev_sort_key = defaultdict(list)
max_tier_depth = 0
for dev in available_devs:
for tier in dev['tiers']:
tier2devs[tier].append(dev) # <-- starts out sorted!
tier2dev_sort_key[tier].append(dev['sort_key'])
tier2sort_key[tier] = dev['sort_key']
if len(tier) > max_tier_depth:
max_tier_depth = len(tier)
tier2children_sets = build_tier_tree(available_devs)
tier2children = defaultdict(list)
tier2children_sort_key = {}
tiers_list = [()]
depth = 1
while depth <= max_tier_depth:
new_tiers_list = []
for tier in tiers_list:
child_tiers = list(tier2children_sets[tier])
child_tiers.sort(key=tier2sort_key.__getitem__)
tier2children[tier] = child_tiers
                tier2children_sort_key[tier] = list(map(
                    tier2sort_key.__getitem__, child_tiers))
new_tiers_list.extend(child_tiers)
tiers_list = new_tiers_list
depth += 1
for part, replace_replicas in reassign_parts:
# always update part_moves for min_part_hours
self._last_part_moves[part] = 0
# count up where these replicas be
replicas_at_tier = defaultdict(int)
for dev in self._devs_for_part(part):
for tier in dev['tiers']:
replicas_at_tier[tier] += 1
for replica in replace_replicas:
# Find a new home for this replica
tier = ()
# This used to be a cute, recursive function, but it's been
# unrolled for performance.
depth = 1
while depth <= max_tier_depth:
# Choose the roomiest tier among those that don't
# already have their max replicas assigned according
# to the replica_plan.
candidates = [t for t in tier2children[tier] if
replicas_at_tier[t] <
replica_plan[t]['max']]
if not candidates:
raise Exception('no home for %s/%s %s' % (
part, replica, {t: (
replicas_at_tier[t],
replica_plan[t]['max'],
) for t in tier2children[tier]}))
tier = max(candidates, key=lambda t:
parts_available_in_tier[t])
depth += 1
dev = tier2devs[tier][-1]
dev['parts_wanted'] -= 1
dev['parts'] += 1
for tier in dev['tiers']:
parts_available_in_tier[tier] -= 1
replicas_at_tier[tier] += 1
self._replica2part2dev[replica][part] = dev['id']
self.logger.debug(
"Placed %d/%d onto dev %d", part, replica, dev['id'])
# Just to save memory and keep from accidental reuse.
for dev in self._iter_devs():
del dev['sort_key']
@staticmethod
def _sort_key_for(dev):
return (dev['parts_wanted'], random.randint(0, 0xFFFF), dev['id'])
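    # Illustrative only: a device that wants five more parts might get a sort
    # key like (5, 31882, 7) (parts_wanted, random tie-breaker, device id);
    # sorting ascending puts the neediest devices last, which is why the
    # assignment loop above picks tier2devs[tier][-1].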
def _build_max_replicas_by_tier(self, bound=math.ceil):
"""
Returns a defaultdict of (tier: replica_count) for all tiers in the
ring excluding zero weight devices.
There will always be a () entry as the root of the structure, whose
replica_count will equal the ring's replica_count.
Then there will be (region,) entries for each region, indicating the
maximum number of replicas the region might have for any given
partition.
Next there will be (region, zone) entries for each zone, indicating
the maximum number of replicas in a given region and zone. Anything
        greater than 1 indicates a partition at slightly elevated risk:
        if that zone were to fail, multiple replicas of that partition
        would be unreachable.
Next there will be (region, zone, ip_port) entries for each node,
indicating the maximum number of replicas stored on a node in a given
region and zone. Anything greater than 1 indicates a partition at
        elevated risk: if that ip_port were to fail, multiple replicas
        of that partition would be unreachable.
Last there will be (region, zone, ip_port, device) entries for each
device, indicating the maximum number of replicas the device shares
with other devices on the same node for any given partition.
Anything greater than 1 indicates a partition at serious risk, as the
data on that partition will not be stored distinctly at the ring's
replica_count.
Example return dict for the common SAIO setup::
{(): 3.0,
(1,): 3.0,
(1, 1): 1.0,
(1, 1, '127.0.0.1:6010'): 1.0,
(1, 1, '127.0.0.1:6010', 0): 1.0,
(1, 2): 1.0,
(1, 2, '127.0.0.1:6020'): 1.0,
(1, 2, '127.0.0.1:6020', 1): 1.0,
(1, 3): 1.0,
(1, 3, '127.0.0.1:6030'): 1.0,
(1, 3, '127.0.0.1:6030', 2): 1.0,
(1, 4): 1.0,
(1, 4, '127.0.0.1:6040'): 1.0,
(1, 4, '127.0.0.1:6040', 3): 1.0}
"""
# Used by walk_tree to know what entries to create for each recursive
# call.
tier2children = self._build_tier2children()
def walk_tree(tier, replica_count):
if len(tier) == 4:
# special case for device, it's not recursive
replica_count = min(1, replica_count)
mr = {tier: replica_count}
if tier in tier2children:
subtiers = tier2children[tier]
for subtier in subtiers:
submax = bound(float(replica_count) / len(subtiers))
mr.update(walk_tree(subtier, submax))
return mr
mr = defaultdict(float)
mr.update(walk_tree((), self.replicas))
return mr
def _build_weighted_replicas_by_tier(self):
"""
Returns a dict mapping <tier> => replicanths for all tiers in
the ring based on their weights.
"""
weight_of_one_part = self.weight_of_one_part()
# assign each device some replicanths by weight (can't be > 1)
weighted_replicas_for_dev = {}
devices_with_room = []
for dev in self._iter_devs():
if not dev['weight']:
continue
weighted_replicas = (
dev['weight'] * weight_of_one_part / self.parts)
if weighted_replicas < 1:
devices_with_room.append(dev['id'])
else:
weighted_replicas = 1
weighted_replicas_for_dev[dev['id']] = weighted_replicas
while True:
remaining = self.replicas - sum(weighted_replicas_for_dev.values())
if remaining < 1e-10:
break
devices_with_room = [d for d in devices_with_room if
weighted_replicas_for_dev[d] < 1]
rel_weight = remaining / sum(
weighted_replicas_for_dev[d] for d in devices_with_room)
for d in devices_with_room:
weighted_replicas_for_dev[d] = min(
1, weighted_replicas_for_dev[d] * (rel_weight + 1))
weighted_replicas_by_tier = defaultdict(float)
for dev in self._iter_devs():
if not dev['weight']:
continue
assigned_replicanths = weighted_replicas_for_dev[dev['id']]
dev_tier = (dev['region'], dev['zone'], dev['ip'], dev['id'])
for i in range(len(dev_tier) + 1):
tier = dev_tier[:i]
weighted_replicas_by_tier[tier] += assigned_replicanths
# belts & suspenders/paranoia - at every level, the sum of
# weighted_replicas should be very close to the total number of
# replicas for the ring
tiers = ['cluster', 'regions', 'zones', 'servers', 'devices']
for i, tier_name in enumerate(tiers):
replicas_at_tier = sum(weighted_replicas_by_tier[t] for t in
weighted_replicas_by_tier if len(t) == i)
if abs(self.replicas - replicas_at_tier) > 1e-10:
raise exceptions.RingValidationError(
'%s != %s at tier %s' % (
replicas_at_tier, self.replicas, tier_name))
return weighted_replicas_by_tier
def _build_wanted_replicas_by_tier(self):
"""
Returns a defaultdict of (tier: replicanths) for all tiers in the ring
based on unique-as-possible (full dispersion) with respect to their
weights and device counts.
N.B. _build_max_replicas_by_tier calculates the upper bound on the
replicanths each tier may hold irrespective of the weights of the
tier; this method will calculate the minimum replicanth <=
max_replicas[tier] that will still solve dispersion. However, it is
not guaranteed to return a fully dispersed solution if failure domains
are over-weighted for their device count.
"""
weighted_replicas = self._build_weighted_replicas_by_tier()
dispersed_replicas = {
t: {
'min': math.floor(r),
'max': math.ceil(r),
} for (t, r) in
self._build_max_replicas_by_tier(bound=float).items()
}
# watch out for device limited tiers
num_devices = defaultdict(int)
for d in self._iter_devs():
if d['weight'] <= 0:
continue
for t in (d.get('tiers') or tiers_for_dev(d)):
num_devices[t] += 1
num_devices[()] += 1
tier2children = self._build_tier2children()
wanted_replicas = defaultdict(float)
def place_replicas(tier, replicanths):
if replicanths > num_devices[tier]:
raise exceptions.RingValidationError(
'More replicanths (%s) than devices (%s) '
'in tier (%s)' % (replicanths, num_devices[tier], tier))
wanted_replicas[tier] = replicanths
sub_tiers = sorted(tier2children[tier])
if not sub_tiers:
return
to_place = defaultdict(float)
remaining = replicanths
tiers_to_spread = sub_tiers
device_limited = False
while True:
rel_weight = remaining / sum(weighted_replicas[t]
for t in tiers_to_spread)
for t in tiers_to_spread:
replicas = to_place[t] + (
weighted_replicas[t] * rel_weight)
if replicas < dispersed_replicas[t]['min']:
replicas = dispersed_replicas[t]['min']
elif (replicas > dispersed_replicas[t]['max'] and
not device_limited):
replicas = dispersed_replicas[t]['max']
if replicas > num_devices[t]:
replicas = num_devices[t]
to_place[t] = replicas
remaining = replicanths - sum(to_place.values())
if remaining < -1e-10:
tiers_to_spread = [
t for t in sub_tiers
if to_place[t] > dispersed_replicas[t]['min']
]
elif remaining > 1e-10:
tiers_to_spread = [
t for t in sub_tiers
if (num_devices[t] > to_place[t] <
dispersed_replicas[t]['max'])
]
if not tiers_to_spread:
device_limited = True
tiers_to_spread = [
t for t in sub_tiers
if to_place[t] < num_devices[t]
]
else:
# remaining is "empty"
break
for t in sub_tiers:
self.logger.debug('Planning %s on %s',
to_place[t], t)
place_replicas(t, to_place[t])
# place all replicas in the cluster tier
place_replicas((), self.replicas)
# belts & suspenders/paranoia - at every level, the sum of
# wanted_replicas should be very close to the total number of
# replicas for the ring
tiers = ['cluster', 'regions', 'zones', 'servers', 'devices']
for i, tier_name in enumerate(tiers):
replicas_at_tier = sum(wanted_replicas[t] for t in
wanted_replicas if len(t) == i)
if abs(self.replicas - replicas_at_tier) > 1e-10:
raise exceptions.RingValidationError(
'%s != %s at tier %s' % (
replicas_at_tier, self.replicas, tier_name))
return wanted_replicas
def _build_target_replicas_by_tier(self):
"""
Build a map of <tier> => <target_replicas> accounting for device
weights, unique-as-possible dispersion and overload.
<tier> - a tuple, describing each tier in the ring topology
<target_replicas> - a float, the target replicanths at the tier
"""
weighted_replicas = self._build_weighted_replicas_by_tier()
wanted_replicas = self._build_wanted_replicas_by_tier()
max_overload = self.get_required_overload(weighted=weighted_replicas,
wanted=wanted_replicas)
if max_overload <= 0.0:
return wanted_replicas
else:
overload = min(self.overload, max_overload)
self.logger.debug("Using effective overload of %f", overload)
target_replicas = defaultdict(float)
for tier, weighted in weighted_replicas.items():
m = (wanted_replicas[tier] - weighted) / max_overload
target_replicas[tier] = m * overload + weighted
# belts & suspenders/paranoia - at every level, the sum of
# target_replicas should be very close to the total number
# of replicas for the ring
tiers = ['cluster', 'regions', 'zones', 'servers', 'devices']
for i, tier_name in enumerate(tiers):
replicas_at_tier = sum(target_replicas[t] for t in
target_replicas if len(t) == i)
if abs(self.replicas - replicas_at_tier) > 1e-10:
raise exceptions.RingValidationError(
'%s != %s at tier %s' % (
replicas_at_tier, self.replicas, tier_name))
return target_replicas
def _build_replica_plan(self):
"""
Wraps return value of _build_target_replicas_by_tier to include
pre-calculated min and max values for each tier.
:returns: a dict, mapping <tier> => <replica_plan>, where
<replica_plan> is itself a dict
<replica_plan> include at least the following keys:
min - the minimum number of replicas at the tier
target - the target replicanths at the tier
max - the maximum number of replicas at the tier
"""
# replica part-y planner!
target_replicas = self._build_target_replicas_by_tier()
replica_plan = defaultdict(
lambda: {'min': 0, 'target': 0, 'max': 0})
replica_plan.update({
t: {
'min': math.floor(r + 1e-10),
'target': r,
'max': math.ceil(r - 1e-10),
} for (t, r) in
target_replicas.items()
})
return replica_plan
def _devs_for_part(self, part):
"""
Returns a list of devices for a specified partition.
Deliberately includes duplicates.
"""
if self._replica2part2dev is None:
return []
devs = []
for part2dev in self._replica2part2dev:
if part >= len(part2dev):
continue
dev_id = part2dev[part]
if dev_id == NONE_DEV:
continue
devs.append(self.devs[dev_id])
return devs
def _replicas_for_part(self, part):
"""
Returns a list of replicas for a specified partition.
These can be used as indices into self._replica2part2dev
without worrying about IndexErrors.
"""
return [replica for replica, part2dev
in enumerate(self._replica2part2dev)
if part < len(part2dev)]
def _each_part_replica(self):
"""
Generator yielding every (partition, replica) pair in the ring.
"""
for replica, part2dev in enumerate(self._replica2part2dev):
for part in range(len(part2dev)):
yield (part, replica)
@classmethod
def load(cls, builder_file, open=open):
"""
        Obtain a RingBuilder instance from the provided builder file
:param builder_file: path to builder file to load
:return: RingBuilder instance
"""
try:
fp = open(builder_file, 'rb')
except IOError as e:
if e.errno == errno.ENOENT:
raise exceptions.FileNotFoundError(
'Ring Builder file does not exist: %s' % builder_file)
elif e.errno in [errno.EPERM, errno.EACCES]:
raise exceptions.PermissionError(
'Ring Builder file cannot be accessed: %s' % builder_file)
else:
raise
else:
with fp:
try:
builder = pickle.load(fp)
except Exception:
# raise error during unpickling as UnPicklingError
raise exceptions.UnPicklingError(
'Ring Builder file is invalid: %s' % builder_file)
if not hasattr(builder, 'devs'):
builder_dict = builder
builder = RingBuilder(1, 1, 1)
builder.copy_from(builder_dict)
for dev in builder.devs:
# really old rings didn't have meta keys
if dev and 'meta' not in dev:
dev['meta'] = ''
                # NOTE(akscram): Old ring builder files don't contain
                #                replication parameters.
if dev:
if 'ip' in dev:
dev.setdefault('replication_ip', dev['ip'])
if 'port' in dev:
dev.setdefault('replication_port', dev['port'])
return builder
def save(self, builder_file):
"""Serialize this RingBuilder instance to disk.
:param builder_file: path to builder file to save
"""
with open(builder_file, 'wb') as f:
pickle.dump(self.to_dict(), f, protocol=2)
def search_devs(self, search_values):
"""Search devices by parameters.
:param search_values: a dictionary with search values to filter
devices, supported parameters are id,
region, zone, ip, port, replication_ip,
replication_port, device, weight, meta
:returns: list of device dicts
"""
matched_devs = []
for dev in self.devs:
if not dev:
continue
matched = True
for key in ('id', 'region', 'zone', 'ip', 'port', 'replication_ip',
'replication_port', 'device', 'weight', 'meta'):
if key in search_values:
value = search_values.get(key)
if value is not None:
if key == 'meta':
if value not in dev.get(key):
matched = False
elif key == 'ip' or key == 'replication_ip':
cdev = ''
try:
cdev = validate_and_normalize_address(
dev.get(key, ''))
except ValueError:
pass
if cdev != value:
matched = False
elif dev.get(key) != value:
matched = False
if matched:
matched_devs.append(dev)
return matched_devs
def increase_partition_power(self):
""" Increases ring partition power by one.
Devices will be assigned to partitions like this:
OLD: 0, 3, 7, 5, 2, 1, ...
NEW: 0, 0, 3, 3, 7, 7, 5, 5, 2, 2, 1, 1, ...
"""
new_replica2part2dev = []
for replica in self._replica2part2dev:
new_replica = array('H')
for device in replica:
new_replica.append(device)
new_replica.append(device) # append device a second time
new_replica2part2dev.append(new_replica)
self._replica2part2dev = new_replica2part2dev
for device in self._iter_devs():
device['parts'] *= 2
        # We need to update the time when a partition was last moved.
        # Since this is an array over all partitions, it needs to be
        # doubled as well.
new_last_part_moves = []
for partition in self._last_part_moves:
new_last_part_moves.append(partition)
new_last_part_moves.append(partition)
self._last_part_moves = new_last_part_moves
self.part_power += 1
self.parts *= 2
self.version += 1
| apache-2.0 | -738,666,777,734,163,300 | 41.572917 | 79 | 0.551532 | false |
tensorflow/datasets | tensorflow_datasets/image/flic.py | 1 | 5873 | # coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Frames Labeled In Cinema (FLIC)."""
import os
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_CITATION = """@inproceedings{modec13,
title={MODEC: Multimodal Decomposable Models for Human Pose Estimation},
author={Sapp, Benjamin and Taskar, Ben},
booktitle={In Proc. CVPR},
year={2013},
}
"""
_DESCRIPTION = """
From the paper: We collected a 5003 image dataset automatically from popular
Hollywood movies. The images were obtained by running a state-of-the-art person
detector on every tenth frame of 30 movies. People detected with high confidence
(roughly 20K candidates) were then sent to the crowdsourcing marketplace Amazon
Mechanical Turk to obtain groundtruth labeling. Each image was annotated by five
Turkers for $0.01 each to label 10 upper body joints. The median-of-five labeling
was taken in each image to be robust to outlier annotation. Finally, images were
rejected manually by us if the person was occluded or severely non-frontal. We
set aside 20% (1016 images) of the data for testing.
"""
_DATA_OPTIONS = ["small", "full"]
_HOMEPAGE_URL = "https://bensapp.github.io/flic-dataset.html"
_URL_SUBSET = "https://drive.google.com/uc?id=0B4K3PZp8xXDJN0Fpb0piVjQ3Y3M&export=download"
_URL_SUPERSET = "https://drive.google.com/uc?id=0B4K3PZp8xXDJd2VwblhhOVBfMDg&export=download"
def _normalize_bbox(raw_bbox, img_path):
"""Normalize torsobox bbox values."""
with tf.io.gfile.GFile(img_path, "rb") as fp:
img = tfds.core.lazy_imports.PIL_Image.open(fp)
width, height = img.size
return tfds.features.BBox(
ymin=raw_bbox[1] / height,
ymax=raw_bbox[3] / height,
xmin=raw_bbox[0] / width,
xmax=raw_bbox[2] / width,
)
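# Usage sketch (hypothetical values, not from the dataset): a raw torsobox of
# [74, 92, 208, 330] on a 720x480 frame would normalize to roughly
# BBox(ymin=0.19, xmin=0.10, ymax=0.69, xmax=0.29).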
class FlicConfig(tfds.core.BuilderConfig):
"""BuilderConfig for FLIC."""
def __init__(self, *, data, **kwargs):
"""Constructs a FlicConfig."""
if data not in _DATA_OPTIONS:
raise ValueError("data must be one of %s" % _DATA_OPTIONS)
descriptions = {
"small": "5003 examples used in CVPR13 MODEC paper.",
"full":
"20928 examples, a superset of FLIC consisting of more difficult "
"examples."
}
description = kwargs.get("description", "Uses %s" % descriptions[data])
kwargs["description"] = description
super(FlicConfig, self).__init__(**kwargs)
self.data = data
self.url = _URL_SUBSET if data == "small" else _URL_SUPERSET
self.dir = "FLIC" if data == "small" else "FLIC-full"
def _make_builder_configs():
configs = []
for data in _DATA_OPTIONS:
configs.append(
FlicConfig(name=data, version=tfds.core.Version("2.0.0"), data=data))
return configs
class Flic(tfds.core.GeneratorBasedBuilder):
"""Frames Labeled In Cinema (FLIC)."""
BUILDER_CONFIGS = _make_builder_configs()
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
"image":
tfds.features.Image(
shape=(480, 720, 3), encoding_format="jpeg"),
"poselet_hit_idx":
tfds.features.Sequence(tf.uint16),
"moviename":
tfds.features.Text(),
"xcoords":
tfds.features.Sequence(tf.float64),
"ycoords":
tfds.features.Sequence(tf.float64),
"currframe":
tfds.features.Tensor(shape=(), dtype=tf.float64),
"torsobox":
tfds.features.BBoxFeature(),
}),
homepage=_HOMEPAGE_URL,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
extract_path = dl_manager.download_and_extract(self.builder_config.url)
mat_path = os.path.join(extract_path, self.builder_config.dir,
"examples.mat")
with tf.io.gfile.GFile(mat_path, "rb") as f:
data = tfds.core.lazy_imports.scipy.io.loadmat(
f, struct_as_record=True, squeeze_me=True, mat_dtype=True)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={
"extract_path": extract_path,
"data": data,
"selection_column": 7, # indicates train split selection
},
),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs={
"extract_path": extract_path,
"data": data,
"selection_column": 8, # indicates test split selection
},
),
]
def _generate_examples(self, extract_path, data, selection_column):
"""Yields examples."""
for u_id, example in enumerate(data["examples"]):
if example[selection_column]:
img_path = os.path.join(extract_path, self.builder_config.dir, "images",
example[3])
yield u_id, {
"image": img_path,
"poselet_hit_idx": example[0],
"moviename": example[1],
"xcoords": example[2][0],
"ycoords": example[2][1],
"currframe": example[5],
"torsobox": _normalize_bbox(example[6], img_path),
}
| apache-2.0 | -7,529,421,852,458,606,000 | 33.751479 | 93 | 0.629491 | false |
oz123/radiopy | rpy.py | 1 | 1118 | from subprocess import Popen
import pty
import os
import sys
import pynotify
def parse_ICY(line):
aline = line.split('\r\n')[-1]
junk, info = aline.split('=', 1)
try:
info, junk = info.split(';', 1)
except ValueError:
pass
artist, title = info.split('-')
return artist.strip("'"), title.strip("'")
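# Example (hypothetical metadata line): a line such as
# "ICY Info: StreamTitle='Miles Davis - So What';StreamUrl='';"
# yields artist "Miles Davis " and title " So What" after splitting on
# '=', ';' and '-' (quotes are stripped, surrounding spaces are left as-is).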
cmd = ['mplayer',
'-playlist', 'http://www.radioparadise.com/musiclinks/rp_128aac.m3u']
if sys.argv[1:]:
cmd = cmd[:1] + sys.argv[1:] + cmd[1:]
master, slave = pty.openpty()
proc = Popen(cmd, stdout=slave, stderr=slave)
stdout = os.fdopen(master)
ICYSTRING = ''
while True:
line = stdout.readline(1)
ICYSTRING = ICYSTRING + line
if 'ICY Info' in ICYSTRING:
for i in range(80):
ICYSTRING = ICYSTRING + stdout.readline(1)
a, t = parse_ICY(ICYSTRING)
ICYSTRING = ''
n = pynotify.Notification(a, t)
n.set_timeout(10000) # 10 sec
n.set_category("device")
pynotify.init("Timekpr notification")
n.show()
pynotify.uninit()
ICYSTRING = ''
sys.stdout.write(line)
| gpl-3.0 | 1,780,432,038,875,164,700 | 23.844444 | 76 | 0.592129 | false |
thom-at-redhat/cfme_tests | cfme/tests/intelligence/test_download_report.py | 1 | 1858 | # -*- coding: utf-8 -*-
import pytest
import os
import shutil
from cfme.intelligence.reports import reports
from utils.providers import setup_a_provider as _setup_a_provider
from utils.wait import wait_for
from utils import browser
TIMEOUT = 60.0 # Wait time for download
def clean_temp_directory():
""" Clean the temporary directory.
"""
for root, dirs, files in os.walk(browser.firefox_profile_tmpdir):
for f in files:
os.unlink(os.path.join(root, f))
for d in dirs:
shutil.rmtree(os.path.join(root, d))
@pytest.fixture(scope="module")
def setup_a_provider():
_setup_a_provider("infra")
@pytest.fixture
def needs_firefox():
""" Fixture which skips the test if not run under firefox.
    I recommend putting it first in the test's argument list.
"""
if browser.browser().name != "firefox":
pytest.skip(msg="This test needs firefox to run")
@pytest.fixture(scope="module")
def report():
path = ["Configuration Management", "Virtual Machines", "Hardware Information for VMs"]
return reports.CannedSavedReport(path, reports.queue_canned_report(*path))
@pytest.mark.skipif('True') # To be removed when we have solved the Docker issue
@pytest.mark.parametrize("filetype", ["txt", "csv"])
@pytest.sel.go_to('dashboard')
def test_download_report_firefox(needs_firefox, setup_a_provider, report, filetype):
""" Download the report as a file and check whether it was downloaded.
This test skips for PDF as there are some issues with it.
BZ#1021646
"""
extension = "." + filetype
clean_temp_directory()
report.download(filetype)
wait_for(
lambda: any(
[file.endswith(extension)
for file
in os.listdir(browser.firefox_profile_tmpdir)]
),
num_sec=TIMEOUT
)
clean_temp_directory()
| gpl-2.0 | 5,754,577,418,200,409,000 | 27.151515 | 91 | 0.661464 | false |
HartBlanc/Mastercard_Exchange_Rates | rate__retriever.py | 1 | 7513 | # 151 currencies, 22650 currency pairs, 364 days (period 1) 134 days (period 2) => 3,035,100(20,100/from_c) - 8,221,950 entries(8361.157)
print('importing packages')
import time
import sqlite3
import json
import requests
import datetime
import math
import pytz
from datetime import date
from multiprocessing.pool import Pool
print('connecting to db')
conn = sqlite3.connect('mastercard_1.sqlite')
cur = conn.cursor()
print('defining functions')
def day_calculator(date):
return (date - date_1).days + 1
def date_calculator(day):
return date_1+datetime.timedelta(day-1)
def date_stringer(date):
return date.strftime('%Y-%m-%d')
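# Illustrative only (hypothetical dates): if date_1 were 2016-06-01, then
# day_calculator(date(2016, 6, 3)) returns 3, date_calculator(3) maps back to
# 2016-06-03, and date_stringer produces '2016-06-03' for the request URL.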
print('defining constants')
start_from_id = int(input('from_id initial value: '))
start_to_id = int(input('to_id initial value: '))
base_url = 'https://www.mastercard.us/settlement/currencyrate/fxDate={date_};transCurr={from_};crdhldBillCurr={to_};bankFee=0.00;transAmt=1/conversion-rate'
first_date=date(2016,2,29)
now = datetime.datetime.now(pytz.timezone('US/Eastern'))
if now.hour < 14:
today=now.date() - datetime.timedelta(days=1)
else:
today=now.date()
date_1=today - datetime.timedelta(days=364)
if date_1.weekday()==6:
date_1=date_1+datetime.timedelta(days=1)
if date_1.weekday()==5:
date_1=date_1+datetime.timedelta(days=2)
date_string = date_stringer(date_1)
print('first date in period', date_1, 'today:',today)
late_day=day_calculator(date(2016,10,14))
print('grabbing codes from db')
cur.execute('SELECT code FROM Currency_Codes')
code_tuples=cur.fetchall()
codes = [ x[0] for x in code_tuples ]
number_of_codes = len(codes)
print('initiating')
for code in codes[(start_from_id-1):]:
start_time_f = datetime.datetime.now()
to_id = start_to_id
from_c = code
cur.execute('SELECT id FROM Currency_Codes WHERE code=?', (from_c,))
from_id = cur.fetchone()[0]
while to_id <= number_of_codes:
start_time_t = datetime.datetime.now()
to_c = codes[to_id-1]
print(from_c,to_c)
        if from_c == to_c:
to_id +=1
continue
#### FIND START DATE - FIRST CHECKS LATE DAY, THEN FIRST DAY, THEN DOES BINARY SEARCH
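        # Sketch of the search below: the loop keeps a [lower_bound, upper_bound]
        # window of day numbers; a missing rate at day_i raises the lower bound
        # to day_i + 1, an available rate lowers the upper bound to day_i, and
        # the midpoint is probed until both bounds meet on the first day with
        # published data (with special cases for the first probe and for
        # server errors).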
lower_bound=1
upper_bound=late_day
day_i=late_day-1
while upper_bound != lower_bound:
date_i = date_calculator(day_i)
if day_i < late_day-4:
if date_i.weekday() == 6:
if lower_bound <= day_i-2 :
day_i=day_i-2
if date_i.weekday() == 5:
if lower_bound <= day_i-1:
day_i=day_i-1
date_i = date_calculator(day_i)
date_string_i=date_stringer(date_i)
url=base_url.format(date_=date_string_i,from_=from_c,to_=to_c)
print(date_string_i,'day number:', day_i,'day of the week:', date_i.weekday())
#Retries if requests doesn't return a json file (server errors)
print('requesting url')
while True:
try:
r = requests.get(url)
JSON=r.json()
except:
time.sleep(5)
continue
break
print('json retrieved')
if 'errorCode' in JSON['data']:
if JSON['data']['errorCode'] in ('104','114'):
print('data not available for this date')
lower_bound = day_i+1
if day_i==late_day-1:
day_i=late_day
break
else:
day_i=math.ceil((lower_bound+upper_bound)/2)
print('lower:',lower_bound,'upper:',upper_bound)
elif JSON['data']['errorCode'] in ('500','401','400'):
print('error code: ',JSON['data']['errorCode'])
print('Server having technical problems')
time.sleep(500)
continue
else:
print('error code: ',JSON['data']['errorCode'])
print('conversion rate too small')
break
else:
upper_bound = day_i
if day_i == late_day-1:
day_i=1
elif day_i == 1:
break
else:
day_i=math.floor((lower_bound+upper_bound)/2)
print('lower:',lower_bound,'upper:',upper_bound)
#### Extract rates for period up to today
day=day_i
date=date_calculator(day_i)
print('found start day')
start_=datetime.datetime.now()
while (today - date).days >=0:
if day < late_day-4:
if date.weekday() == 5:
day = day + 2
date = date_calculator(day)
date_string=date_stringer(date)
url=base_url.format(date_=date_string,from_=from_c,to_=to_c)
print(date)
#Retries if requests doesn't return a json file (server errors)
print('requesting url')
while True:
try:
r = requests.get(url)
JSON=r.json()
except:
time.sleep(5)
continue
break
print('json retrieved')
if 'errorCode' in JSON['data']:
if JSON['data']['errorCode'] in ('104','114'):
print('data not available for this date')
day = day + 1
date = date_calculator(day)
continue
elif JSON['data']['errorCode'] in ('500','401','400'):
print('error code: ',JSON['data']['errorCode'])
print('Server having technical problems')
time.sleep(500)
continue
else:
print('error code: ',JSON['data']['errorCode'])
print('conversion rate too small')
break
else:
rate = JSON['data']['conversionRate']
day = day_calculator(date)
print(rate)
date_id=(date_1-first_date).days+day
cur.execute('''INSERT OR REPLACE INTO Rates
(rate, from_id, to_id, date_id)
VALUES ( ?, ?, ?, ?)''',
(rate, from_id, to_id, date_id) )
day = day + 1
date = date_calculator(day)
end_ = datetime.datetime.now()
print(from_c,'Duration: {}'.format(end_ - start_))
to_id +=1
conn.commit()
end_time_t = datetime.datetime.now()
print(to_c,'Duration: {}'.format(end_time_t - start_time_t))
date_1=today - datetime.timedelta(days=364)
if date_1.weekday()==6:
date_1=date_1+datetime.timedelta(days=1)
if date_1.weekday()==5:
date_1=date_1+datetime.timedelta(days=2)
now = datetime.datetime.now(pytz.timezone('US/Eastern'))
if now.hour < 14:
today=now.date() - datetime.timedelta(days=1)
else:
today=now.date()
end_time_f = datetime.datetime.now()
print(from_c,'Duration: {}'.format(end_time_f - start_time_f))
print('done')
| mit | 8,326,743,759,790,757,000 | 35.64878 | 156 | 0.508585 | false |
tecan/xchat-rt | plugins/scripts/cancelbot-code-55/dictbot/dictbot.py | 1 | 6363 | #!/usr/bin/python
__module_name__ = "Cancel's DictBot"
__module_version__ = "3.0.1"
__module_description__ = "DictBot by Cancel"
import xchat
import os
import re
import string
import ConfigParser
from DictService_client import *
print "\0034",__module_name__, __module_version__,"has been loaded\003"
#the globals go here
option = {}
xchatdir = xchat.get_info("xchatdir")
inifile = os.path.join(xchatdir, "dictbot.ini")
color = {"white":"\0030", "black":"\0031", "blue":"\0032", "green":"\0033", "red":"\0034",
"dred":"\0035", "purple":"\0036", "dyellow":"\0037", "yellow":"\0038", "bgreen":"\0039",
"dgreen":"\00310", "green":"\00311", "blue":"\00312", "bpurple":"\00313", "dgrey":"\00314",
"lgrey":"\00315", "close":"\003"}
dictionaries = {}
loc = DictServiceLocator()
port = loc.getDictServiceSoap()
#the functions go here
def loadVars():
global option, proxy
try:
config = ConfigParser.ConfigParser()
infile = open(inifile)
config.readfp(infile)
infile.close()
#Parse main
#for item in config.items("main"):
#option[item[0]] = item[1]
option["service"] = config.getboolean("main", "service")
option["charlimit"] = config.getint("main", "charlimit")
option["defdict"] = config.get("main", "defdict")
option["deflimit"] = config.getint("main", "deflimit")
print color["dgreen"], "CancelBot DictBot dictbot.ini Load Success"
except EnvironmentError:
print color["red"], "Could not open dictbot.ini put it in your " + xchatdir
def onText(word, word_eol, userdata):
global option
destination = xchat.get_context()
trigger = re.split(' ',string.lower(word[1]))
triggernick = word[0]
if trigger[0] == '!define' and option["service"] == True:
lookup = string.join(trigger[1:], '+')
getDefinition(option["defdict"], lookup, destination)
elif trigger[0] == '!lookin' and dictionaries.has_key(trigger[1]) and option["service"] == True:
dictid = trigger[1]
lookup = string.join(trigger[2:], '+')
getDefinition(dictid, lookup, destination)
elif trigger[0] == '!dictionaries' and option["service"] == True:
getDictionaries(triggernick)
def onPvt(word, word_eol, userdata):
destination = xchat.get_context()
triggernick = word[0]
trigger = re.split(' ',string.lower(word[1]))
if trigger[0] == '!define' and option["service"] == True:
lookup = string.join(trigger[1:], '+')
getDefinition(option["defdict"], lookup, destination)
elif trigger[0] == '!lookin' and dictionaries.has_key(trigger[1]) and option["service"] == True:
dictid = trigger[1]
lookup = string.join(trigger[2:], '+')
getDefinition(dictid, lookup, destination)
elif trigger[0] == '!dictionaries' and option["service"] == True:
getDictionaries(triggernick)
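# Trigger examples (hypothetical IRC input): "!define serendipity" looks the
# word up in the default dictionary, "!lookin wn serendipity" queries the
# dictionary with id "wn" (WordNet on dict.org), and "!dictionaries" messages
# the requesting nick the list of available dictionary ids.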
def getDefinition(dictid, lookup, destination):
defcounter = 0
request = DefineInDictSoapIn()
request._dictId = dictid
request._word = lookup
response = port.DefineInDict(request)
if(len(response._DefineInDictResult._Definitions._Definition) == 0):
destination.command("say " + " nothing found check spelling or look in another dictionary using !lookin dictcode word")
else:
for definition in response._DefineInDictResult._Definitions._Definition:
defcounter += 1
#result = response._DefineInDictResult._Definitions._Definition[0]._WordDefinition
result = definition._WordDefinition
result = result.replace('\n', '')
result = result.replace(' ', '')
destination.command("say " + lookup + " in " + dictionaries[dictid])
if (len(result) >= option["charlimit"]):
destination.command("say " + result[:option["charlimit"]] + " [truncated..]")
else:
destination.command("say " + result)
if defcounter >= option["deflimit"]:
return
def getDictionaryList():
global dictionaries
request = DictionaryListSoapIn()
response = port.DictionaryList(request)
for dictionary in response._DictionaryListResult._Dictionary:
dictionaries[dictionary._Id] = dictionary._Name
def getDictionaries(triggernick):
for key in dictionaries.keys():
xchat.command("msg " + triggernick + " " + color["red"] + key + color["black"] + " " + dictionaries[key])
def localDefine(word, word_eol, userdata):
request = DefineInDictSoapIn()
if word[0] == 'define':
request._dictId = option["defdict"]
request._word = string.join(word_eol[1], '+')
response = port.DefineInDict(request)
if(len(response._DefineInDictResult._Definitions._Definition) == 0):
print " nothing found check spelling or look in another dictionary using !lookin dictcode word"
else:
for definition in response._DefineInDictResult._Definitions._Definition:
print definition._WordDefinition
#print response._DefineInDictResult._Definitions._Definition[0]._WordDefinition
elif word[0] == 'lookin':
request._dictId= word[1]
request._word = string.join(word_eol[2], '+')
response = port.DefineInDict(request)
if(len(response._DefineInDictResult._Definitions._Definition) == 0):
print " nothing found check spelling or look in another dictionary using !lookin dictcode word"
else:
for definition in response._DefineInDictResult._Definitions._Definition:
print definition._WordDefinition
#print response._DefineInDictResult._Definitions._Definition[0]._WordDefinition
elif word[0] == 'dictionaries':
for key in dictionaries.keys():
print key + ":" + dictionaries[key]
return xchat.EAT_ALL
loadVars()
getDictionaryList()
#The hooks go here
xchat.hook_print('Channel Message', onText)
xchat.hook_print('Private Message to Dialog', onPvt)
xchat.hook_command('define', localDefine, "usage: define word")
xchat.hook_command('dictionaries', localDefine, "list the dictionaries")
xchat.hook_command('lookin', localDefine,"lookin wn word")
#LICENSE GPL
#Last modified 12-24-07
| gpl-2.0 | -6,841,493,494,672,742,000 | 39.272152 | 127 | 0.63272 | false |
jason2506/bptree | conanfile.py | 1 | 1231 | from conans import ConanFile, CMake
class BPTreeConan(ConanFile):
name = 'bptree'
version = '0.0.0'
url = 'https://github.com/jason2506/bptree-cpp'
license = 'MIT'
author = 'Chi-En Wu'
settings = ('os', 'compiler', 'build_type', 'arch')
generators = ('cmake', 'txt')
no_copy_source = True
options = {
'enable_conan': [True, False],
}
default_options = (
'gtest:shared=False',
'enable_conan=True',
)
exports = (
'CMakeLists.txt',
'cmake/*.cmake',
'include/*.hpp',
'test/CMakeLists.txt',
'test/*.cpp',
)
def requirements(self):
if self.develop:
self.requires('gtest/1.8.0@lasote/stable')
def build(self):
enable_testing = 'gtest' in self.deps_cpp_info.deps
cmake = CMake(self)
cmake.configure(defs={
'ENABLE_CONAN': self.options.enable_conan,
'BUILD_TESTING': enable_testing,
})
cmake.build()
if enable_testing:
cmake.test()
cmake.install()
def package(self):
# files are copied by cmake.install()
pass
def package_id(self):
self.info.header_only()
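# Typical local flow (not part of the recipe, names are illustrative):
#   conan create . jason2506/testing
# builds the package, runs the gtest suite when BUILD_TESTING is on, and
# packages the headers via cmake.install().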
| mit | -3,906,506,703,934,059,500 | 21.381818 | 59 | 0.540211 | false |
linebp/pandas | pandas/tests/reshape/test_reshape.py | 1 | 43476 | # -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import pytest
from pandas import DataFrame, Series
import pandas as pd
from numpy import nan
import numpy as np
from pandas.util.testing import assert_frame_equal
from pandas.core.reshape.reshape import (
melt, lreshape, get_dummies, wide_to_long)
import pandas.util.testing as tm
from pandas.compat import range, u
class TestMelt(object):
def setup_method(self, method):
self.df = tm.makeTimeDataFrame()[:10]
self.df['id1'] = (self.df['A'] > 0).astype(np.int64)
self.df['id2'] = (self.df['B'] > 0).astype(np.int64)
self.var_name = 'var'
self.value_name = 'val'
        self.df1 = pd.DataFrame([[1.067683, -1.110463, 0.20867],
                                 [-1.321405, 0.368915, -1.055342],
                                 [-0.807333, 0.08298, -0.873361]])
self.df1.columns = [list('ABC'), list('abc')]
self.df1.columns.names = ['CAP', 'low']
def test_top_level_method(self):
result = melt(self.df)
assert result.columns.tolist() == ['variable', 'value']
def test_method_signatures(self):
tm.assert_frame_equal(self.df.melt(),
melt(self.df))
tm.assert_frame_equal(self.df.melt(id_vars=['id1', 'id2'],
value_vars=['A', 'B']),
melt(self.df,
id_vars=['id1', 'id2'],
value_vars=['A', 'B']))
tm.assert_frame_equal(self.df.melt(var_name=self.var_name,
value_name=self.value_name),
melt(self.df,
var_name=self.var_name,
value_name=self.value_name))
tm.assert_frame_equal(self.df1.melt(col_level=0),
melt(self.df1, col_level=0))
def test_default_col_names(self):
result = self.df.melt()
assert result.columns.tolist() == ['variable', 'value']
result1 = self.df.melt(id_vars=['id1'])
assert result1.columns.tolist() == ['id1', 'variable', 'value']
result2 = self.df.melt(id_vars=['id1', 'id2'])
assert result2.columns.tolist() == ['id1', 'id2', 'variable', 'value']
def test_value_vars(self):
result3 = self.df.melt(id_vars=['id1', 'id2'], value_vars='A')
assert len(result3) == 10
result4 = self.df.melt(id_vars=['id1', 'id2'], value_vars=['A', 'B'])
expected4 = DataFrame({'id1': self.df['id1'].tolist() * 2,
'id2': self.df['id2'].tolist() * 2,
'variable': ['A'] * 10 + ['B'] * 10,
'value': (self.df['A'].tolist() +
self.df['B'].tolist())},
columns=['id1', 'id2', 'variable', 'value'])
tm.assert_frame_equal(result4, expected4)
def test_value_vars_types(self):
# GH 15348
expected = DataFrame({'id1': self.df['id1'].tolist() * 2,
'id2': self.df['id2'].tolist() * 2,
'variable': ['A'] * 10 + ['B'] * 10,
'value': (self.df['A'].tolist() +
self.df['B'].tolist())},
columns=['id1', 'id2', 'variable', 'value'])
for type_ in (tuple, list, np.array):
result = self.df.melt(id_vars=['id1', 'id2'],
value_vars=type_(('A', 'B')))
tm.assert_frame_equal(result, expected)
def test_vars_work_with_multiindex(self):
expected = DataFrame({
('A', 'a'): self.df1[('A', 'a')],
'CAP': ['B'] * len(self.df1),
'low': ['b'] * len(self.df1),
'value': self.df1[('B', 'b')],
}, columns=[('A', 'a'), 'CAP', 'low', 'value'])
result = self.df1.melt(id_vars=[('A', 'a')], value_vars=[('B', 'b')])
tm.assert_frame_equal(result, expected)
def test_tuple_vars_fail_with_multiindex(self):
# melt should fail with an informative error message if
# the columns have a MultiIndex and a tuple is passed
# for id_vars or value_vars.
tuple_a = ('A', 'a')
list_a = [tuple_a]
tuple_b = ('B', 'b')
list_b = [tuple_b]
for id_vars, value_vars in ((tuple_a, list_b), (list_a, tuple_b),
(tuple_a, tuple_b)):
with tm.assert_raises_regex(ValueError, r'MultiIndex'):
self.df1.melt(id_vars=id_vars, value_vars=value_vars)
def test_custom_var_name(self):
result5 = self.df.melt(var_name=self.var_name)
assert result5.columns.tolist() == ['var', 'value']
result6 = self.df.melt(id_vars=['id1'], var_name=self.var_name)
assert result6.columns.tolist() == ['id1', 'var', 'value']
result7 = self.df.melt(id_vars=['id1', 'id2'], var_name=self.var_name)
assert result7.columns.tolist() == ['id1', 'id2', 'var', 'value']
result8 = self.df.melt(id_vars=['id1', 'id2'], value_vars='A',
var_name=self.var_name)
assert result8.columns.tolist() == ['id1', 'id2', 'var', 'value']
result9 = self.df.melt(id_vars=['id1', 'id2'], value_vars=['A', 'B'],
var_name=self.var_name)
expected9 = DataFrame({'id1': self.df['id1'].tolist() * 2,
'id2': self.df['id2'].tolist() * 2,
self.var_name: ['A'] * 10 + ['B'] * 10,
'value': (self.df['A'].tolist() +
self.df['B'].tolist())},
columns=['id1', 'id2', self.var_name, 'value'])
tm.assert_frame_equal(result9, expected9)
def test_custom_value_name(self):
result10 = self.df.melt(value_name=self.value_name)
assert result10.columns.tolist() == ['variable', 'val']
result11 = self.df.melt(id_vars=['id1'], value_name=self.value_name)
assert result11.columns.tolist() == ['id1', 'variable', 'val']
result12 = self.df.melt(id_vars=['id1', 'id2'],
value_name=self.value_name)
assert result12.columns.tolist() == ['id1', 'id2', 'variable', 'val']
result13 = self.df.melt(id_vars=['id1', 'id2'], value_vars='A',
value_name=self.value_name)
assert result13.columns.tolist() == ['id1', 'id2', 'variable', 'val']
result14 = self.df.melt(id_vars=['id1', 'id2'], value_vars=['A', 'B'],
value_name=self.value_name)
expected14 = DataFrame({'id1': self.df['id1'].tolist() * 2,
'id2': self.df['id2'].tolist() * 2,
'variable': ['A'] * 10 + ['B'] * 10,
self.value_name: (self.df['A'].tolist() +
self.df['B'].tolist())},
columns=['id1', 'id2', 'variable',
self.value_name])
tm.assert_frame_equal(result14, expected14)
def test_custom_var_and_value_name(self):
result15 = self.df.melt(var_name=self.var_name,
value_name=self.value_name)
assert result15.columns.tolist() == ['var', 'val']
result16 = self.df.melt(id_vars=['id1'], var_name=self.var_name,
value_name=self.value_name)
assert result16.columns.tolist() == ['id1', 'var', 'val']
result17 = self.df.melt(id_vars=['id1', 'id2'],
var_name=self.var_name,
value_name=self.value_name)
assert result17.columns.tolist() == ['id1', 'id2', 'var', 'val']
result18 = self.df.melt(id_vars=['id1', 'id2'], value_vars='A',
var_name=self.var_name,
value_name=self.value_name)
assert result18.columns.tolist() == ['id1', 'id2', 'var', 'val']
result19 = self.df.melt(id_vars=['id1', 'id2'], value_vars=['A', 'B'],
var_name=self.var_name,
value_name=self.value_name)
expected19 = DataFrame({'id1': self.df['id1'].tolist() * 2,
'id2': self.df['id2'].tolist() * 2,
self.var_name: ['A'] * 10 + ['B'] * 10,
self.value_name: (self.df['A'].tolist() +
self.df['B'].tolist())},
columns=['id1', 'id2', self.var_name,
self.value_name])
tm.assert_frame_equal(result19, expected19)
df20 = self.df.copy()
df20.columns.name = 'foo'
result20 = df20.melt()
assert result20.columns.tolist() == ['foo', 'value']
def test_col_level(self):
res1 = self.df1.melt(col_level=0)
res2 = self.df1.melt(col_level='CAP')
assert res1.columns.tolist() == ['CAP', 'value']
assert res2.columns.tolist() == ['CAP', 'value']
def test_multiindex(self):
res = self.df1.melt()
assert res.columns.tolist() == ['CAP', 'low', 'value']
class TestGetDummies(object):
sparse = False
def setup_method(self, method):
self.df = DataFrame({'A': ['a', 'b', 'a'],
'B': ['b', 'b', 'c'],
'C': [1, 2, 3]})
def test_basic(self):
s_list = list('abc')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame({'a': {0: 1,
1: 0,
2: 0},
'b': {0: 0,
1: 1,
2: 0},
'c': {0: 0,
1: 0,
2: 1}}, dtype=np.uint8)
assert_frame_equal(get_dummies(s_list, sparse=self.sparse), expected)
assert_frame_equal(get_dummies(s_series, sparse=self.sparse), expected)
expected.index = list('ABC')
assert_frame_equal(
get_dummies(s_series_index, sparse=self.sparse), expected)
def test_basic_types(self):
# GH 10531
s_list = list('abc')
s_series = Series(s_list)
s_df = DataFrame({'a': [0, 1, 0, 1, 2],
'b': ['A', 'A', 'B', 'C', 'C'],
'c': [2, 3, 3, 3, 2]})
expected = DataFrame({'a': [1, 0, 0],
'b': [0, 1, 0],
'c': [0, 0, 1]},
dtype='uint8',
columns=list('abc'))
if not self.sparse:
compare = tm.assert_frame_equal
else:
expected = expected.to_sparse(fill_value=0, kind='integer')
compare = tm.assert_sp_frame_equal
result = get_dummies(s_list, sparse=self.sparse)
compare(result, expected)
result = get_dummies(s_series, sparse=self.sparse)
compare(result, expected)
result = get_dummies(s_df, sparse=self.sparse, columns=s_df.columns)
tm.assert_series_equal(result.get_dtype_counts(),
Series({'uint8': 8}))
result = get_dummies(s_df, sparse=self.sparse, columns=['a'])
expected = Series({'uint8': 3, 'int64': 1, 'object': 1}).sort_values()
tm.assert_series_equal(result.get_dtype_counts().sort_values(),
expected)
def test_just_na(self):
just_na_list = [np.nan]
just_na_series = Series(just_na_list)
just_na_series_index = Series(just_na_list, index=['A'])
res_list = get_dummies(just_na_list, sparse=self.sparse)
res_series = get_dummies(just_na_series, sparse=self.sparse)
res_series_index = get_dummies(just_na_series_index,
sparse=self.sparse)
assert res_list.empty
assert res_series.empty
assert res_series_index.empty
assert res_list.index.tolist() == [0]
assert res_series.index.tolist() == [0]
assert res_series_index.index.tolist() == ['A']
def test_include_na(self):
s = ['a', 'b', np.nan]
res = get_dummies(s, sparse=self.sparse)
exp = DataFrame({'a': {0: 1, 1: 0, 2: 0},
'b': {0: 0, 1: 1, 2: 0}}, dtype=np.uint8)
assert_frame_equal(res, exp)
# Sparse dataframes do not allow nan labelled columns, see #GH8822
res_na = get_dummies(s, dummy_na=True, sparse=self.sparse)
exp_na = DataFrame({nan: {0: 0, 1: 0, 2: 1},
'a': {0: 1, 1: 0, 2: 0},
'b': {0: 0, 1: 1, 2: 0}},
dtype=np.uint8)
exp_na = exp_na.reindex_axis(['a', 'b', nan], 1)
# hack (NaN handling in assert_index_equal)
exp_na.columns = res_na.columns
assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies([nan], dummy_na=True, sparse=self.sparse)
exp_just_na = DataFrame(Series(1, index=[0]), columns=[nan],
dtype=np.uint8)
tm.assert_numpy_array_equal(res_just_na.values, exp_just_na.values)
    def test_unicode(self):  # See GH 6885 - get_dummies chokes on unicode values
import unicodedata
e = 'e'
eacute = unicodedata.lookup('LATIN SMALL LETTER E WITH ACUTE')
s = [e, eacute, eacute]
res = get_dummies(s, prefix='letter', sparse=self.sparse)
exp = DataFrame({'letter_e': {0: 1,
1: 0,
2: 0},
u('letter_%s') % eacute: {0: 0,
1: 1,
2: 1}},
dtype=np.uint8)
assert_frame_equal(res, exp)
def test_dataframe_dummies_all_obj(self):
df = self.df[['A', 'B']]
result = get_dummies(df, sparse=self.sparse)
expected = DataFrame({'A_a': [1, 0, 1],
'A_b': [0, 1, 0],
'B_b': [1, 1, 0],
'B_c': [0, 0, 1]}, dtype=np.uint8)
assert_frame_equal(result, expected)
def test_dataframe_dummies_mix_default(self):
df = self.df
result = get_dummies(df, sparse=self.sparse)
expected = DataFrame({'C': [1, 2, 3],
'A_a': [1, 0, 1],
'A_b': [0, 1, 0],
'B_b': [1, 1, 0],
'B_c': [0, 0, 1]})
cols = ['A_a', 'A_b', 'B_b', 'B_c']
expected[cols] = expected[cols].astype(np.uint8)
expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_list(self):
prefixes = ['from_A', 'from_B']
df = DataFrame({'A': ['a', 'b', 'a'],
'B': ['b', 'b', 'c'],
'C': [1, 2, 3]})
result = get_dummies(df, prefix=prefixes, sparse=self.sparse)
expected = DataFrame({'C': [1, 2, 3],
'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0],
'from_B_b': [1, 1, 0],
'from_B_c': [0, 0, 1]})
cols = expected.columns[1:]
expected[cols] = expected[cols].astype(np.uint8)
expected = expected[['C', 'from_A_a', 'from_A_b', 'from_B_b',
'from_B_c']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_str(self):
# not that you should do this...
df = self.df
result = get_dummies(df, prefix='bad', sparse=self.sparse)
expected = DataFrame([[1, 1, 0, 1, 0],
[2, 0, 1, 1, 0],
[3, 1, 0, 0, 1]],
columns=['C', 'bad_a', 'bad_b', 'bad_b', 'bad_c'],
dtype=np.uint8)
expected = expected.astype({"C": np.int64})
assert_frame_equal(result, expected)
def test_dataframe_dummies_subset(self):
df = self.df
result = get_dummies(df, prefix=['from_A'], columns=['A'],
sparse=self.sparse)
expected = DataFrame({'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0],
'B': ['b', 'b', 'c'],
'C': [1, 2, 3]})
cols = ['from_A_a', 'from_A_b']
expected[cols] = expected[cols].astype(np.uint8)
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_sep(self):
df = self.df
result = get_dummies(df, prefix_sep='..', sparse=self.sparse)
expected = DataFrame({'C': [1, 2, 3],
'A..a': [1, 0, 1],
'A..b': [0, 1, 0],
'B..b': [1, 1, 0],
'B..c': [0, 0, 1]})
expected = expected[['C', 'A..a', 'A..b', 'B..b', 'B..c']]
cols = expected.columns[1:]
expected[cols] = expected[cols].astype(np.uint8)
assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep=['..', '__'], sparse=self.sparse)
expected = expected.rename(columns={'B..b': 'B__b', 'B..c': 'B__c'})
assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep={'A': '..',
'B': '__'}, sparse=self.sparse)
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_bad_length(self):
with pytest.raises(ValueError):
get_dummies(self.df, prefix=['too few'], sparse=self.sparse)
def test_dataframe_dummies_prefix_sep_bad_length(self):
with pytest.raises(ValueError):
get_dummies(self.df, prefix_sep=['bad'], sparse=self.sparse)
def test_dataframe_dummies_prefix_dict(self):
prefixes = {'A': 'from_A', 'B': 'from_B'}
df = DataFrame({'A': ['a', 'b', 'a'],
'B': ['b', 'b', 'c'],
'C': [1, 2, 3]})
result = get_dummies(df, prefix=prefixes, sparse=self.sparse)
expected = DataFrame({'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0],
'from_B_b': [1, 1, 0],
'from_B_c': [0, 0, 1],
'C': [1, 2, 3]})
cols = ['from_A_a', 'from_A_b', 'from_B_b', 'from_B_c']
expected[cols] = expected[cols].astype(np.uint8)
assert_frame_equal(result, expected)
def test_dataframe_dummies_with_na(self):
df = self.df
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(df, dummy_na=True, sparse=self.sparse)
expected = DataFrame({'C': [1, 2, 3, np.nan],
'A_a': [1, 0, 1, 0],
'A_b': [0, 1, 0, 0],
'A_nan': [0, 0, 0, 1],
'B_b': [1, 1, 0, 0],
'B_c': [0, 0, 1, 0],
'B_nan': [0, 0, 0, 1]})
cols = ['A_a', 'A_b', 'A_nan', 'B_b', 'B_c', 'B_nan']
expected[cols] = expected[cols].astype(np.uint8)
expected = expected[['C', 'A_a', 'A_b', 'A_nan',
'B_b', 'B_c', 'B_nan']]
assert_frame_equal(result, expected)
result = get_dummies(df, dummy_na=False, sparse=self.sparse)
expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_with_categorical(self):
df = self.df
df['cat'] = pd.Categorical(['x', 'y', 'y'])
result = get_dummies(df, sparse=self.sparse)
expected = DataFrame({'C': [1, 2, 3],
'A_a': [1, 0, 1],
'A_b': [0, 1, 0],
'B_b': [1, 1, 0],
'B_c': [0, 0, 1],
'cat_x': [1, 0, 0],
'cat_y': [0, 1, 1]})
cols = ['A_a', 'A_b', 'B_b', 'B_c', 'cat_x', 'cat_y']
expected[cols] = expected[cols].astype(np.uint8)
expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c',
'cat_x', 'cat_y']]
assert_frame_equal(result, expected)
def test_basic_drop_first(self):
# GH12402 Add a new parameter `drop_first` to avoid collinearity
# Basic case
s_list = list('abc')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame({'b': {0: 0,
1: 1,
2: 0},
'c': {0: 0,
1: 0,
2: 1}}, dtype=np.uint8)
result = get_dummies(s_list, sparse=self.sparse, drop_first=True)
assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=self.sparse, drop_first=True)
assert_frame_equal(result, expected)
expected.index = list('ABC')
result = get_dummies(s_series_index, sparse=self.sparse,
drop_first=True)
assert_frame_equal(result, expected)
def test_basic_drop_first_one_level(self):
        # Test the case where the categorical variable has only one level.
s_list = list('aaa')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame(index=np.arange(3))
result = get_dummies(s_list, sparse=self.sparse, drop_first=True)
assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=self.sparse, drop_first=True)
assert_frame_equal(result, expected)
expected = DataFrame(index=list('ABC'))
result = get_dummies(s_series_index, sparse=self.sparse,
drop_first=True)
assert_frame_equal(result, expected)
def test_basic_drop_first_NA(self):
        # Test NA handling together with drop_first
s_NA = ['a', 'b', np.nan]
res = get_dummies(s_NA, sparse=self.sparse, drop_first=True)
exp = DataFrame({'b': {0: 0,
1: 1,
2: 0}}, dtype=np.uint8)
assert_frame_equal(res, exp)
res_na = get_dummies(s_NA, dummy_na=True, sparse=self.sparse,
drop_first=True)
exp_na = DataFrame({'b': {0: 0,
1: 1,
2: 0},
nan: {0: 0,
1: 0,
2: 1}}, dtype=np.uint8).reindex_axis(
['b', nan], 1)
assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies([nan], dummy_na=True, sparse=self.sparse,
drop_first=True)
exp_just_na = DataFrame(index=np.arange(1))
assert_frame_equal(res_just_na, exp_just_na)
def test_dataframe_dummies_drop_first(self):
df = self.df[['A', 'B']]
result = get_dummies(df, sparse=self.sparse, drop_first=True)
expected = DataFrame({'A_b': [0, 1, 0],
'B_c': [0, 0, 1]}, dtype=np.uint8)
assert_frame_equal(result, expected)
def test_dataframe_dummies_drop_first_with_categorical(self):
df = self.df
df['cat'] = pd.Categorical(['x', 'y', 'y'])
result = get_dummies(df, sparse=self.sparse, drop_first=True)
expected = DataFrame({'C': [1, 2, 3],
'A_b': [0, 1, 0],
'B_c': [0, 0, 1],
'cat_y': [0, 1, 1]})
cols = ['A_b', 'B_c', 'cat_y']
expected[cols] = expected[cols].astype(np.uint8)
expected = expected[['C', 'A_b', 'B_c', 'cat_y']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_drop_first_with_na(self):
df = self.df
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(df, dummy_na=True, sparse=self.sparse,
drop_first=True)
expected = DataFrame({'C': [1, 2, 3, np.nan],
'A_b': [0, 1, 0, 0],
'A_nan': [0, 0, 0, 1],
'B_c': [0, 0, 1, 0],
'B_nan': [0, 0, 0, 1]})
cols = ['A_b', 'A_nan', 'B_c', 'B_nan']
expected[cols] = expected[cols].astype(np.uint8)
expected = expected[['C', 'A_b', 'A_nan', 'B_c', 'B_nan']]
assert_frame_equal(result, expected)
result = get_dummies(df, dummy_na=False, sparse=self.sparse,
drop_first=True)
expected = expected[['C', 'A_b', 'B_c']]
assert_frame_equal(result, expected)
def test_int_int(self):
data = Series([1, 2, 1])
result = pd.get_dummies(data)
expected = DataFrame([[1, 0], [0, 1], [1, 0]], columns=[1, 2],
dtype=np.uint8)
tm.assert_frame_equal(result, expected)
data = Series(pd.Categorical(['a', 'b', 'a']))
result = pd.get_dummies(data)
expected = DataFrame([[1, 0], [0, 1], [1, 0]],
columns=pd.Categorical(['a', 'b']),
dtype=np.uint8)
tm.assert_frame_equal(result, expected)
def test_int_df(self):
data = DataFrame(
{'A': [1, 2, 1],
'B': pd.Categorical(['a', 'b', 'a']),
'C': [1, 2, 1],
'D': [1., 2., 1.]
}
)
columns = ['C', 'D', 'A_1', 'A_2', 'B_a', 'B_b']
expected = DataFrame([
[1, 1., 1, 0, 1, 0],
[2, 2., 0, 1, 0, 1],
[1, 1., 1, 0, 1, 0]
], columns=columns)
expected[columns[2:]] = expected[columns[2:]].astype(np.uint8)
result = pd.get_dummies(data, columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_preserve_categorical_dtype(self):
# GH13854
for ordered in [False, True]:
cat = pd.Categorical(list("xy"), categories=list("xyz"),
ordered=ordered)
result = get_dummies(cat)
data = np.array([[1, 0, 0], [0, 1, 0]], dtype=np.uint8)
cols = pd.CategoricalIndex(cat.categories,
categories=cat.categories,
ordered=ordered)
expected = DataFrame(data, columns=cols)
tm.assert_frame_equal(result, expected)
class TestGetDummiesSparse(TestGetDummies):
sparse = True
class TestMakeAxisDummies(object):
def test_preserve_categorical_dtype(self):
# GH13854
for ordered in [False, True]:
cidx = pd.CategoricalIndex(list("xyz"), ordered=ordered)
midx = pd.MultiIndex(levels=[['a'], cidx],
labels=[[0, 0], [0, 1]])
df = DataFrame([[10, 11]], index=midx)
expected = DataFrame([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
index=midx, columns=cidx)
from pandas.core.reshape.reshape import make_axis_dummies
result = make_axis_dummies(df)
tm.assert_frame_equal(result, expected)
result = make_axis_dummies(df, transform=lambda x: x)
tm.assert_frame_equal(result, expected)
class TestLreshape(object):
def test_pairs(self):
data = {'birthdt': ['08jan2009', '20dec2008', '30dec2008', '21dec2008',
'11jan2009'],
'birthwt': [1766, 3301, 1454, 3139, 4133],
'id': [101, 102, 103, 104, 105],
'sex': ['Male', 'Female', 'Female', 'Female', 'Female'],
'visitdt1': ['11jan2009', '22dec2008', '04jan2009',
'29dec2008', '20jan2009'],
'visitdt2':
['21jan2009', nan, '22jan2009', '31dec2008', '03feb2009'],
'visitdt3': ['05feb2009', nan, nan, '02jan2009', '15feb2009'],
'wt1': [1823, 3338, 1549, 3298, 4306],
'wt2': [2011.0, nan, 1892.0, 3338.0, 4575.0],
'wt3': [2293.0, nan, nan, 3377.0, 4805.0]}
df = DataFrame(data)
spec = {'visitdt': ['visitdt%d' % i for i in range(1, 4)],
'wt': ['wt%d' % i for i in range(1, 4)]}
result = lreshape(df, spec)
exp_data = {'birthdt':
['08jan2009', '20dec2008', '30dec2008', '21dec2008',
'11jan2009', '08jan2009', '30dec2008', '21dec2008',
'11jan2009', '08jan2009', '21dec2008', '11jan2009'],
'birthwt': [1766, 3301, 1454, 3139, 4133, 1766, 1454, 3139,
4133, 1766, 3139, 4133],
'id': [101, 102, 103, 104, 105, 101, 103, 104, 105, 101,
104, 105],
'sex': ['Male', 'Female', 'Female', 'Female', 'Female',
'Male', 'Female', 'Female', 'Female', 'Male',
'Female', 'Female'],
'visitdt': ['11jan2009', '22dec2008', '04jan2009',
'29dec2008', '20jan2009', '21jan2009',
'22jan2009', '31dec2008', '03feb2009',
'05feb2009', '02jan2009', '15feb2009'],
'wt': [1823.0, 3338.0, 1549.0, 3298.0, 4306.0, 2011.0,
1892.0, 3338.0, 4575.0, 2293.0, 3377.0, 4805.0]}
exp = DataFrame(exp_data, columns=result.columns)
tm.assert_frame_equal(result, exp)
result = lreshape(df, spec, dropna=False)
exp_data = {'birthdt':
['08jan2009', '20dec2008', '30dec2008', '21dec2008',
'11jan2009', '08jan2009', '20dec2008', '30dec2008',
'21dec2008', '11jan2009', '08jan2009', '20dec2008',
'30dec2008', '21dec2008', '11jan2009'],
'birthwt': [1766, 3301, 1454, 3139, 4133, 1766, 3301, 1454,
3139, 4133, 1766, 3301, 1454, 3139, 4133],
'id': [101, 102, 103, 104, 105, 101, 102, 103, 104, 105,
101, 102, 103, 104, 105],
'sex': ['Male', 'Female', 'Female', 'Female', 'Female',
'Male', 'Female', 'Female', 'Female', 'Female',
'Male', 'Female', 'Female', 'Female', 'Female'],
'visitdt': ['11jan2009', '22dec2008', '04jan2009',
'29dec2008', '20jan2009', '21jan2009', nan,
'22jan2009', '31dec2008', '03feb2009',
'05feb2009', nan, nan, '02jan2009',
'15feb2009'],
'wt': [1823.0, 3338.0, 1549.0, 3298.0, 4306.0, 2011.0, nan,
1892.0, 3338.0, 4575.0, 2293.0, nan, nan, 3377.0,
4805.0]}
exp = DataFrame(exp_data, columns=result.columns)
tm.assert_frame_equal(result, exp)
spec = {'visitdt': ['visitdt%d' % i for i in range(1, 3)],
'wt': ['wt%d' % i for i in range(1, 4)]}
pytest.raises(ValueError, lreshape, df, spec)
class TestWideToLong(object):
def test_simple(self):
np.random.seed(123)
x = np.random.randn(3)
df = pd.DataFrame({"A1970": {0: "a",
1: "b",
2: "c"},
"A1980": {0: "d",
1: "e",
2: "f"},
"B1970": {0: 2.5,
1: 1.2,
2: .7},
"B1980": {0: 3.2,
1: 1.3,
2: .1},
"X": dict(zip(
range(3), x))})
df["id"] = df.index
exp_data = {"X": x.tolist() + x.tolist(),
"A": ['a', 'b', 'c', 'd', 'e', 'f'],
"B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
"year": ['1970', '1970', '1970', '1980', '1980', '1980'],
"id": [0, 1, 2, 0, 1, 2]}
exp_frame = DataFrame(exp_data)
exp_frame = exp_frame.set_index(['id', 'year'])[["X", "A", "B"]]
long_frame = wide_to_long(df, ["A", "B"], i="id", j="year")
tm.assert_frame_equal(long_frame, exp_frame)
def test_stubs(self):
# GH9204
df = pd.DataFrame([[0, 1, 2, 3, 8], [4, 5, 6, 7, 9]])
df.columns = ['id', 'inc1', 'inc2', 'edu1', 'edu2']
stubs = ['inc', 'edu']
# TODO: unused?
df_long = pd.wide_to_long(df, stubs, i='id', j='age') # noqa
assert stubs == ['inc', 'edu']
def test_separating_character(self):
# GH14779
np.random.seed(123)
x = np.random.randn(3)
df = pd.DataFrame({"A.1970": {0: "a",
1: "b",
2: "c"},
"A.1980": {0: "d",
1: "e",
2: "f"},
"B.1970": {0: 2.5,
1: 1.2,
2: .7},
"B.1980": {0: 3.2,
1: 1.3,
2: .1},
"X": dict(zip(
range(3), x))})
df["id"] = df.index
exp_data = {"X": x.tolist() + x.tolist(),
"A": ['a', 'b', 'c', 'd', 'e', 'f'],
"B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
"year": ['1970', '1970', '1970', '1980', '1980', '1980'],
"id": [0, 1, 2, 0, 1, 2]}
exp_frame = DataFrame(exp_data)
exp_frame = exp_frame.set_index(['id', 'year'])[["X", "A", "B"]]
long_frame = wide_to_long(df, ["A", "B"], i="id", j="year", sep=".")
tm.assert_frame_equal(long_frame, exp_frame)
def test_escapable_characters(self):
np.random.seed(123)
x = np.random.randn(3)
df = pd.DataFrame({"A(quarterly)1970": {0: "a",
1: "b",
2: "c"},
"A(quarterly)1980": {0: "d",
1: "e",
2: "f"},
"B(quarterly)1970": {0: 2.5,
1: 1.2,
2: .7},
"B(quarterly)1980": {0: 3.2,
1: 1.3,
2: .1},
"X": dict(zip(
range(3), x))})
df["id"] = df.index
exp_data = {"X": x.tolist() + x.tolist(),
"A(quarterly)": ['a', 'b', 'c', 'd', 'e', 'f'],
"B(quarterly)": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
"year": ['1970', '1970', '1970', '1980', '1980', '1980'],
"id": [0, 1, 2, 0, 1, 2]}
exp_frame = DataFrame(exp_data)
exp_frame = exp_frame.set_index(
['id', 'year'])[["X", "A(quarterly)", "B(quarterly)"]]
long_frame = wide_to_long(df, ["A(quarterly)", "B(quarterly)"],
i="id", j="year")
tm.assert_frame_equal(long_frame, exp_frame)
def test_unbalanced(self):
        # test that we can have a varying number of time variables
df = pd.DataFrame({'A2010': [1.0, 2.0],
'A2011': [3.0, 4.0],
'B2010': [5.0, 6.0],
'X': ['X1', 'X2']})
df['id'] = df.index
exp_data = {'X': ['X1', 'X1', 'X2', 'X2'],
'A': [1.0, 3.0, 2.0, 4.0],
'B': [5.0, np.nan, 6.0, np.nan],
'id': [0, 0, 1, 1],
'year': ['2010', '2011', '2010', '2011']}
exp_frame = pd.DataFrame(exp_data)
exp_frame = exp_frame.set_index(['id', 'year'])[["X", "A", "B"]]
long_frame = wide_to_long(df, ['A', 'B'], i='id', j='year')
tm.assert_frame_equal(long_frame, exp_frame)
def test_character_overlap(self):
# Test we handle overlapping characters in both id_vars and value_vars
df = pd.DataFrame({
'A11': ['a11', 'a22', 'a33'],
'A12': ['a21', 'a22', 'a23'],
'B11': ['b11', 'b12', 'b13'],
'B12': ['b21', 'b22', 'b23'],
'BB11': [1, 2, 3],
'BB12': [4, 5, 6],
'BBBX': [91, 92, 93],
'BBBZ': [91, 92, 93]
})
df['id'] = df.index
exp_frame = pd.DataFrame({
'BBBX': [91, 92, 93, 91, 92, 93],
'BBBZ': [91, 92, 93, 91, 92, 93],
'A': ['a11', 'a22', 'a33', 'a21', 'a22', 'a23'],
'B': ['b11', 'b12', 'b13', 'b21', 'b22', 'b23'],
'BB': [1, 2, 3, 4, 5, 6],
'id': [0, 1, 2, 0, 1, 2],
'year': ['11', '11', '11', '12', '12', '12']})
exp_frame = exp_frame.set_index(['id', 'year'])[
['BBBX', 'BBBZ', 'A', 'B', 'BB']]
long_frame = wide_to_long(df, ['A', 'B', 'BB'], i='id', j='year')
tm.assert_frame_equal(long_frame.sort_index(axis=1),
exp_frame.sort_index(axis=1))
def test_invalid_separator(self):
        # if an invalid separator is supplied, an empty data frame is returned
sep = 'nope!'
df = pd.DataFrame({'A2010': [1.0, 2.0],
'A2011': [3.0, 4.0],
'B2010': [5.0, 6.0],
'X': ['X1', 'X2']})
df['id'] = df.index
exp_data = {'X': '',
'A2010': [],
'A2011': [],
'B2010': [],
'id': [],
'year': [],
'A': [],
'B': []}
exp_frame = pd.DataFrame(exp_data)
exp_frame = exp_frame.set_index(['id', 'year'])[[
'X', 'A2010', 'A2011', 'B2010', 'A', 'B']]
exp_frame.index.set_levels([[0, 1], []], inplace=True)
long_frame = wide_to_long(df, ['A', 'B'], i='id', j='year', sep=sep)
tm.assert_frame_equal(long_frame.sort_index(axis=1),
exp_frame.sort_index(axis=1))
def test_num_string_disambiguation(self):
# Test that we can disambiguate number value_vars from
# string value_vars
df = pd.DataFrame({
'A11': ['a11', 'a22', 'a33'],
'A12': ['a21', 'a22', 'a23'],
'B11': ['b11', 'b12', 'b13'],
'B12': ['b21', 'b22', 'b23'],
'BB11': [1, 2, 3],
'BB12': [4, 5, 6],
'Arating': [91, 92, 93],
'Arating_old': [91, 92, 93]
})
df['id'] = df.index
exp_frame = pd.DataFrame({
'Arating': [91, 92, 93, 91, 92, 93],
'Arating_old': [91, 92, 93, 91, 92, 93],
'A': ['a11', 'a22', 'a33', 'a21', 'a22', 'a23'],
'B': ['b11', 'b12', 'b13', 'b21', 'b22', 'b23'],
'BB': [1, 2, 3, 4, 5, 6],
'id': [0, 1, 2, 0, 1, 2],
'year': ['11', '11', '11', '12', '12', '12']})
exp_frame = exp_frame.set_index(['id', 'year'])[
['Arating', 'Arating_old', 'A', 'B', 'BB']]
long_frame = wide_to_long(df, ['A', 'B', 'BB'], i='id', j='year')
tm.assert_frame_equal(long_frame.sort_index(axis=1),
exp_frame.sort_index(axis=1))
def test_invalid_suffixtype(self):
        # If all stub names end with a string, but a numeric suffix is
# assumed, an empty data frame is returned
df = pd.DataFrame({'Aone': [1.0, 2.0],
'Atwo': [3.0, 4.0],
'Bone': [5.0, 6.0],
'X': ['X1', 'X2']})
df['id'] = df.index
exp_data = {'X': '',
'Aone': [],
'Atwo': [],
'Bone': [],
'id': [],
'year': [],
'A': [],
'B': []}
exp_frame = pd.DataFrame(exp_data)
exp_frame = exp_frame.set_index(['id', 'year'])[[
'X', 'Aone', 'Atwo', 'Bone', 'A', 'B']]
exp_frame.index.set_levels([[0, 1], []], inplace=True)
long_frame = wide_to_long(df, ['A', 'B'], i='id', j='year')
tm.assert_frame_equal(long_frame.sort_index(axis=1),
exp_frame.sort_index(axis=1))
def test_multiple_id_columns(self):
# Taken from http://www.ats.ucla.edu/stat/stata/modules/reshapel.htm
df = pd.DataFrame({
'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],
'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],
'ht1': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
'ht2': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]
})
exp_frame = pd.DataFrame({
'ht': [2.8, 3.4, 2.9, 3.8, 2.2, 2.9, 2.0, 3.2, 1.8,
2.8, 1.9, 2.4, 2.2, 3.3, 2.3, 3.4, 2.1, 2.9],
'famid': [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3],
'birth': [1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3],
'age': ['1', '2', '1', '2', '1', '2', '1', '2', '1',
'2', '1', '2', '1', '2', '1', '2', '1', '2']
})
exp_frame = exp_frame.set_index(['famid', 'birth', 'age'])[['ht']]
long_frame = wide_to_long(df, 'ht', i=['famid', 'birth'], j='age')
tm.assert_frame_equal(long_frame, exp_frame)
def test_non_unique_idvars(self):
# GH16382
        # Raise an error message if non-unique id vars (i) are passed
df = pd.DataFrame({
'A_A1': [1, 2, 3, 4, 5],
'B_B1': [1, 2, 3, 4, 5],
'x': [1, 1, 1, 1, 1]
})
with pytest.raises(ValueError):
wide_to_long(df, ['A_A', 'B_B'], i='x', j='colname')
| bsd-3-clause | -60,484,100,107,960,040 | 42.959555 | 79 | 0.426281 | false |
xclxxl414/rqalpha | rqalpha/mod/rqalpha_mod_alphaStar_utils/mod.py | 1 | 2371 | #coding=utf-8
"""
@author: evilXu
@file: mod.py
@time: 2018/2/28 16:59
@description:
"""
from rqalpha.interface import AbstractMod
from rqalpha.utils.logger import system_log,user_system_log
import pandas as pd
from rqalpha.api import *
class UtilsMod(AbstractMod):
def __init__(self):
self._inject_api()
def start_up(self, env, mod_config):
system_log.debug("UtilsMod.start_up,config:{0}",mod_config)
def tear_down(self, code, exception=None):
pass
# print(">>> AlphaHDataMode.tear_down")
def _inject_api(self):
from rqalpha import export_as_api
from rqalpha.execution_context import ExecutionContext
from rqalpha.const import EXECUTION_PHASE
@export_as_api
@ExecutionContext.enforce_phase(EXECUTION_PHASE.ON_INIT,
EXECUTION_PHASE.BEFORE_TRADING,
EXECUTION_PHASE.ON_BAR,
EXECUTION_PHASE.AFTER_TRADING,
EXECUTION_PHASE.SCHEDULED)
def equalWeight_order(tobe_holding_codes=[], context=None):
user_system_log.info("equalWeight_order:{}",str(tobe_holding_codes))
if len(tobe_holding_codes) < 1:
for code, pos in context.portfolio.positions.items():
if pos.sellable > 0:
order_shares(code, -1 * pos.sellable)
return
# print("positions",context.portfolio.positions)
_target_percent = round(1.0 / len(tobe_holding_codes), 2)
_targets = set(tobe_holding_codes)
_tobe_sell = [pos for code, pos in context.portfolio.positions.items() if code not in _targets]
for pos in _tobe_sell:
if pos.sellable > 0:
order_shares(pos.order_book_id, -1 * pos.sellable)
for code in tobe_holding_codes:
_acount = context.portfolio.stock_account
_cash_percent = round(_acount.cash / _acount.total_value, 2)
_real_percent = min(_cash_percent, _target_percent)
# print(_acount.cash,_acount.total_value,_cash_percent,_real_percent)
if _real_percent > 0:
order_target_percent(code, _real_percent)
return | apache-2.0 | -1,318,417,233,380,429,800 | 39.896552 | 107 | 0.566428 | false |
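# Usage sketch (illustrative, not part of the mod): once this mod is enabled the
# injected API can be called directly from a strategy file, e.g.
#
#     def handle_bar(context, bar_dict):
#         # rebalance into an equal-weight portfolio of the chosen codes
#         equalWeight_order(["000001.XSHE", "600000.XSHG"], context=context)
#
# The order_book_ids above are assumptions for illustration only.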
data-tsunami/museo-cachi | cachi/admin.py | 1 | 1605 | # -*- coding: utf-8 -*-
#======================================================================
# This file is part of "Museo-Cachi".
#
# Museo-Cachi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Museo-Cachi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Museo-Cachi. If not, see <http://www.gnu.org/licenses/>.
#======================================================================
from __future__ import unicode_literals
from cachi import models
from django.contrib import admin
admin.site.register(models.PiezaConjunto)
admin.site.register(models.Fragmento)
admin.site.register(models.FichaTecnica)
admin.site.register(models.Adjunto)
admin.site.register(models.TipoAdquisicion)
admin.site.register(models.TipoCondicionHallazgo)
admin.site.register(models.Naturaleza)
admin.site.register(models.Persona)
admin.site.register(models.Ubicacion)
admin.site.register(models.InformeCampo)
admin.site.register(models.UbicacionGeografica)
admin.site.register(models.Procedencia)
admin.site.register(models.SitioArqueologico)
admin.site.register(models.FichaRelevamientoSitio)
admin.site.register(models.Modificacion)
| gpl-3.0 | 8,912,619,328,321,128,000 | 40.153846 | 73 | 0.71028 | false |
sciapp/pyMolDyn | src/config/configuration.py | 1 | 6186 | import configobj
import validate
import os
import os.path
import inspect
CONFIG_DIRECTORY = '~/.pymoldyn/' # MUST be written with ~ to save a path in the config file that is relative to the user's home directory
CONFIG_FILE = os.path.expanduser('%s/config.cfg' % CONFIG_DIRECTORY)
CONFIG_SPEC_FILE = os.path.expanduser('%s/config.spec' % CONFIG_DIRECTORY)
# second string is the list type name
type_dict = {
int: ('integer', 'int'),
float: ('float', 'float'),
str: ('string', 'string'),
unicode: ('string', 'string'),
bool: ('boolean', 'bool'),
}
class ConfigNode(object):
def __init__(self):
pass
class Configuration(ConfigNode):
"""
Configuration Object that contains the application settings
"""
class Colors(ConfigNode):
def __init__(self):
self.surface_cavity = [0.2, 0.4, 1.]
self.domain = [0., 1., 0.5]
self.center_cavity = [0.9, 0.4, 0.2]
self.background = [0.0, 0.0, 0.0]
self.bounding_box = [1.0, 1.0, 1.0]
self.bonds = [0.8, 0.8, 0.8]
class OpenGL(ConfigNode):
def __init__(self):
# camera_position =
# offset = (0.0, 0.0, 0.0)
self.gl_window_size = [1200, 400]
self.atom_radius = 0.4
self.bond_radius = 0.1
class Computation(ConfigNode):
def __init__(self):
self.std_cutoff_radius = 2.8
self.std_resolution = 64
self.max_cachefiles = 0
class Path(ConfigNode):
def __init__(self):
self.cache_dir = os.path.join(CONFIG_DIRECTORY, 'cache')
self.ffmpeg = '/usr/local/bin/ffmpeg'
self.result_dir = os.path.join(CONFIG_DIRECTORY, 'results')
def __init__(self):
# standard configuration
self.Colors = Configuration.Colors()
self.OpenGL = Configuration.OpenGL()
self.Computation = Configuration.Computation()
self.Path = Configuration.Path()
self.window_position = [-1, -1]
self.recent_files = ['']
self.max_files = 5
self._file = ConfigFile(self)
def add_recent_file(self, filename):
if len(self.recent_files) == 1 and not self.recent_files[0]:
self.recent_files[0] = filename
elif len(self.recent_files) == self.max_files:
self.recent_files.pop(-1)
self.recent_files.insert(0,filename)
else:
self.recent_files.insert(0, filename)
self.save()
def save(self):
"""
write configuration to file
"""
self._file.save()
def read(self):
"""
read configuration from file
"""
self._file = ConfigFile(self)
self._file.read()
class ConfigFile(object):
"""
ConfigFile that parses the settings to a configuration file using ConfigObj 4
"""
def __init__(self, cfg):
self.config = cfg
@staticmethod
def _create_needed_parent_directories(filename):
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
def generate_configspec(self):
"""
generates the type specification for the configuration data
"""
spec_file = configobj.ConfigObj(CONFIG_SPEC_FILE)
self.generate_spec_for_section(self.file, spec_file)
# TODO: better error handling
try:
self._create_needed_parent_directories(CONFIG_SPEC_FILE)
spec_file.write()
except IOError as e:
print "IOError in ConfigFile.generate_configspec"
def generate_spec_for_section(self, section, spec_section):
"""
recursive type specification for each subtree
"""
for scalar in section.scalars:
t = type(section[scalar])
type_string = type_dict[t][0] if t is not list else type_dict[type(section[scalar][0])][1] + '_list'
spec_section[scalar] = type_string
for sect in section.sections:
spec_section[sect] = {}
self.generate_spec_for_section(section[sect], spec_section[sect])
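    # Illustrative output (assumed values): for the default Configuration above,
    # the generated config.spec ends up with entries such as
    #
    #     [OpenGL]
    #     gl_window_size = int_list
    #     atom_radius = float
    #     [Colors]
    #     background = float_list
    #
    # i.e. scalars map to their type name and lists to "<element type>_list".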
def save(self):
"""
recursively reads the object and saves it to the ConfigFile object and finally writes it into the file
"""
self.file = configobj.ConfigObj(CONFIG_FILE)
self.parse_node_to_section(self.config, self.file)
# TODO: better error handling
try:
self._create_needed_parent_directories(CONFIG_FILE)
self.file.write()
self.generate_configspec()
self.file.write()
except IOError as e:
print "IOError in ConfigFile.save"
def parse_node_to_section(self, node, section):
"""
parses a ConfigNode to file object
"""
for attr_str in dir(node):
attr = getattr(node, attr_str)
if isinstance(attr, ConfigNode):
section[type(attr).__name__] = {}
self.parse_node_to_section(attr, section[type(attr).__name__])
elif not inspect.ismethod(attr) and not attr_str.startswith('_'):
section[attr_str] = attr
else:
pass
# print attr_str, 'NOT PROCESSED'
def read(self):
"""
read a configuration from file
"""
if not os.path.isfile(CONFIG_SPEC_FILE) or not os.path.isfile(CONFIG_FILE):
self.save()
else:
validator = validate.Validator()
self.file = configobj.ConfigObj(CONFIG_FILE,
configspec=CONFIG_SPEC_FILE)
self.file.validate(validator)
self.parse_section_to_node(self.file, self.config)
def parse_section_to_node(self, section, node):
"""
parses a config section to config object
"""
for scalar in section.scalars:
setattr(node, scalar, section[scalar])
for sec in section.sections:
self.parse_section_to_node(section[sec], getattr(node, sec))
config = Configuration()
config.read()
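# Usage sketch: the module-level "config" singleton above is populated at import
# time; settings are plain attributes on nested ConfigNode objects, and save()
# round-trips them through config.cfg plus the generated config.spec. The values
# below are illustrative assumptions only.
if __name__ == "__main__":
    config.OpenGL.atom_radius = 0.5               # tweak a nested setting
    config.add_recent_file("/tmp/example.xyz")    # prepends to recent_files and saves
    config.save()                                 # writes CONFIG_FILE and CONFIG_SPEC_FILE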
| mit | 8,964,271,255,305,038,000 | 30.561224 | 140 | 0.57032 | false |
rosix-ru/django-directapps-client | setup.py | 1 | 2120 | # -*- coding: utf-8 -*-
#
# Copyright 2016 Grigoriy Kramarenko <[email protected]>
#
# This file is part of DjangoDirectAppsClient.
#
# DjangoDirectAppsClient is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DjangoDirectAppsClient is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with DjangoDirectAppsClient. If not, see
# <http://www.gnu.org/licenses/>.
#
from setuptools import setup, find_packages
from directapps_client import __version__
setup(
name='django-directapps-client',
version=__version__,
description='Django Direct Apps Client.',
long_description=open('README.rst').read(),
author='Grigoriy Kramarenko',
author_email='[email protected]',
url='https://github.com/rosix-ru/django-directapps-client/',
license='GNU Affero General Public License v3 or later (AGPLv3+)',
platforms='any',
zip_safe=False,
packages=find_packages(),
include_package_data = True,
install_requires=['django-directapps'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
| agpl-3.0 | 4,842,296,416,597,150,000 | 38.259259 | 93 | 0.675943 | false |
jared0x90/ArenaGame | parse-cards.py | 1 | 1309 | #!/usr/bin/env python
# Use the built in json and sqlite library
import json
import sqlite3
def main():
# Running card counter
card_count = 0
# Load all json data into a collection
card_data = json.load(open('cards.collectible.json'))
# Connect to our database and creat table if it doesn't exist
db = sqlite3.connect('game.sqlite')
c = db.cursor()
query = "CREATE TABLE IF NOT EXISTS cards('card_game_id', 'rarity', 'set', 'class', 'card_name_en',UNIQUE(card_game_id));"
c.execute(query)
# Cycle through all the objects in the collection
for card in card_data:
if "HERO" not in card['id']:
card_count+=1
# Determine if it's a neutral card or a class card
if 'playerClass' not in card.keys():
card_class = "NEUTRAL"
else:
card_class = card['playerClass']
# Insert into database
new_card = ( card['id'], card['rarity'], card['set'], card_class, card['name'])
c.execute('REPLACE INTO cards VALUES(?,?,?,?,?)', new_card)
db.commit()
db.close()
print str(card_count) + ' cards were written to the database.'
# Boilerplate python
if __name__ == '__main__':
main()
| mit | -7,381,918,792,961,908,000 | 30.926829 | 126 | 0.565317 | false |
jpludens/quartrmastr | db/tables/equip_traits.py | 1 | 5244 | import sqlite3
from db import get_connection, get_from_datamaster, get_equip_keys
from db.tables import equips, traits
requirements = [equips, traits]
def build():
with get_connection() as con:
con.row_factory = sqlite3.Row
cur = con.cursor()
equip_ids_by_name = get_equip_keys(cur)
cur.execute("SELECT Text, Id FROM Traits")
trait_ids_by_text = {cur_row[0]: cur_row[1]
for cur_row in cur.fetchall()}
cur.execute("PRAGMA foreign_keys = ON")
cur.execute("DROP TABLE IF EXISTS EquipTraits")
cur.execute("CREATE TABLE EquipTraits("
"Id INTEGER PRIMARY KEY AUTOINCREMENT, "
"Equip INTEGER, "
"Trait INTEGER, "
"FOREIGN KEY(Equip) REFERENCES Equips(Id), "
"FOREIGN KEY(Trait) REFERENCES Traits(Id))")
for csv_row in get_from_datamaster('EquipTraits.csv'):
cur.execute("INSERT INTO EquipTraits ("
"Equip, Trait)"
"VALUES (\"{}\", \"{}\")".format(
equip_ids_by_name[csv_row.get('EquipName')],
trait_ids_by_text[csv_row.get('Text')]))
def read():
con = get_connection()
con.row_factory = sqlite3.Row
with con:
cur = con.cursor()
cur.execute("SELECT "
"EquipTraits.Id AS Id, "
"Traits.Id AS trait, "
"TraitTypes.TraitTypeName AS traitType, "
"Traits.Text AS text, "
"Equip AS equip, "
"ElementName AS element "
"FROM EquipTraits "
"JOIN Traits "
"ON EquipTraits.Trait = Traits.Id "
"JOIN Elements "
"ON TraitElements.Element = Elements.Id "
"JOIN TraitTypes "
"ON Traits.TraitType = TraitTypes.Id "
"JOIN TraitElements "
"WHERE TraitElements.Trait = Traits.Id ")
element_traits = [dict(row) for row in cur.fetchall()]
cur.execute("SELECT "
"EquipTraits.Id AS id, "
"Traits.Id AS trait, "
"TraitTypes.TraitTypeName AS traitType, "
"Traits.Text AS text, "
"Equip AS equip, "
"StatusName AS status "
"FROM EquipTraits "
"JOIN Traits "
"ON EquipTraits.Trait = Traits.Id "
"JOIN Statuses "
"ON TraitStatuses.Status = Statuses.Id "
"JOIN TraitTypes "
"ON Traits.TraitType = TraitTypes.Id "
"JOIN TraitStatuses "
"WHERE TraitStatuses.Trait = Traits.Id ")
        # rows from the Statuses/TraitStatuses join
        status_traits = [dict(row) for row in cur.fetchall()]
cur.execute("SELECT "
"EquipTraits.Id AS id, "
"Traits.Id AS trait, "
"TraitTypes.TraitTypeName AS traitType, "
"Traits.Text AS text, "
"Equip AS equip, "
"StatName AS stat "
"FROM EquipTraits "
"JOIN Traits "
"ON EquipTraits.Trait = Traits.Id "
"JOIN Stats "
"ON TraitStats.Stat = Stats.Id "
"JOIN TraitTypes "
"ON Traits.TraitType = TraitTypes.Id "
"JOIN TraitStats "
"WHERE TraitStats.Trait = Traits.Id")
        # rows from the Stats/TraitStats join
        stat_traits = [dict(row) for row in cur.fetchall()]
cur.execute("SELECT "
"EquipTraits.Id AS id, "
"Traits.Id AS trait, "
"TraitTypes.TraitTypeName AS traitType, "
"Traits.Text AS text, "
"Equip AS equip, "
"SkillName AS skillName "
"FROM EquipTraits "
"JOIN Traits "
"ON EquipTraits.Trait = Traits.Id "
"JOIN TraitTypes "
"ON Traits.TraitType = TraitTypes.Id "
"JOIN TraitSkills "
"WHERE TraitSkills.Trait = Traits.Id")
skill_traits = [dict(row) for row in cur.fetchall()]
cur.execute("SELECT "
"EquipTraits.Id AS id, "
"Traits.Id AS trait, "
"TraitTypes.TraitTypeName AS traitType, "
"Traits.Text AS text, "
"Equip AS equip, "
"Miscprop AS property "
"FROM EquipTraits "
"JOIN Traits "
"ON EquipTraits.Trait = Traits.Id "
"JOIN TraitTypes "
"ON Traits.TraitType = TraitTypes.Id "
"JOIN TraitMiscprops "
"WHERE TraitMiscprops.Trait = Traits.Id")
misc_traits = [dict(row) for row in cur.fetchall()]
return element_traits + stat_traits + status_traits + skill_traits + misc_traits
| mit | 2,389,829,314,110,966,300 | 40.291339 | 88 | 0.466438 | false |
aarontuor/antk | antk/scripts/dssmgrid.py | 1 | 2443 | from __future__ import print_function
import argparse
from antk.core import loader
import numpy
def return_parser():
parser = argparse.ArgumentParser(description="Command line utility for performing grid search on a matrix factorization model.")
parser.add_argument("datadir", type=str,
help="data directory for conducting search.")
parser.add_argument("configfile", type=str,
help="Config file for conducting search.")
parser.add_argument("logfile", type=str,
help="log file for conducting search.")
return parser
if __name__ == '__main__':
args = return_parser().parse_args()
data = loader.read_data_sets(args.datadir, folders=['item', 'user', 'dev', 'test', 'train'])
data.train.labels['ratings'] = loader.center(data.train.labels['ratings'])
data.dev.labels['ratings'] = loader.center(data.dev.labels['ratings'])
data.user.features['age'] = loader.center(data.user.features['age'])
data.item.features['year'] = loader.center(data.item.features['year'])
data.user.features['age'] = loader.max_norm(data.user.features['age'])
data.item.features['year'] = loader.max_norm(data.item.features['year'])
data.dev.features['time'] = loader.center(data.dev.features['time'])
data.dev.features['time'] = loader.max_norm(data.dev.features['time'])
data.train.features['time'] = loader.center(data.train.features['time'])
data.train.features['time'] = loader.max_norm(data.train.features['time'])
# x = dsmodel.dssm(data, args.configfile)
mb = [500, 1000, 10000, 20000, 40000, 80000,50, 100, 200]
arguments = [[data],
[args.configfile],
[0.00001],
[2, 5, 10, 20, 50, 100, 200, 500, 1000],
[0.0001, 0.001, 0.01, 0.1, 0.3, 1],
mb,
[0.0001, 0.0003, 0.001, 0.003, 0.01, 0.03, 0.1, 0.3],
[True],
map(lambda x: 10*numpy.ceil(numpy.log(data.train.num_examples / x)), mb),
[1000],
[500]]
argumentnames = ['data',
'config',
'initrange',
'kfactors',
'lamb',
'mb',
'learnrate',
'verbose',
'maxbadcount',
'epochs',
'random_seed']
# antsearch.gridsearch(args.logfile, '.', 'dsmodel', 'dssm', arguments, argumentnames)
| mit | -7,314,639,539,161,021,000 | 41.859649 | 132 | 0.57675 | false |
kidscancode/gamedev | tutorials/examples/pathfinding/part2_test.py | 1 | 4760 | import pygame as pg
from collections import deque
from os import path
vec = pg.math.Vector2
TILESIZE = 48
GRIDWIDTH = 28
GRIDHEIGHT = 15
WIDTH = TILESIZE * GRIDWIDTH
HEIGHT = TILESIZE * GRIDHEIGHT
FPS = 30
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
CYAN = (0, 255, 255)
MAGENTA = (255, 0, 255)
YELLOW = (255, 255, 0)
DARKGRAY = (40, 40, 40)
LIGHTGRAY = (140, 140, 140)
pg.init()
screen = pg.display.set_mode((WIDTH, HEIGHT))
clock = pg.time.Clock()
class SquareGrid:
def __init__(self, width, height):
self.width = width
self.height = height
self.walls = []
self.connections = [vec(1, 0), vec(-1, 0), vec(0, 1), vec(0, -1)]
def in_bounds(self, node):
return 0 <= node.x < self.width and 0 <= node.y < self.height
def passable(self, node):
return node not in self.walls
def find_neighbors(self, node):
neighbors = [node + connection for connection in self.connections]
if (node.x + node.y) % 2 == 0:
neighbors.reverse()
neighbors = filter(self.in_bounds, neighbors)
neighbors = filter(self.passable, neighbors)
return neighbors
def draw(self):
for wall in self.walls:
rect = pg.Rect(wall * TILESIZE, (TILESIZE, TILESIZE))
pg.draw.rect(screen, LIGHTGRAY, rect)
def vec2int(v):
return (int(v.x), int(v.y))
def flow_field(graph, start):
frontier = deque()
frontier.append(start)
path = {}
path[vec2int(start)] = None
while len(frontier) > 0:
current = frontier.popleft()
for next in graph.find_neighbors(current):
if vec2int(next) not in path:
frontier.append(next)
path[vec2int(next)] = current - next
return path
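# The dict returned by flow_field() maps each reachable cell (as an int tuple) to
# the unit step pointing back toward "start", so an agent can walk the field by
# repeatedly adding its cell's vector. Illustrative helper, not used by the demo:
def follow_field(field, start, pos):
    route = [pos]
    while vec2int(pos) != vec2int(start) and field.get(vec2int(pos)) is not None:
        pos = pos + field[vec2int(pos)]
        route.append(pos)
    return route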
def draw_grid():
for x in range(0, WIDTH, TILESIZE):
pg.draw.line(screen, LIGHTGRAY, (x, 0), (x, HEIGHT))
for y in range(0, HEIGHT, TILESIZE):
pg.draw.line(screen, LIGHTGRAY, (0, y), (WIDTH, y))
x = start.x * TILESIZE + TILESIZE / 2
y = start.y * TILESIZE + TILESIZE / 2
r = star_img.get_rect(center=(x, y))
screen.blit(star_img, r)
icon_dir = path.join(path.dirname(__file__), '../icons')
star_img = pg.image.load(path.join(icon_dir, 'star.png')).convert_alpha()
star_img.fill((0, 255, 0, 255), special_flags=pg.BLEND_RGBA_MULT)
star_img = pg.transform.scale(star_img, (50, 50))
arrows = {}
arrow_img = pg.image.load(path.join(icon_dir, 'arrowRight.png')).convert_alpha()
arrow_img = pg.transform.scale(arrow_img, (50, 50))
for dir in [(1, 0), (0, 1), (-1, 0), (0, -1)]:
arrows[dir] = pg.transform.rotate(arrow_img, vec(dir).angle_to(vec(1, 0)))
g = SquareGrid(GRIDWIDTH, GRIDHEIGHT)
walls = [(10, 7), (11, 7), (12, 7), (13, 7), (14, 7), (15, 7), (16, 7), (7, 7), (6, 7), (5, 7), (5, 5), (5, 6), (1, 6), (2, 6), (3, 6), (5, 10), (5, 11), (5, 12), (5, 9), (5, 8), (12, 8), (12, 9), (12, 10), (12, 11), (15, 14), (15, 13), (15, 12), (15, 11), (15, 10), (17, 7), (18, 7), (21, 7), (21, 6), (21, 5), (21, 4), (21, 3), (22, 5), (23, 5), (24, 5), (25, 5), (18, 10), (20, 10), (19, 10), (21, 10), (22, 10), (23, 10), (14, 4), (14, 5), (14, 6), (14, 0), (14, 1), (9, 2), (9, 1), (7, 3), (8, 3), (10, 3), (9, 3), (11, 3), (2, 5), (2, 4), (2, 3), (2, 2), (2, 0), (2, 1), (0, 11), (1, 11), (2, 11), (21, 2), (20, 11), (20, 12), (23, 13), (23, 14), (24, 10), (25, 10), (6, 12), (7, 12), (10, 12), (11, 12), (12, 12), (5, 3), (6, 3), (5, 4)]
for wall in walls:
g.walls.append(vec(wall))
start = vec(0, 0)
path = flow_field(g, start)
running = True
while running:
clock.tick(FPS)
for event in pg.event.get():
if event.type == pg.QUIT:
running = False
if event.type == pg.KEYDOWN:
if event.key == pg.K_ESCAPE:
running = False
if event.key == pg.K_m:
# dump the wall list for saving
print([(int(loc.x), int(loc.y)) for loc in g.walls])
if event.type == pg.MOUSEBUTTONDOWN:
mpos = vec(pg.mouse.get_pos()) // TILESIZE
if event.button == 1:
if mpos in g.walls:
g.walls.remove(mpos)
else:
g.walls.append(mpos)
if event.button == 3:
start = mpos
path = flow_field(g, start)
pg.display.set_caption("{:.2f}".format(clock.get_fps()))
screen.fill(DARKGRAY)
draw_grid()
g.draw()
for n, d in path.items():
if d:
x, y = n
x = x * TILESIZE + TILESIZE / 2
y = y * TILESIZE + TILESIZE / 2
img = arrows[vec2int(d)]
r = img.get_rect(center=(x, y))
screen.blit(img, r)
pg.display.flip()
| mit | -7,233,398,497,294,835,000 | 35.615385 | 744 | 0.532563 | false |
nimbis/django-guardian | guardian/view_mixins.py | 1 | 5462 | from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.decorators import REDIRECT_FIELD_NAME
from django.http import HttpResponseForbidden
from django.http import HttpResponseRedirect
from django.utils.http import urlquote
from django.utils.decorators import method_decorator
from django.core.exceptions import ImproperlyConfigured
class LoginRequiredMixin(object):
"""
    A login required mixin for use with class based views. This class is a light wrapper around the
    `login_required` decorator and hence its function parameters are just attributes defined on the class.
    Due to parent class order traversal this mixin must be added as the left-most
    mixin of a view.
    The mixin has exactly the same flow as the `login_required` decorator:
If the user isn't logged in, redirect to settings.LOGIN_URL, passing the current
absolute path in the query string. Example: /accounts/login/?next=/polls/3/.
If the user is logged in, execute the view normally. The view code is free to
assume the user is logged in.
**Class Settings**
    `redirect_field_name` - defaults to "next"
`login_url` - the login url of your site
"""
redirect_field_name = REDIRECT_FIELD_NAME
login_url = None
@method_decorator(login_required(redirect_field_name=redirect_field_name, login_url=login_url))
def dispatch(self, request, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(request, *args, **kwargs)
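# Minimal usage sketch (illustrative names only): mix LoginRequiredMixin in as the
# left-most base so its dispatch() wraps the view's own dispatch(); anonymous users
# are redirected to settings.LOGIN_URL with "?next=<current path>".
from django.views.generic import TemplateView
class ExampleDashboardView(LoginRequiredMixin, TemplateView):
    template_name = "dashboard.html"  # assumed template
    redirect_field_name = "next"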
class PermissionRequiredMixin(object):
"""
A view mixin that verifies if the current logged in user has the specified permission
by wrapping the ``request.user.has_perm(..)`` method.
    If a `get_object()` method is defined either manually or by including another mixin (for example
    ``SingleObjectMixin``), or ``self.object`` is defined, then the permission will be tested against
    that specific instance.
    .. NOTE: Testing a permission against a specific object instance requires an authentication backend
        that supports it. Please see ``django-guardian`` to add object-level permissions to your project.
The mixin does the following:
If the user isn't logged in, redirect to settings.LOGIN_URL, passing the current
absolute path in the query string. Example: /accounts/login/?next=/polls/3/.
    If `raise_exception` is set to True then, rather than redirecting to the login page,
    a `PermissionDenied` (403) is raised.
    If the user is logged in and passes the permission check, the view is executed
    normally.
**Example Usage**
class FitterEditView(PermissionRequiredMixin, UpdateView):
...
### PermissionRequiredMixin settings
permission_required = 'fitters.change_fitter'
### other view settings
context_object_name="fitter"
queryset = Fitter.objects.all()
form_class = FitterForm
...
**Class Settings**
    `permission_required` - the permission to check, of the form "<app_label>.<permission codename>",
    i.e. 'polls.can_vote' for a permission on a model in the polls application.
`login_url` - the login url of your site
    `redirect_field_name` - defaults to "next"
    `raise_exception` - defaults to False - raise PermissionDenied (403) if set to True
"""
### default class view settings
login_url = settings.LOGIN_URL
raise_exception = False
permission_required = None
    redirect_field_name = REDIRECT_FIELD_NAME
def dispatch(self, request, *args, **kwargs):
# call the parent dispatch first to pre-populate few things before we check for permissions
original_return_value = super(PermissionRequiredMixin, self).dispatch(request, *args, **kwargs)
# verify class settings
        if self.permission_required is None or len(self.permission_required.split('.')) != 2:
raise ImproperlyConfigured("'PermissionRequiredMixin' requires 'permission_required' attribute to be set to '<app_label>.<permission codename>' but is set to '%s' instead" % self.permission_required)
# verify permission on object instance if needed
has_permission = False
if hasattr(self, 'object') and self.object is not None:
has_permission = request.user.has_perm(self.permission_required, self.object)
elif hasattr(self, 'get_object') and callable(self.get_object):
has_permission = request.user.has_perm(self.permission_required, self.get_object())
else:
has_permission = request.user.has_perm(self.permission_required)
# user failed permission
if not has_permission:
if self.raise_exception:
return HttpResponseForbidden()
else:
path = urlquote(request.get_full_path())
tup = self.login_url, self.redirect_field_name, path
return HttpResponseRedirect("%s?%s=%s" % tup)
# user passed permission check so just return the result of calling .dispatch()
return original_return_value
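# Object-level usage sketch: when the view supplies get_object() (e.g. via Django's
# DetailView/SingleObjectMixin), dispatch() above tests the permission against that
# specific instance, which requires an object-permission backend such as
# django-guardian. Model and template names below are illustrative assumptions.
from django.views.generic import DetailView
class ExampleFitterDetailView(PermissionRequiredMixin, DetailView):
    permission_required = 'fitters.change_fitter'
    # model = Fitter  # assumed model whose instance the permission is checked against
    template_name = 'fitters/fitter_detail.html'  # assumed template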
| bsd-2-clause | 9,121,473,484,465,784,000 | 45.086207 | 211 | 0.643903 | false |
martinogden/django-banner-rotator | banner_rotator/admin.py | 1 | 3825 | #-*- coding:utf-8 -*-
from __future__ import unicode_literals
import logging
from functools import update_wrapper
from django import forms, template
from django.contrib import admin
from django.contrib.admin.util import unquote
from django.db import models
from django.shortcuts import get_object_or_404, render_to_response
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
from banner_rotator.models import Campaign, Place, Banner, Click
class PlaceAdmin(admin.ModelAdmin):
list_display = ('name', 'slug', 'size_str')
prepopulated_fields = {'slug': ('name',)}
class CampaignBannerInline(admin.StackedInline):
model = Banner
extra = 0
readonly_fields = ['views', 'clicks']
fields = ['is_active', 'places', 'name', 'url', 'file', 'weight', 'views', 'clicks']
formfield_overrides = {
models.ManyToManyField: {'widget': forms.CheckboxSelectMultiple},
}
class CampaignAdmin(admin.ModelAdmin):
list_display = ('name', 'created_at', 'updated_at')
fields = ('name',)
inlines = [CampaignBannerInline]
class BannerAdmin(admin.ModelAdmin):
list_display = ('name', 'campaign', 'weight', 'url', 'views', 'is_active')
list_filter = ('campaign', 'places', 'is_active')
date_hierarchy = 'created_at'
fieldsets = (
(_('Main'), {
'fields': ('campaign', 'places', 'name', 'url', 'url_target', 'file', 'alt'),
}),
(_('Show'), {
'fields': ('weight', 'views', 'max_views', 'clicks', 'max_clicks', 'start_at', 'finish_at', 'is_active'),
})
)
filter_horizontal = ('places',)
readonly_fields = ('views', 'clicks',)
object_log_clicks_template = None
def get_urls(self):
try:
# Django 1.4
from django.conf.urls import patterns, url
except ImportError:
from django.conf.urls.defaults import patterns, url
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
return update_wrapper(wrapper, view)
info = self.model._meta.app_label, self.model._meta.module_name
urlpatterns = patterns('',
url(r'^$', wrap(self.changelist_view), name='%s_%s_changelist' % info),
url(r'^add/$', wrap(self.add_view), name='%s_%s_add' % info),
url(r'^(.+)/history/$', wrap(self.history_view), name='%s_%s_history' % info),
url(r'^(.+)/delete/$', wrap(self.delete_view), name='%s_%s_delete' % info),
url(r'^(.+)/log/clicks/$', wrap(self.log_clicks_view), name='%s_%s_log_clicks' % info),
url(r'^(.+)/$', wrap(self.change_view), name='%s_%s_change' % info),
)
return urlpatterns
def log_clicks_view(self, request, object_id, extra_context=None):
model = self.model
opts = model._meta
app_label = opts.app_label
obj = get_object_or_404(model, pk=unquote(object_id))
context = {
'title': _('Log clicks'),
'module_name': capfirst(opts.verbose_name_plural),
'object': obj,
'app_label': app_label,
'log_clicks': Click.objects.filter(banner=obj).order_by('-datetime')
}
context.update(extra_context or {})
context_instance = template.RequestContext(request, current_app=self.admin_site.name)
return render_to_response(self.object_log_clicks_template or [
"admin/%s/%s/object_log_clicks.html" % (app_label, opts.object_name.lower()),
"admin/%s/object_log_clicks.html" % app_label,
], context, context_instance=context_instance)
admin.site.register(Banner, BannerAdmin)
admin.site.register(Campaign, CampaignAdmin)
admin.site.register(Place, PlaceAdmin)
| mit | 6,486,784,982,969,740,000 | 35.778846 | 117 | 0.608889 | false |
DailyActie/Surrogate-Model | 01-codes/deap-master/examples/coev/coop_evol.py | 1 | 6195 | # This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
"""This example contains the evolving test from *Potter, M. and De Jong, K.,
2001, Cooperative Coevolution: An Architecture for Evolving Co-adapted
Subcomponents.* section 4.2.4. The number of species is evolved by adding and
removing species as stagnation occurs.
"""
import random
try:
import matplotlib.pyplot as plt
plt.figure()
except:
plt = False
import numpy
from deap import algorithms
from deap import tools
import coop_base
IND_SIZE = coop_base.IND_SIZE
SPECIES_SIZE = coop_base.SPECIES_SIZE
NUM_SPECIES = 1
TARGET_SIZE = 30
IMPROVMENT_TRESHOLD = 0.5
IMPROVMENT_LENGTH = 5
EXTINCTION_TRESHOLD = 5.0
noise = "*##*###*###*****##*##****#*##*###*#****##******##*#**#*#**######"
schematas = ("1##1###1###11111##1##1111#1##1###1#1111##111111##1#11#1#11######",
"1##1###1###11111##1##1000#0##0###0#0000##000000##0#00#0#00######",
"0##0###0###00000##0##0000#0##0###0#0000##001111##1#11#1#11######")
toolbox = coop_base.toolbox
toolbox.register("evaluateContribution", coop_base.matchSetContribution)
def main(extended=True, verbose=True):
target_set = []
species = []
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)
logbook = tools.Logbook()
logbook.header = "gen", "species", "evals", "std", "min", "avg", "max"
ngen = 300
g = 0
for i in range(len(schematas)):
size = int(TARGET_SIZE / len(schematas))
target_set.extend(toolbox.target_set(schematas[i], size))
species = [toolbox.species() for _ in range(NUM_SPECIES)]
species_index = list(range(NUM_SPECIES))
last_index_added = species_index[-1]
# Init with random a representative for each species
representatives = [random.choice(species[i]) for i in range(NUM_SPECIES)]
best_fitness_history = [None] * IMPROVMENT_LENGTH
if plt and extended:
contribs = [[]]
stag_gen = []
collab = []
while g < ngen:
# Initialize a container for the next generation representatives
next_repr = [None] * len(species)
for (i, s), j in zip(enumerate(species), species_index):
# Vary the species individuals
s = algorithms.varAnd(s, toolbox, 0.6, 1.0)
# Get the representatives excluding the current species
r = representatives[:i] + representatives[i + 1:]
for ind in s:
# Evaluate and set the individual fitness
ind.fitness.values = toolbox.evaluate([ind] + r, target_set)
record = stats.compile(s)
logbook.record(gen=g, species=j, evals=len(s), **record)
if verbose:
print(logbook.stream)
# Select the individuals
species[i] = toolbox.select(s, len(s)) # Tournament selection
next_repr[i] = toolbox.get_best(s)[0] # Best selection
if plt and extended:
# Book keeping of the collaborative fitness
collab.append(next_repr[i].fitness.values[0])
g += 1
representatives = next_repr
# Keep representatives fitness for stagnation detection
best_fitness_history.pop(0)
best_fitness_history.append(representatives[0].fitness.values[0])
try:
diff = best_fitness_history[-1] - best_fitness_history[0]
except TypeError:
diff = float("inf")
if plt and extended:
for (i, rep), j in zip(enumerate(representatives), species_index):
contribs[j].append((toolbox.evaluateContribution(representatives,
target_set, i)[0], g - 1))
if diff < IMPROVMENT_TRESHOLD:
if len(species) > 1:
contributions = []
for i in range(len(species)):
contributions.append(toolbox.evaluateContribution(representatives, target_set, i)[0])
for i in reversed(range(len(species))):
if contributions[i] < EXTINCTION_TRESHOLD:
species.pop(i)
species_index.pop(i)
representatives.pop(i)
last_index_added += 1
best_fitness_history = [None] * IMPROVMENT_LENGTH
species.append(toolbox.species())
species_index.append(last_index_added)
representatives.append(random.choice(species[-1]))
if extended and plt:
stag_gen.append(g - 1)
contribs.append([])
if extended:
for r in representatives:
# print final representatives without noise
print("".join(str(x) for x, y in zip(r, noise) if y == "*"))
if extended and plt: # Ploting of the evolution
line1, = plt.plot(collab, "--", color="k")
for con in contribs:
try:
con, g = zip(*con)
line2, = plt.plot(g, con, "-", color="k")
except ValueError:
pass
axis = plt.axis("tight")
for s in stag_gen:
plt.plot([s, s], [0, axis[-1]], "--", color="k")
plt.legend((line1, line2), ("Collaboration", "Contribution"), loc="center right")
plt.xlabel("Generations")
plt.ylabel("Fitness")
plt.show()
if __name__ == "__main__":
main()
| mit | 5,767,746,708,003,492,000 | 33.226519 | 105 | 0.585149 | false |
dengxiangyu768/dengxytools | ops/aliyun_stack.py | 1 | 6153 | #coding:utf8
import sys
import traceback
import os
from aliyunsdkcore.client import AcsClient
from aliyunsdkros.request.v20150901 import DescribeRegionsRequest, CreateStacksRequest
from aliyunsdkros.request.v20150901 import DescribeResourcesRequest
from aliyunsdkros.request.v20150901 import DeleteStackRequest
from aliyunsdkros.request.v20150901 import DescribeStackDetailRequest
from aliyunsdkros.request.v20150901 import DescribeResourceDetailRequest
from aliyunsdkros.request.v20150901 import DescribeResourceTypeDetailRequest
from aliyunsdkros.request.v20150901 import DescribeStacksRequest
from aliyunsdkecs.request.v20140526 import DescribeInstancesRequest
from aliyunsdkros.request.v20150901 import ValidateTemplateRequest
import pprint
import time
import json
def getTemplate(stackName, instanceCount):
# "ImageId":"m-2zefbha9mr8gcq5m5qjx",
template = '''
{
"ROSTemplateFormatVersion": "2015-09-01",
"Parameters" : {
"MaxAmount": {
"Type" : "String",
"Default": "2",
"Description": "count"
},
"MinAmount": {
"Type" : "String",
"Default": "2",
"Description": "count"
}
},
"Resources": {
"%s": {
"Type": "ALIYUN::ECS::InstanceGroup",
"Description": "Create a ECS instance for demo.",
"Properties": {
"ZoneId":"cn-shenzhen-c",
"VpcId":"vpc-wz9a7uv2zck7f16yavolo",
"VSwitchId":"vsw-wz90z83fp9dupm79q9gcj",
"AllocatePublicIP":"true",
"SecurityGroupId":"sg-wz929lbqp06cd20yicgr",
"InstanceType":"ecs.gn5i-c16g1.4xlarge",
"InternetChargeType":"PayByTraffic",
"InternetMaxBandwidthIn":5,
"ImageId":"m-wz9cnchd6x4yj5m91bpz",
"SystemDiskSize":100,
"SystemDiskCategory": "cloud_efficiency",
"KeyPairName":"aihome_cloud",
"MinAmount": %s,
"MaxAmount": %s
}
}
},
"Outputs": {
"gpu_instance_internelIps": {
"Value": {
"Fn::GetAtt": ["%s", "PublicIps"]
}
}
}
}
''' % (stackName, instanceCount, instanceCount, stackName)
return template
def createStack(client, stackName, region, instanceCount):
req = CreateStacksRequest.CreateStacksRequest()
req.set_headers({'x-acs-region-id': region})
template = getTemplate(stackName, instanceCount)
create_stack_body = '''
{
"Name": "%s",
"TimeoutMins": %d,
"Template": %s
}
''' % (stackName, 60, template)
req.set_content(create_stack_body)
# for i in range(5):
# try:
# body = client.do_action_with_exception(req)
# result = json.loads(body)
# print(result)
# break
# except:
# time.sleep(5)
# else:
# print "ERROR on create"
# sys.exit(1)
try:
body = client.do_action_with_exception(req)
result = json.loads(body)
print(result)
except Exception, e:
print 'str(Exception):\t', str(Exception)
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print 'e.message:\t', e.message
print 'traceback.print_exc():'; traceback.print_exc()
print 'traceback.format_exc():\n%s' % traceback.format_exc()
def getStackID(client, stackName):
req = DescribeStacksRequest.DescribeStacksRequest()
req.set_headers({'x-acs-region-id': 'cn-shenzhen'})
body = client.do_action_with_exception(req)
result = json.loads(body)
for i in result['Stacks']:
if i['Name'] == stackName:
return i['Id']
def getResourcesIP(client, stackName):
req = DescribeResourceDetailRequest.DescribeResourceDetailRequest()
req.set_StackName(stackName)
stackID = getStackID(client, stackName)
req.set_StackId(stackID)
req.set_ResourceName(stackName)
body = client.do_action_with_exception(req)
result = json.loads(body)
ecsIds = []
for item in json.loads(result[u'ResourceData']):
ecsIds.append(str(item))
if not ecsIds:
print "no exist ECS instance"
return False
IPs = getDescribeInstances(client, ecsIds)
hf = open("/root/cluster/host.txt", "w")
for ip, name in IPs:
hf.write(ip + " " + name + '\n')
print ip, name
hf.close()
def deleteStack(client, stackName, region):
req = DeleteStackRequest.DeleteStackRequest()
stackID = getStackID(client, stackName)
if not stackID:
print "no exists stack: %s" % stackName
req.set_StackId(stackID)
req.set_headers({'x-acs-region-id': 'region'})
req.set_StackName(stackName)
body = client.do_action_with_exception(req)
print(body)
def getRegions(client):
req = DescribeRegionsRequest.DescribeRegionsRequest()
body = client.do_action_with_exception(req)
regions = json.loads(body)
for r in regions["Regions"]:
print r['LocalName'], r['RegionId']
def getDescribeInstances(client, instanceIds):
req = DescribeInstancesRequest.DescribeInstancesRequest()
req.set_InstanceIds(instanceIds)
req.set_PageSize(len(instanceIds))
response = json.loads(client.do_action_with_exception(req))
IPs = []
for instance in response['Instances']['Instance']:
IPs.append((instance['NetworkInterfaces']['NetworkInterface'][0]['PrimaryIpAddress'], instance['InstanceName']))
return IPs
def validateTemplate(client, stackName, instanceCount):
req = ValidateTemplateRequest.ValidateTemplateRequest()
template = getTemplate(stackName, instanceCount)
create_stack_body = '''
{
"Name": "%s",
"TimeoutMins": %d,
"Template": %s
}
''' % (stackName, 60, template)
req.set_content(create_stack_body)
body = client.do_action_with_exception(req)
result = json.loads(body)
print(result)
if __name__ == "__main__":
region = 'beijing'
client = AcsClient('*****', '******', region)
option = sys.argv[1]
if option == "create":
createStack(client, "shenzhen_gpu_stack", region, 1)
elif option == "show":
getResourcesIP(client, "shenzhen_gpu_stack")
elif option == "delete":
deleteStack(client, "shenzhen_gpu_stack", region)
elif option == 'validate':
validateTemplate(client, "shenzhen_gpu_stack", 1)
| apache-2.0 | 6,446,802,008,184,042,000 | 30.392857 | 116 | 0.65659 | false |
ragupta-git/ImcSdk | imcsdk/mometa/bios/BiosVfLegacyUSBSupport.py | 1 | 3878 | """This module contains the general information for BiosVfLegacyUSBSupport ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class BiosVfLegacyUSBSupportConsts:
VP_LEGACY_USBSUPPORT_AUTO = "Auto"
VP_LEGACY_USBSUPPORT_DISABLED = "Disabled"
VP_LEGACY_USBSUPPORT_ENABLED = "Enabled"
_VP_LEGACY_USBSUPPORT_AUTO = "auto"
_VP_LEGACY_USBSUPPORT_DISABLED = "disabled"
_VP_LEGACY_USBSUPPORT_ENABLED = "enabled"
VP_LEGACY_USBSUPPORT_PLATFORM_DEFAULT = "platform-default"
class BiosVfLegacyUSBSupport(ManagedObject):
"""This is BiosVfLegacyUSBSupport class."""
consts = BiosVfLegacyUSBSupportConsts()
naming_props = set([])
mo_meta = {
"classic": MoMeta("BiosVfLegacyUSBSupport", "biosVfLegacyUSBSupport", "LegacyUSB-Support", VersionMeta.Version151f, "InputOutput", 0x1f, [], ["admin"], [u'biosPlatformDefaults', u'biosSettings'], [], ["Get", "Set"]),
"modular": MoMeta("BiosVfLegacyUSBSupport", "biosVfLegacyUSBSupport", "LegacyUSB-Support", VersionMeta.Version2013e, "InputOutput", 0x1f, [], ["admin"], [u'biosPlatformDefaults', u'biosSettings'], [], ["Get", "Set"])
}
prop_meta = {
"classic": {
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"vp_legacy_usb_support": MoPropertyMeta("vp_legacy_usb_support", "vpLegacyUSBSupport", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["Auto", "Disabled", "Enabled", "auto", "disabled", "enabled", "platform-default"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version151f, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
},
"modular": {
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"vp_legacy_usb_support": MoPropertyMeta("vp_legacy_usb_support", "vpLegacyUSBSupport", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["Disabled", "Enabled", "auto", "disabled", "enabled", "platform-default"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
},
}
prop_map = {
"classic": {
"dn": "dn",
"rn": "rn",
"status": "status",
"vpLegacyUSBSupport": "vp_legacy_usb_support",
"childAction": "child_action",
},
"modular": {
"dn": "dn",
"rn": "rn",
"status": "status",
"vpLegacyUSBSupport": "vp_legacy_usb_support",
"childAction": "child_action",
},
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.status = None
self.vp_legacy_usb_support = None
self.child_action = None
ManagedObject.__init__(self, "BiosVfLegacyUSBSupport", parent_mo_or_dn, **kwargs)
| apache-2.0 | -8,197,067,225,577,513,000 | 49.363636 | 274 | 0.625064 | false |
wummel/patool | tests/archives/test_pybz2.py | 1 | 1203 | # -*- coding: utf-8 -*-
# Copyright (C) 2010-2015 Bastian Kleineidam
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from . import ArchiveTest, Content
from .. import needs_program
class TestPybz2 (ArchiveTest):
program = 'py_bz2'
@needs_program('bzip2')
def test_py_bz2 (self):
self.archive_extract('t.txt.bz2', check=Content.Singlefile)
# bzip2 is used to test the created archive
self.archive_create('t.txt.bz2', check=Content.Singlefile)
@needs_program('file')
def test_py_bz2_file (self):
self.archive_extract('t.txt.bz2.foo', check=Content.Singlefile)
| gpl-3.0 | 415,507,764,006,046,660 | 36.59375 | 71 | 0.718204 | false |
thenetcircle/dino | test/utils/test_utils.py | 1 | 15563 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from datetime import timedelta
from unittest import TestCase
from uuid import uuid4 as uuid
from activitystreams import parse as as_parser
from dino import environ
from dino import utils
from dino.config import ConfigKeys
from dino.config import ApiActions
from dino.exceptions import NoOriginRoomException
from dino.exceptions import NoTargetRoomException
from dino.exceptions import NoTargetChannelException
from dino.exceptions import NoOriginChannelException
from dino.validation.acl import AclRangeValidator
from dino.validation.acl import AclSameChannelValidator
from dino.validation.acl import AclStrInCsvValidator
from dino.validation.acl import AclDisallowValidator
class FakeDb(object):
_room_contains = dict()
_moderators = dict()
_owners = dict()
_admins = dict()
_super_users = set()
_channel_owners = dict()
_global_moderators = dict()
_bans = {
'global': '',
'channel': '',
'room': ''
}
_room_acls = {
'message': dict(),
'crossroom': {'samechannel': ''},
}
_channel_acls = {
'message': dict(),
'crossroom': {'samechannel': ''},
}
_channel_names = dict()
def is_admin(self, channel_id, user_id):
if channel_id not in FakeDb._admins:
return False
return user_id in FakeDb._admins[channel_id]
def is_owner(self, room_id, user_id):
if room_id not in self._owners:
return False
return self._owners[room_id] is not None and user_id in self._owners[room_id]
def is_owner_channel(self, channel_id, user_id):
if channel_id not in FakeDb._channel_owners:
return False
return FakeDb._channel_owners[channel_id] is not None and user_id in FakeDb._channel_owners[channel_id]
def is_super_user(self, user_id):
return user_id in FakeDb._super_users
def is_global_moderator(self, user_id):
return user_id in FakeDb._global_moderators
def is_moderator(self, room_id, user_id):
return room_id in FakeDb._moderators and user_id in FakeDb._moderators[room_id]
def room_contains(self, room_id, user_id):
if room_id not in FakeDb._room_contains:
return False
return user_id in FakeDb._room_contains[room_id]
def set_user_name(self, user_id, user_name):
pass
def get_user_ban_status(self, room_id, user_id):
return FakeDb._bans
def get_channel_name(self, channel_id):
if channel_id not in FakeDb._channel_names:
return None
return FakeDb._channel_names[channel_id]
def get_acls_in_channel_for_action(self, channel_id, action):
if action not in FakeDb._channel_acls:
return dict()
return FakeDb._channel_acls[action]
def get_acls_in_room_for_action(self, room_id: str, action: str):
if action not in FakeDb._room_acls:
return dict()
return FakeDb._room_acls[action]
def get_admin_room(self, *args):
return BaseWithDb.ROOM_ID
def channel_for_room(self, *args):
return BaseWithDb.CHANNEL_ID
def get_last_read_timestamp(self, *args):
return datetime.utcnow().strftime(ConfigKeys.DEFAULT_DATE_FORMAT)
class BaseWithDb(TestCase):
OTHER_USER_ID = '9876'
CHANNEL_ID = '8765'
CHANNEL_NAME = 'Shanghai'
ROOM_ID = '4567'
ROOM_NAME = 'cool guys'
OTHER_ROOM_ID = '9999'
OTHER_CHANNEL_ID = '8888'
USER_ID = '1234'
USER_NAME = 'Joe'
AGE = '30'
GENDER = 'f'
MEMBERSHIP = '0'
IMAGE = 'y'
HAS_WEBCAM = 'y'
FAKE_CHECKED = 'n'
COUNTRY = 'cn'
CITY = 'Shanghai'
TOKEN = str(uuid())
def remove_owner(self):
FakeDb._owners[BaseWithDb.ROOM_ID] = None
def set_super_user(self):
FakeDb._super_users.add(BaseWithDb.USER_ID)
def set_owner(self):
FakeDb._owners[BaseWithDb.ROOM_ID] = BaseWithDb.USER_ID
def set_moderator(self):
FakeDb._moderators[BaseWithDb.ROOM_ID] = BaseWithDb.USER_ID
def set_channel_owner(self):
FakeDb._channel_owners[BaseWithDb.CHANNEL_ID] = {BaseWithDb.USER_ID}
def set_channel_admin(self):
FakeDb._admins[BaseWithDb.CHANNEL_ID] = {BaseWithDb.USER_ID}
def setUp(self):
environ.env.db = FakeDb()
FakeDb._room_contains = {
BaseWithDb.ROOM_ID: {
BaseWithDb.USER_ID
},
BaseWithDb.OTHER_ROOM_ID: set()
}
FakeDb._bans = {
'global': '',
'channel': '',
'room': ''
}
FakeDb._room_acls = dict()
FakeDb._channel_acls = dict()
FakeDb._admins = dict()
FakeDb._super_users = set()
FakeDb._channel_owners = dict()
FakeDb._owners = dict()
FakeDb._moderators = dict()
FakeDb._channel_names = {
BaseWithDb.CHANNEL_ID: BaseWithDb.CHANNEL_NAME
}
environ.env.config = {
ConfigKeys.ACL: {
'room': {
'crossroom': {
'acls': [
'samechannel',
'disallow'
]
},
'message': {
'acls': [
'gender',
'age',
'country',
]
}
},
'channel': {
'crossroom': {
'acls': [
'samechannel',
'disallow'
]
},
'message': {
'acls': [
'gender',
'age',
'country'
]
}
},
'available': {
'acls': [
'gender',
'age',
'samechannel',
'disallow'
]
},
'validation': {
'disallow': {
'type': 'disallow',
'value': AclDisallowValidator()
},
'samechannel': {
'type': 'samechannel',
'value': AclSameChannelValidator()
},
'country': {
'type': 'anything',
'value': AclStrInCsvValidator()
},
'gender': {
'type': 'str_in_csv',
'value': AclStrInCsvValidator('m,f')
},
'age': {
'type': 'range',
'value': AclRangeValidator()
}
}
}
}
def ban_user(self, past=False, target='channel'):
if past:
bantime = datetime.utcnow() - timedelta(0, 240) # 4 minutes ago
else:
bantime = datetime.utcnow() + timedelta(0, 240) # 4 minutes left
bantime = str(bantime.timestamp()).split('.')[0]
FakeDb._bans[target] = bantime
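# Illustrative note (not part of the original tests): after ban_user(target='room'),
# FakeDb._bans holds an epoch-seconds string for that scope, e.g.
#   {'global': '', 'channel': '', 'room': '1500000000'}   # value is hypothetical
# and this dict is what the utils.is_banned() checks in the tests below read
# via get_user_ban_status().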
class UtilsBase64Test(TestCase):
def setUp(self):
self.b64 = 'YXNkZg=='
self.plain = 'asdf'
def test_b64e(self):
self.assertEqual(self.b64, utils.b64e(self.plain))
def test_b64e_blank(self):
self.assertEqual('', utils.b64e(''))
def test_b64e_none(self):
self.assertEqual('', utils.b64e(None))
def test_b64d(self):
self.assertEqual(self.plain, utils.b64d(self.b64))
def test_b64d_blank(self):
self.assertEqual('', utils.b64d(''))
def test_b64d_none(self):
self.assertEqual('', utils.b64d(None))
def test_b64d_invalid(self):
self.assertEqual('', utils.b64d('åäåö'))
class UtilsActivityForTest(TestCase):
def test_activity_for_user_banned(self):
self.assertIsNotNone(utils.activity_for_user_banned('1', '2', '3', '4', '5', '6'))
def test_activity_for_user_kicked(self):
self.assertIsNotNone(utils.activity_for_user_kicked('1', '2', '3', '4', '5', '6'))
def test_activity_for_request_admin(self):
self.assertIsNotNone(utils.activity_for_request_admin('1', '2', '3', '4', '5', '6'))
def test_activity_for_list_channels(self):
        channels = {'id': ('name', 1, 'normal'), 'other-id': ('other-name', 2, 'normal')}
self.assertIsNotNone(utils.activity_for_list_channels(channels))
def test_activity_for_invite(self):
self.assertIsNotNone(utils.activity_for_invite('1', '2', '3', '4', '5', '6'))
def test_activity_for_whisper(self):
self.assertIsNotNone(utils.activity_for_whisper('1', '2', '3', '4', '5', '6', '7'))
class UtilsSmallFunctionsTest(BaseWithDb):
def setUp(self):
super(UtilsSmallFunctionsTest, self).setUp()
def test_is_user_in_room(self):
self.assertTrue(utils.is_user_in_room(BaseWithDb.USER_ID, BaseWithDb.ROOM_ID))
def test_is_user_in_room_blank_room(self):
self.assertFalse(utils.is_user_in_room(BaseWithDb.USER_ID, ''))
def test_set_name_for_user_id(self):
utils.set_name_for_user_id(BaseWithDb.USER_ID, BaseWithDb.USER_NAME)
def test_is_not_banned(self):
is_banned, msg = utils.is_banned(BaseWithDb.USER_ID, BaseWithDb.ROOM_ID)
self.assertFalse(is_banned)
def test_is_banned_channel(self):
self.ban_user(target='channel')
is_banned, msg = utils.is_banned(BaseWithDb.USER_ID, BaseWithDb.ROOM_ID)
self.assertTrue(is_banned)
def test_is_banned_room(self):
self.ban_user(target='room')
is_banned, msg = utils.is_banned(BaseWithDb.USER_ID, BaseWithDb.ROOM_ID)
self.assertTrue(is_banned)
def test_is_banned_global(self):
self.ban_user(target='global')
is_banned, msg = utils.is_banned(BaseWithDb.USER_ID, BaseWithDb.ROOM_ID)
self.assertTrue(is_banned)
def test_get_channel_name(self):
self.assertEqual(BaseWithDb.CHANNEL_NAME, utils.get_channel_name(BaseWithDb.CHANNEL_ID))
def test_get_admin_room(self):
self.assertEqual(BaseWithDb.ROOM_ID, utils.get_admin_room())
def test_owner_is_allowed_to_delete_message(self):
self.set_owner()
self.assertTrue(utils.user_is_allowed_to_delete_message(BaseWithDb.ROOM_ID, BaseWithDb.USER_ID))
def test_admin_is_allowed_to_delete_message(self):
self.set_channel_admin()
self.assertTrue(utils.user_is_allowed_to_delete_message(BaseWithDb.ROOM_ID, BaseWithDb.USER_ID))
def test_moderator_is_allowed_to_delete_message(self):
self.set_moderator()
self.assertTrue(utils.user_is_allowed_to_delete_message(BaseWithDb.ROOM_ID, BaseWithDb.USER_ID))
def test_super_user_is_allowed_to_delete_message(self):
self.set_super_user()
self.assertTrue(utils.user_is_allowed_to_delete_message(BaseWithDb.ROOM_ID, BaseWithDb.USER_ID))
def test_user_is_not_allowed_to_delete_message(self):
self.assertFalse(utils.user_is_allowed_to_delete_message(BaseWithDb.ROOM_ID, BaseWithDb.USER_ID))
def test_get_last_read_for(self):
self.assertIsNotNone(utils.get_last_read_for(BaseWithDb.ROOM_ID, BaseWithDb.USER_ID))
class UtilsCanSendCrossRoomTest(BaseWithDb):
def json(self):
return {
'actor': {
'id': BaseWithDb.USER_ID
},
'object': {
'url': BaseWithDb.CHANNEL_ID
},
'provider': {
'url': BaseWithDb.CHANNEL_ID
},
'target': {
'objectType': 'room'
},
'verb': 'send'
}
def test_allowed(self):
act = self.json()
FakeDb._channel_acls[ApiActions.CROSSROOM] = {'samechannel': ''}
allowed = utils.can_send_cross_room(as_parser(act), BaseWithDb.ROOM_ID, BaseWithDb.OTHER_ROOM_ID)
self.assertTrue(allowed)
def test_allowed_same_room(self):
act = self.json()
FakeDb._channel_acls[ApiActions.CROSSROOM] = {'samechannel': ''}
allowed = utils.can_send_cross_room(as_parser(act), BaseWithDb.ROOM_ID, BaseWithDb.ROOM_ID)
self.assertTrue(allowed)
def test_not_allowed_different_channel(self):
act = self.json()
FakeDb._channel_acls[ApiActions.CROSSROOM] = {'samechannel': ''}
act['provider']['url'] = BaseWithDb.OTHER_CHANNEL_ID
allowed = utils.can_send_cross_room(as_parser(act), BaseWithDb.ROOM_ID, BaseWithDb.OTHER_ROOM_ID)
self.assertFalse(allowed)
def test_no_origin_room(self):
act = self.json()
FakeDb._channel_acls[ApiActions.CROSSROOM] = {'samechannel': ''}
self.assertRaises(NoOriginRoomException, utils.can_send_cross_room, as_parser(act), None, BaseWithDb.OTHER_ROOM_ID)
def test_no_target_room(self):
act = self.json()
FakeDb._channel_acls[ApiActions.CROSSROOM] = {'samechannel': ''}
self.assertRaises(NoTargetRoomException, utils.can_send_cross_room, as_parser(act), BaseWithDb.ROOM_ID, None)
def test_not_allowed(self):
act = self.json()
FakeDb._channel_acls[ApiActions.CROSSROOM] = {'disallow': ''}
allowed = utils.can_send_cross_room(as_parser(act), BaseWithDb.ROOM_ID, BaseWithDb.OTHER_ROOM_ID)
self.assertFalse(allowed)
class UtilsBanDurationTest(TestCase):
def get_now_plus(self, days=0, hours=0, minutes=0, seconds=0):
now = datetime.utcnow()
if minutes != 0:
seconds += minutes*60
ban_time = timedelta(days=days, hours=hours, seconds=seconds)
end_date = now + ban_time
return str(int(end_date.timestamp()))
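    # Sketch of the contract exercised below (an assumption drawn from these
    # tests, not from the utils implementation itself): ban_duration_to_timestamp
    # takes a '<number><unit>' string with unit in {s, m, h, d} and returns the
    # absolute end time as a string of whole epoch seconds, e.g.
    #   utils.ban_duration_to_timestamp('5d') == str(int((utcnow() + 5 days).timestamp()))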
def test_ban_duration_seconds(self):
expected = self.get_now_plus(seconds=50)
timestamp = utils.ban_duration_to_timestamp('50s')
self.assertEqual(expected, timestamp)
def test_ban_duration_hours(self):
expected = self.get_now_plus(hours=12)
timestamp = utils.ban_duration_to_timestamp('12h')
self.assertEqual(expected, timestamp)
def test_ban_duration_minutes(self):
expected = self.get_now_plus(minutes=15)
timestamp = utils.ban_duration_to_timestamp('15m')
self.assertEqual(expected, timestamp)
def test_ban_duration_days(self):
expected = self.get_now_plus(days=5)
timestamp = utils.ban_duration_to_timestamp('5d')
self.assertEqual(expected, timestamp)
def test_negative_duration(self):
self.assertRaises(ValueError, utils.ban_duration_to_timestamp, '-5d')
def test_ban_duration_invalid_unit(self):
self.assertRaises(ValueError, utils.ban_duration_to_timestamp, '5u')
| apache-2.0 | -8,109,607,419,109,835,000 | 32.897603 | 123 | 0.579729 | false |
MeerkatLabs/sleekpromises | test/promises/test_2_2_2.py | 1 | 9100 | """
2.2.2: If `onFulfilled` is a function
https://github.com/promises-aplus/promises-tests/blob/2.1.1/lib/tests/2.2.2.js
"""
import threading
from sleekxmpp.test import SleekTest
class Promise_2_2_2_1_TestCase(SleekTest):
"""
2.2.2.1: it must be called after `promise` is fulfilled, with `promise`'s fulfillment value as its first argument.
"""
dummy = {'dummy': 'dummy'}
sentinel = {'sentinel': 'sentinel'}
def setUp(self):
from sleekpromises import register_sleek_promises
register_sleek_promises()
self.session = {}
self.stream_start(plugins=['sleekpromises_scheduler', ])
self.scheduler = self.xmpp['sleekpromises_scheduler']
def tearDown(self):
self.stream_close()
def test_already_fulfilled(self):
self.session['called'] = False
event = threading.Event()
def fulfilled_called(arg):
self.session['called'] = True
self.assertIs(self.sentinel, arg)
event.set()
def rejected_called(arg):
self.assertFalse(self.session['called'])
# Create a promise and resolve it
promise = self.scheduler.promise()
promise.resolved(self.sentinel)
promise.then(fulfilled_called, rejected_called)
event.wait(1.0)
self.assertTrue(self.session['called'])
def test_immediately_fulfilled(self):
self.session['called'] = False
event = threading.Event()
def fulfilled_called(arg):
self.session['called'] = True
self.assertIs(self.sentinel, arg)
event.set()
def rejected_called(arg):
self.assertFalse(self.session['called'])
        # Create a promise, register the callbacks, then resolve it
promise = self.scheduler.promise()
promise.then(fulfilled_called, rejected_called)
promise.resolved(self.sentinel)
event.wait(1.0)
self.assertTrue(self.session['called'])
def test_eventually_fulfilled(self):
self.session['called'] = False
event = threading.Event()
def fulfilled_called(arg):
self.session['called'] = True
self.assertIs(self.sentinel, arg)
event.set()
def rejected_called(arg):
self.assertFalse(self.session['called'])
def deferred_method():
self.session['promise'].resolved(self.sentinel)
# Create a promise and store it off
promise = self.scheduler.promise()
self.session['promise'] = promise
promise.then(fulfilled_called, rejected_called)
# Schedule it on a different thread.
self.scheduler.schedule_task(deferred_method, delay=0.1)
event.wait(1.0)
self.assertTrue(self.session['called'])
class Promise_2_2_2_2_TestCase(SleekTest):
"""
2.2.2.2: it must not be called before `promise` is fulfilled
"""
dummy = {'dummy': 'dummy'}
sentinel = {'sentinel': 'sentinel'}
def setUp(self):
from sleekpromises import register_sleek_promises
register_sleek_promises()
self.session = {}
self.stream_start(plugins=['sleekpromises_scheduler', ])
self.scheduler = self.xmpp['sleekpromises_scheduler']
def tearDown(self):
self.stream_close()
def test_fulfilled_after_a_delay(self):
self.session['afterResolve'] = False
event = threading.Event()
def fulfilled_called(arg):
self.assertTrue(self.session['afterResolve'])
event.set()
def deferred():
promise.resolved(self.dummy)
self.session['afterResolve'] = True
        # Create a promise; it is resolved later by the deferred task
promise = self.scheduler.promise()
self.session['promise'] = promise
promise.then(fulfilled_called)
self.scheduler.schedule_task(deferred, delay=0.05)
event_wait = event.wait(1.0)
self.assertTrue(self.session['afterResolve'])
self.assertTrue(event_wait)
def test_never_fulfilled(self):
self.session['called'] = False
event = threading.Event()
def fulfilled_called(arg):
self.session['called'] = True
event.set()
promise = self.scheduler.promise()
promise.then(fulfilled_called)
event_wait = event.wait(0.150)
self.assertFalse(self.session['called'])
self.assertFalse(event_wait)
class Promise_2_2_2_3_TestCase(SleekTest):
"""
2.2.2.3: it must not be called more than once.
"""
dummy = {'dummy': 'dummy'}
sentinel = {'sentinel': 'sentinel'}
def setUp(self):
from sleekpromises import register_sleek_promises
register_sleek_promises()
self.session = {}
self.stream_start(plugins=['sleekpromises_scheduler', ])
self.scheduler = self.xmpp['sleekpromises_scheduler']
def tearDown(self):
self.stream_close()
def test_already_fulfilled(self):
self.session['times_called'] = 0
event = threading.Event()
def fulfilled(arg):
self.session['times_called'] += 1
event.set()
promise = self.scheduler.promise()
promise.resolved(self.dummy)
promise.then(fulfilled)
event_set = event.wait(1.0)
self.assertTrue(event_set)
self.assertEqual(1, self.session['times_called'])
def test_trying_to_fulfill_a_pending_promise_more_than_once_immediately(self):
self.session['times_called'] = 0
event = threading.Event()
def fulfilled(arg):
self.session['times_called'] += 1
event.set()
promise = self.scheduler.promise()
promise.then(fulfilled)
promise.resolved(self.dummy)
promise.resolved(self.dummy)
event_set = event.wait(1.0)
self.assertTrue(event_set)
self.assertEqual(1, self.session['times_called'])
def test_trying_to_fulfill_a_pending_promise_more_than_once_delayed(self):
self.session['times_called'] = 0
event = threading.Event()
def fulfilled(arg):
self.session['times_called'] += 1
event.set()
def deferred():
promise = self.session['promise']
promise.resolved(self.dummy)
promise.resolved(self.dummy)
promise = self.scheduler.promise()
self.session['promise'] = promise
promise.then(fulfilled)
self.scheduler.schedule_task(deferred, delay=0.50)
event_set = event.wait(1.0)
self.assertTrue(event_set)
self.assertEqual(1, self.session['times_called'])
def test_trying_to_fulfill_a_pending_promise_more_than_once_immediately_then_delayed(self):
self.session['times_called'] = 0
event = threading.Event()
def fulfilled(arg):
self.session['times_called'] += 1
event.set()
def deferred():
promise = self.session['promise']
promise.resolved(self.dummy)
promise = self.scheduler.promise()
self.session['promise'] = promise
promise.then(fulfilled)
promise.resolved(self.dummy)
self.scheduler.schedule_task(deferred, delay=0.50)
event_set = event.wait(1.0)
self.assertTrue(event_set)
self.assertEqual(1, self.session['times_called'])
def test_when_multiple_then_calls_are_made_spaced_apart_in_time(self):
self.session['times_called'] = [0, 0, 0]
event = threading.Event()
def fulfilled_0(arg):
self.session['times_called'][0] += 1
def fulfilled_1(arg):
self.session['times_called'][1] += 1
def fulfilled_2(arg):
self.session['times_called'][2] += 1
event.set()
def resolve_function():
promise = self.session['promise']
promise.resolved(self.dummy)
promise = self.scheduler.promise()
self.session['promise'] = promise
promise.then(fulfilled_0)
self.scheduler.schedule_task(lambda: promise.then(fulfilled_1), delay=0.05)
self.scheduler.schedule_task(lambda: promise.then(fulfilled_2), delay=0.10)
self.scheduler.schedule_task(resolve_function, delay=0.50)
event_set = event.wait(1.0)
self.assertTrue(event_set)
self.assertEqual([1, 1, 1], self.session['times_called'])
def test_when_then_is_interleaved_with_fulfillment(self):
self.session['times_called'] = [0, 0]
event = threading.Event()
def fulfilled_0(arg):
self.session['times_called'][0] += 1
def fulfilled_1(arg):
self.session['times_called'][1] += 1
event.set()
promise = self.scheduler.promise()
self.session['promise'] = promise
promise.then(fulfilled_0)
promise.resolved(self.dummy)
promise.then(fulfilled_1)
event_set = event.wait(1.0)
self.assertTrue(event_set)
self.assertEqual([1, 1], self.session['times_called'])
| bsd-3-clause | 6,575,437,319,859,750,000 | 25.608187 | 118 | 0.600769 | false |
spino327/sdr_testbed | DistributedTestbed/receiver/RXGui.py | 1 | 8188 | '''
Copyright (c) 2010, Universidad Industrial de Santander, Colombia
University of Delaware
All rights reserved.
@author: Sergio Pino
@author: Henry Arguello
Website: http://www.eecis.udel.edu/
emails : [email protected] - [email protected]
Date : Nov, 2010
'''
from gnuradio.wxgui import fftsink2, scopesink2
from gnuradio import gr
from gnuradio import blks2
from grc_gnuradio import wxgui
from gnuradio.wxgui import forms
import wx
class RXGui(gr.hier_block2):
'''
    This class has two input ports: the first one is for the antenna signal
    and the second is for the matched filter output.
    It constructs the GUI for the application. Flow graph (one chain per
    input port, i.e. two of each block):
      2            2              2
    --->(throttle)--->(resampler)--->(fft)
                           | 2
                           --->(scope)
    2 fft = 1 for raw antenna samples and 1 for matched filter
    2 scope = 1 for raw antenna samples and 1 for matched filter
'''
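    # Rough usage sketch (an assumption mirroring the commented-out __main__ at
    # the bottom of this file): the owning flow graph connects the raw antenna
    # stream to input port 0 and the matched-filter stream to input port 1,
    #   tb.connect(antenna_src, (rx_gui, 0))
    #   tb.connect(matched_filter, (rx_gui, 1))
    # and then calls rx_gui.Run() to start the wx GUI loop.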
def __init__(self, app, gain, fc, samp_rate, inter, dec):
'''
in:
- app = object of type RXApp
            - gain = initial gain value shown in the gain control
            - fc = center frequency in Hertz
            - samp_rate = sample rate in Hertz
- inter = interpolation factor
- dec = decimation factor
'''
gr.hier_block2.__init__(self, "RXGui",
gr.io_signature(2, 2, gr.sizeof_gr_complex),
gr.io_signature(0, 0, 0))
# instance variables
self.app = app
self.gui = wxgui.top_block_gui("BPSK RX")
self.nb = self.__createNoteBook()
# controls
self.gainTextBox = forms.text_box(
parent=self.gui.GetWin(),
value=gain,
callback=self.setGain,
label="gain",
converter=forms.float_converter(),
)
self.fcTextBox = forms.text_box(
parent=self.gui.GetWin(),
value=fc,
callback=self.setFc,
label="fc",
converter=forms.float_converter(),
)
self.startButton = wx.Button(self.gui.GetWin(), label="Record")
self.startButton.Bind(wx.EVT_BUTTON, self.startRecording)
# adding the visual components to the notebook
self.gui.Add(self.gainTextBox)
self.gui.Add(self.fcTextBox)
self.gui.Add(self.startButton)
#EO Controls
# for port 1 Antenna samples (COMPLEX)
self.throttleAn = gr.throttle(gr.sizeof_gr_complex, samp_rate)
# resampler Antenna
if inter == 1 and dec == 1:
self.resamplerAn = gr.multiply_const_vcc((1,))
print("i: resamplerAn not need")
else:
self.resamplerAn = blks2.rational_resampler_ccc(
interpolation=inter,
decimation=dec,
taps=None,
fractional_bw=None,
)
# self.cmp2arg1 = gr.complex_to_arg()
self.fftAn = fftsink2.fft_sink_c(
self.nb.GetPage(0).GetWin(),
baseband_freq=0,
y_per_div=5,
y_divs=10,
ref_level=-40,
sample_rate= inter*samp_rate/dec,
fft_size=512,
fft_rate=10,
average=True,
avg_alpha=0.1,
title="FFT Plot Antenna",
peak_hold=False,
)
self.scope_IQAn = scopesink2.scope_sink_c(
self.nb.GetPage(1).GetWin(),
title="Scope IQ Antenna",
sample_rate = inter*samp_rate/dec,
v_scale=0.001,
t_scale=0.001,
ac_couple=False,
xy_mode=False,
num_inputs=1,
)
# adding the visual components to the notebook
self.nb.GetPage(0).Add(self.fftAn.win)
self.nb.GetPage(1).Add(self.scope_IQAn.win)
# for port 2 Matched filter (COMPLEX)
self.throttleMF = gr.throttle(gr.sizeof_gr_complex, samp_rate)
# resampler MF
if inter == 1 and dec == 1:
self.resamplerMF = gr.multiply_const_vcc((1,))
print("i: resamplerMF not need")
else:
self.resamplerMF = blks2.rational_resampler_ccc(
interpolation=inter,
decimation=dec,
taps=None,
fractional_bw=None,
)
# self.cmp2arg1 = gr.complex_to_arg()
self.fftMF = fftsink2.fft_sink_c(
self.nb.GetPage(2).GetWin(),
baseband_freq=0,
y_per_div=5,
y_divs=10,
ref_level=-40,
sample_rate= inter*samp_rate/dec,
fft_size=512,
fft_rate=10,
average=True,
avg_alpha=0.1,
title="FFT Plot MF",
peak_hold=False,
)
self.scope_IQMF = scopesink2.scope_sink_c(
self.nb.GetPage(3).GetWin(),
title="Scope IQ MF",
sample_rate = inter*samp_rate/dec,
v_scale=0.0005,
t_scale=0.001,
ac_couple=False,
xy_mode=False,
num_inputs=1,
)
# adding the visual components to the notebook
self.nb.GetPage(2).Add(self.fftMF.win)
self.nb.GetPage(3).Add(self.scope_IQMF.win)
# end of MF
self.__makeConnections()
def __createNoteBook(self):
'''
creates the NoteBook
'''
n1 = wx.Notebook(self.gui.GetWin(), style=wx.NB_RIGHT)
n1.AddPage(wxgui.Panel(n1), "fft Ant")
n1.AddPage(wxgui.Panel(n1), "scopeIQ Ant")
n1.AddPage(wxgui.Panel(n1), "fft MF")
n1.AddPage(wxgui.Panel(n1), "scopeIQ MF", True)
self.gui.Add(n1)
return n1
def __makeConnections(self):
'''
        uses the method connect(src, dst)
'''
#Port 1
self.connect((self, 0), (self.throttleAn, 0))
self.connect((self.throttleAn, 0), (self.resamplerAn, 0))
self.connect((self.resamplerAn, 0), (self.fftAn, 0))
self.connect((self.resamplerAn, 0), (self.scope_IQAn, 0))
# self.connect((self.resamplerAn, 0), (self.cmp2arg1, 0))
# self.connect((self.cmp2arg1, 0), (self.fftAn, 0))
# self.connect((self.cmp2arg1, 0), (self.scope_IQAn, 0))
# null_sink = gr.null_sink(gr.sizeof_gr_complex*1)
# self.connect((self, 0), null_sink)
#Port 2
self.connect((self, 1), (self.throttleMF, 0))
self.connect((self.throttleMF, 0), (self.resamplerMF, 0))
self.connect((self.resamplerMF, 0), (self.fftMF, 0))
self.connect((self.resamplerMF, 0), (self.scope_IQMF, 0))
# self.connect((self.resamplerDem, 0), (self.cmp2arg2, 0))
# self.connect((self.cmp2arg2, 0), (self.fftDem, 0))
# self.connect((self.cmp2arg2, 0), (self.scope_IQDem, 0))
def Run(self):
'''
calls the Run method in the gui object
'''
self.gui.Run(True)
def setFc(self, fc):
self.fcTextBox.set_value(fc)
self.app.setFc(fc)
def setGain(self, gain):
self.gainTextBox.set_value(gain)
self.app.setGain(gain)
def startRecording(self, event):
self.app.startRecording()
#if __name__ == "__main__":
#
# tb = gr.top_block()
# signalRaw = gr.sig_source_c(1e4, gr.GR_SIN_WAVE, 350, 1)
# signalDem = gr.sig_source_c(1e4, gr.GR_TRI_WAVE, 200, 1)
# signalCL = gr.sig_source_c(1e4, gr.GR_SIN_WAVE, 350, 1)
# signalAGC = gr.sig_source_c(1e4, gr.GR_TRI_WAVE, 200, 1)
# temp = RXGui(None, 1, 0, 1e4, 1, 1)
# tb.connect(signalRaw, (temp, 0))
# tb.connect(signalAGC, (temp, 1))
# tb.connect(signalCL, (temp, 2))
# tb.connect(signalDem, (temp, 3))
# tb.start()
# temp.Run()
| apache-2.0 | -2,311,429,111,140,030,000 | 32.153846 | 102 | 0.51319 | false |
nfqsolutions/nfq-conductor | nfq/conductor/server.py | 1 | 5092 | # NFQ Conductor. A tool for centralizing and visualizing logs.
# Copyright (C) 2017 Guillem Borrell Nogueras
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import logging
import os
from datetime import datetime
from functools import partial
import zmq
from tornado import web
from tornado.options import options
from zmq.eventloop import ioloop, zmqstream
# Global variables for cached content. Linters will say it is not used.
from nfq.conductor.config import root_path
from nfq.conductor.db import Process, Daemon
from nfq.conductor.web import ConfigHandler, DeleteHandler, RelaunchHandler
from nfq.conductor.web import DaemonsHandler, DaemonHandler, ResetHandler, \
IndexHandler, LastLogsHandler, ComponentHandler, RestActiveHandler, \
RestLastHandler, RestPageHandler, RestCoLastHandler
from nfq.conductor.ws import WSHandler
from nfq.conductor.db import engine, Base, LogEntry, session, clients
ioloop.install()
def process_log(messages):
global clients
for message in messages:
parsed = json.loads(message.decode())
entry = LogEntry(
source=parsed['source'],
when=datetime.strptime(parsed['when'], "%Y-%m-%dT%H:%M:%S.%f"),
message=parsed['message']
)
session.add(entry)
sub_message = parsed['message']
if sub_message.startswith('~~~~'):
sub_message = sub_message.strip('~')
sub_parsed = json.loads(sub_message)
process = Process(
process=sub_parsed['process'],
wrapped=sub_parsed['wrapped'],
when=datetime.strptime(parsed['when'], "%Y-%m-%dT%H:%M:%S.%f"),
host=sub_parsed['host'],
source=parsed['source'],
label=sub_parsed['label'],
command=sub_parsed['command'],
running=True
)
session.add(process)
logging.info('Added process {}'.format(sub_parsed['label']))
elif sub_message.startswith('^^^^'):
sub_message = sub_message.strip('^')
logging.info(sub_message)
sub_parsed = json.loads(sub_message)
daemon = Daemon(
ip=sub_parsed['ip'],
uuid=sub_parsed['uuid'],
when=datetime.strptime(parsed['when'], "%Y-%m-%dT%H:%M:%S.%f"),
port=sub_parsed['port'],
active=True
)
session.add(daemon)
logging.info('Added daemon {}'.format(sub_parsed['uuid']))
# Manage subscriptions
for client in clients:
if client.subscription and client.subscription.findall(parsed['message']):
client.client.write_message(parsed['message'])
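# Illustrative message shapes handled above (values are hypothetical; the real
# payloads come from the conductor daemons):
#   {"source": "...", "when": "2017-01-01T00:00:00.000000",
#    "message": "~~~~{\"process\": ..., \"wrapped\": ..., \"host\": ..., \"label\": ..., \"command\": ...}"}  -> Process row
#   {"source": "...", "when": "...",
#    "message": "^^^^{\"ip\": ..., \"uuid\": ..., \"port\": ...}"}                                            -> Daemon row
# Anything else is stored only as a LogEntry and forwarded to the websocket
# clients whose subscription pattern matches the message.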
def collector(address):
"""
Process that collects all logs and saves them to a database
"""
context = zmq.Context()
socket = context.socket(zmq.PULL)
socket.bind(address)
stream_pull = zmqstream.ZMQStream(socket)
stream_pull.on_recv(process_log)
def make_app():
return web.Application([
(r'/', IndexHandler),
(r'/ws', WSHandler),
(r'/last/([0-9]+)', LastLogsHandler),
(r'/co/(.+)/([0-9]+)', ComponentHandler),
(r'/api/active_last/([0-9]+)', RestActiveHandler),
(r'/api/last/co/(.+)/([0-9]+)', RestCoLastHandler),
(r'/api/last/([0-9]+)', RestLastHandler),
(r'/api/page/([0-9]+)/count/([0-9]+)', RestPageHandler),
(r'/conductor', DaemonsHandler),
(r'/reset', ResetHandler),
(r'/config', ConfigHandler),
(r'/relaunch/(.+)', RelaunchHandler),
(r'/daemon/(.+)', DaemonHandler),
(r'/daemon_delete/(.+)', DeleteHandler),
(r'/(favicon.ico)', web.StaticFileHandler,
{'path': os.path.join(root_path, 'img', 'favicon.ico')}),
(r'/css/(.*)', web.StaticFileHandler,
{'path': os.path.join(root_path, 'css')}),
(r'/js/(.*)', web.StaticFileHandler,
{'path': os.path.join(root_path, 'js')})
], autoreload=False) # Remove
def run():
# Configure DB stuff
logging.info('Updating DB tables...')
Base.metadata.create_all(engine)
logging.info('Done')
app = make_app()
app.listen(options.port)
ioloop.IOLoop.instance().run_sync(
partial(collector, address=options.collector)
)
logging.info('Starting event loop...')
ioloop.IOLoop.current().start()
if __name__ == '__main__':
run()
| agpl-3.0 | 8,734,836,519,722,331,000 | 33.639456 | 86 | 0.616064 | false |
akhilaananthram/nupic.research | classification/LanguageSensor.py | 1 | 9726 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy
from nupic.regions.PyRegion import PyRegion
class LanguageSensor(PyRegion):
"""
LanguageSensor (LS) is an extensible sensor for text data.
The LS obtains info from a file, csv or txt (not yet implemented).
An LS is essentially a shell containing two objects:
1. A DataSource object gets one record at a time. This record is returned
as a dict object. For example, a DataSource might return:
defaultdict(sample="Hello world!", labels=["Python"])
2. An encoder from nupic.fluent/encoders
The DataSource and LanguageEncoder are supplied after the node is created,
not in the node itself.
"""
def __init__(self,
verbosity=0,
numCategories=1):
"""
Create a node without an encoder or datasource.
TODO: use self._outputValues for logging(?)
"""
self.numCategories = numCategories
self.verbosity = verbosity
# These fields are set outside when building the region.
self.encoder = None
self.dataSource = None
self._outputValues = {}
self._iterNum = 0
@classmethod
def getSpec(cls):
"""Return base spec for this region. See base class method for more info."""
spec = {
"description":"Sensor that reads text data records and encodes them for "
"an HTM network.",
"singleNodeOnly":True,
"outputs":{
"dataOut":{
"description":"Encoded text",
"dataType":"Real32",
"count":0,
"regionLevel":True,
"isDefaultOutput":True,
},
"categoryOut":{
"description":"Index of the current word's category.",
"dataType":"Real32",
"count":0,
"regionLevel":True,
"isDefaultOutput":False,
},
"resetOut":{
"description":"Boolean reset output.",
"dataType":"Real32",
"count":1,
"regionLevel":True,
"isDefaultOutput":False,
},
"sequenceIdOut":{
"description":"Sequence ID",
"dataType":'UInt64',
"count":1,
"regionLevel":True,
"isDefaultOutput":False,
},
## commented out b/c dataType not cool w/ numpy
# "sourceOut":{
# "description":"Unencoded data from the source, input to the encoder",
# "dataType":str,
# "count":0,
# "regionLevel":True,
# "isDefaultOutput":False,
# },
## need these...??
# spatialTopDownOut=dict(
# description="""The top-down output signal, generated from
# feedback from SP""",
# dataType='Real32',
# count=0,
# regionLevel=True,
# isDefaultOutput=False),
# temporalTopDownOut=dict(
# description="""The top-down output signal, generated from
# feedback from TP through SP""",
# dataType='Real32',
# count=0,
# regionLevel=True,
# isDefaultOutput=False),
# classificationTopDownOut=dict(
# description="The top-down input signal, generated via feedback "
# "from classifier through TP through SP.",
# dataType='Real32',
# count=0,
# regionLevel=True,
# isDefaultOutput=False),
},
"inputs":{
"spatialTopDownIn":{
"description":"The top-down input signal, generated via feedback "
"from SP.",
"dataType":"Real32",
"count":0,
"required":False,
"regionLevel":True,
"isDefaultInput":False,
"requireSplitterMap":False,
},
"temporalTopDownIn":{
"description":"The top-down input signal, generated via feedback "
"from TP through SP.",
"dataType":"Real32",
"count":0,
"required":False,
"regionLevel":True,
"isDefaultInput":False,
"requireSplitterMap":False,
},
"classificationTopDownIn":{
"description":"The top-down input signal, generated via feedback "
"from classifier through TP through SP.",
"dataType":"int",
"count":0,
"required":False,
"regionLevel":True,
"isDefaultInput":False,
"requireSplitterMap":False,
},
},
"parameters":{
"verbosity":{
"description":"Verbosity level",
"dataType":"UInt32",
"accessMode":"ReadWrite",
"count":1,
"constraints":"",
},
"numCategories":{
"description":("Total number of categories to expect from the "
"FileRecordStream"),
"dataType":"UInt32",
"accessMode":"ReadWrite",
"count":1,
"constraints":""},
},
"commands":{},
}
return spec
def initialize(self, inputs, outputs):
"""Initialize the node after the network is fully linked."""
if self.encoder is None:
raise Exception("Unable to initialize LanguageSensor -- encoder has not been set")
if self.dataSource is None:
raise Exception("Unable to initialize LanguageSensor -- dataSource has not been set")
def populateCategoriesOut(self, categories, output):
"""
Populate the output array with the category indices.
Note: non-categories are represented with -1.
"""
if categories[0] is None:
# The record has no entry in category field.
output[:] = -1
else:
# Populate category output array by looping over the smaller of the
# output array (size specified by numCategories) and the record's number
# of categories.
[numpy.put(output, [i], cat)
for i, (_, cat) in enumerate(zip(output, categories))]
output[len(categories):] = -1
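    # e.g. with numCategories == 4, categories == [7, 2] fills output as
    # [7, 2, -1, -1]; a record with no category yields all -1 (illustrative
    # values, not from the original source).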
def compute(self, inputs, outputs):
"""
Get a record from the dataSource and encode it. The fields for inputs and
outputs are as defined in the spec above.
    Expects the text data to be under the header "token" from the dataSource.
TODO: validate we're handling resets correctly
"""
data = self.dataSource.getNextRecordDict()
# The private keys in data are standard of RecordStreamIface objects. Any
# add'l keys are column headers from the data source.
# Copy important data input fields over to outputs dict. We set "sourceOut"
# explicitly b/c PyRegion.getSpec() won't take an output field w/ type str.
outputs["resetOut"][0] = data["_reset"]
outputs["sequenceIdOut"][0] = data["_sequenceId"]
outputs["sourceOut"] = data["_token"]
self.populateCategoriesOut(data["_category"], outputs['categoryOut'])
print outputs['categoryOut']
# Encode the token, where the encoding is a dict as expected in
# nupic.fluent ClassificationModel.
# The data key must match the datafile column header
# NOTE: this logic differs from RecordSensor, where output is a (sparse)
# numpy array populated in place. So we leave the data output alone for now,
# and (maybe) populate it in fluent.ClassificationModel.
outputs["encodingOut"] = self.encoder.encodeIntoArray(data["_token"], output=None)
self._iterNum += 1
def getOutputValues(self, outputName):
"""Return the dictionary of output values. Note that these are normal Python
lists, rather than numpy arrays. This is to support lists with mixed scalars
and strings, as in the case of records with categorical variables
"""
return self._outputValues[outputName]
def getOutputElementCount(self, name):
"""Returns the width of dataOut."""
if name == "resetOut" or name == "sequenceIdOut":
print ("WARNING: getOutputElementCount should not have been called with "
"{}.".format(name))
return 1
elif name == "dataOut":
      if self.encoder is None:
raise Exception("Network requested output element count for {} on a "
"LanguageSensor node, but the encoder has not been set."
.format(name))
return self.encoder.getWidth()
elif name == "categoryOut":
return self.numCategories
elif (name == "sourceOut" or
name == 'spatialTopDownOut' or
name == 'temporalTopDownOut'):
      if self.encoder is None:
raise Exception("Network requested output element count for {} on a "
"LanguageSensor node, but the encoder has not been set."
.format(name))
return len(self.encoder.getDescription())
else:
raise Exception("Unknown output {}.".format(name))
| gpl-3.0 | 5,535,361,808,632,589,000 | 33.489362 | 91 | 0.599321 | false |
google-research/fixmatch | ict.py | 1 | 5150 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interpolation Consistency Training for Semi-Supervised Learning.
Reimplementation of https://arxiv.org/abs/1903.03825
"""
import functools
import os
import tensorflow as tf
from absl import app
from absl import flags
from libml import models, utils
from libml.data import PAIR_DATASETS
from libml.utils import EasyDict
FLAGS = flags.FLAGS
class ICT(models.MultiModel):
def model(self, batch, lr, wd, ema, warmup_pos, consistency_weight, beta, **kwargs):
hwc = [self.dataset.height, self.dataset.width, self.dataset.colors]
xt_in = tf.placeholder(tf.float32, [batch] + hwc, 'xt') # For training
x_in = tf.placeholder(tf.float32, [None] + hwc, 'x')
y_in = tf.placeholder(tf.float32, [batch, 2] + hwc, 'y')
l_in = tf.placeholder(tf.int32, [batch], 'labels')
l = tf.one_hot(l_in, self.nclass)
wd *= lr
warmup = tf.clip_by_value(tf.to_float(self.step) / (warmup_pos * (FLAGS.train_kimg << 10)), 0, 1)
y = tf.reshape(tf.transpose(y_in, [1, 0, 2, 3, 4]), [-1] + hwc)
y_1, y_2 = tf.split(y, 2)
mix = tf.distributions.Beta(beta, beta).sample([tf.shape(xt_in)[0], 1, 1, 1])
mix = tf.maximum(mix, 1 - mix)
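        # mix is sampled per example from Beta(beta, beta); taking the max of
        # (mix, 1 - mix) keeps each mixed input dominated by its own sample
        # rather than by its batch-reversed partner.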
classifier = lambda x, **kw: self.classifier(x, **kw, **kwargs).logits
logits_x = classifier(xt_in, training=True)
post_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) # Take only first call to update batch norm.
ema = tf.train.ExponentialMovingAverage(decay=ema)
ema_op = ema.apply(utils.model_vars())
ema_getter = functools.partial(utils.getter_ema, ema)
logits_teacher = classifier(y_1, training=True, getter=ema_getter)
labels_teacher = tf.stop_gradient(tf.nn.softmax(logits_teacher))
labels_teacher = labels_teacher * mix[:, :, 0, 0] + labels_teacher[::-1] * (1 - mix[:, :, 0, 0])
logits_student = classifier(y_1 * mix + y_1[::-1] * (1 - mix), training=True)
loss_mt = tf.reduce_mean((labels_teacher - tf.nn.softmax(logits_student)) ** 2, -1)
loss_mt = tf.reduce_mean(loss_mt)
loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=l, logits=logits_x)
loss = tf.reduce_mean(loss)
tf.summary.scalar('losses/xe', loss)
tf.summary.scalar('losses/mt', loss_mt)
post_ops.append(ema_op)
post_ops.extend([tf.assign(v, v * (1 - wd)) for v in utils.model_vars('classify') if 'kernel' in v.name])
train_op = tf.train.AdamOptimizer(lr).minimize(loss + loss_mt * warmup * consistency_weight,
colocate_gradients_with_ops=True)
with tf.control_dependencies([train_op]):
train_op = tf.group(*post_ops)
return EasyDict(
xt=xt_in, x=x_in, y=y_in, label=l_in, train_op=train_op,
classify_raw=tf.nn.softmax(classifier(x_in, training=False)), # No EMA, for debugging.
classify_op=tf.nn.softmax(classifier(x_in, getter=ema_getter, training=False)))
def main(argv):
utils.setup_main()
del argv # Unused.
dataset = PAIR_DATASETS()[FLAGS.dataset]()
log_width = utils.ilog2(dataset.width)
model = ICT(
os.path.join(FLAGS.train_dir, dataset.name),
dataset,
lr=FLAGS.lr,
wd=FLAGS.wd,
arch=FLAGS.arch,
warmup_pos=FLAGS.warmup_pos,
batch=FLAGS.batch,
nclass=dataset.nclass,
ema=FLAGS.ema,
beta=FLAGS.beta,
consistency_weight=FLAGS.consistency_weight,
scales=FLAGS.scales or (log_width - 2),
filters=FLAGS.filters,
repeat=FLAGS.repeat)
model.train(FLAGS.train_kimg << 10, FLAGS.report_kimg << 10)
if __name__ == '__main__':
utils.setup_tf()
flags.DEFINE_float('consistency_weight', 50., 'Consistency weight.')
flags.DEFINE_float('warmup_pos', 0.4, 'Relative position at which constraint loss warmup ends.')
flags.DEFINE_float('wd', 0.02, 'Weight decay.')
flags.DEFINE_float('ema', 0.999, 'Exponential moving average of params.')
flags.DEFINE_float('beta', 0.5, 'Mixup beta.')
flags.DEFINE_integer('scales', 0, 'Number of 2x2 downscalings in the classifier.')
flags.DEFINE_integer('filters', 32, 'Filter size of convolutions.')
flags.DEFINE_integer('repeat', 4, 'Number of residual layers per stage.')
FLAGS.set_default('augment', 'd.d.d')
FLAGS.set_default('dataset', 'cifar10.3@250-5000')
FLAGS.set_default('batch', 64)
FLAGS.set_default('lr', 0.002)
FLAGS.set_default('train_kimg', 1 << 16)
app.run(main)
| apache-2.0 | 1,690,494,292,168,825,600 | 40.869919 | 113 | 0.639223 | false |
karstenw/nodebox-pyobjc | examples/Extended Application/sklearn/examples/calibration/plot_calibration_multiclass.py | 1 | 7780 | """
==================================================
Probability Calibration for 3-class classification
==================================================
This example illustrates how sigmoid calibration changes predicted
probabilities for a 3-class classification problem. Illustrated is the
standard 2-simplex, where the three corners correspond to the three classes.
Arrows point from the probability vectors predicted by an uncalibrated
classifier to the probability vectors predicted by the same classifier after
sigmoid calibration on a hold-out validation set. Colors indicate the true
class of an instance (red: class 1, green: class 2, blue: class 3).
The base classifier is a random forest classifier with 25 base estimators
(trees). If this classifier is trained on all 800 training datapoints, it is
overly confident in its predictions and thus incurs a large log-loss.
Calibrating an identical classifier, which was trained on 600 datapoints, with
method='sigmoid' on the remaining 200 datapoints reduces the confidence of the
predictions, i.e., moves the probability vectors from the edges of the simplex
towards the center. This calibration results in a lower log-loss. Note that an
alternative would have been to increase the number of base estimators which
would have resulted in a similar decrease in log-loss.
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
np.random.seed(0)
# Generate data
X, y = make_blobs(n_samples=1000, n_features=2, random_state=42,
cluster_std=5.0)
X_train, y_train = X[:600], y[:600]
X_valid, y_valid = X[600:800], y[600:800]
X_train_valid, y_train_valid = X[:800], y[:800]
X_test, y_test = X[800:], y[800:]
# Train uncalibrated random forest classifier on whole train and validation
# data and evaluate on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
clf_probs = clf.predict_proba(X_test)
score = log_loss(y_test, clf_probs)
# Train random forest classifier, calibrate on validation data and evaluate
# on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
sig_clf.fit(X_valid, y_valid)
sig_clf_probs = sig_clf.predict_proba(X_test)
sig_score = log_loss(y_test, sig_clf_probs)
# Plot changes in predicted probabilities via arrows
plt.figure(0)
colors = ["r", "g", "b"]
for i in range(clf_probs.shape[0]):
plt.arrow(clf_probs[i, 0], clf_probs[i, 1],
sig_clf_probs[i, 0] - clf_probs[i, 0],
sig_clf_probs[i, 1] - clf_probs[i, 1],
color=colors[y_test[i]], head_width=1e-2)
# Plot perfect predictions
plt.plot([1.0], [0.0], 'ro', ms=20, label="Class 1")
plt.plot([0.0], [1.0], 'go', ms=20, label="Class 2")
plt.plot([0.0], [0.0], 'bo', ms=20, label="Class 3")
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
# Annotate points on the simplex
plt.annotate(r'($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)',
xy=(1.0/3, 1.0/3), xytext=(1.0/3, .23), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.plot([1.0/3], [1.0/3], 'ko', ms=5)
plt.annotate(r'($\frac{1}{2}$, $0$, $\frac{1}{2}$)',
xy=(.5, .0), xytext=(.5, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $\frac{1}{2}$, $\frac{1}{2}$)',
xy=(.0, .5), xytext=(.1, .5), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($\frac{1}{2}$, $\frac{1}{2}$, $0$)',
xy=(.5, .5), xytext=(.6, .6), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $0$, $1$)',
xy=(0, 0), xytext=(.1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($1$, $0$, $0$)',
xy=(1, 0), xytext=(1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $1$, $0$)',
xy=(0, 1), xytext=(.1, 1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
# Add grid
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Change of predicted probabilities after sigmoid calibration")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.legend(loc="best")
print("Log-loss of")
print(" * uncalibrated classifier trained on 800 datapoints: %.3f "
% score)
print(" * classifier trained on 600 datapoints and calibrated on "
"200 datapoint: %.3f" % sig_score)
# Illustrate calibrator
plt.figure(1)
# generate grid over 2-simplex
p1d = np.linspace(0, 1, 20)
p0, p1 = np.meshgrid(p1d, p1d)
p2 = 1 - p0 - p1
p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()]
p = p[p[:, 2] >= 0]
calibrated_classifier = sig_clf.calibrated_classifiers_[0]
prediction = np.vstack([calibrator.predict(this_p)
for calibrator, this_p in
zip(calibrated_classifier.calibrators_, p.T)]).T
prediction /= prediction.sum(axis=1)[:, None]
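# Each per-class sigmoid calibrator is applied to its own column of the simplex
# grid and the rows are then renormalised to sum to one, which mirrors how
# CalibratedClassifierCV combines its one-vs-rest calibrators for multiclass
# output (descriptive note, not part of the original example).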
# Plot modifications of calibrator
for i in range(prediction.shape[0]):
plt.arrow(p[i, 0], p[i, 1],
prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1],
head_width=1e-2, color=colors[np.argmax(p[i])])
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Illustration of sigmoid calibrator")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
# plt.show()
pltshow(plt)
| mit | 718,850,117,828,271,900 | 36.95122 | 82 | 0.626864 | false |
siemens/django-dingos | dingos/migrations/0007_auto__add_userdata__add_unique_userdata_user_data_kind.py | 1 | 16414 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UserData'
db.create_table(u'dingos_userdata', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('data_kind', self.gf('django.db.models.fields.SlugField')(max_length=32)),
('identifier', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dingos.Identifier'], null=True)),
))
db.send_create_signal(u'dingos', ['UserData'])
# Adding unique constraint on 'UserData', fields ['user', 'data_kind']
db.create_unique(u'dingos_userdata', ['user_id', 'data_kind'])
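        # Net effect (for reference): a dingos_userdata table keyed by an auto
        # id, with at most one row per (user_id, data_kind) pair, each row
        # optionally pointing at a dingos Identifier.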
def backwards(self, orm):
# Removing unique constraint on 'UserData', fields ['user', 'data_kind']
db.delete_unique(u'dingos_userdata', ['user_id', 'data_kind'])
# Deleting model 'UserData'
db.delete_table(u'dingos_userdata')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'dingos.blobstorage': {
'Meta': {'object_name': 'BlobStorage'},
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sha256': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
u'dingos.datatypenamespace': {
'Meta': {'object_name': 'DataTypeNameSpace'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'uri': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'})
},
u'dingos.fact': {
'Meta': {'object_name': 'Fact'},
'fact_term': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos.FactTerm']"}),
'fact_values': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['dingos.FactValue']", 'null': 'True', 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value_iobject_id': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'value_of_set'", 'null': 'True', 'to': u"orm['dingos.Identifier']"}),
'value_iobject_ts': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
u'dingos.factdatatype': {
'Meta': {'unique_together': "(('name', 'namespace'),)", 'object_name': 'FactDataType'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'namespace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fact_data_type_set'", 'to': u"orm['dingos.DataTypeNameSpace']"})
},
u'dingos.factterm': {
'Meta': {'unique_together': "(('term', 'attribute'),)", 'object_name': 'FactTerm'},
'attribute': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
u'dingos.factterm2type': {
'Meta': {'unique_together': "(('iobject_type', 'fact_term'),)", 'object_name': 'FactTerm2Type'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'fact_data_types': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'fact_term_thru'", 'symmetrical': 'False', 'to': u"orm['dingos.FactDataType']"}),
'fact_term': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_type_thru'", 'to': u"orm['dingos.FactTerm']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iobject_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fact_term_thru'", 'to': u"orm['dingos.InfoObjectType']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'})
},
u'dingos.factvalue': {
'Meta': {'unique_together': "(('value', 'fact_data_type', 'storage_location'),)", 'object_name': 'FactValue'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'fact_data_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fact_value_set'", 'to': u"orm['dingos.FactDataType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'storage_location': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {})
},
u'dingos.identifier': {
'Meta': {'unique_together': "(('uid', 'namespace'),)", 'object_name': 'Identifier'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'latest_of'", 'unique': 'True', 'null': 'True', 'to': u"orm['dingos.InfoObject']"}),
'namespace': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos.IdentifierNameSpace']"}),
'uid': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
},
u'dingos.identifiernamespace': {
'Meta': {'object_name': 'IdentifierNameSpace'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'uri': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'})
},
u'dingos.infoobject': {
'Meta': {'ordering': "['-timestamp']", 'unique_together': "(('identifier', 'timestamp'),)", 'object_name': 'InfoObject'},
'create_timestamp': ('django.db.models.fields.DateTimeField', [], {}),
'facts': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['dingos.Fact']", 'through': u"orm['dingos.InfoObject2Fact']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_set'", 'to': u"orm['dingos.Identifier']"}),
'iobject_family': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_set'", 'to': u"orm['dingos.InfoObjectFamily']"}),
'iobject_family_revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['dingos.Revision']"}),
'iobject_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_set'", 'to': u"orm['dingos.InfoObjectType']"}),
'iobject_type_revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['dingos.Revision']"}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Unnamed'", 'max_length': '255', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {}),
'uri': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
u'dingos.infoobject2fact': {
'Meta': {'ordering': "['node_id__name']", 'object_name': 'InfoObject2Fact'},
'attributed_fact': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attributes'", 'null': 'True', 'to': u"orm['dingos.InfoObject2Fact']"}),
'fact': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_thru'", 'to': u"orm['dingos.Fact']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iobject': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fact_thru'", 'to': u"orm['dingos.InfoObject']"}),
'node_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos.NodeID']"})
},
u'dingos.infoobjectfamily': {
'Meta': {'object_name': 'InfoObjectFamily'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'})
},
u'dingos.infoobjectnaming': {
'Meta': {'ordering': "['position']", 'object_name': 'InfoObjectNaming'},
'format_string': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iobject_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_type_set'", 'to': u"orm['dingos.InfoObjectType']"}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
u'dingos.infoobjecttype': {
'Meta': {'unique_together': "(('name', 'iobject_family', 'namespace'),)", 'object_name': 'InfoObjectType'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iobject_family': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_type_set'", 'to': u"orm['dingos.InfoObjectFamily']"}),
'name': ('django.db.models.fields.SlugField', [], {'max_length': '30'}),
'namespace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_type_set'", 'blank': 'True', 'to': u"orm['dingos.DataTypeNameSpace']"})
},
u'dingos.marking2x': {
'Meta': {'object_name': 'Marking2X'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'marking': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'marked_item_thru'", 'to': u"orm['dingos.InfoObject']"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'dingos.nodeid': {
'Meta': {'object_name': 'NodeID'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'dingos.relation': {
'Meta': {'unique_together': "(('source_id', 'target_id', 'relation_type'),)", 'object_name': 'Relation'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metadata_id': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': u"orm['dingos.Identifier']"}),
'relation_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos.Fact']"}),
'source_id': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'yields_via'", 'null': 'True', 'to': u"orm['dingos.Identifier']"}),
'target_id': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'yielded_by_via'", 'null': 'True', 'to': u"orm['dingos.Identifier']"})
},
u'dingos.revision': {
'Meta': {'object_name': 'Revision'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
u'dingos.userdata': {
'Meta': {'unique_together': "(('user', 'data_kind'),)", 'object_name': 'UserData'},
'data_kind': ('django.db.models.fields.SlugField', [], {'max_length': '32'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos.Identifier']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
complete_apps = ['dingos'] | gpl-2.0 | -1,355,547,385,660,828,000 | 75.705607 | 187 | 0.557268 | false |
openstack/oslo.context | doc/source/user/examples/usage.py | 1 | 1601 | #!/usr/bin/env python3
#
# Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A representative usage example of Oslo Context
This example requires the following modules to be installed.
$ pip install oslo.context oslo.log
More information can be found at:
https://docs.openstack.org/oslo.context/latest/user/usage.html
"""
from oslo_config import cfg
from oslo_context import context
from oslo_log import log as logging
CONF = cfg.CONF
DOMAIN = "demo"
logging.register_options(CONF)
logging.setup(CONF, DOMAIN)
LOG = logging.getLogger(__name__)
LOG.info("Message without context")
# IDs in OpenStack are 32 characters long
# For readability a shorter id value is used
context.RequestContext(user='6ce90b4d',
tenant='d6134462',
project_domain='a6b9360e')
LOG.info("Message with context")
context = context.RequestContext(user='ace90b4d',
tenant='b6134462',
project_domain='c6b9360e')
LOG.info("Message with passed context", context=context)
| apache-2.0 | -5,818,148,704,935,449,000 | 30.392157 | 75 | 0.713929 | false |
wojciechpolak/webxiangpianbu | tools/staticgen.py | 1 | 12710 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# WebXiangpianbu Copyright (C) 2014, 2015 Wojciech Polak
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import os
import sys
import glob
import getopt
import shutil
import signal
import codecs
from datetime import datetime
from django.utils import six
from django.utils.six.moves import urllib, SimpleHTTPServer, socketserver
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
os.environ['DJANGO_SETTINGS_MODULE'] = 'webxiang.settings'
sys.path.insert(0, os.path.join(SITE_ROOT, '../'))
import django
if hasattr(django, 'setup'):
django.setup()
from django.conf import settings
try:
from django.shortcuts import render
except ImportError as e:
print(e)
print("Copy `webxiang/settings_sample.py` to " \
"`webxiang/settings.py` and modify it to your needs.")
sys.exit(1)
from django.core.urlresolvers import set_urlconf, set_script_prefix
from django.template import TemplateDoesNotExist
from django.template.loader import render_to_string
from django.utils import translation
from django.utils.translation import ugettext as _
from webxiang import webxiang
__generated = set()
__items_no = 0
def main():
opts = {
'verbose': 1,
'output_dir': None,
'album_dir': os.path.abspath(getattr(settings, 'ALBUM_DIR', 'albums')),
'photo_dir': os.path.abspath(getattr(settings, 'WEBXIANG_PHOTOS_ROOT', '')),
'root': '/',
'assets_url': getattr(settings, 'STATIC_URL', 'assets/'),
'photos_url': getattr(settings, 'WEBXIANG_PHOTOS_URL', 'data/'),
'names': 'index',
'lang': 'en',
'quick': False,
'copy': False,
'serve': None,
'port': 8000,
}
try:
gopts, args = getopt.getopt(sys.argv[1:], 'v:yl:sp:',
['help',
'verbose=',
'lang=',
'output-dir=',
'album-dir=',
'photo-dir=',
'root=',
'assets-url=',
'photos-url=',
'copy',
'quick=',
'serve=',
'port=',
])
for o, arg in gopts:
if o == '--help':
raise getopt.GetoptError('')
elif o in ('-v', '--verbose'):
opts['verbose'] = int(arg)
elif o == '--output-dir':
opts['output_dir'] = arg
elif o == '--album-dir':
opts['album_dir'] = os.path.abspath(arg)
settings.ALBUM_DIR = opts['album_dir']
elif o == '--photo-dir':
opts['photo_dir'] = os.path.abspath(arg)
elif o == '--root':
if not arg.endswith('/'):
arg += '/'
opts['root'] = arg
elif o == '--assets-url':
if not arg.endswith('/'):
arg += '/'
opts['assets_url'] = arg
elif o == '--photos-url':
if not arg.endswith('/'):
arg += '/'
opts['photos_url'] = arg
elif o in ('-l', '--lang'):
opts['lang'] = arg
elif o == '--copy':
opts['copy'] = True
elif o in ('-s', '--serve'):
opts['serve'] = arg
elif o in ('-p', '--port'):
opts['port'] = int(arg)
elif o == '--quick': # a quick shortcut
arg = os.path.expanduser(arg).rstrip('/')
opts['quick'] = arg
args = [os.path.basename(arg)]
if len(args):
opts['names'] = args[0]
if len(args) > 1:
opts['output_dir'] = args[1]
else:
opts['names'] = 'index'
except getopt.GetoptError:
print("Usage: %s [OPTION...] [ALBUM-NAME1,NAME2]" % sys.argv[0])
print("%s -- album static HTML generator" % sys.argv[0])
print("""
Options Default values
-v, --verbose [%(verbose)s]
--output-dir [output-DATETIME/]
--album-dir [%(album_dir)s]
--photo-dir [%(photo_dir)s]
--root [%(root)s]
--assets-url [%(assets_url)s]
--photos-url [%(photos_url)s]
-l, --lang [%(lang)s]
--copy [%(copy)s]
--quick [folder's name]
-s, --serve [output dir]
-p, --port [%(port)s]
""" % opts)
sys.exit(1)
signal.signal(signal.SIGTERM, lambda signum, frame: __quit_app())
signal.signal(signal.SIGINT, lambda signum, frame: __quit_app())
if opts['serve']:
serve(opts, opts['serve'])
sys.exit(0)
if opts['lang']:
if opts['verbose'] > 1:
print('Switching language to %s' % opts['lang'])
translation.activate(opts['lang'])
set_urlconf('webxiang.urls_static')
set_script_prefix(opts['root'])
root_dir = opts['output_dir'] and os.path.abspath(
os.path.expanduser(opts['output_dir'])) or \
'output-%s' % datetime.now().strftime('%Y%m%d-%H%M%S')
output_dir = os.path.join(root_dir, opts['root'].lstrip('/'))
if opts['quick']:
arg = opts['quick']
arg_basename = os.path.basename(arg)
opts['assets_url'] = '%s/assets/' % arg_basename
opts['photos_url'] = '%s/data/' % arg_basename
opts['album_dir'] = os.path.abspath(arg + '/')
opts['photo_dir'] = opts['album_dir']
settings.ALBUM_DIR = opts['album_dir']
opts['assets_url'] = urllib.parse.urljoin(opts['root'], opts['assets_url'])
opts['photos_url'] = urllib.parse.urljoin(opts['root'], opts['photos_url'])
settings.WEBXIANG_PHOTOS_URL = opts['photos_url']
try:
if not os.path.exists(output_dir):
print('Creating directory "%s"' % output_dir)
os.makedirs(output_dir)
except Exception as e:
pass
if not opts['photos_url'].startswith('http'):
photos_url = opts['photos_url'].\
replace(opts['root'], '', 1).lstrip('/')
photos_url = os.path.join(output_dir, photos_url)
if opts['copy']:
print('Copying photos "%s" into "%s"' % \
(opts['photo_dir'].rstrip('/'), photos_url))
try:
if not os.path.exists(photos_url):
os.makedirs(photos_url)
__copytree(opts['photo_dir'].rstrip('/'), photos_url)
except Exception as e:
print('Copying photos', e)
else:
print('Linking photos: ln -s %s %s' % \
(opts['photo_dir'].rstrip('/'), photos_url.rstrip('/')))
try:
d = os.path.dirname(photos_url.rstrip('/'))
if not os.path.exists(d):
os.makedirs(d)
os.symlink(opts['photo_dir'].rstrip('/'),
photos_url.rstrip('/'))
except Exception as e:
print('Linking photos', e)
print('Copying assets (JS, CSS, etc.) into "%s"' % \
os.path.join(root_dir, opts['assets_url'].lstrip('/')))
try:
__copytree(settings.STATIC_ROOT,
os.path.join(root_dir,
opts['assets_url'].lstrip('/')))
except Exception as e:
print('Copying assets', e)
print('Generating static pages.')
for album_name in opts['names'].split(','):
__gen_html_album(opts, album_name, output_dir=output_dir)
print('Finished %s' % output_dir)
print('Done. Created %d files.' % __items_no)
if opts['serve'] is not False:
serve(opts, root_dir)
def __quit_app(code=0):
print()
sys.exit(code)
def serve(opts, root_dir=None):
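    # Serve the generated static site from root_dir over HTTP on
    # localhost:<port> using SimpleHTTPRequestHandler; blocks until killed.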
class SimpleServer(six.moves.socketserver.TCPServer):
allow_reuse_address = True
if root_dir:
os.chdir(root_dir)
httpd = SimpleServer(('localhost', opts['port']),
six.moves.SimpleHTTPServer.SimpleHTTPRequestHandler)
print('Serving at %s%s' % ('localhost:%d' % opts['port'], opts['root']))
print('Quit the server with CONTROL-C.')
httpd.serve_forever()
def __gen_html_album(opts, album_name, output_dir='.', page=1):
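    # Render one page of an album into <output_dir>/<album_name>/, then
    # recurse into the remaining pages, sub-albums and photo pages.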
global __generated, __items_no
entry_id = '%s:%s' % (album_name, page)
if entry_id in __generated:
return
__generated.add(entry_id)
if page == 1:
print(album_name, end=' ')
data = webxiang.get_data(album=album_name, page=page)
if not data:
return
tpl = data['meta'].get('template') or 'default.html'
if not tpl.endswith('.html'):
tpl += '.html'
data['STATIC_URL'] = opts['assets_url']
try:
html = render_to_string(tpl, data)
except TemplateDoesNotExist:
html = render_to_string('default.html', data)
if page > 1:
output_file = os.path.join(output_dir, album_name,
_('page-%(number)s.html') % {'number': page})
else:
output_file = os.path.join(output_dir, album_name, 'index.html')
if opts['verbose'] > 1:
print('writing %s' % output_file)
elif opts['verbose'] == 1:
sys.stdout.write('.')
sys.stdout.flush()
if not os.path.exists(os.path.dirname(output_file)):
os.makedirs(os.path.dirname(output_file))
f = codecs.open(output_file, 'w', 'utf-8')
f.write(str(html))
f.close()
__items_no += 1
# symlink '/index.html' to '/index/index.html'
if album_name == 'index':
os.symlink('index/index.html',
os.path.join(output_dir, 'index.html'))
for i in data['entries'].paginator.page_range_limited:
__gen_html_album(opts, album_name, output_dir=output_dir, page=i)
for entry in data['entries']:
if 'album' in entry:
__gen_html_album(opts, entry['album'], output_dir)
else:
__gen_html_photo(opts, album_name,
'%s/' % entry['index'], output_dir)
def __gen_html_photo(opts, album_name, entry_idx, output_dir='.'):
global __generated, __items_no
entry_id = '%s/%s' % (album_name, entry_idx)
if entry_id in __generated:
return
__generated.add(entry_id)
photo_idx = entry_idx.split('/')[0]
data = webxiang.get_data(album=album_name, photo=entry_idx)
if not data:
return
tpl = data['meta'].get('template') or 'default.html'
if not tpl.endswith('.html'):
tpl += '.html'
data['STATIC_URL'] = opts['assets_url']
try:
html = render_to_string(tpl, data)
except TemplateDoesNotExist:
html = render_to_string('default.html', data)
try:
os.makedirs(os.path.join(output_dir, album_name))
except:
pass
entry = data['entries'][int(photo_idx) - 1]
if 'slug' in entry:
photo_name = '%s/%s.html' % (photo_idx, entry['slug'])
else:
photo_name = '%s.html' % photo_idx
output_file = os.path.join(output_dir, album_name, photo_name)
if not os.path.exists(os.path.dirname(output_file)):
os.makedirs(os.path.dirname(output_file))
if opts['verbose'] > 1:
print('writing %s' % output_file)
elif opts['verbose'] == 1:
sys.stdout.write('.')
sys.stdout.flush()
f = codecs.open(output_file, 'w', 'utf-8')
f.write(str(html))
f.close()
__items_no += 1
def __copytree(src, dst, symlinks=False, ignore=None):
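    # Copy the contents of src into dst, which may already exist
    # (unlike shutil.copytree, which requires dst not to exist).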
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
if __name__ == '__main__':
main()
| gpl-3.0 | 6,789,874,336,057,884,000 | 31.589744 | 84 | 0.525649 | false |
bmng-dev/PyBitmessage | src/inventory.py | 1 | 10082 | import collections
import Queue
import time
from threading import enumerate as threadingEnumerate
from threading import RLock, current_thread
from helper_sql import SqlBulkExecute, sqlExecute, sqlQuery
from singleton import Singleton
@Singleton
class Inventory(collections.MutableMapping):
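    # Thread-safe view of the object inventory: new objects are kept in an
    # in-memory dict until flush() persists them to the SQL 'inventory'
    # table; lookups and iteration consult both under self.lock.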
def __init__(self):
super(self.__class__, self).__init__()
self._inventory = {} #of objects (like msg payloads and pubkey payloads) Does not include protocol headers (the first 24 bytes of each packet).
self.numberOfInventoryLookupsPerformed = 0
        self._streams = collections.defaultdict(set) # key = streamNumber, value = a set which holds the inventory object hashes that we are aware of. This is used whenever we receive an inv message from a peer to check to see what items are new to us. We don't delete things out of it; instead, the singleCleaner thread clears and refills it every couple of hours.
self.lock = RLock() # Guarantees that two receiveDataThreads don't receive and process the same message concurrently (probably sent by a malicious individual)
self.InventoryItem = collections.namedtuple('InventoryItem', 'type stream payload expires tag')
def __contains__(self, hash):
with self.lock:
self.numberOfInventoryLookupsPerformed += 1
if hash in self._inventory:
return True
return bool(sqlQuery('SELECT 1 FROM inventory WHERE hash=?', hash))
def __getitem__(self, hash):
with self.lock:
if hash in self._inventory:
return self._inventory[hash]
rows = sqlQuery('SELECT objecttype, streamnumber, payload, expirestime, tag FROM inventory WHERE hash=?', hash)
if not rows:
raise KeyError(hash)
return self.InventoryItem(*rows[0])
def __setitem__(self, hash, value):
with self.lock:
value = self.InventoryItem(*value)
self._inventory[hash] = value
self._streams[value.stream].add(hash)
def __delitem__(self, hash):
raise NotImplementedError
def __iter__(self):
with self.lock:
hashes = self._inventory.keys()[:]
hashes += (x for x, in sqlQuery('SELECT hash FROM inventory'))
return hashes.__iter__()
def __len__(self):
with self.lock:
return len(self._inventory) + sqlQuery('SELECT count(*) FROM inventory')[0][0]
def by_type_and_tag(self, type, tag):
with self.lock:
values = [value for value in self._inventory.values() if value.type == type and value.tag == tag]
values += (self.InventoryItem(*value) for value in sqlQuery('SELECT objecttype, streamnumber, payload, expirestime, tag FROM inventory WHERE objecttype=? AND tag=?', type, tag))
return values
def hashes_by_stream(self, stream):
with self.lock:
s = self._streams[stream]
if not s:
s.update((inv_vector for inv_vector, in sqlQuery('SELECT hash FROM inventory WHERE streamnumber=? AND expirestime>?', stream, int(time.time()) - 3600)))
return s
def unexpired_hashes_by_stream(self, stream):
with self.lock:
t = int(time.time())
hashes = [x for x, value in self._inventory.items() if value.stream == stream and value.expires > t]
hashes += (payload for payload, in sqlQuery('SELECT hash FROM inventory WHERE streamnumber=? AND expirestime>?', stream, t))
return hashes
def flush(self):
with self.lock: # If you use both the inventoryLock and the sqlLock, always use the inventoryLock OUTSIDE of the sqlLock.
with SqlBulkExecute() as sql:
for objectHash, value in self._inventory.items():
sql.execute('INSERT INTO inventory VALUES (?, ?, ?, ?, ?, ?)', objectHash, *value)
self._inventory.clear()
def clean(self):
with self.lock:
sqlExecute('DELETE FROM inventory WHERE expirestime<?',int(time.time()) - (60 * 60 * 3))
self._streams.clear()
for objectHash, value in self.items():
self._streams[value.stream].add(objectHash)
class PendingDownloadQueue(Queue.Queue):
    # keep track of objects that have been advertised to us but that we haven't downloaded yet
maxWait = 300
def __init__(self, maxsize=0):
Queue.Queue.__init__(self, maxsize)
self.stopped = False
self.pending = {}
self.lock = RLock()
def task_done(self, hashId):
Queue.Queue.task_done(self)
try:
with self.lock:
del self.pending[hashId]
except KeyError:
pass
def get(self, block=True, timeout=None):
retval = Queue.Queue.get(self, block, timeout)
# no exception was raised
if not self.stopped:
with self.lock:
self.pending[retval] = time.time()
return retval
def clear(self):
with self.lock:
newPending = {}
for hashId in self.pending:
if self.pending[hashId] + PendingDownloadQueue.maxWait > time.time():
newPending[hashId] = self.pending[hashId]
self.pending = newPending
@staticmethod
def totalSize():
size = 0
for thread in threadingEnumerate():
if thread.isAlive() and hasattr(thread, 'downloadQueue'):
size += thread.downloadQueue.qsize() + len(thread.downloadQueue.pending)
return size
@staticmethod
def stop():
for thread in threadingEnumerate():
if thread.isAlive() and hasattr(thread, 'downloadQueue'):
thread.downloadQueue.stopped = True
with thread.downloadQueue.lock:
thread.downloadQueue.pending = {}
class PendingUploadDeadlineException(Exception):
pass
@Singleton
class PendingUpload(object):
    # keep track of objects that we have created but haven't distributed yet
def __init__(self):
super(self.__class__, self).__init__()
self.lock = RLock()
self.hashes = {}
# end by this time in any case
self.deadline = 0
self.maxLen = 0
# during shutdown, wait up to 20 seconds to finish uploading
self.shutdownWait = 20
# forget tracking objects after 60 seconds
self.objectWait = 60
# wait 10 seconds between clears
self.clearDelay = 10
self.lastCleared = time.time()
def add(self, objectHash = None):
with self.lock:
# add a new object into existing thread lists
if objectHash:
if objectHash not in self.hashes:
self.hashes[objectHash] = {'created': time.time(), 'sendCount': 0, 'peers': []}
for thread in threadingEnumerate():
if thread.isAlive() and hasattr(thread, 'peer') and \
thread.peer not in self.hashes[objectHash]['peers']:
self.hashes[objectHash]['peers'].append(thread.peer)
# add all objects into the current thread
else:
for objectHash in self.hashes:
if current_thread().peer not in self.hashes[objectHash]['peers']:
self.hashes[objectHash]['peers'].append(current_thread().peer)
def len(self):
self.clearHashes()
with self.lock:
return sum(1
for x in self.hashes if (self.hashes[x]['created'] + self.objectWait < time.time() or
self.hashes[x]['sendCount'] == 0))
def _progress(self):
with self.lock:
return float(sum(len(self.hashes[x]['peers'])
for x in self.hashes if (self.hashes[x]['created'] + self.objectWait < time.time()) or
self.hashes[x]['sendCount'] == 0))
def progress(self, raiseDeadline=True):
if self.maxLen < self._progress():
self.maxLen = self._progress()
if self.deadline < time.time():
if self.deadline > 0 and raiseDeadline:
raise PendingUploadDeadlineException
self.deadline = time.time() + 20
try:
return 1.0 - self._progress() / self.maxLen
except ZeroDivisionError:
return 1.0
def clearHashes(self, objectHash=None):
if objectHash is None:
if self.lastCleared > time.time() - self.clearDelay:
return
objects = self.hashes.keys()
else:
objects = objectHash,
with self.lock:
for i in objects:
try:
if self.hashes[i]['sendCount'] > 0 and (
len(self.hashes[i]['peers']) == 0 or
self.hashes[i]['created'] + self.objectWait < time.time()):
del self.hashes[i]
except KeyError:
pass
self.lastCleared = time.time()
def delete(self, objectHash=None):
if not hasattr(current_thread(), 'peer'):
return
if objectHash is None:
return
with self.lock:
try:
if objectHash in self.hashes and current_thread().peer in self.hashes[objectHash]['peers']:
self.hashes[objectHash]['sendCount'] += 1
self.hashes[objectHash]['peers'].remove(current_thread().peer)
except KeyError:
pass
self.clearHashes(objectHash)
def stop(self):
with self.lock:
self.hashes = {}
def threadEnd(self):
with self.lock:
for objectHash in self.hashes:
try:
if current_thread().peer in self.hashes[objectHash]['peers']:
self.hashes[objectHash]['peers'].remove(current_thread().peer)
except KeyError:
pass
self.clearHashes()
| mit | 2,504,208,822,498,780,700 | 39.48996 | 361 | 0.583515 | false |
kaiyou/pyircbot | src/pyircbot/behavior.py | 1 | 6125 | #!/usr/bin/python
#
# PyIRCBot
# Copyright (C) Pierre Jaury 2011 <[email protected]>
#
# PyIRCBot is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyIRCBot is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
from core import BotProtocol, botcommand
from twisted.internet.defer import Deferred
from twisted.internet.task import LoopingCall
from twisted.python import log
import shelve
import new
class LoggingBotProtocol(BotProtocol):
'''
I am a bot protocol which is able to log commands and messages to
a file-like object.
'''
def privmsg (self, user, channel, message):
log.msg ('incoming %s %s %s' % (user, channel, message))
super (LoggingBotProtocol, self).privmsg (user, channel, message)
def msg (self, channel, message):
log.msg ('outgoing %s %s' % (channel, message))
super (LoggingBotProtocol, self).msg (channel, message)
def command (self, out, command, *args):
log.msg ('command %s %s' % (command, ' '.join (args)))
super (LoggingBotProtocol, self).command (out, command, *args)
class AsynchronousCallBotProtocol(BotProtocol):
'''
I am a bot protocol which implements asynchronous queries to other bots
or services (even users if really needed for a check or anything)
For every actor i can interact with, you have to provide me with a
reference handshake, so that I know when they are finished talking
For instance, if one service called DummyServ replies 'Pong!' to the
message 'ping', just add {'DummyServ': ('ping', 'Pong!')} to your factory
and I will be able to interact with it (him).
I maintain a pool of pending requests for every actor. When an actor is
	finished talking, I simply fire your callback and execute the next
pending request.
'''
def _sync (self, user, channel, message):
'''
This is called when a message is recieve from one of the actors
I am connected to
'''
if self._job[channel]:
query, stop = self.factory.sync[channel]
if not message == stop:
self._buffer[channel].append (message)
else:
self._job[channel].callback (self._buffer[channel])
self._buffer[channel] = []
self._nextjob (channel)
def _nextjob (self, channel):
'''
This is called to trigger the next job in the pool if available
'''
if len(self._pool[channel]) > 0:
query, stop = self.factory.sync[channel]
d, message = self._pool[channel].pop (0)
self.msg (channel, message)
for line in query:
self.msg (channel, line)
self._buffer[channel] = []
self._job[channel] = d
else:
self._job[channel] = None
def _addjob (self, channel, message):
'''
You might use this method to add a new request message for the
actor channel, just rely on the returned deferred
'''
d = Deferred ()
self._pool[channel].append ((d, message))
if not self._job[channel]:
self._nextjob (channel)
return d
def connectionMade (self):
'''
Initialization of specific attributes
'''
self._pool = dict([(key, []) for key in self.factory.sync])
self._job = dict([(key, None) for key in self.factory.sync])
self._buffer = dict([(key, []) for key in self.factory.sync])
super(AsynchronousCallBotProtocol, self).connectionMade ()
def _handle (self, user, channel, message, wrap = False):
'''
Triggers the _sync method if necessary
'''
if channel in self.factory.sync:
self._sync (user, channel, message)
return super(AsynchronousCallBotProtocol, self)._handle (user, channel, message, wrap)
class AliasBotProtocol (BotProtocol):
'''
I am a bot protocol which implement command aliases
'''
def connectionMade (self):
'''
Initialization of specific attributes
'''
self._aliases = {}
self._aliases = shelve.open('aliases.db', flag='c', protocol=None,
writeback=True)
loop = LoopingCall (self._aliases.sync)
loop.start (10)
super(AliasBotProtocol, self).connectionMade ()
@botcommand
def setAlias (self, flow, out, user, channel, name, *command):
'''
\x02setAlias\x02 <name> <command line>
Saves the given command line as responding to the specified name
Every '=>' in the command line will be replaced by the piping pattern
		Arguments to the alias can be retrieved using %(0)s, %(1)s, etc.
\x02Aliases shall not be piped to other commands for now.\x02
'''
if name in dir (self) or name.startswith ('_'):
out.append ('\x02Error\x02: illegal alias name')
else:
command = ' '.join (command).replace ('=>', '->')
self._aliases[name] = command
out.append ('\x02Saved %s as\x02: %s' % (name, command))
@botcommand
def listAliases (self, flow, out, user, channel):
'''
\x02listAliases\x02
Lists currently defined aliases
'''
if len (self._aliases.keys ()) == 0:
out.append ('\x02Notice\x02 No alias is currently defined')
for name, command in self._aliases.items ():
out.append ('\x02%s:\x02 %s' % (name, command))
@botcommand
def delAlias (self, flow, out, user, channel, name):
'''
\x02delAlias\x02 <name>
Deletes the specified alias
'''
if name not in self._aliases:
			out.append ('\x02Warning\x02 Unknown alias %s' % name)
			return
		out.append ('Deleted alias \x02%s\x02' % name)
		del self._aliases[name]
def _check (self, user, channel, command, args):
return (super(AliasBotProtocol, self)._check (user, channel, command, args)
or command in self._aliases)
def __getattr__ (self, name):
if name in self._aliases:
def f (self, flow, out, user, channel, *args):
args = dict (zip (map (str, range (len (args))), args))
d = self._handle (user, channel, self._aliases[name] % args, True)
d.callback (flow)
return d
return new.instancemethod (f, self, self.__class__)
| gpl-3.0 | 1,358,964,609,801,808,000 | 33.217877 | 88 | 0.691429 | false |
openstack/octavia | octavia/tests/unit/controller/worker/v1/flows/test_health_monitor_flows.py | 1 | 2762 | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from taskflow.patterns import linear_flow as flow
from octavia.common import constants
from octavia.controller.worker.v1.flows import health_monitor_flows
import octavia.tests.unit.base as base
class TestHealthMonitorFlows(base.TestCase):
def setUp(self):
self.HealthMonitorFlow = health_monitor_flows.HealthMonitorFlows()
super().setUp()
def test_get_create_health_monitor_flow(self):
health_mon_flow = (self.HealthMonitorFlow.
get_create_health_monitor_flow())
self.assertIsInstance(health_mon_flow, flow.Flow)
self.assertIn(constants.LISTENERS, health_mon_flow.requires)
self.assertIn(constants.LOADBALANCER, health_mon_flow.requires)
self.assertIn(constants.POOL, health_mon_flow.requires)
self.assertEqual(4, len(health_mon_flow.requires))
self.assertEqual(0, len(health_mon_flow.provides))
def test_get_delete_health_monitor_flow(self):
health_mon_flow = (self.HealthMonitorFlow.
get_delete_health_monitor_flow())
self.assertIsInstance(health_mon_flow, flow.Flow)
self.assertIn(constants.HEALTH_MON, health_mon_flow.requires)
self.assertIn(constants.LISTENERS, health_mon_flow.requires)
self.assertIn(constants.LOADBALANCER, health_mon_flow.requires)
self.assertIn(constants.POOL, health_mon_flow.requires)
self.assertEqual(4, len(health_mon_flow.requires))
self.assertEqual(0, len(health_mon_flow.provides))
def test_get_update_health_monitor_flow(self):
health_mon_flow = (self.HealthMonitorFlow.
get_update_health_monitor_flow())
self.assertIsInstance(health_mon_flow, flow.Flow)
self.assertIn(constants.LISTENERS, health_mon_flow.requires)
self.assertIn(constants.LOADBALANCER, health_mon_flow.requires)
self.assertIn(constants.HEALTH_MON, health_mon_flow.requires)
self.assertIn(constants.UPDATE_DICT, health_mon_flow.requires)
self.assertEqual(5, len(health_mon_flow.requires))
self.assertEqual(0, len(health_mon_flow.provides))
| apache-2.0 | 1,353,852,472,475,876,000 | 37.361111 | 75 | 0.709631 | false |
martinblech/mimerender | src/test.py | 1 | 10283 | # unit tests
try:
import unittest2 as unittest
except ImportError:
import unittest
import mimerender
from mimerender import _MIME_TYPES
class TestMimeRender(mimerender.MimeRenderBase):
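    """Minimal concrete MimeRenderBase used by these tests: it records the
    selected status, headers and context variables so assertions can
    inspect them."""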
def __init__(self, request_parameters=None, accept_header=None,
*args, **kwargs):
super(TestMimeRender, self).__init__(*args, **kwargs)
self.request_parameters = request_parameters or {}
self.accept_header = accept_header
self.ctx = {}
self.headers = {}
def _get_request_parameter(self, key, default=None):
return self.request_parameters.get(key, default)
def _get_accept_header(self, default=None):
return self.accept_header
def _set_context_var(self, key, value):
self.ctx[key] = value
def _clear_context_var(self, key):
del self.ctx[key]
def _make_response(self, content, headers, status):
self.status = status
for k, v in headers:
self.headers[k] = v
return content
class MimeRenderTests(unittest.TestCase):
def test_single_variant_without_default(self):
mimerender = TestMimeRender()
result = mimerender(
xml=lambda x: '<xml>%s</xml>' % x,
)(lambda: dict(x='test'))()
self.assertEqual(mimerender.headers['Content-Type'], 'text/xml')
self.assertEqual(result, '<xml>test</xml>')
def test_single_variant_with_default(self):
mimerender = TestMimeRender()
result = mimerender(
xml=lambda x: '<xml>%s</xml>' % x,
default='xml'
)(lambda: dict(x='test'))()
self.assertEqual(mimerender.headers['Content-Type'], 'text/xml')
self.assertEqual(result, '<xml>test</xml>')
def test_norenderers(self):
try:
TestMimeRender()()
self.fail('should fail with ValueError')
except ValueError:
pass
def test_select_variant(self):
mimerender = TestMimeRender()
handler = mimerender(
default='txt',
override_input_key='mime',
txt=lambda x: 'txt:%s' %x,
xml=lambda x: 'xml:%s' % x,
json=lambda x: 'json:%s' % x,
html=lambda x: 'html:%s' % x,
)(lambda x: dict(x=x))
result = handler('default')
self.assertEqual(mimerender.headers['Content-Type'], 'text/plain')
self.assertEqual(result, 'txt:default')
mimerender.accept_header = 'application/xml'
result = handler('a')
self.assertEqual(mimerender.headers['Content-Type'], 'application/xml')
self.assertEqual(result, 'xml:a')
mimerender.accept_header = 'application/json'
result = handler('b')
self.assertEqual(mimerender.headers['Content-Type'], 'application/json')
self.assertEqual(result, 'json:b')
mimerender.request_parameters['mime'] = 'html'
result = handler('c')
self.assertEqual(mimerender.headers['Content-Type'], 'text/html')
self.assertEqual(result, 'html:c')
def test_default_for_wildcard_query(self):
mimerender = TestMimeRender()
mimerender.accept_header = '*/*'
mimerender(
default='xml',
txt=lambda: None,
xml=lambda: None)(lambda: {})()
self.assertEqual(mimerender.headers['Content-Type'], _MIME_TYPES['xml'][0])
mimerender(
default='txt',
txt=lambda: None,
xml=lambda: None)(lambda: {})()
self.assertEqual(mimerender.headers['Content-Type'], _MIME_TYPES['txt'][0])
def test_decorated_function_name(self):
def vanilla_function(): pass
mimerender = TestMimeRender()
decorated_function = mimerender(xml=None)(vanilla_function)
self.assertEqual(vanilla_function.__name__,
decorated_function.__name__)
def test_not_acceptable(self):
mimerender = TestMimeRender()
# default behavior, pick default even if not acceptable
handler = mimerender(
default='json',
xml=lambda x: 'xml:%s' %x,
json=lambda x: 'json:%s' %x,
)(lambda x: dict(x=x))
mimerender.accept_header = 'text/plain'
result = handler('default')
self.assertEqual(mimerender.headers['Content-Type'], 'application/json')
self.assertEqual(mimerender.status, '200 OK')
self.assertEqual(result, 'json:default')
# optional: fail with 406
handler = mimerender(
not_acceptable_callback= lambda _, sup: (
'text/plain',
'Available Content Types: ' + ', '.join(sup)),
default='json',
xml=lambda x: 'xml:%s' %x,
json=lambda x: 'json:%s' %x,
)(lambda x: dict(x=x))
mimerender.accept_header = 'text/plain'
result = handler('default')
self.assertEqual(mimerender.headers['Content-Type'], 'text/plain')
self.assertEqual(mimerender.status, '406 Not Acceptable')
self.assertTrue(result.startswith('Available Content Types: '))
self.assertTrue(result.find('application/xml') != -1)
self.assertTrue(result.find('application/json') != -1)
def test_map_exceptions(self):
class MyException1(Exception): pass
class MyException2(MyException1): pass
def failifnone(x, exception_class=Exception):
if x is None:
raise exception_class('info', 'moreinfo')
return dict(x=x)
mimerender = TestMimeRender()
handler = mimerender.map_exceptions(
mapping=((MyException2, '500 Crazy Internal Error'),
(MyException1, '400 Failed')),
default='txt',
txt=lambda exception: 'txt:%s' % exception,
xml=lambda exception: 'xml:%s' % exception,
)(mimerender(
default='txt',
txt=lambda x: 'txt:%s' %x,
xml=lambda x: 'xml:%s' % x,
)(failifnone))
# no exception thrown means normal mimerender behavior
mimerender.accept_header = 'application/xml'
result = handler('a')
self.assertEqual(mimerender.status, '200 OK')
self.assertEqual(mimerender.headers['Content-Type'], 'application/xml')
self.assertEqual(result, 'xml:a')
mimerender.accept_header = 'text/plain'
result = handler('b')
self.assertEqual(mimerender.headers['Content-Type'], 'text/plain')
self.assertEqual(mimerender.status, '200 OK')
self.assertEqual(result, 'txt:b')
# unmapped exception won't be caught
        try:
            result = handler(None, Exception)
        except Exception:
            pass
        else:
            self.fail('unmapped exception must not be caught')
# mapped exceptions are represented with an acceptable mime type
mimerender.accept_header = 'application/xml'
result = handler(None, MyException1)
self.assertEqual(mimerender.headers['Content-Type'], 'application/xml')
self.assertNotEqual(mimerender.status, '200 OK')
self.assertEqual(result, "xml:('info', 'moreinfo')")
mimerender.accept_header = 'text/plain'
result = handler(None, MyException1)
self.assertEqual(mimerender.headers['Content-Type'], 'text/plain')
self.assertNotEqual(mimerender.status, '200 OK')
self.assertEqual(result, "txt:('info', 'moreinfo')")
# mapping order matters over exception hierarchies
result = handler(None, MyException2)
self.assertEqual(mimerender.status, '500 Crazy Internal Error')
result = handler(None, MyException1)
self.assertEqual(mimerender.status, '400 Failed')
def test_vary_header(self):
mimerender = TestMimeRender()
# add vary header if absent
mimerender(xml=lambda: None)(lambda: {})()
self.assertEqual(mimerender.headers['Vary'], 'Accept')
# leave vary header untouched if accept is already there
mimerender(xml=lambda: None)(
lambda: ({}, '', (('Vary', 'Accept,X'),)))()
self.assertEqual(mimerender.headers['Vary'], 'Accept,X')
# append accept if vary header is incomplete
mimerender(xml=lambda: None)(
lambda: ({}, '', (('Vary', 'X'),)))()
self.assertEqual(mimerender.headers['Vary'], 'X,Accept')
def test_response_types(self):
mimerender = TestMimeRender()
# dict only
mimerender(xml=lambda: None)(lambda: {})()
self.assertEqual(mimerender.status, '200 OK')
self.assertEqual(mimerender.headers, {'Vary': 'Accept',
'Content-Type': 'text/xml'})
# dict + status
mimerender(xml=lambda: None)(lambda: ({}, '666 Armaggedon'))()
self.assertEqual(mimerender.status, '666 Armaggedon')
self.assertEqual(mimerender.headers, {'Vary': 'Accept',
'Content-Type': 'text/xml'})
# dict + status + headers
mimerender(xml=lambda: None)(lambda: ({}, '666 Armaggedon',
{'X-Y': 'Z'}))()
self.assertEqual(mimerender.status, '666 Armaggedon')
self.assertEqual(mimerender.headers, {'Vary': 'Accept',
'Content-Type': 'text/xml',
'X-Y': 'Z'})
def test_invalid_accept_header(self):
mimerender = TestMimeRender()
# default behavior, pick default even if not acceptable
handler = mimerender(
default='json',
xml=lambda x: 'xml:%s' %x,
json=lambda x: 'json:%s' %x,
)(lambda x: dict(x=x))
mimerender.accept_header = 'text' # invalid header
result = handler('default')
self.assertEqual(mimerender.headers['Content-Type'], 'text/plain')
self.assertEqual(mimerender.status, '400 Bad Request')
self.assertEqual(result, 'Invalid Accept header requested')
if __name__ == "__main__":
unittest.main()
| mit | 4,608,935,895,285,645,000 | 39.167969 | 83 | 0.574832 | false |
tomchadwin/qgis2web | qgis2web/maindialog.py | 1 | 39316 | # -*- coding: utf-8 -*-
# qgis-ol3 Creates OpenLayers map from QGIS layers
# Copyright (C) 2014 Victor Olaya ([email protected])
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import sys
from collections import defaultdict, OrderedDict
import webbrowser
# This import is to enable SIP API V2
# noinspection PyUnresolvedReferences
from qgis.core import (Qgis,
QgsWkbTypes,
QgsProject,
QgsMapLayer,
QgsVectorLayer,
QgsNetworkAccessManager,
QgsMessageLog)
# noinspection PyUnresolvedReferences
from qgis.PyQt.QtCore import (QObject,
QSettings,
pyqtSignal,
pyqtSlot,
QUrl,
QRect,
QByteArray,
QEvent,
Qt)
from qgis.PyQt.QtGui import (QIcon)
from qgis.PyQt.QtWidgets import (QAction,
QAbstractItemView,
QDialog,
QHBoxLayout,
QTreeWidgetItem,
QComboBox,
QListWidget,
QCheckBox,
QToolButton,
QWidget,
QTextBrowser)
from qgis.PyQt.uic import loadUiType
try:
    from qgis.PyQt.QtWebKitWidgets import QWebView, QWebInspector, QWebPage
    from qgis.PyQt.QtWebKit import QWebSettings
    webkit_available = True
except ImportError:
    webkit_available = False
import traceback
from . import utils
from qgis2web.configparams import (getParams,
specificParams,
specificOptions)
from qgis2web.olwriter import OpenLayersWriter
from qgis2web.leafletWriter import LeafletWriter
from qgis2web.mapboxWriter import MapboxWriter
from qgis2web.writerRegistry import (WRITER_REGISTRY)
from qgis2web.exporter import (EXPORTER_REGISTRY)
from qgis2web.feedbackDialog import FeedbackDialog
from qgis.gui import QgsColorButton
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
FORM_CLASS, _ = loadUiType(os.path.join(
os.path.dirname(__file__), 'ui_maindialog.ui'))
class MainDialog(QDialog, FORM_CLASS):
"""The main dialog of QGIS2Web plugin."""
items = {}
def __init__(self, iface, parent=None):
super(MainDialog, self).__init__(parent)
QDialog.__init__(self)
self.setupUi(self)
self.iface = iface
self.previewUrl = None
self.layer_search_combo = None
self.layer_filter_select = None
self.exporter_combo = None
self.feedback = FeedbackDialog(self)
self.feedback.setModal(True)
stgs = QSettings()
self.restoreGeometry(stgs.value("qgis2web/MainDialogGeometry",
QByteArray(), type=QByteArray))
self.verticalLayout_2.addStretch()
self.horizontalLayout_6.addStretch()
if stgs.value("qgis2web/previewOnStartup", Qt.Checked) == Qt.Checked:
self.previewOnStartup.setCheckState(Qt.Checked)
else:
self.previewOnStartup.setCheckState(Qt.Unchecked)
if stgs.value("qgis2web/closeFeedbackOnSuccess",
Qt.Checked) == Qt.Checked:
self.closeFeedbackOnSuccess.setCheckState(Qt.Checked)
else:
self.closeFeedbackOnSuccess.setCheckState(Qt.Unchecked)
self.previewFeatureLimit.setText(
stgs.value("qgis2web/previewFeatureLimit", "1000"))
self.appearanceParams.setSelectionMode(
QAbstractItemView.SingleSelection)
self.preview = None
if webkit_available:
widget = QWebView()
self.preview = widget
try:
# if os.environ["TRAVIS"]:
self.preview.setPage(WebPage())
except Exception:
print("Failed to set custom webpage")
webview = self.preview.page()
webview.setNetworkAccessManager(QgsNetworkAccessManager.instance())
self.preview.settings().setAttribute(
QWebSettings.DeveloperExtrasEnabled, True)
self.preview.settings().setAttribute(
QWebSettings.DnsPrefetchEnabled, True)
else:
widget = QTextBrowser()
widget.setText(self.tr('Preview is not available since QtWebKit '
'dependency is missing on your system'))
self.right_layout.insertWidget(0, widget)
self.populateConfigParams(self)
self.populate_layers_and_groups(self)
self.populateLayerSearch()
self.populateAttrFilter()
writer = WRITER_REGISTRY.createWriterFromProject()
self.setStateToWriter(writer)
self.exporter = EXPORTER_REGISTRY.createFromProject()
self.exporter_combo.setCurrentIndex(
self.exporter_combo.findText(self.exporter.name()))
self.exporter_combo.currentIndexChanged.connect(
self.exporterTypeChanged)
self.toggleOptions()
if webkit_available:
if self.previewOnStartup.checkState() == Qt.Checked:
self.autoUpdatePreview()
self.buttonPreview.clicked.connect(self.previewMap)
else:
self.buttonPreview.setDisabled(True)
QgsProject.instance().cleared.connect(self.reject)
self.layersTree.model().dataChanged.connect(self.populateLayerSearch)
self.layersTree.model().dataChanged.connect(self.populateAttrFilter)
self.ol3.clicked.connect(self.changeFormat)
self.leaflet.clicked.connect(self.changeFormat)
self.mapbox.clicked.connect(self.changeFormat)
self.buttonExport.clicked.connect(self.saveMap)
helpText = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"helpFile.md")
self.helpField.setSource(QUrl.fromLocalFile(helpText))
if webkit_available:
self.devConsole = QWebInspector(self.preview)
self.devConsole.setFixedHeight(0)
self.devConsole.setObjectName("devConsole")
self.devConsole.setPage(self.preview.page())
self.devConsole.hide()
self.right_layout.insertWidget(1, self.devConsole)
self.filter = devToggleFilter()
self.filter.devToggle.connect(self.showHideDevConsole)
self.installEventFilter(self.filter)
self.setModal(False)
@pyqtSlot(bool)
def showHideDevConsole(self, visible):
self.devConsole.setVisible(visible)
def changeFormat(self):
self.autoUpdatePreview()
self.toggleOptions()
def exporterTypeChanged(self):
new_exporter_name = self.exporter_combo.currentText()
try:
self.exporter = [
e for e in EXPORTER_REGISTRY.getExporters()
if e.name() == new_exporter_name][0]()
except Exception:
pass
def currentMapFormat(self):
"""
Returns the currently selected map writer type
"""
return self.getWriterFactory().type()
def getWriterFactory(self):
"""
Returns a factory to create the currently selected map writer
"""
if self.mapFormat.checkedButton() == self.ol3:
return OpenLayersWriter
elif self.mapFormat.checkedButton() == self.leaflet:
return LeafletWriter
elif self.mapFormat.checkedButton() == self.mapbox:
return MapboxWriter
def createWriter(self):
"""
Creates a writer object reflecting the current settings
in the dialog
"""
writer = self.getWriterFactory()()
(writer.layers, writer.groups, writer.popup,
writer.visible, writer.interactive, writer.json,
writer.cluster, writer.getFeatureInfo) = self.getLayersAndGroups()
writer.params = self.getParameters()
return writer
def showErrorMessage(self, error):
"""
Shows an error message in the preview window
"""
html = "<html>"
html += "<head></head>"
html += "<style>body {font-family: sans-serif;}</style>"
html += "<body><h1>Error</h1>"
html += "<p>qgis2web produced an error:</p><code>"
html += error
html += "</code></body></html>"
if self.preview:
self.preview.setHtml(html)
def showFeedbackMessage(self, title, message):
"""
Shows a feedback message in the preview window
"""
html = "<html>"
html += "<head></head>"
html += "<style>body {font-family: sans-serif;}</style>"
html += "<body><h1>{}</h1>".format(title)
html += "<p>{}</p>".format(message)
html += "</body></html>"
if self.preview:
self.preview.setHtml(html)
def toggleOptions(self):
currentWriter = self.getWriterFactory()
for param, value in specificParams.items():
treeParam = self.appearanceParams.findItems(
param, Qt.MatchExactly | Qt.MatchRecursive)[0]
if currentWriter == OpenLayersWriter:
if value == "OL3":
treeParam.setDisabled(False)
if treeParam.combo:
treeParam.combo.setEnabled(True)
else:
treeParam.setDisabled(True)
if treeParam.combo:
treeParam.combo.setEnabled(False)
else:
if value == "OL3":
treeParam.setDisabled(True)
if treeParam.combo:
treeParam.combo.setEnabled(False)
else:
treeParam.setDisabled(False)
if treeParam.combo:
treeParam.combo.setEnabled(True)
for option, value in specificOptions.items():
treeOptions = self.layersTree.findItems(option, Qt.MatchExactly |
Qt.MatchRecursive)
for treeOption in treeOptions:
if currentWriter == OpenLayersWriter:
if value == "OL3":
treeOption.setDisabled(False)
else:
treeOption.setDisabled(True)
else:
if value == "OL3":
treeOption.setDisabled(True)
else:
treeOption.setDisabled(False)
def createPreview(self):
writer = self.createWriter()
return writer.write(self.iface,
dest_folder=utils.tempFolder()).index_file
def shouldAutoPreview(self):
"""
Returns a tuple, with a bool for whether the preview should
automatically be generated, and a string for explanations
as to why the preview cannot be automatically generated
"""
writer = self.createWriter()
total_features = 0
for layer in writer.layers:
if isinstance(layer, QgsVectorLayer):
total_features += layer.featureCount()
if total_features > int(self.previewFeatureLimit.text()):
# Too many features => too slow!
return (False, self.tr('<p>A large number of features are '
'present in the map. Generating the '
'preview may take some time.</p>'
'<p>Click Update Preview to generate the '
'preview anyway.</p>'))
return (True, None)
def autoUpdatePreview(self):
"""
Triggered when a preview will be automatically generated, i.e.
not as a result of the user manually clicking the
Update Preview button.
"""
(auto_preview, message) = self.shouldAutoPreview()
if not auto_preview:
self.showFeedbackMessage(self.tr('Preview Map'), message)
else:
self.previewMap()
def previewMap(self):
preview_file = self.createPreview()
self.loadPreviewFile(preview_file)
def saveMap(self):
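        """
        Writes the map to the folder chosen by the current exporter while
        showing progress in the feedback dialog, then post-processes the
        result and opens it in a browser (unless running under CI).
        """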
writer = self.createWriter()
write_folder = self.exporter.exportDirectory()
if not write_folder:
return
self.feedback.reset()
self.feedback.show()
results = writer.write(self.iface,
dest_folder=write_folder,
feedback=self.feedback)
self.feedback.showFeedback('Success')
if self.closeFeedbackOnSuccess.checkState() == Qt.Checked:
self.feedback.close()
result = self.exporter.postProcess(results, feedback=self.feedback)
if result and (not os.environ.get('CI') and
not os.environ.get('TRAVIS')):
webbrowser.open_new_tab(self.exporter.destinationUrl())
def populate_layers_and_groups(self, dlg):
"""Populate layers on QGIS into our layers and group tree view."""
root_node = QgsProject.instance().layerTreeRoot()
tree_groups = []
tree_layers = root_node.findLayers()
self.layers_item = QTreeWidgetItem()
self.layers_item.setText(0, "Layers and Groups")
self.layersTree.setColumnCount(3)
for tree_layer in tree_layers:
layer = tree_layer.layer()
if (layer.type() != QgsMapLayer.PluginLayer and
(layer.type() != QgsMapLayer.VectorLayer or
layer.wkbType() != QgsWkbTypes.NoGeometry) and
layer.customProperty("ol_layer_type") is None):
try:
# if layer.type() == QgsMapLayer.VectorLayer:
# testDump = layer.renderer().dump()
layer_parent = tree_layer.parent()
if layer_parent.parent() is None:
item = TreeLayerItem(self.iface, layer,
self.layersTree, dlg)
self.layers_item.addChild(item)
else:
if layer_parent not in tree_groups:
tree_groups.append(layer_parent)
except Exception:
QgsMessageLog.logMessage(traceback.format_exc(),
"qgis2web",
level=Qgis.Critical)
for tree_group in tree_groups:
group_name = tree_group.name()
group_layers = [
tree_layer.layer() for tree_layer in tree_group.findLayers()]
item = TreeGroupItem(group_name, group_layers, self.layersTree)
self.layers_item.addChild(item)
self.layersTree.addTopLevelItem(self.layers_item)
self.layersTree.expandAll()
self.layersTree.resizeColumnToContents(0)
self.layersTree.resizeColumnToContents(1)
for i in range(self.layers_item.childCount()):
item = self.layers_item.child(i)
if item.checkState(0) != Qt.Checked:
item.setExpanded(False)
def populateLayerSearch(self):
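        # Fill the "Layer search" combo with a "<layer>: <field>" entry for
        # each visible vector field; the item data holds the safe layer name
        # used by the writers.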
self.layer_search_combo.clear()
self.layer_search_combo.addItem("None")
(layers, groups, popup, visible, interactive,
json, cluster, getFeatureInfo) = self.getLayersAndGroups()
for count, layer in enumerate(layers):
if layer.type() == layer.VectorLayer:
options = []
fields = layer.fields()
for f in fields:
fieldIndex = fields.indexFromName(f.name())
editorWidget = layer.editorWidgetSetup(fieldIndex).type()
if editorWidget == 'Hidden':
continue
options.append(f.name())
for option in options:
displayStr = layer.name() + ": " + option
self.layer_search_combo.insertItem(0, displayStr)
sln = utils.safeName(layer.name())
self.layer_search_combo.setItemData(
self.layer_search_combo.findText(displayStr),
sln + "_" + str(count))
def populateAttrFilter(self):
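        # Fill the "Attribute filter" list with one entry per filterable
        # field ("<field>: <type>" plus the layers containing it), merging
        # fields that appear in several layers.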
self.layer_filter_select.clear()
(layers, groups, popup, visible, interactive,
json, cluster, getFeatureInfo) = self.getLayersAndGroups()
options = []
for count, layer in enumerate(layers):
if layer.type() == layer.VectorLayer:
fields = layer.fields()
for f in fields:
fieldIndex = fields.indexFromName(f.name())
editorWidget = layer.editorWidgetSetup(fieldIndex).type()
if editorWidget == 'Hidden':
continue
if utils.boilType(f.typeName()) in ["int", "str", "real",
"date", "bool",
"time", "datetime"]:
options.append([f.name() + ": " +
utils.boilType(f.typeName()),
layer.name()])
preCleanOptions = {}
for entry in options:
if entry[0] not in list(preCleanOptions.keys()):
preCleanOptions[entry[0]] = ": " + entry[1]
else:
preCleanOptions[entry[0]] = "| ".join(
[preCleanOptions[entry[0]], entry[1]])
options = []
for key, value in preCleanOptions.items():
options.append(key + value)
cleanOptions = list(set(options))
for option in cleanOptions:
self.layer_filter_select.insertItem(0, option)
def configureExporter(self):
self.exporter.configure()
def populateConfigParams(self, dlg):
""" Populates the dialog with option items and widgets """
self.items = defaultdict(dict)
tree = dlg.appearanceParams
configure_export_action = QAction('...', self)
configure_export_action.triggered.connect(self.configureExporter)
params = getParams(configure_exporter_action=configure_export_action)
for group, settings in params.items():
if group != "Data export":
item = QTreeWidgetItem()
item.setText(0, group)
for param, value in settings.items():
subitem = self.createOptionItem(tree_widget=tree,
parent_item=item,
parameter=param,
default_value=value)
item.addChild(subitem)
self.items[group][param] = subitem
self.appearanceParams.addTopLevelItem(item)
item.sortChildren(0, Qt.AscendingOrder)
self.appearanceParams.expandAll()
self.appearanceParams.resizeColumnToContents(0)
self.appearanceParams.resizeColumnToContents(1)
self.layer_search_combo.removeItem(1)
self.layer_filter_select.takeItem(1)
# configure export params in separate tab
exportTree = dlg.exportParams
for group, settings in params.items():
if group == "Data export":
item = QTreeWidgetItem()
item.setText(0, group)
for param, value in settings.items():
subitem = self.createOptionItem(tree_widget=exportTree,
parent_item=item,
parameter=param,
default_value=value)
item.addChild(subitem)
self.items[group][param] = subitem
self.exportParams.addTopLevelItem(item)
item.sortChildren(0, Qt.AscendingOrder)
self.exportParams.expandAll()
self.exportParams.resizeColumnToContents(0)
self.exportParams.resizeColumnToContents(1)
def createOptionItem(self, tree_widget, parent_item,
parameter, default_value):
"""create the tree item corresponding to an option parameter"""
action = None
if isinstance(default_value, dict):
action = default_value['action']
default_value = default_value['option']
subitem = TreeSettingItem(parent_item, tree_widget,
parameter, default_value, action)
if parameter == 'Layer search':
self.layer_search_combo = subitem.combo
if parameter == 'Attribute filter':
self.layer_filter_select = subitem.list
elif parameter == 'Exporter':
self.exporter_combo = subitem.combo
return subitem
def setStateToWriter(self, writer):
"""
Sets the dialog state to match the specified writer
"""
self.selectMapFormat(writer)
self.setStateToParams(writer.params)
def setStateToParams(self, params):
"""
Sets the dialog state to match the specified parameters
"""
for group, settings in self.items.items():
for param, item in settings.items():
value = params[group][param]
item.setValue(value)
def selectMapFormat(self, writer):
"""
Updates dialog state to match the specified writer format
"""
self.ol3.setChecked(isinstance(writer, OpenLayersWriter))
self.leaflet.setChecked(isinstance(writer, LeafletWriter))
self.mapbox.setChecked(isinstance(writer, MapboxWriter))
def loadPreviewFile(self, file):
"""
Loads a web based preview from a local file path
"""
self.previewUrl = QUrl.fromLocalFile(file)
if self.preview:
self.preview.settings().clearMemoryCaches()
self.preview.setUrl(self.previewUrl)
def getParameters(self):
parameters = defaultdict(dict)
for group, settings in self.items.items():
for param, item in settings.items():
if param in ('Widget Icon', 'Widget Background'):
parameters[group][param] = item._value.color().name()
else:
parameters[group][param] = item.value()
if param == "Layer search":
parameters["Appearance"]["Search layer"] = (
self.layer_search_combo.itemData(
self.layer_search_combo.currentIndex()))
if param == "Attribute filter":
parameters["Appearance"]["Attribute filter"] = (
self.layer_filter_select.selectedItems())
return parameters
def saveParameters(self):
"""
Saves current dialog state to project
"""
WRITER_REGISTRY.saveWriterToProject(self.createWriter())
EXPORTER_REGISTRY.writeToProject(self.exporter)
def getLayersAndGroups(self):
layers = []
groups = {}
popup = []
visible = []
interactive = []
json = []
cluster = []
getFeatureInfo = []
for i in range(self.layers_item.childCount()):
item = self.layers_item.child(i)
if isinstance(item, TreeLayerItem):
if item.checkState(0) == Qt.Checked:
layers.append(item.layer)
popup.append(item.popup)
visible.append(item.visible)
interactive.append(item.interactive)
json.append(item.json)
cluster.append(item.cluster)
getFeatureInfo.append(item.getFeatureInfo)
else:
group = item.name
groupLayers = []
if item.checkState(0) != Qt.Checked:
continue
for layer in item.layers:
groupLayers.append(layer)
layers.append(layer)
popup.append({})
if item.visible:
visible.append(True)
else:
visible.append(False)
if item.interactive:
interactive.append(True)
else:
interactive.append(False)
if hasattr(item, "json") and item.json:
json.append(True)
else:
json.append(False)
if hasattr(item, "cluster") and item.cluster:
cluster.append(True)
else:
cluster.append(False)
if hasattr(item, "getFeatureInfo") and item.getFeatureInfo:
getFeatureInfo.append(True)
else:
getFeatureInfo.append(False)
groups[group] = groupLayers[::-1]
return (layers[::-1],
groups,
popup[::-1],
visible[::-1],
interactive[::-1],
json[::-1],
cluster[::-1],
getFeatureInfo[::-1])
def reject(self):
self.saveParameters()
(layers, groups, popup, visible, interactive,
json, cluster, getFeatureInfo) = self.getLayersAndGroups()
try:
            for layer, pop, vis, inter in zip(layers, popup, visible,
                                              interactive):
                attrDict = {}
                for attr in pop:
                    attrDict[attr] = pop[attr]
                    layer.setCustomProperty("qgis2web/popup/" + attr,
                                            pop[attr])
                layer.setCustomProperty("qgis2web/Visible", vis)
                layer.setCustomProperty("qgis2web/Interactive", inter)
except Exception:
pass
QSettings().setValue(
"qgis2web/MainDialogGeometry", self.saveGeometry())
QSettings().setValue("qgis2web/previewOnStartup",
self.previewOnStartup.checkState())
QSettings().setValue("qgis2web/closeFeedbackOnSuccess",
self.closeFeedbackOnSuccess.checkState())
QSettings().setValue("qgis2web/previewFeatureLimit",
self.previewFeatureLimit.text())
QDialog.close(self)
def closeEvent(self, event):
try:
if self.devConsole or self.devConsole.isVisible() and self.preview:
del self.devConsole
del self.preview
self.reject()
event.accept()
except Exception:
pass
class devToggleFilter(QObject):
devToggle = pyqtSignal(bool)
def eventFilter(self, obj, event):
try:
if event.type() == QEvent.KeyPress:
if event.key() == Qt.Key_F12:
self.devToggle.emit(not obj.devConsole.isVisible())
if obj.devConsole.height() != 0:
obj.devConsole.setFixedHeight(0)
else:
obj.devConsole.setFixedHeight(168)
return True
except Exception:
pass
return False
class TreeGroupItem(QTreeWidgetItem):
groupIcon = QIcon(os.path.join(os.path.dirname(__file__), "icons",
"group.gif"))
def __init__(self, name, layers, tree):
QTreeWidgetItem.__init__(self)
self.layers = layers
self.name = name
self.setText(0, name)
self.setIcon(0, self.groupIcon)
self.setCheckState(0, Qt.Checked)
self.visibleItem = QTreeWidgetItem(self)
self.visibleCheck = QCheckBox()
self.visibleCheck.setChecked(True)
self.visibleItem.setText(0, "Visibility")
self.addChild(self.visibleItem)
tree.setItemWidget(self.visibleItem, 1, self.visibleCheck)
self.interactiveItem = QTreeWidgetItem(self)
self.interactiveCheck = QCheckBox()
self.interactiveCheck.setChecked(True)
self.interactiveItem.setText(0, "Popups")
self.addChild(self.interactiveItem)
tree.setItemWidget(self.interactiveItem, 1, self.interactiveCheck)
@property
def visible(self):
return self.visibleCheck.isChecked()
@property
def interactive(self):
return self.interactiveCheck.isChecked()
class TreeLayerItem(QTreeWidgetItem):
layerIcon = QIcon(os.path.join(os.path.dirname(__file__), "icons",
"layer.png"))
def __init__(self, iface, layer, tree, dlg):
QTreeWidgetItem.__init__(self)
self.iface = iface
self.layer = layer
self.setText(0, layer.name())
self.setIcon(0, self.layerIcon)
project = QgsProject.instance()
if project.layerTreeRoot().findLayer(layer.id()).isVisible():
self.setCheckState(0, Qt.Checked)
else:
self.setCheckState(0, Qt.Unchecked)
self.visibleItem = QTreeWidgetItem(self)
self.visibleCheck = QCheckBox()
vis = layer.customProperty("qgis2web/Visible", True)
if vis == 0 or str(vis).lower() == "false":
self.visibleCheck.setChecked(False)
else:
self.visibleCheck.setChecked(True)
self.visibleItem.setText(0, "Visible")
self.addChild(self.visibleItem)
tree.setItemWidget(self.visibleItem, 1, self.visibleCheck)
self.interactiveItem = QTreeWidgetItem(self)
self.interactiveCheck = QCheckBox()
        inter = layer.customProperty("qgis2web/Interactive", True)
        if inter == 0 or str(inter).lower() == "false":
            self.interactiveCheck.setChecked(False)
        else:
            self.interactiveCheck.setChecked(True)
self.interactiveItem.setText(0, "Popups")
self.addChild(self.interactiveItem)
tree.setItemWidget(self.interactiveItem, 1, self.interactiveCheck)
if layer.type() == layer.VectorLayer:
if layer.providerType() == 'WFS':
self.jsonItem = QTreeWidgetItem(self)
self.jsonCheck = QCheckBox()
if layer.customProperty("qgis2web/Encode to JSON") == 2:
self.jsonCheck.setChecked(True)
self.jsonItem.setText(0, "Encode to JSON")
self.jsonCheck.stateChanged.connect(self.changeJSON)
self.addChild(self.jsonItem)
tree.setItemWidget(self.jsonItem, 1, self.jsonCheck)
if layer.geometryType() == QgsWkbTypes.PointGeometry:
self.clusterItem = QTreeWidgetItem(self)
self.clusterCheck = QCheckBox()
if layer.customProperty("qgis2web/Cluster") == 2:
self.clusterCheck.setChecked(True)
self.clusterItem.setText(0, "Cluster")
self.clusterCheck.stateChanged.connect(self.changeCluster)
self.addChild(self.clusterItem)
tree.setItemWidget(self.clusterItem, 1, self.clusterCheck)
self.popupItem = QTreeWidgetItem(self)
self.popupItem.setText(0, "Popup fields")
options = []
fields = self.layer.fields()
for f in fields:
fieldIndex = fields.indexFromName(f.name())
editorWidget = layer.editorWidgetSetup(fieldIndex).type()
if editorWidget == 'Hidden':
continue
options.append(f.name())
for option in options:
self.attr = QTreeWidgetItem(self)
self.attrWidget = QComboBox()
self.attrWidget.addItem("no label")
self.attrWidget.addItem("inline label")
self.attrWidget.addItem("header label")
custProp = layer.customProperty("qgis2web/popup/" + option)
if (custProp != "" and custProp is not None):
self.attrWidget.setCurrentIndex(
self.attrWidget.findText(
layer.customProperty("qgis2web/popup/" + option)))
self.attr.setText(1, option)
self.popupItem.addChild(self.attr)
tree.setItemWidget(self.attr, 2, self.attrWidget)
self.addChild(self.popupItem)
else:
if layer.providerType() == 'wms':
self.getFeatureInfoItem = QTreeWidgetItem(self)
self.getFeatureInfoCheck = QCheckBox()
if layer.customProperty("qgis2web/GetFeatureInfo") == 2:
self.getFeatureInfoCheck.setChecked(True)
self.getFeatureInfoItem.setText(0, "Enable GetFeatureInfo?")
self.getFeatureInfoCheck.stateChanged.connect(
self.changeGetFeatureInfo)
self.addChild(self.getFeatureInfoItem)
tree.setItemWidget(self.getFeatureInfoItem, 1,
self.getFeatureInfoCheck)
@property
def popup(self):
popup = []
self.tree = self.treeWidget()
for p in range(self.childCount()):
item = self.child(p).text(1)
if item != "":
popupVal = self.tree.itemWidget(self.child(p), 2).currentText()
pair = (item, popupVal)
popup.append(pair)
popup = OrderedDict(popup)
return popup
@property
def visible(self):
return self.visibleCheck.isChecked()
@property
def interactive(self):
return self.interactiveCheck.isChecked()
@property
def json(self):
try:
return self.jsonCheck.isChecked()
except Exception:
return False
@property
def cluster(self):
try:
return self.clusterCheck.isChecked()
except Exception:
return False
@property
def getFeatureInfo(self):
try:
return self.getFeatureInfoCheck.isChecked()
except Exception:
return False
def changeJSON(self, isJSON):
self.layer.setCustomProperty("qgis2web/Encode to JSON", isJSON)
def changeCluster(self, isCluster):
self.layer.setCustomProperty("qgis2web/Cluster", isCluster)
def changeGetFeatureInfo(self, isGetFeatureInfo):
self.layer.setCustomProperty("qgis2web/GetFeatureInfo",
isGetFeatureInfo)
class TreeSettingItem(QTreeWidgetItem):
def __init__(self, parent, tree, name, value, action=None):
QTreeWidgetItem.__init__(self, parent)
self.parent = parent
self.tree = tree
self.name = name
self._value = value
self.combo = None
self.list = None
self.setText(0, name)
widget = None
if isinstance(value, QgsColorButton):
widget = value
elif isinstance(value, bool):
if value:
self.setCheckState(1, Qt.Checked)
else:
self.setCheckState(1, Qt.Unchecked)
elif isinstance(value, tuple):
self.combo = QComboBox()
self.combo.setSizeAdjustPolicy(0)
for option in value:
self.combo.addItem(option)
widget = self.combo
elif isinstance(value, list):
self.list = QListWidget()
self.list.setSizeAdjustPolicy(0)
self.list.setSelectionMode(QListWidget.MultiSelection)
for option in value:
self.list.addItem(option)
widget = self.list
else:
self.setText(1, unicode(value))
if action:
layout = QHBoxLayout()
layout.setMargin(0)
if widget:
layout.addWidget(widget)
button = QToolButton()
button.setDefaultAction(action)
button.setText(action.text())
layout.addWidget(button)
layout.addStretch(1)
widget = QWidget()
widget.setLayout(layout)
if widget:
self.tree.setItemWidget(self, 1, widget)
def setValue(self, value):
if isinstance(value, bool):
if value:
self.setCheckState(1, Qt.Checked)
else:
self.setCheckState(1, Qt.Unchecked)
elif self.combo:
index = self.combo.findText(value)
if index != -1:
self.combo.setCurrentIndex(index)
else:
self.setText(1, str(value))
def value(self):
if isinstance(self._value, bool):
return self.checkState(1) == Qt.Checked
elif isinstance(self._value, (int, float)):
return float(self.text(1))
elif isinstance(self._value, tuple):
return self.combo.currentText()
else:
return self.text(1)
class WebPage(QWebPage):
"""
Makes it possible to use a Python logger to print javascript
console messages
"""
def __init__(self, logger=None, parent=None):
super(WebPage, self).__init__(parent)
def javaScriptConsoleMessage(self, msg, lineNumber, sourceID):
if (msg != ("Unable to get image data from canvas because "
"the canvas has been tainted by cross-origin data.") and
msg != ("Deprecated include of L.Mixin.Events: this property "
"will be removed in future releases, please inherit "
"from L.Evented instead.") and
os.environ.get('CI') and os.environ.get('TRAVIS')):
raise jsException("JS %s:%d\n%s" % (sourceID, lineNumber, msg),
Exception())
class jsException(Exception):
def __init__(self, message, errors):
# Call the base class constructor with the parameters it needs
super(jsException, self).__init__(message)
# Now for your custom code...
self.errors = errors
| gpl-2.0 | -2,166,607,151,134,969,000 | 38.83384 | 79 | 0.555321 | false |
inean/python-oauth2 | oauth2/__init__.py | 1 | 24530 | """
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import base64
import urllib
import time
import random
import urlparse
import hmac
import binascii
try:
from urlparse import parse_qs
parse_qs # placate pyflakes
except ImportError:
# fall back for Python 2.5
from cgi import parse_qs
try:
from hashlib import sha1
sha = sha1
except ImportError:
# hashlib was added in Python 2.5
import sha
import _version
__version__ = _version.__version__
OAUTH_VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
READ_BUFFER_CHUNK_SIZE = 128 * 1024
class Error(RuntimeError):
"""Generic exception class."""
def __init__(self, message='OAuth error occurred.'):
self._message = message
@property
def message(self):
"""A hack to get around the deprecation errors in 2.6."""
return self._message
def __str__(self):
return self._message
class MissingSignature(Error):
pass
def build_authenticate_header(realm=''):
"""Optional WWW-Authenticate header (401 error)"""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def build_xoauth_string(url, consumer, token=None):
"""Build an XOAUTH string for use in SMTP/IMPA authentication."""
request = Request.from_consumer_and_token(consumer, token,
"GET", url)
signing_method = SignatureMethod_HMAC_SHA1()
request.sign_request(signing_method, consumer, token)
params = []
for k, v in sorted(request.iteritems()):
if v is not None:
params.append('%s="%s"' % (k, escape(v)))
return "%s %s %s" % ("GET", url, ','.join(params))
def to_unicode(s):
""" Convert to unicode, raise exception with instructive error
message if s is not unicode, ascii, or utf-8. """
if not isinstance(s, unicode):
if not isinstance(s, str):
raise TypeError('You are required to pass either unicode or string here, not: %r (%s)' % (type(s), s))
try:
s = s.decode('utf-8')
except UnicodeDecodeError, le:
raise TypeError('You are required to pass either a unicode object or a utf-8 string here. You passed a Python string object which contained non-utf-8: %r. The UnicodeDecodeError that resulted from attempting to interpret it as utf-8 was: %s' % (s, le,))
return s
def to_utf8(s):
return to_unicode(s).encode('utf-8')
def to_unicode_if_string(s):
if isinstance(s, basestring):
return to_unicode(s)
else:
return s
def to_utf8_if_string(s):
if isinstance(s, basestring):
return to_utf8(s)
else:
return s
def to_unicode_optional_iterator(x):
"""
Raise TypeError if x is a str containing non-utf8 bytes or if x is
an iterable which contains such a str.
"""
if isinstance(x, basestring):
return to_unicode(x)
try:
l = list(x)
except TypeError, e:
assert 'is not iterable' in str(e)
return x
else:
return [ to_unicode(e) for e in l ]
def to_utf8_optional_iterator(x):
"""
Raise TypeError if x is a str or if x is an iterable which
contains a str.
"""
if isinstance(x, basestring):
return to_utf8(x)
try:
l = list(x)
except TypeError, e:
assert 'is not iterable' in str(e)
return x
else:
return [ to_utf8_if_string(e) for e in l ]
def escape(s):
"""Escape a URL including any /."""
return urllib.quote(s.encode('utf-8'), safe='~')
def generate_timestamp():
"""Get seconds since epoch (UTC)."""
return int(time.time())
def generate_nonce(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def generate_verifier(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def generate_session_handle(length=10):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
class Consumer(object):
"""A consumer of OAuth-protected services.
The OAuth consumer is a "third-party" service that wants to access
protected resources from an OAuth service provider on behalf of an end
user. It's kind of the OAuth client.
Usually a consumer must be registered with the service provider by the
developer of the consumer software. As part of that process, the service
provider gives the consumer a *key* and a *secret* with which the consumer
software can identify itself to the service. The consumer will include its
key in each request to identify itself, but will use its secret only when
signing requests, to prove that the request is from that particular
registered consumer.
Once registered, the consumer can then use its consumer credentials to ask
the service provider for a request token, kicking off the OAuth
authorization process.
"""
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def __str__(self):
data = {'oauth_consumer_key': self.key,
'oauth_consumer_secret': self.secret}
return urllib.urlencode(data)
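# Illustrative usage sketch (the key/secret values are placeholders standing in
# for credentials issued by the service provider at consumer registration time):
def _example_consumer():
    consumer = Consumer(key="registered-app-key", secret="registered-app-secret")
    # str(consumer) url-encodes both credentials; handy while debugging, but the
    # secret should never be written to logs in production code.
    return str(consumer)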
class Token(object):
"""An OAuth credential used to request authorization or a protected
resource.
Tokens in OAuth comprise a *key* and a *secret*. The key is included in
requests to identify the token being used, but the secret is used only in
the signature, to prove that the requester is who the server gave the
token to.
When first negotiating the authorization, the consumer asks for a *request
token* that the live user authorizes with the service provider. The
consumer then exchanges the request token for an *access token* that can
be used to access protected resources.
"""
key = None
secret = None
callback = None
callback_confirmed = None
verifier = None
session_handle = None
expires_in = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def set_callback(self, callback):
self.callback = callback
self.callback_confirmed = 'true'
def set_verifier(self, verifier=None):
if verifier is not None:
self.verifier = verifier
else:
self.verifier = generate_verifier()
def set_session_handle(self, handle=None):
if handle is not None:
self.session_handle = handle
else:
self.session_handle = generate_session_handle()
def set_expires_in(self, expires):
self.expires_in = expires
def get_callback_url(self):
if self.callback and self.verifier:
# Append the oauth_verifier.
parts = urlparse.urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
return urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
return self.callback
def to_string(self):
"""Returns this token as a plain string, suitable for storage.
The resulting string includes the token's secret, so you should never
send or store this string where a third party can read it.
"""
data = {
'oauth_token': self.key,
'oauth_token_secret': self.secret,
}
if self.callback_confirmed is not None:
data['oauth_callback_confirmed'] = self.callback_confirmed
        # Token expiration. Only send session_handle if the server has set
        # a proper expires_in value (integer > 0).
if self.session_handle is not None and self.expires_in:
data['oauth_session_handle'] = self.session_handle
data['expiresIn'] = self.expires_in
return urllib.urlencode(data)
@staticmethod
def from_string(s):
"""Deserializes a token from a string like one returned by
`to_string()`."""
if not len(s):
raise ValueError("Invalid parameter string.")
params = parse_qs(s, keep_blank_values=False)
if not len(params):
raise ValueError("Invalid parameter string.")
try:
key = params['oauth_token'][0]
except Exception:
raise ValueError("'oauth_token' not found in OAuth request.")
try:
secret = params['oauth_token_secret'][0]
except Exception:
raise ValueError("'oauth_token_secret' not found in "
"OAuth request.")
token = Token(key, secret)
try:
token.callback_confirmed = params['oauth_callback_confirmed'][0]
except KeyError:
pass # 1.0, no callback confirmed.
return token
def __str__(self):
return self.to_string()
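# Illustrative usage sketch (the callback URL and the key/secret pair are
# placeholder assumptions): round-trips a token through the storage string
# produced by to_string() and shows how the verifier ends up in the callback.
def _example_token_roundtrip():
    token = Token(key="request-token-key", secret="request-token-secret")
    token.set_callback("https://consumer.example.com/oauth/callback")
    token.set_verifier()  # generates a pseudorandom verifier
    stored = token.to_string()  # includes the secret -- store it securely
    restored = Token.from_string(stored)
    # get_callback_url() appends oauth_verifier to the callback query string.
    return restored.key == token.key, token.get_callback_url()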
def setter(attr):
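    """Turn a validating setter method into a property: the decorated function
    handles assignment, while the generated getter and deleter read from and
    remove the value stored in the instance __dict__ under the method's name."""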
name = attr.__name__
def getter(self):
try:
return self.__dict__[name]
except KeyError:
raise AttributeError(name)
def deleter(self):
del self.__dict__[name]
return property(getter, attr, deleter)
class Request(dict):
"""The parameters and information for an HTTP request, suitable for
authorizing with OAuth credentials.
When a consumer wants to access a service's protected resources, it does
so using a signed HTTP request identifying itself (the consumer) with its
key, and providing an access token authorized by the end user to access
those resources.
"""
version = OAUTH_VERSION
def __init__(self, method=HTTP_METHOD, url=None, parameters=None,
body='', is_form_encoded=False):
if url is not None:
self.url = to_unicode(url)
self.method = method
if parameters is not None:
for k, v in parameters.iteritems():
k = to_unicode(k)
v = to_unicode_optional_iterator(v)
self[k] = v
self.body = body
self.is_form_encoded = is_form_encoded
@setter
def url(self, value):
self.__dict__['url'] = value
if value is not None:
scheme, netloc, path, params, query, fragment = urlparse.urlparse(value)
# Exclude default port numbers.
if scheme == 'http' and netloc[-3:] == ':80':
netloc = netloc[:-3]
elif scheme == 'https' and netloc[-4:] == ':443':
netloc = netloc[:-4]
if scheme not in ('http', 'https'):
raise ValueError("Unsupported URL %s (%s)." % (value, scheme))
# Normalized URL excludes params, query, and fragment.
self.normalized_url = urlparse.urlunparse((scheme, netloc, path, None, None, None))
else:
self.normalized_url = None
self.__dict__['url'] = None
@setter
def method(self, value):
self.__dict__['method'] = value.upper()
def _get_timestamp_nonce(self):
return self['oauth_timestamp'], self['oauth_nonce']
def get_nonoauth_parameters(self):
"""Get any non-OAuth parameters."""
return dict([(k, v) for k, v in self.iteritems()
if not k.startswith('oauth_')])
def to_header(self, realm=''):
"""Serialize as a header for an HTTPAuth request."""
oauth_params = ((k, v) for k, v in self.items()
if k.startswith('oauth_'))
stringy_params = ((k, escape(str(v))) for k, v in oauth_params)
header_params = ('%s="%s"' % (k, v) for k, v in stringy_params)
params_header = ', '.join(header_params)
auth_header = 'OAuth realm="%s"' % realm
if params_header:
auth_header = "%s, %s" % (auth_header, params_header)
return {'Authorization': auth_header}
def to_postdata(self):
"""Serialize as post data for a POST request."""
d = {}
for k, v in self.iteritems():
d[k.encode('utf-8')] = to_utf8_optional_iterator(v)
# tell urlencode to deal with sequence values and map them correctly
# to resulting querystring. for example self["k"] = ["v1", "v2"] will
# result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D
return urllib.urlencode(d, True).replace('+', '%20')
def to_url(self):
"""Serialize as a URL for a GET request."""
base_url = urlparse.urlparse(self.url)
try:
query = base_url.query
except AttributeError:
# must be python <2.5
query = base_url[4]
query = parse_qs(query)
for k, v in self.items():
query.setdefault(k, []).append(v)
try:
scheme = base_url.scheme
netloc = base_url.netloc
path = base_url.path
params = base_url.params
fragment = base_url.fragment
except AttributeError:
# must be python <2.5
scheme = base_url[0]
netloc = base_url[1]
path = base_url[2]
params = base_url[3]
fragment = base_url[5]
url = (scheme, netloc, path, params,
urllib.urlencode(query, True), fragment)
return urlparse.urlunparse(url)
def get_parameter(self, parameter):
ret = self.get(parameter)
if ret is None:
raise Error('Parameter not found: %s' % parameter)
return ret
def get_normalized_parameters(self):
"""Return a string that contains the parameters that must be signed."""
items = []
for key, value in self.iteritems():
if key == 'oauth_signature':
continue
# 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
# so we unpack sequence values into multiple items for sorting.
if isinstance(value, basestring):
items.append((to_utf8_if_string(key), to_utf8(value)))
else:
try:
value = list(value)
except TypeError, e:
assert 'is not iterable' in str(e)
items.append((to_utf8_if_string(key), to_utf8_if_string(value)))
else:
items.extend((to_utf8_if_string(key), to_utf8_if_string(item)) for item in value)
# Include any query string parameters from the provided URL
query = urlparse.urlparse(self.url)[4]
url_items = self._split_url_string(query).items()
url_items = [(to_utf8(k), to_utf8(v)) for k, v in url_items if k != 'oauth_signature' ]
items.extend(url_items)
items.sort()
encoded_str = urllib.urlencode(items)
        # Encode signature parameters per OAuth Core 1.0 protocol
# spec draft 7, section 3.6
# (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
# Spaces must be encoded with "%20" instead of "+"
return encoded_str.replace('+', '%20').replace('%7E', '~')
def sign_request(self, signature_method, consumer, token):
"""Set the signature parameter to the result of sign."""
if not self.is_form_encoded:
# according to
# http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html
# section 4.1.1 "OAuth Consumers MUST NOT include an
# oauth_body_hash parameter on requests with form-encoded
# request bodies."
hsh = sha()
#pylint: disable-msg=E1103
if self.body and hasattr(self.body, 'tell'):
                # remember the current position
                curpos = self.body.tell()
                while True:
                    # read in chunks (128 KiB)
chunk = self.body.read(READ_BUFFER_CHUNK_SIZE)
if chunk == '':
break
# update hash
hsh.update(chunk)
# reset seek
self.body.seek(curpos)
else:
# default implementation
hsh.update(self.body)
self['oauth_body_hash'] = base64.b64encode(hsh.digest())
if 'oauth_consumer_key' not in self:
self['oauth_consumer_key'] = consumer.key
if token and 'oauth_token' not in self:
self['oauth_token'] = token.key
self['oauth_signature_method'] = signature_method.name
self['oauth_signature'] = signature_method.sign(self, consumer, token)
@classmethod
def make_timestamp(cls):
"""Get seconds since epoch (UTC)."""
return str(int(time.time()))
@classmethod
def make_nonce(cls):
"""Generate pseudorandom number."""
return str(random.randint(0, 100000000))
@classmethod
def from_request(cls, http_method, http_url, headers=None, parameters=None,
query_string=None):
"""Combines multiple parameter sources."""
if parameters is None:
parameters = {}
# Headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# Check that the authorization header is OAuth.
if auth_header[:6] == 'OAuth ':
auth_header = auth_header[6:]
try:
# Get the parameters from the header.
header_params = cls._split_header(auth_header)
parameters.update(header_params)
except:
raise Error('Unable to parse OAuth parameters from '
'Authorization header.')
# GET or POST query string.
if query_string:
query_params = cls._split_url_string(query_string)
parameters.update(query_params)
# URL parameters.
param_str = urlparse.urlparse(http_url)[4] # query
url_params = cls._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return cls(http_method, http_url, parameters)
return None
@classmethod
def from_consumer_and_token(cls, consumer, token=None,
http_method=HTTP_METHOD, http_url=None, parameters=None,
body='', is_form_encoded=False):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': consumer.key,
'oauth_timestamp': cls.make_timestamp(),
'oauth_nonce': cls.make_nonce(),
'oauth_version': cls.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
if token.verifier:
parameters['oauth_verifier'] = token.verifier
return Request(http_method, http_url, parameters, body=body,
is_form_encoded=is_form_encoded)
@classmethod
def from_token_and_callback(cls, token, callback=None,
http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return cls(http_method, http_url, parameters)
@staticmethod
def _split_header(header):
"""Turn Authorization: header into parameters."""
params = {}
parts = header.split(',')
for param in parts:
# Ignore realm parameter.
if param.find('realm') > -1:
continue
# Remove whitespace.
param = param.strip()
# Split key-value.
param_parts = param.split('=', 1)
# Remove quotes and unescape the value.
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
return params
@staticmethod
def _split_url_string(param_str):
"""Turn URL string into parameters."""
parameters = parse_qs(param_str.encode('utf-8'), keep_blank_values=True)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
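# Illustrative usage sketch (the API URL, realm and all credentials are
# placeholder assumptions): builds a signed GET request for a protected
# resource and shows the two usual ways of transmitting the OAuth parameters.
def _example_signed_request():
    consumer = Consumer("registered-app-key", "registered-app-secret")
    token = Token("access-token-key", "access-token-secret")
    request = Request.from_consumer_and_token(
        consumer, token=token, http_method="GET",
        http_url="https://api.example.com/photos",
        parameters={"size": "original"})
    request.sign_request(SignatureMethod_HMAC_SHA1(), consumer, token)
    # Either serialize everything into the query string ...
    signed_url = request.to_url()
    # ... or keep the URL clean and send the OAuth parameters in a header.
    headers = request.to_header(realm="https://api.example.com/")
    return signed_url, headers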
class SignatureMethod(object):
"""A way of signing requests.
The OAuth protocol lets consumers and service providers pick a way to sign
requests. This interface shows the methods expected by the other `oauth`
modules for signing requests. Subclass it and implement its methods to
provide a new way to sign requests.
"""
def signing_base(self, request, consumer, token):
"""Calculates the string that needs to be signed.
This method returns a 2-tuple containing the starting key for the
signing and the message to be signed. The latter may be used in error
messages to help clients debug their software.
"""
raise NotImplementedError
def sign(self, request, consumer, token):
"""Returns the signature for the given request, based on the consumer
and token also provided.
You should use your implementation of `signing_base()` to build the
message to sign. Otherwise it may be less useful for debugging.
"""
raise NotImplementedError
def check(self, request, consumer, token, signature):
"""Returns whether the given signature is the correct signature for
the given consumer and token signing the given request."""
built = self.sign(request, consumer, token)
return built == signature
class SignatureMethod_HMAC_SHA1(SignatureMethod):
name = 'HMAC-SHA1'
def signing_base(self, request, consumer, token):
if not hasattr(request, 'normalized_url') or request.normalized_url is None:
raise ValueError("Base URL for request is not set.")
sig = (
escape(request.method),
escape(request.normalized_url),
escape(request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return key, raw
def sign(self, request, consumer, token):
"""Builds the base signature string."""
key, raw = self.signing_base(request, consumer, token)
hashed = hmac.new(key, raw, sha)
# Calculate the digest base 64.
return binascii.b2a_base64(hashed.digest())[:-1]
class SignatureMethod_PLAINTEXT(SignatureMethod):
name = 'PLAINTEXT'
def signing_base(self, request, consumer, token):
"""Concatenates the consumer key and secret with the token's
secret."""
sig = '%s&' % escape(consumer.secret)
if token:
sig = sig + escape(token.secret)
return sig, sig
def sign(self, request, consumer, token):
key, raw = self.signing_base(request, consumer, token)
return raw
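# Illustrative sketch of a custom signature method, following the
# SignatureMethod interface above. HMAC-SHA256 is an assumed extension here --
# classic OAuth 1.0 providers generally accept only HMAC-SHA1 or PLAINTEXT, so
# a method like this is only useful against a server that advertises it.
class _ExampleSignatureMethod_HMAC_SHA256(SignatureMethod):
    name = 'HMAC-SHA256'
    def signing_base(self, request, consumer, token):
        # Same base-string construction as SignatureMethod_HMAC_SHA1.
        if not hasattr(request, 'normalized_url') or request.normalized_url is None:
            raise ValueError("Base URL for request is not set.")
        sig = (
            escape(request.method),
            escape(request.normalized_url),
            escape(request.get_normalized_parameters()),
        )
        key = '%s&' % escape(consumer.secret)
        if token:
            key += escape(token.secret)
        raw = '&'.join(sig)
        return key, raw
    def sign(self, request, consumer, token):
        import hashlib
        key, raw = self.signing_base(request, consumer, token)
        hashed = hmac.new(key, raw, hashlib.sha256)
        return binascii.b2a_base64(hashed.digest())[:-1]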
| mit | 6,820,572,739,031,814,000 | 32.37415 | 265 | 0.600163 | false |
pombredanne/cliques | poll/views.py | 1 | 5047 | from collections import defaultdict
import datetime
import logging
import operator
import random
from django.contrib.auth import get_user_model
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotFound
from django.utils.timezone import utc
from django.views.generic import CreateView, DetailView
import notify.utils
from poll.models import Vote, Submission, SubmissionForm, Poll
from website.models import Post, UserProfile, Comment
logger = logging.getLogger()
def vote(request, poll_stub, submission_id):
#TODO(pcsforeducation) make this AJAX and POST only.
# if request.method != "POST":
# return HttpResponseBadRequest('Must be a POST')
try:
submission = Submission.objects.get(id=submission_id)
    except Submission.DoesNotExist:
return HttpResponseNotFound("Submission does not exist: {}".format(
submission_id
))
try:
prev_vote = Vote.objects.get(user=request.user)
except Vote.DoesNotExist:
# First vote
Vote(user=request.user, submission=submission).save()
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
try:
# Switch vote or undo vote
if prev_vote.submission == submission:
# Undo
prev_vote.delete()
else:
# Switch
prev_vote.delete()
Vote(user=request.user, submission=submission).save()
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
except:
logging.exception('Could not switch vote')
raise
def cron(request):
# Get all the votes (may need to improve filtering on poll here).
#TODO(pcsforeducation) support multiple polls
poll = Poll.objects.all()[0]
submissions = defaultdict(int)
votes = Vote.objects.all()
for vote in votes:
submissions[vote.submission.id] += 1
# Eww.
top_submissions = list(reversed(sorted(submissions.iteritems(),
key=operator.itemgetter(1))))
logging.info("Top submissions: {}".format(top_submissions))
if top_submissions:
top_votes = top_submissions[0][1]
if top_votes > 0:
# Choose winning vote
winning_submissions = []
for submission in top_submissions:
logging.info("Testing submission: {}, top_votes: {}, equal? {}"
                    .format(submission, top_votes,
                            submission[1] == top_votes))
if submission[1] == top_votes:
winning_submissions.append(submission[0])
winning_index = random.randrange(0, len(winning_submissions))
_post_winning_submission(poll, winning_submissions[winning_index])
seven_days_ago = datetime.datetime.utcnow().replace(tzinfo=utc) \
- datetime.timedelta(days=7)
Submission.objects.filter(submitted__lt=seven_days_ago).delete()
return HttpResponse('ok')
def _post_winning_submission(poll, submission_id):
user = UserProfile.objects.get(username=poll.bot_name)
submission = Submission.objects.get(id=submission_id)
post = Post(user=user,
category=poll.category,
title="{}: {}".format(poll.stub, submission.title),
url=submission.url,
type='image')
post.save()
text = poll.winning_text.format(
title=poll.title,
stub=poll.stub,
username=submission.user.username)
comment = Comment(user=user,
post=post,
text=text)
comment.save()
winning_user = UserProfile.objects.get(id=submission.user.id)
winning_user.poll_votes += 1
winning_user.save()
submission.delete()
# Notify the winner they won
notify.utils.notify_users(
user_ids=[winning_user.id],
text="Your {} submission won!".format(poll.title),
link="http://www.slashertraxx.com/post/{}/".format(post.id),
type='comment',
level='info')
class PollDetailView(DetailView):
model = Poll
slug_field = 'stub'
slug_url_kwarg = 'stub'
template_name = 'poll/submission.html'
def get_context_data(self, **kwargs):
context = super(PollDetailView, self).get_context_data(**kwargs)
try:
context['vote'] = Vote.objects.get(user=self.request.user.id)
except Vote.DoesNotExist:
pass
context['form'] = SubmissionForm
return context
class SubmissionFormView(CreateView):
model = Submission
success_url = '/'
fields = ['title', 'url']
# template_name = 'website/post.html'
def form_valid(self, form):
stub = self.kwargs.get('stub')
user_model = get_user_model()
form.instance.user = user_model.objects.get(id=self.request.user.id)
form.instance.poll = Poll.objects.get(stub=stub)
self.object = form.save()
self.success_url = "/poll/{}/".format(stub)
return super(SubmissionFormView, self).form_valid(form)
| apache-2.0 | 8,324,600,533,763,739,000 | 33.806897 | 80 | 0.624728 | false |
sebastian-software/jasy | jasy/core/Util.py | 1 | 4039 | #
# Jasy - Web Tooling Framework
# Copyright 2010-2012 Zynga Inc.
# Copyright 2013-2014 Sebastian Werner
#
import re
import os
import hashlib
import tempfile
import subprocess
import sys
import shlex
import jasy.core.Console as Console
import jasy.core.Base62 as Base62
def executeCommand(args, failMessage=None, path=None, wrapOutput=True):
"""
Executes the given process and outputs failMessage when errors happen.
:param args:
:type args: str or list
:param failMessage: Message for exception when command fails
:type failMessage: str
:param path: Directory path where the command should be executed
:type path: str
    :param wrapOutput: Whether shell output should be wrapped and returned (and passed through to Console.debug())
    :type wrapOutput: bool
    :raise Exception: Raised whenever the shell command fails in execution
"""
if isinstance(args, str):
args = shlex.split(args)
prevpath = os.getcwd()
# Execute in custom directory
if path:
path = os.path.abspath(os.path.expanduser(path))
os.chdir(path)
Console.debug("Executing command: %s", " ".join(args))
Console.indent()
# Using shell on Windows to resolve binaries like "git"
if not wrapOutput:
returnValue = subprocess.call(args, shell=sys.platform == "win32")
result = returnValue
else:
output = tempfile.TemporaryFile(mode="w+t")
returnValue = subprocess.call(args, stdout=output, stderr=output, shell=sys.platform == "win32")
output.seek(0)
result = output.read().strip("\n\r")
output.close()
# Change back to previous path
os.chdir(prevpath)
if returnValue != 0 and failMessage:
raise Exception("Error during executing shell command: %s (%s)" % (failMessage, result))
if wrapOutput:
for line in result.splitlines():
Console.debug(line)
Console.outdent()
return result
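# Illustrative usage sketch (the git commands and the repository path are
# assumptions; any tool available on PATH behaves the same way):
def _exampleExecuteCommand():
    # Wrapped output: stdout/stderr are captured, passed through Console.debug()
    # and returned as a string.
    branch = executeCommand("git rev-parse --abbrev-ref HEAD",
                            failMessage="Could not determine the current branch",
                            path=".")
    # Unwrapped output: the child process writes directly to the console and
    # only the return code comes back.
    code = executeCommand(["git", "status"], wrapOutput=False)
    return branch, code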
SIPHASH_SUPPORTED = False
try:
import siphash
SIPHASH_SUPPORTED = True
except ImportError:
    pass
def generateChecksum(key, method="base62"):
"""
Generates a unique SHA1 based hash/checksum encoded as Base62 or Hex depending on the given parameters.
:param key:
:type key: str
:param method:
:type method: str
"""
# Alternative hashing method using SIP keys:
#
# https://github.com/majek/pysiphash (Python library)
# https://github.com/jedisct1/siphash-js (Node/JS library - for Core)
#
# if SIPHASH_SUPPORTED:
# sipkey = ("JASY" * 4).encode("ascii")
# self.__checksum2 = siphash.SipHash_2_4(sipkey).update(self.__key.encode("ascii")).hexdigest()
# print("SIP Checksum: %s" % self.__checksum2.decode("ascii"))
sha1 = hashlib.sha1(key.encode("ascii"))
if method == "base62":
return Base62.encodeArrayToString(sha1.digest())
else:
return sha1.hexdigest()
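# Illustrative usage sketch (the cache-key string is a placeholder): the same
# key rendered with both encodings; any method other than "base62" falls back
# to the plain SHA1 hex digest.
def _exampleGenerateChecksum():
    key = "jasy-item:/src/main.js"
    short = generateChecksum(key)                # Base62-encoded SHA1 (default)
    full = generateChecksum(key, method="hex")   # plain SHA1 hex digest
    return short, full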
def getKey(data, key, default=None):
"""
Returns the key from the data if available or the given default.
:param data: Data structure to inspect
:type data: dict
:param key: Key to lookup in dictionary
:type key: str
:param default: Default value to return when key is not set
:type default: any
"""
if key in data:
return data[key]
else:
return default
__REGEXP_DASHES = re.compile(r"\-+([\S]+)?")
__REGEXP_HYPHENATE = re.compile(r"([A-Z])")
def __camelizeHelper(match):
result = match.group(1)
return result[0].upper() + result[1:].lower()
def __hyphenateHelper(match):
return "-%s" % match.group(1).lower()
def camelize(str):
"""
Returns a camelized version of the incoming string: foo-bar-baz => fooBarBaz
:param str: Input string
"""
return __REGEXP_DASHES.sub(__camelizeHelper, str)
def hyphenate(str):
"""Returns a hyphenated version of the incoming string: fooBarBaz => foo-bar-baz
:param str: Input string
"""
return __REGEXP_HYPHENATE.sub(__hyphenateHelper, str)
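# Illustrative usage sketch: the two helpers are inverses of each other for
# simple identifiers such as CSS property names.
def _exampleCaseConversion():
    camel = camelize("font-size")    # => "fontSize"
    hyphen = hyphenate("fontSize")   # => "font-size"
    return camel, hyphen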
| mit | 8,754,968,981,034,551,000 | 24.402516 | 114 | 0.657836 | false |