repo_name (stringlengths 5-92) | path (stringlengths 4-232) | copies (stringclasses, 19 values) | size (stringlengths 4-7) | content (stringlengths 721-1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
apdjustino/DRCOG_Urbansim | src/opus_gui/results_manager/run/indicator_framework/visualizer/visualizers/matplotlib_lorenzcurve.py | 1 | 10890 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
import os, re, sys, time, traceback
from copy import copy
from opus_gui.results_manager.run.indicator_framework.visualizer.visualizers.abstract_visualization import Visualization
from opus_core.logger import logger
from numpy import array, arange
from numpy import ones, zeros, hstack, vstack
from numpy import trapz, trim_zeros
from pylab import subplot, plot, show
from pylab import xlabel, ylabel, title, text
from pylab import MultipleLocator, FormatStrFormatter
from pylab import savefig, clf, close
class LorenzCurve(Visualization):
def __init__(self, source_data, dataset_name,
attribute = None,
years = None, operation = None, name = None, scale = None,
storage_location = None):
Visualization.__init__(self, source_data, dataset_name, [attribute],
years, operation, name,
storage_location)
self._values = None
self._ginicoeff = None
def is_single_year_indicator_image_type(self):
return True
def get_file_extension(self):
return 'png'
def get_visualization_shorthand(self):
return 'lorenzcurve'
def get_additional_metadata(self):
return {}
def _create_indicator(self, year):
"""Create a Lorenz Curve for the given indicator,
save it to the cache directory's 'indicators' sub-directory.
"""
attribute_short = self.get_attribute_alias(attribute = self.attributes[0],
year = year)
title = attribute_short + ' ' + str(year)
if self.run_description is not None:
title += '\n' + self.run_description
# Do calculation
# Make fresh copy with dtype float64 to avoid overflows
self._values = array(self._get_indicator(year, wrap = False).astype('float64'))
self._compute_lorenz()
file_path = self.get_file_path(year = year)
self._plot(attribute_short, file_path );
return file_path
def _compute_lorenz(self ):
''' Do the lorenz curve computation and save the result in the corresponding
class variables
'''
self._values.sort()
#remove 0 values from array
self._values = trim_zeros(self._values,'f')
num_values = self._values.size
F = arange(1, num_values + 1, 1, "float64")/num_values
L = self._values.cumsum(dtype="float64")/sum(self._values)
# Add (0, 0) as the first point for completeness (e.g. plotting)
origin = array([[0], [0]])
self._values = vstack((F, L))
self._values = hstack((origin, self._values))
# This is the simple form of (0.5 - integral) / 0.5
self._ginicoeff = 1 - 2 * trapz(self._values[1], self._values[0])
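# Example: with the sorted incomes [1, 1, 2, 3, 4, 5] used in the tests
# below, F = [0, 1/6, ..., 1] and L = [0, 1/16, 2/16, 4/16, 7/16, 11/16, 1];
# trapz(L, F) = 0.34375, so the Gini coefficient is 1 - 2 * 0.34375 = 0.3125.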
def _plot(self, attribute_name, file_path=None ):
clf() # Clear existing plot
a = self._values[0] * 100
b = self._values[1] * 100
ax = subplot(111)
plot(a, a, 'k--', a, b, 'r')
ax.set_ylim([0,100])
ax.grid(color='0.5', linestyle=':', linewidth=0.5)
xlabel('population')
ylabel(attribute_name)
title('Lorenz curve')
font = {'fontname' : 'Courier',
'color' : 'r',
'fontweight' : 'bold',
'fontsize' : 11
}
box = { 'pad' : 6,
'facecolor' : 'w',
'linewidth' : 1,
'fill' : True
}
text(5, 90, 'Gini coefficient: %(gini)f' % {'gini' : self._ginicoeff}, font, color='k', bbox=box )
majorLocator = MultipleLocator(20)
majorFormatter = FormatStrFormatter('%d %%')
minorLocator = MultipleLocator(5)
ax.xaxis.set_major_locator( majorLocator )
ax.xaxis.set_major_formatter( majorFormatter)
ax.xaxis.set_minor_locator( minorLocator )
ax.yaxis.set_major_locator( majorLocator )
ax.yaxis.set_major_formatter( majorFormatter)
ax.yaxis.set_minor_locator( minorLocator )
if file_path:
savefig(file_path)
close()
else:
show()
import os
from opus_core.tests import opus_unittest
from numpy import allclose
from opus_gui.results_manager.run.indicator_framework.test_classes.abstract_indicator_test import AbstractIndicatorTest
class Tests(AbstractIndicatorTest):
def skip_test_create_indicator(self):
indicator_path = os.path.join(self.temp_cache_path, 'indicators')
self.assert_(not os.path.exists(indicator_path))
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
lorenzcurve.create(False)
self.assert_(os.path.exists(indicator_path))
self.assert_(os.path.exists(os.path.join(indicator_path, 'test__lorenzcurve__attribute__1980.png')))
def skip_test_perfect_equality(self):
"""Perfect equality is when everybody has the same amount of something"""
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
incomes = ones(100)
lorenzcurve._values = incomes
lorenzcurve._compute_lorenz()
wanted_result = vstack((arange(0, 101) / 100., arange(0, 101) / 100.))
self.assert_(allclose(lorenzcurve._values, wanted_result))
def skip_test_perfect_inequality(self):
"""Perfect inequality is when one person has all of something"""
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
incomes = zeros(100)
incomes[0] = 42
lorenzcurve._values = incomes
lorenzcurve._compute_lorenz()
#We strip all the zero values, so the result consists of only two values
wanted_result = [[0.,1.],[0.,1.]]
self.assert_(allclose(lorenzcurve._values, wanted_result))
def skip_test_small_lorenz(self):
"""Test case for less than 100 people"""
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
incomes = array([1, 1, 2, 3, 4, 5])
lorenzcurve._values = incomes
lorenzcurve._compute_lorenz()
wanted_result = array(
[[ 0, 1/6., 2/6., 3/6., 4/6., 5/6., 6/6. ],
[ 0, 1/16., 2/16., 4/16., 7/16., 11/16., 16/16. ]])
self.assert_(allclose(lorenzcurve._values, wanted_result))
def skip_test_small_gini(self):
"""Test case for gini coefficient for the small case"""
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
incomes = array([1, 1, 2, 3, 4, 5])
lorenzcurve._values = incomes
lorenzcurve._compute_lorenz()
self.assertAlmostEqual(lorenzcurve._ginicoeff, 0.3125)
def skip_test_large_lorenz(self):
"""Test case for more than 100 people"""
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
incomes = array([731, 700, 619, 450, 419, 512, 232, 266, 131, 188,
498, 293, 935, 177, 160, 380, 538, 783, 256, 280,
731, 362, 870, 970, 674, 211, 524, 207, 513, 461,
280, 275, 410, 282, 144, 682, 573, 252, 382, 909,
719, 666, 236, 636, 628, 542, 630, 484, 629, 974,
747, 509, 281, 725, 377, 565, 495, 840, 391, 191,
929, 679, 217, 179, 336, 562, 293, 881, 271, 172,
426, 697, 293, 576, 203, 390, 522, 948, 312, 491,
531, 959, 646, 495, 306, 631, 722, 322, 876, 586,
316, 124, 796, 250, 456, 112, 661, 294, 749, 619,
134, 582, 996, 413, 421, 219, 796, 923, 832, 557])
lorenzcurve._values = incomes
lorenzcurve._compute_lorenz()
wanted_result_F = arange(0, 111) / 110.
wanted_result_L = array([ 0, 0.00202803, 0.00427335, 0.00664542, 0.00907181, 0.01167928,
0.01457647, 0.01769094, 0.02089595, 0.02413718, 0.02754138,
0.03099989, 0.0346757 , 0.03842393, 0.04224459, 0.0461739 ,
0.05013943, 0.05434035, 0.0586137 , 0.06314055, 0.06770362,
0.07233912, 0.07715569, 0.0820628 , 0.08704234, 0.09211241,
0.09718249, 0.10227067, 0.10737696, 0.11268243, 0.1179879 ,
0.12329338, 0.12861696, 0.13415782, 0.13980734, 0.14552928,
0.15135987, 0.15744396, 0.16399884, 0.17082534, 0.17770615,
0.18462318, 0.19168508, 0.19876507, 0.20618911, 0.21366748,
0.22125448, 0.2288777 , 0.23659146, 0.2447398 , 0.25299678,
0.26134429, 0.27010828, 0.27899902, 0.28796219, 0.29692536,
0.30594285, 0.31515953, 0.32443052, 0.33371962, 0.34317169,
0.35265998, 0.36227502, 0.3720168 , 0.38183102, 0.39191685,
0.40209322, 0.41232391, 0.42269945, 0.43312932, 0.44366784,
0.45427878, 0.46548727, 0.47669576, 0.48806721, 0.49945678,
0.51086445, 0.52229023, 0.53380654, 0.54550393, 0.55747293,
0.56953247, 0.58173686, 0.5940318 , 0.60638105, 0.61900192,
0.63167711, 0.64469634, 0.65776989, 0.67089777, 0.68413428,
0.6973708 , 0.71089704, 0.72445949, 0.7386376 , 0.7530511 ,
0.7674646 , 0.78252997, 0.79774019, 0.81349364, 0.82935574,
0.84530837, 0.86176801, 0.87848115, 0.89530294, 0.91223337,
0.9293992 , 0.94676421, 0.9643284 , 0.98196502, 1. ])
self.assert_(allclose(lorenzcurve._values, vstack((wanted_result_F, wanted_result_L))))
if __name__ == '__main__':
try:
import matplotlib
except:
print 'could not import matplotlib'
else:
opus_unittest.main()
| agpl-3.0 | -796,952,061,882,075,500 | 40.724138 | 120 | 0.562075 | false |
dani-i/bachelor-project | file_experts/data_set/cifar10_data_set_preparations.py | 1 | 3240 | from file_experts.data_set.data_set_validator import DataSetValidator
from file_experts.data_set import data_set_creator
from time import sleep
import constants.create_data_set_constants as const
import file_experts.file_expert as fe
import urllib.request
import threading
import tarfile
class Cifar10DataSetPreparations(threading.Thread):
"""
- Makes sure the Cifar 10 data set files are present and intact.
- If required it can download the Cifar 10 data set and / or extract
the data set.
"""
def __init__(
self,
progress_update_method):
"""
:param progress_update_method: GUI method that updates the download
progress.
"""
super(Cifar10DataSetPreparations, self).__init__()
self._progress_update_method = progress_update_method
#########################################################################
# Helper methods
def _make_sure_the_required_files_exist(self):
"""
- Makes sure that the Cifar10 files exist and are valid.
"""
if not fe.is_directory(const.CIFAR10_SAVE_LOCATION):
fe.crete_directory(const.CIFAR10_SAVE_LOCATION)
if self._download_cifar10():
self._extract_cifar10()
else:
if DataSetValidator.check_if_extract_is_needed():
if DataSetValidator.check_if_download_is_needed():
if not self._download_cifar10():
return
self._extract_cifar10()
def _download_cifar10(self):
"""
- Downloads Cifar10 binary version.
"""
number_of_tries = 0
while number_of_tries < const.CIFAR10_DOWNLOAD_NUMBER_OF_TRIES:
try:
urllib.request.urlretrieve(
const.CIFAR10_DOWNLOAD_LINK,
const.CIFAR10_ARCHIVE_PATH,
self._update_download_progress
)
return True
except Exception as _:
data_set_creator.cifar10_download_try_failed = True
sleep(60)
number_of_tries += 1
data_set_creator.cifar10_download_failed = True
return False
def _update_download_progress(
self,
count,
block_size,
total_size):
"""
- Calls the download progress update method, passing the percent of
the progress.
"""
self._progress_update_method(
int(count * block_size / float(total_size) * 100)
)
@staticmethod
def _extract_cifar10():
"""
- Extracts the Cifar 10 data set archive.
"""
with tarfile.open(const.CIFAR10_ARCHIVE_PATH, 'r:gz') as archive:
archive.extractall(const.CIFAR10_SAVE_LOCATION)
#########################################################################
# Public methods
def run(self):
"""
- Call this method to start the Cifar 10 data set preparations.
"""
self._make_sure_the_required_files_exist()
#########################################################################
| apache-2.0 | 8,903,633,220,386,401,000 | 27.928571 | 77 | 0.52963 | false |
freemed/orthanc | Resources/Samples/Python/ChangesLoop.py | 1 | 2310 | #!/usr/bin/python
# Orthanc - A Lightweight, RESTful DICOM Store
# Copyright (C) 2012-2014 Medical Physics Department, CHU of Liege,
# Belgium
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
import sys
import RestToolbox
##
## Print help message
##
if len(sys.argv) != 3:
print("""
Sample script that continuously monitors the arrival of new DICOM
images into Orthanc (through the Changes API).
Usage: %s [hostname] [HTTP port]
For instance: %s localhost 8042
""" % (sys.argv[0], sys.argv[0]))
exit(-1)
URL = 'http://%s:%d' % (sys.argv[1], int(sys.argv[2]))
##
## The following function is called each time a new instance is
## received.
##
def NewInstanceReceived(path):
global URL
patientName = RestToolbox.DoGet(URL + path + '/content/PatientName')
# Remove the possible trailing characters due to DICOM padding
patientName = patientName.strip()
print 'New instance received for patient "%s": "%s"' % (patientName, path)
##
## Main loop that listens to the changes API.
##
current = 0
while True:
r = RestToolbox.DoGet(URL + '/changes', {
'since' : current,
'limit' : 4 # Retrieve at most 4 changes at once
})
for change in r['Changes']:
# We are only interested in the arrival of new instances
if change['ChangeType'] == 'NewInstance':
# Call the callback function
path = change['Path']
NewInstanceReceived(path)
# Delete the instance once it has been discovered
RestToolbox.DoDelete(URL + path)
current = r['Last']
if r['Done']:
print "Everything has been processed: Waiting..."
time.sleep(1)
| gpl-3.0 | 8,063,557,085,211,736,000 | 25.860465 | 78 | 0.669697 | false |
MalloyDelacroix/DownloaderForReddit | DownloaderForReddit/gui/database_views/filter_input_widget.py | 1 | 5786 | from PyQt5.QtWidgets import QWidget, QLineEdit, QSpinBox, QComboBox, QDateTimeEdit, QSizePolicy
from PyQt5.QtCore import Qt, pyqtSignal
from sqlalchemy import Integer, String, DateTime, Enum, Boolean
from DownloaderForReddit.guiresources.database_views.filter_input_widget_auto import Ui_FilterInputWidget
from DownloaderForReddit.database.filters import (DownloadSessionFilter, RedditObjectFilter, PostFilter, ContentFilter,
CommentFilter)
from DownloaderForReddit.utils import injector
from .filter_item import FilterItem
class FilterInputWidget(QWidget, Ui_FilterInputWidget):
export_filter = pyqtSignal(list)
def __init__(self, parent=None):
QWidget.__init__(self, parent=parent)
self.setupUi(self)
self.settings_manager = injector.get_settings_manager()
self.launch_quick_filter = True
self.filter_model_map = {
'DOWNLOAD_SESSION': DownloadSessionFilter,
'REDDIT_OBJECT': RedditObjectFilter,
'POST': PostFilter,
'CONTENT': ContentFilter,
'COMMENT': CommentFilter
}
self.field_type_map = {
Boolean: self.get_boolean_field,
Integer: self.get_integer_field,
String: self.get_string_field,
DateTime: self.get_datetime_field
}
self.value_field = None
self.add_filter_button.clicked.connect(self.add_filter)
self.model_combo.currentIndexChanged.connect(self.set_fields)
self.model_list = ['DOWNLOAD_SESSION', 'REDDIT_OBJECT', 'POST', 'CONTENT', 'COMMENT']
for model in self.model_list:
self.model_combo.addItem(model.replace('_', ' ').title(), model)
operators = [('Equal To', 'eq'), ('Not Equal', 'not'), ('<', 'lt'), ('<=', 'lte'), ('>', 'gt'), ('>=', 'gte'),
('In', 'in'), ('Like', 'like'), ('Contains', 'contains')]
for x in operators:
self.operator_combo.addItem(x[0], x[1])
self.set_fields()
self.field_combo.currentIndexChanged.connect(self.set_value_field)
self.set_value_field()
self.quick_filter_combo.addItem('Quick Filters')
self.quick_filter_combo.addItems(self.settings_manager.database_view_quick_filters.keys())
self.quick_filter_combo.currentIndexChanged.connect(self.handle_quick_filter)
@property
def current_model(self):
return self.model_combo.currentData(Qt.UserRole)
@property
def current_field(self):
return self.field_combo.currentData(Qt.UserRole)
@property
def current_operator(self):
return self.operator_combo.currentData(Qt.UserRole)
def set_model_combo(self, model):
try:
self.model_combo.setCurrentIndex(self.model_list.index(model))
except IndexError:
pass
def set_fields(self):
self.field_combo.clear()
f = self.filter_model_map[self.current_model]
for field in f.get_filter_fields():
self.field_combo.addItem(field.replace('_', ' ').title(), field)
def set_value_field(self):
current_field = self.current_field
if current_field is not None:
f = self.filter_model_map[self.current_model]()
field_type = f.get_type(current_field)
if field_type == Enum:
field = self.get_choice_field(choices=f.get_choices(current_field))
else:
field = self.field_type_map[field_type]()
if not isinstance(field, type(self.value_field)):
try:
self.value_layout.removeWidget(self.value_field)
self.value_field.deleteLater()
except AttributeError:
pass
self.value_field = field
self.value_layout.addWidget(self.value_field)
def get_value(self):
t = type(self.value_field)
if t == QComboBox:
return self.value_field.currentData(Qt.UserRole)
elif t == QLineEdit:
return self.value_field.text()
elif t == QSpinBox:
return self.value_field.value()
def handle_quick_filter(self):
if self.launch_quick_filter and self.quick_filter_combo.currentIndex() != 0:
self.launch_quick_filter = False
filter_name = self.quick_filter_combo.currentText()
filters = [FilterItem(**filter_dict) for filter_dict in
self.settings_manager.database_view_quick_filters[filter_name]]
self.add_filter(filters)
self.quick_filter_combo.setCurrentIndex(0)
self.launch_quick_filter = True
def add_filter(self, filters=None):
if type(filters) != list:
filters = [self.create_filter()]
self.export_filter.emit(filters)
def create_filter(self):
return FilterItem(self.current_model, self.current_field, self.current_operator, self.get_value())
def get_boolean_field(self):
combo = QComboBox()
combo.addItem('True', True)
combo.addItem('False', False)
return combo
def get_integer_field(self):
spin_box = QSpinBox()
spin_box.setMaximum(1000000000)
return spin_box
def get_string_field(self):
x = QLineEdit()
x.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Fixed)
return x
def get_choice_field(self, choices):
combo = QComboBox()
for x in choices:
combo.addItem(x)
return combo
def get_datetime_field(self):
return QDateTimeEdit()
def keyPressEvent(self, event):
if event.key() == Qt.Key_Enter or event.key() == Qt.Key_Return:
self.add_filter()
| gpl-3.0 | 4,671,662,413,270,167,000 | 36.571429 | 119 | 0.612167 | false |
XYM1988/Algorithm | Chp8/8.1-General-Tree.py | 1 | 2720 | # Tree is an organizational relationship that is richer than the simple "before"
# and "after" relationships between objects in sequences.
# We define a tree T as a set of nodes storing elements such that the nodes have
# a parent-child relationship that satisfies the following properties:
# 1. If T is nonempty, it has a special node, called the root of T, that has no parent
# 2. Each node v of T different from the root has a unique parent node w; every
# node with parent w is a child of w.
# Two nodes that are children of the same parent are siblings.
# A node v is external if v has no children.
# A node v is internal if it has one or more children.
# Ancestor
# Descendant
# edge: A pair of two nodes
# path: A sequence of nodes such that any two consecutive nodes in the sequence
# form an edge.
# A tree is ordered if there is a meaningful linear order among the children of
# each node. -- Ordered Tree
class Tree:
""" Abstract base class representing a tree structure. """
class Position:
"""An abstraction representing the location of a single element."""
def element(self):
""" Return the element stored at this Position. """
raise NotImplementedError
def __eq__(self, other):
""" Return True if other Position represents the same location. """
raise NotImplementedError
def __ne__(self, other):
""" Return True if other does not represent the same location. """
return not (self == other)
def root(self):
""" Return Position representing the tree's root (or None if it's empty)"""
raise NotImplementedError
def parent(self, p):
""" Return Position representing p's parent (or None if p is root) """
raise NotImplementedError
def num_children(self, p):
""" Return the number of children that Position p has. """
raise NotImplementedError
def children(self, p):
""" Generate an iteration of Positions representing p's children. """
raise NotImplementedError
def __len__(self):
""" Return the total number of elements in the tree. """
raise NotImplementedError
def is_root(self, p):
""" Return True if Position p represents the root of the tree. """
return self.root() == p
def is_leaf(self, p):
""" Return True if Position p does not have any children. """
return self.num_children(p) == 0
def is_empty(self):
""" Return True if the tree is empty. """
return len(self) == 0
# Depth calculation:
def depth(self, p):
if self.is_root(p):
return 0
else:
return self.depth(self.parent(p)) + 1
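# depth(p) climbs one parent per call, so for a position at depth d it runs
# in O(d + 1) time; e.g. depth(root) == 0 and every child of the root has
# depth 1.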
| mit | -5,231,915,822,319,370,000 | 37.857143 | 86 | 0.649632 | false |
anthill-platform/anthill-social | anthill/social/model/social/facebook.py | 1 | 2400 |
import datetime
from anthill.common import to_int
from anthill.common.social import APIError
from anthill.common.social.apis import FacebookAPI
from .. social import SocialAPI, SocialAuthenticationRequired
from .. token import NoSuchToken
class FacebookSocialAPI(SocialAPI, FacebookAPI):
def __init__(self, application, tokens, cache):
SocialAPI.__init__(self, application, tokens, "facebook", cache)
FacebookAPI.__init__(self, cache)
async def call(self, gamespace, account_id, method, *args, **kwargs):
"""
Makes facebook API call.
Validates everything, gathers tokens and then awaits `method` with all information.
"""
try:
token_data = await self.tokens.get_token(
gamespace,
account_id,
self.credential_type)
except NoSuchToken:
raise SocialAuthenticationRequired(self.credential_type, None)
expires_at = token_data.expires_at
access_token = token_data.access_token
data = token_data.payload
try:
if datetime.datetime.now() > expires_at:
raise SocialAuthenticationRequired(self.credential_type, token_data.username)
kwargs["access_token"] = access_token
result = await method(gamespace, *args, **kwargs)
except APIError as e:
if e.code == 401 or e.code == 400:
raise SocialAuthenticationRequired(self.credential_type, token_data.username)
raise e
else:
return result
async def list_friends(self, gamespace, account_id):
friends = await self.call(gamespace, account_id, self.api_get_friends)
return friends
def has_friend_list(self):
return True
async def get_social_profile(self, gamespace, username, account_id, env=None):
user_info = await self.call(
gamespace,
account_id,
self.api_get_user_info,
fields="id,name,email,locale")
return user_info
async def import_social(self, gamespace, username, auth):
access_token = auth.access_token
expires_in = to_int(auth.expires_in)
data = {}
result = await self.import_data(
gamespace,
username,
access_token,
expires_in, data)
return result
| mit | 5,517,608,928,784,957,000 | 29.379747 | 93 | 0.614167 | false |
NDKoehler/DataScienceBowl2017_7th_place | dsb3_networks/classification/resnet2D_0.7res_80/config_2Dfinal.py | 1 | 3207 | from collections import defaultdict
from datetime import datetime
import json
import tensorflow as tf
import os, sys
import pandas as pd
#config dic
H = defaultdict(lambda: None)
#All possible config options:
H['optimizer'] = 'MomentumOptimizer'#'RMSPropOptimizer'
H['learning_rate'] = 0.001
H['momentum'] = 0.9 #0.99
H['kernel_num'] = 16 #32
H['dropout_keep_prob'] = 1.0
H['gpu_fraction'] = 0.9
H['num_classes'] = 2
H['model_name'] = 'resnet2D'
H['pretrained_checkpoint_dir'] = '../luna_resnet2D/output_dir/gold_prio3_plane_mil0'#../luna_resnet2D/output_dir/gen8_20z_3rot_stage1_deep
H['output_dir'] = 'output_dir/old_but_gold_plane_mil0_b4_init_luna' #cross_crop_retrain_zrot
H['predictions_dir'] = ''
H['allow_soft_placement'] = True
H['log_device_placement'] = False
H['max_steps'] = 35
H['MOVING_AVERAGE_DECAY'] = 0.9
H['BATCH_NORM_CENTER'] = True
H['BATCH_NORM_SCALE'] = True
H['weights_initializer'] = 'xavier_initializer' #'xavier_initializer', 'xavier_initializer_conv2d', 'truncated_normal_initializer'
H['gpus'] = [1]
H['summary_step'] = 10
# list iterator
# H['train_lst'] = '../data/multiview-2/tr.lst'
# H['val_lst'] = '../data/multiview-2/va.lst'
H['train_lst'] = '../../../datapipeline_final/dsb3_0/interpolate_candidates_res07/tr_patients_80.lst'
H['val_lst'] = '../../../datapipeline_final/dsb3_0/interpolate_candidates_res07/va_patients_20.lst'
#tr_path = '/media/niklas/Data_3/dsb3/datapipeline_gen9/dsb3_0/interpolate_candidates/cv5/cv/tr' + str(run_id) + '.lst'
#va_path = '/media/niklas/Data_3/dsb3/datapipeline_gen9/dsb3_0/interpolate_candidates/cv5/cv/va' + str(run_id) + '.lst'
#H['train_lst'] = tr_path
#H['val_lst'] = va_path
H['candidate_mode'] = False
# crossed axes options - cross is centrally cropped -> layers are stacked in z-dim
H['num_crossed_layers'] = 1
H['crossed_axes'] = [0,1,2]
H['rand_drop_planes']=0
H['plane_mil'] = False
# y and x image_shape must be equal -> z has same shape!!!
# you can crop if the equal z,y and x in image shape are and smaller than in in_image_shape
# images
# in_image_shapes[1:] must be equal to len of crop_before_loading_in_RAM_ZminZmaxYminYmaxXminXmax
H['in_image_shape'] = [5, 64, 64, 64, 2] #256
# not working #H['crop_before_loading_in_RAM_ZminZmaxYminYmaxXminXmax'] = [False,False,False,False,False,False] # Default = False or None
H['image_shape'] = [5, 3*H['num_crossed_layers'], 64, 64, 2]
H['label_shape'] = [1] #256
H['batch_size'] = 8
#iterator settings
H['load_in_ram'] = True
# due to time consuming operation and quality loss only rotation around one axis is processed randomly chosen
H['rand_rot_axes'] = [0]#,1,2] # 0: z, 1: y, 2: x (attention: x and y rotation lasts long)
H['rand_rot'] = True
H['degree_90_rot'] = H['rand_rot']
H['min_rot_angle'] = -10 #degree
H['max_rot_angle'] = 10 #degree
H['rand_mirror_axes'] = [0,1,2] # 0: z, 1: y, 2: x else False
H['rand_cropping_ZminZmaxYminYmaxXminXmax'] = [False,False,False,False,False,False] # crop within given range # default False: full range
H['save_step'] = 10 # saving checkpoint
H['tr_num_examples'] = len(pd.read_csv(H['train_lst'], header=None, sep='\t'))
H['va_num_examples'] = len(pd.read_csv(H['val_lst'], header=None, sep='\t'))
| mit | 1,141,336,467,683,056,600 | 35.033708 | 138 | 0.683505 | false |
hipnusleo/laserjet | resource/pypi/cryptography-1.7.1/tests/hazmat/primitives/test_pbkdf2hmac.py | 1 | 2440 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import pytest
from cryptography.exceptions import (
AlreadyFinalized, InvalidKey, _Reasons
)
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from ...doubles import DummyHashAlgorithm
from ...utils import raises_unsupported_algorithm
class TestPBKDF2HMAC(object):
def test_already_finalized(self):
kdf = PBKDF2HMAC(hashes.SHA1(), 20, b"salt", 10, default_backend())
kdf.derive(b"password")
with pytest.raises(AlreadyFinalized):
kdf.derive(b"password2")
kdf = PBKDF2HMAC(hashes.SHA1(), 20, b"salt", 10, default_backend())
key = kdf.derive(b"password")
with pytest.raises(AlreadyFinalized):
kdf.verify(b"password", key)
kdf = PBKDF2HMAC(hashes.SHA1(), 20, b"salt", 10, default_backend())
kdf.verify(b"password", key)
with pytest.raises(AlreadyFinalized):
kdf.verify(b"password", key)
def test_unsupported_algorithm(self):
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_HASH):
PBKDF2HMAC(
DummyHashAlgorithm(), 20, b"salt", 10, default_backend()
)
def test_invalid_key(self):
kdf = PBKDF2HMAC(hashes.SHA1(), 20, b"salt", 10, default_backend())
key = kdf.derive(b"password")
kdf = PBKDF2HMAC(hashes.SHA1(), 20, b"salt", 10, default_backend())
with pytest.raises(InvalidKey):
kdf.verify(b"password2", key)
def test_unicode_error_with_salt(self):
with pytest.raises(TypeError):
PBKDF2HMAC(hashes.SHA1(), 20, u"salt", 10, default_backend())
def test_unicode_error_with_key_material(self):
kdf = PBKDF2HMAC(hashes.SHA1(), 20, b"salt", 10, default_backend())
with pytest.raises(TypeError):
kdf.derive(u"unicode here")
def test_invalid_backend():
pretend_backend = object()
with raises_unsupported_algorithm(_Reasons.BACKEND_MISSING_INTERFACE):
PBKDF2HMAC(hashes.SHA1(), 20, b"salt", 10, pretend_backend)
| apache-2.0 | 3,596,000,000,704,236,500 | 35.538462 | 79 | 0.65123 | false |
duducosmos/pgs4a | private/lib/android/apk.py | 1 | 3947 | import os
import struct
import zipfile
import cStringIO
class SubFile(object):
def __init__(self, f, name, base, length):
self.f = f
self.base = base
self.offset = 0
self.length = length
self.name = name
self.f.seek(self.base)
def read(self, length=None):
maxlength = self.length - self.offset
if length is not None:
length = min(length, maxlength)
else:
length = maxlength
if length:
rv2 = self.f.read(length)
self.offset += len(rv2)
else:
rv2 = ""
return rv2
def readline(self, length=None):
maxlength = self.length - self.offset
if length is not None:
length = min(length, maxlength)
else:
length = maxlength
# Otherwise, let the system read the line all at once.
rv = self.f.readline(length)
self.offset += len(rv)
return rv
def readlines(self, length=None):
rv = [ ]
while True:
l = self.readline(length)
if not l:
break
if length is not None:
length -= len(l)
if l < 0:
break
rv.append(l)
return rv
def xreadlines(self):
return self
def __iter__(self):
return self
def next(self):
rv = self.readline()
if not rv:
raise StopIteration()
return rv
def flush(self):
return
def seek(self, offset, whence=0):
if whence == 0:
offset = offset
elif whence == 1:
offset = self.offset + offset
elif whence == 2:
offset = self.length + offset
if offset > self.length:
offset = self.length
self.offset = offset
if offset < 0:
offset = 0
self.f.seek(offset + self.base)
def tell(self):
return self.offset
def close(self):
self.f.close()
def write(self, s):
raise Exception("Write not supported by SubFile")
class APK(object):
def __init__(self, apk=None, prefix="assets/"):
"""
Opens an apk file, and lets you read the assets out of it.
`apk`
The path to the file to open. If this is None, it defaults to the
apk file we are run out of.
`prefix`
The prefix inside the apk file to read.
"""
if apk is None:
apk = os.environ["ANDROID_APK"]
self.apk = apk
self.zf = zipfile.ZipFile(apk, "r")
# A map from unprefixed filename to ZipInfo object.
self.info = { }
for i in self.zf.infolist():
fn = i.filename
if not fn.startswith(prefix):
continue
fn = fn[len(prefix):]
self.info[fn] = i
def list(self):
return sorted(self.info)
def open(self, fn):
if fn not in self.info:
raise IOError("{0} not found in apk.".format(fn))
info = self.info[fn]
if info.compress_type == zipfile.ZIP_STORED:
f = file(self.apk, "rb")
f.seek(info.header_offset)
h = struct.unpack(zipfile.structFileHeader, f.read(zipfile.sizeFileHeader))
offset = (info.header_offset +
zipfile.sizeFileHeader +
h[zipfile._FH_FILENAME_LENGTH] +
h[zipfile._FH_EXTRA_FIELD_LENGTH])
return SubFile(
f,
self.apk,
offset,
info.file_size)
return cStringIO.StringIO(self.zf.read(info))
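# Minimal usage sketch (hypothetical file names, assuming "app.apk" bundles
# an "assets/data.txt" entry):
# apk = APK("app.apk")
# print apk.list()
# f = apk.open("data.txt")
# data = f.read()
# f.close()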
| lgpl-2.1 | 7,976,596,062,115,813,000 | 21.683908 | 87 | 0.478085 | false |
enthought/depsolver | depsolver/pool.py | 1 | 8318 | import collections
from .bundled.traitlets \
import \
HasTraits, Dict, Instance, List, Long, Unicode
from .errors \
import \
DepSolverError, MissingPackageInfoInPool
from .package \
import \
PackageInfo
from .repository \
import \
Repository
from .requirement \
import \
Requirement
from .utils \
import \
CachedScheduler
MATCH_NONE = 0
MATCH_NAME = 1
MATCH = 2
MATCH_PROVIDE = 3
MATCH_REPLACE = 4
class Pool(HasTraits):
"""Pool objects model a pool of repositories.
Pools are able to find packages that provide a given requirement (handling
the provides concept from package metadata).
"""
repositories = List(Instance(Repository))
_packages_by_id = Dict()
_packages_by_name = Dict()
_id = Long(1)
_repository_by_name = Instance(collections.defaultdict)
_scheduler = Instance(CachedScheduler)
def __init__(self, repositories=None, **kw):
scheduler = CachedScheduler()
repository_by_name = collections.defaultdict(list)
super(Pool, self).__init__(self, _scheduler=scheduler,
_repository_by_name=repository_by_name, **kw)
if repositories is None:
repositories = []
# provide.name -> package mapping
self._packages_by_name = collections.defaultdict(list)
if len(repositories) > 0:
for repository in repositories:
self.add_repository(repository)
def has_package(self, package):
package_id = package.id
return package_id in self._packages_by_id
def add_repository(self, repository):
"""Add a repository to this pool.
Arguments
---------
repository: Repository
repository to add
"""
self.repositories.append(repository)
self._repository_by_name[repository.name].append(repository)
for package in repository.iter_packages():
package.id = self._id
self._id += 1
self._packages_by_id[package.id] = package
self._packages_by_name[package.name].append(package)
for provide in package.provides:
self._packages_by_name[provide.name].append(package)
for replace in package.replaces:
self._packages_by_name[replace.name].append(package)
def package_by_id(self, package_id):
"""Retrieve a package from its id.
Arguments
---------
package_id: str
A package id
"""
try:
return self._packages_by_id[package_id]
except KeyError:
raise MissingPackageInfoInPool(package_id)
def what_provides(self, requirement, mode='composer'):
"""Returns a list of packages that provide the given requirement.
Arguments
---------
requirement: Requirement
the requirement to match
mode: str
One of the following string:
- 'composer': behaves like Composer does, i.e. only returns
packages that match this requirement directly, unless no
match is found in which case packages that provide the
requirement indirectly are returned.
- 'direct_only': only returns packages that match this
requirement directly (i.e. provides are ignored).
- 'include_indirect': only returns packages that match this
requirement directly or indirectly (i.e. includes packages
that provides this package)
"""
# FIXME: this is conceptually copied from whatProvides in Composer, but
# I don't understand why the policy of preferring non-provided over
# provided packages is handled here.
if not mode in ['composer', 'direct_only', 'include_indirect']:
raise ValueError("Invalid mode %r" % mode)
strict_matches = []
provided_match = []
name_match = False
for package in self._packages_by_name[requirement.name]:
match = self.matches(package, requirement)
if match == MATCH_NONE:
pass
elif match == MATCH_NAME:
name_match = True
elif match == MATCH:
name_match = True
strict_matches.append(package)
elif match == MATCH_PROVIDE:
provided_match.append(package)
elif match == MATCH_REPLACE:
strict_matches.append(package)
else:
raise ValueError("Invalid match type: {}".format(match))
if mode == 'composer':
if name_match:
return strict_matches
else:
return strict_matches + provided_match
elif mode == 'direct_only':
return strict_matches
elif mode == 'include_indirect':
return strict_matches + provided_match
def matches(self, candidate, requirement):
"""Checks whether the candidate package matches the requirement, either
directly or through provides.
Arguments
---------
candidate: PackageInfo
Candidate package
requirement: Requirement
The requirement to match
Returns
-------
match_type: int
One of the MATCH_* constants, specifying the type of match:
- if only the name matches, will be MATCH_NAME
- if the name and version actually match, will be MATCH
- if the match is through the package's provides, will be MATCH_PROVIDE
- if the match is through the package's replaces, will be MATCH_REPLACE
- if no match at all, will be MATCH_NONE
Examples
--------
>>> from depsolver import PackageInfo, Requirement
>>> R = Requirement.from_string
>>> pool = Pool()
>>> pool.matches(PackageInfo.from_string('numpy-1.3.0'), R('numpy >= 1.2.0')) == MATCH
True
"""
if requirement.name == candidate.name:
candidate_requirement = Requirement.from_package_string(candidate.unique_name, candidate.version_factory)
if requirement.is_universal or candidate_requirement.matches(requirement):
return MATCH
else:
return MATCH_NAME
else:
for provide in candidate.provides:
if requirement.matches(provide):
return MATCH_PROVIDE
for replace in candidate.replaces:
if requirement.matches(replace):
return MATCH_REPLACE
return MATCH_NONE
def id_to_string(self, package_id):
"""
Convert a package id to a nice string representation.
"""
package = self.package_by_id(abs(package_id))
if package_id > 0:
return "+" + str(package)
else:
return "-" + str(package)
#------------------------
# Repository priority API
#------------------------
def set_repository_order(self, repository_name, after=None, before=None):
candidates = self._repository_by_name[repository_name]
if len(candidates) < 1:
raise DepSolverError("No repository with name '%s'" % (repository_name,))
else:
self._scheduler.set_constraints(repository_name, after, before)
def repository_priority(self, repository):
"""
Returns the priority of a repository.
Priorities are in the ]-inf, 0] integer range, and the ordering is the
same as integers: the lower the priority number, the less a repository
has priority over other repositories.
If no constraint has been set up for the repository, its priority is 0.
Parameters
----------
repository: Repository
The repository to compute the priority of.
"""
if repository.name in self._repository_by_name:
priorities = self._scheduler.compute_priority()
# We return a negative number to follow Composer convention.
return priorities.get(repository.name, 0) - (len(priorities) - 1)
else:
raise DepSolverError("Unknown repository name '%s'" % (repository.name,))
| bsd-3-clause | -270,076,686,273,748,060 | 33.658333 | 117 | 0.583914 | false |
pmoleri/memorize-accesible | speak/espeak_cmd.py | 1 | 2079 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import re
import subprocess
import logging
logger = logging.getLogger('speak')
import espeak
PITCH_MAX = 99
RATE_MAX = 99
PITCH_DEFAULT = PITCH_MAX/2
RATE_DEFAULT = RATE_MAX/3
class AudioGrabCmd(espeak.BaseAudioGrab):
def speak(self, status, text):
self.make_pipeline('filesrc name=file-source')
# espeak uses 80 to 370
rate = 80 + (370-80) * int(status.rate) / 100
wavpath = "/tmp/speak.wav"
subprocess.call(["espeak", "-w", wavpath, "-p", str(status.pitch),
"-s", str(rate), "-v", status.voice.name, text],
stdout=subprocess.PIPE)
self.stop_sound_device()
# set the source file
self.pipeline.get_by_name("file-source").props.location = wavpath
# play
self.restart_sound_device()
def voices():
out = []
result = subprocess.Popen(["espeak", "--voices"], stdout=subprocess.PIPE) \
.communicate()[0]
for line in result.split('\n'):
m = re.match(r'\s*\d+\s+([\w-]+)\s+([MF])\s+([\w_-]+)\s+(.+)', line)
if not m:
continue
language, gender, name, stuff = m.groups()
if stuff.startswith('mb/') or \
name in ('en-rhotic','english_rp','english_wmids'):
# these voices don't produce sound
continue
out.append((language, name))
return out
| gpl-2.0 | -3,199,728,626,601,833,000 | 31.484375 | 79 | 0.634921 | false |
lhellebr/GreenTea | apps/core/forms.py | 1 | 3210 | # Author: Pavel Studenik <[email protected]>
# Date: 24.9.2013
import difflib
import logging
from django import forms
import apps.core.views
from models import GroupTemplate, GroupTestTemplate, JobTemplate, Test
logger = logging.getLogger("main")
class HomepageForm(forms.Form):
ORDER_CHOICES = (
("rate", "rate"),
("score", "score")
)
def __init__(self, *args, **kwargs):
super(HomepageForm, self).__init__(*args, **kwargs)
self.fields['search'].widget.attrs['class'] = 'form-control'
order = forms.ChoiceField(
choices=ORDER_CHOICES,
required=False
)
schedule = forms.IntegerField(
required=False
)
page = forms.IntegerField(
required=False
)
search = forms.CharField(
required=False
)
def get_params(self):
return "&".join(["%s=%s" % (x, y)
for x, y in self.cleaned_data.items()])
class FilterForm(forms.Form):
onlyfail = forms.BooleanField(
required=False, initial=False
)
search = forms.CharField(
required=False
)
tag = forms.CharField(
required=False
)
slider = forms.CharField(
required=False
)
def __init__(self, *args, **kwargs):
super(FilterForm, self).__init__(*args, **kwargs)
self.fields['search'].widget.attrs['class'] = 'form-control'
self.fields['search'].widget.attrs['placeholder'] = 'Search'
def clean_search(self):
value = self.cleaned_data["search"].strip()
return value
class GroupsForm(forms.Form):
group = forms.ModelChoiceField(
queryset=GroupTemplate.objects.filter().order_by("name"), empty_label=None)
content = forms.CharField(widget=forms.Textarea(
attrs={'class': 'form-control', 'rows': "30", 'cols': "100"}))
def save(self):
group = self.cleaned_data["group"]
tasks = group.grouptests.all().order_by("priority")
tests_exists = [it.test.name for it in tasks]
rows = self.cleaned_data["content"]
priority = len(tests_exists) + 1
for testname in rows.split():
try:
test = Test.objects.get(name=testname.strip())
if test.name not in tests_exists:
gtt = GroupTestTemplate(
test=test, group=group, priority=priority)
gtt.save()
priority += 1
except Test.DoesNotExist:
logger.warning("test %s does not exist" % testname)
return group
class JobForm(forms.Form):
jobs1 = forms.ModelChoiceField(queryset=JobTemplate.objects.filter(
is_enable=True).order_by("whiteboard"), empty_label=None)
jobs2 = forms.ModelChoiceField(queryset=JobTemplate.objects.filter(
is_enable=True).order_by("whiteboard"), empty_label=None)
def compare(self):
d = difflib.HtmlDiff(wrapcolumn=90)
job1_xml = apps.core.views.get_xml(
self.cleaned_data["jobs1"]).splitlines(1)
job2_xml = apps.core.views.get_xml(
self.cleaned_data["jobs2"]).splitlines(1)
return d.make_table(job1_xml, job2_xml)
| gpl-2.0 | 7,638,471,029,832,933,000 | 29.283019 | 83 | 0.597819 | false |
gfrances/model-based-social-simulations | experiments/all-agents-simple.py | 1 | 1661 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
# from experiments.src.exp import AggregateExperiment, MDPAgentConfiguration, SingleExperiment
from src.sequential_taskgen import SequentialTaskgen
from src.experiment import AggregateExperiment, MDPAgentConfiguration, SingleExperiment, LazyAgentConfiguration, \
RandomAgentConfiguration, GreedyAgentConfiguration, MotionlessAgentConfiguration
def main():
"""
A simple single-run experiment comparing the performance of 10 units of each of our agent types.
"""
exp = AggregateExperiment(parse_arguments())
pop = 10
mdp = MDPAgentConfiguration(population=pop, horizon=6, width=1000)
lazy = LazyAgentConfiguration(population=pop, alpha=0.7)
random = RandomAgentConfiguration(population=pop)
greedy = GreedyAgentConfiguration(population=pop)
motionless = MotionlessAgentConfiguration(population=pop)
exp.add_single(SingleExperiment(timesteps=200, runs=1, simulation_map='r25_i0',
label="all", agents=[mdp, lazy, random, greedy, motionless]))
exp.bootstrap()
t = SequentialTaskgen(exp)
t.run()
def parse_arguments():
parser = argparse.ArgumentParser(description='Generate experiment task runners.')
parser.add_argument("--name", help='The name/ID we want to give to the experiment', default='all-agents')
parser.add_argument("--timeout", help='Maximum timeout allowed, in seconds', type=int, default='0')
parser.add_argument("--mem", help='Maximum memory allowed, in GB', default='0', type=int)
args = parser.parse_args()
return args
if __name__ == "__main__":
main()
| gpl-2.0 | -2,915,125,925,642,806,000 | 38.547619 | 114 | 0.718844 | false |
keithfancher/Blobulous | blobulous/enemy.py | 1 | 3919 | # Copyright 2011, 2012 Keith Fancher
#
# This file is part of Blobulous.
#
# Blobulous is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Blobulous is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Blobulous. If not, see <http://www.gnu.org/licenses/>.
import random
import pygame
import settings as s
from explosion import Explosion
class Enemy(pygame.sprite.Sprite):
def __init__(self, explosion_container, *containers):
# Call the parent's constructor
pygame.sprite.Sprite.__init__(self, containers)
# Set speed vector
self.delta_x = 0
self.delta_y = 0
# Whether this enemy is currently targeted by the player
self.targeted = False
# Used by the circle collision detection. Allows a slightly smaller and
# more accurate hit "box".
self.radius = 21
# Load the image
self.image = pygame.image.load("images/enemy.png").convert()
self.image.set_colorkey(pygame.Color('black'))
self.rect = self.image.get_rect()
# Need to pass the enemy a sprite group to contain its explosion after
# it's destroyed, so the explosion can live on after the enemy has long
# since bitten the dust
self.explosion_container = explosion_container
self.random_spawn()
def update(self):
"""Update enemy position"""
self.rect.top += self.delta_y
self.rect.left += self.delta_x
self.kill_if_offscreen() # Destroy object if offscreen
def kill(self):
"""Override Sprite.kill() so enemies (and their descendent classes)
will explode instead of just disappearing"""
Explosion(self.rect.center, self.explosion_container)
pygame.sprite.Sprite.kill(self)
def kill_if_offscreen(self):
"""Kill any enemies that go more than 60 pixels off the screen"""
if self.rect.left < -60 or self.rect.left > s.SCREEN_W + 60:
self.kill()
elif self.rect.top < -60 or self.rect.top > s.SCREEN_H + 60:
self.kill()
def random_spawn(self):
"""Spawns somewhere off the screen with random direction and speed"""
# Directional constants... makes this shit a bit easier to read
TOP = 0
BOTTOM = 1
LEFT = 2
RIGHT = 3
# At top of screen, bottom, left, or right
spawn_location = random.randint(0, 3)
if spawn_location == TOP:
self.rect.left = random.randint(0, s.SCREEN_W - self.rect.width)
self.rect.bottom = 0
self.delta_x = random.randint(-5, 5)
self.delta_y = random.randint(1, 5) # gotta move down
elif spawn_location == BOTTOM:
self.rect.left = random.randint(0, s.SCREEN_W - self.rect.width)
self.rect.top = s.SCREEN_H
self.delta_x = random.randint(-5, 5)
self.delta_y = random.randint(-5, -1) # gotta move up
elif spawn_location == LEFT:
self.rect.right = 0
self.rect.top = random.randint(0, s.SCREEN_H - self.rect.height)
self.delta_x = random.randint(1, 5) # gotta move right
self.delta_y = random.randint(-5, 5)
elif spawn_location == RIGHT:
self.rect.left = s.SCREEN_W
self.rect.top = random.randint(0, s.SCREEN_H - self.rect.height)
self.delta_x = random.randint(-5, -1) # gotta move left
self.delta_y = random.randint(-5, 5)
| gpl-3.0 | 157,451,090,156,659,620 | 35.287037 | 79 | 0.628987 | false |
Beit-Hatfutsot/dbs-back | scripts/migrate.py | 1 | 16413 | # -*- coding: utf-8 -*-
import re
import os
import sys
import logging
from argparse import ArgumentParser
from decimal import Decimal
import datetime
import calendar
import time
from functools import partial
from pymongo import MongoClient
from pymongo.errors import BulkWriteError
from bson.code import Code
from gedcom import Gedcom, GedcomParseError
from migration.migration_sqlclient import MigrationSQLClient
from migration.tasks import update_row
from migration.files import upload_photo
from migration.family_trees import Gedcom2Persons
from bhs_api.utils import get_migrate_conf, create_thumb, get_unit_type
from bhs_api import phonetic
from bhs_api.item import get_collection_id_field
conf = get_migrate_conf(('queries_repo_path', 'sql_server', 'sql_user', 'sql_password',
'collections_to_migrate', 'sql_db', 'photos_mount_point', 'movies_mount_point',
'gentree_mount_point', 'gentree_bucket_name', 'photos_bucket_name', 'movies_bucket_name'))
sqlClient = MigrationSQLClient(conf.sql_server, conf.sql_user, conf.sql_password, conf.sql_db)
logging.basicConfig(level=logging.INFO,
format='%(asctime)-15s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger('scripts.migrate')
logger.setLevel(logging.getLevelName('INFO'))
repeated_slugs = {'He': {}, 'En': {}}
split = lambda x: re.split(',|\||;| ', x)
def parse_args():
parser = ArgumentParser()
parser.add_argument('-c', '--collection')
parser.add_argument('--host', default='localhost')
parser.add_argument('-s', '--since', default=0)
parser.add_argument('-u', '--until', default=calendar.timegm(time.localtime()))
parser.add_argument('-i', '--unitid', type=int,
help='migrate a specifc unit/tree id')
parser.add_argument('-g', '--gedcom_path',
help='file path to a gedcom file. works only when -i XXX -c genTrees is used')
parser.add_argument('--lasthours',
help="migrate all content changed in the last LASTHOURS")
parser.add_argument('--dryrun', help="don't update data, just print what will be done")
return parser.parse_args()
def get_now_str():
format = '%d.%h-%H:%M:%S'
now = datetime.datetime.now()
now_str = datetime.datetime.strftime(now, format)
return now_str
def get_queries(collection_name=None, repo_path=conf.queries_repo_path):
''' return a dictionary with values of MSSQL query template and filenames
keys.
:param collection_name: the name of the collection, if False or missing
return the queries for all the collections
:param repo_path: where all the files are. defaults to the value from
the conf file
'''
queries = {}
if repo_path[-1] != '/':
repo_path = repo_path + '/'
if collection_name:
filenames = [collection_name + '.sql']
else:
# No single collection specified, migrating all the collections from conf
filenames = [col_name + '.sql' for col_name in conf.collections_to_migrate]
for filename in filenames:
try:
fh = open(os.path.join(repo_path, filename))
except IOError:
logger.error('Could not open file \'{}\' in {}.'.format(filename,
os.getcwd())
)
sys.exit(1)
queries[filename[:-4]] = fh.read()
fh.close()
return queries
def make_array(val, to_int=False):
''' make an array from a string of values separated by ',', '|' or ' ' '''
if val == None:
return []
else:
if not to_int:
return split(val[:-1])
else:
try:
return [int(x) for x in split(val[:-1])]
except ValueError:
logger.error('Value error while converting {}'.format(val))
return []
def make_subdocument_array(doc_arr, key, val_string):
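''' Merge one separator-delimited value string into a parallel list of
sub-documents, one dict per value. For example, key='PictureId' with
val_string='10,20,' applied to an empty list gives
[{'PictureId': '10'}, {'PictureId': '20'}]; a language-aware key such as
'EnPictureTypeDesc' is folded into {'PictureTypeDesc': {'En': ...}} instead.
'''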
returned_arr = doc_arr
if val_string == None:
return returned_arr
elif len(val_string) > 10000:
doc_id = None
logger.error('Given string is too long for {}!'.format(doc_id))
return returned_arr
sub_values = make_array(val_string)
for i in range(len(sub_values)):
val = sub_values[i]
if i >= len(returned_arr):
returned_arr.append({})
if is_lang_aware_key(key):
lang_prefix = key[:2]
lang_agnostic_key = key[2:]
if lang_agnostic_key in returned_arr[i]:
returned_arr[i][lang_agnostic_key][lang_prefix] = val
else:
doc = {}
doc[lang_prefix] = val
returned_arr[i][lang_agnostic_key] = doc
else:
returned_arr[i][key] = val
return returned_arr
def is_lang_aware_key(key):
lang_prefix = key[:2]
if lang_prefix == 'He' or lang_prefix == 'En':
return True
return False
def parse_common(doc):
parsed_doc = {}
parsed_doc['Attachments'] = []
parsed_doc['UnitPlaces'] = []
parsed_doc['Pictures'] = []
for key, val in doc.items():
if isinstance(val, Decimal):
parsed_doc[key] = float(val)
continue
elif isinstance(val, str):
try:
parsed_doc[key] = val.decode('utf-8')
except UnicodeDecodeError:
try:
if key == 'TS':
parsed_doc[key] = val.encode('hex')
continue
except:
logger.warning('failed to migrate key: %s' % key)
except:
logger.warning('failed to migrate key: %s' % key)
if key == 'LexiconIds':
parsed_doc[key] = make_array(val)
elif key in ('AttachmentFileName', 'AttachmentPath', 'AttachmentNum'):
parsed_doc['Attachments'] = make_subdocument_array(
parsed_doc['Attachments'], key, val)
elif key in ('PlaceIds', 'PlaceTypeCodes', 'EnPlaceTypeCodesDesc',
'HePlaceTypeCodesDesc'):
parsed_doc['UnitPlaces'] = make_subdocument_array(
parsed_doc['UnitPlaces'], key, val)
elif key in ('PictureId', 'IsPreview'):
parsed_doc['Pictures'] = make_subdocument_array(
parsed_doc['Pictures'], key, val)
elif is_lang_aware_key(key):
lang_prefix = key[:2]
lang_agnostic_key = key[2:]
if lang_agnostic_key in parsed_doc:
try:
parsed_doc[lang_agnostic_key][lang_prefix] = val
except:
d = {}
d[lang_prefix] = val
parsed_doc[lang_agnostic_key] = d
else:
d = {}
d[lang_prefix] = val
parsed_doc[lang_agnostic_key] = d
else:
parsed_doc[key] = val
return parsed_doc
def parse_image_unit(doc):
image_unit_doc = parse_common(doc)
image_unit_doc['PreviewPics'] = []
image_unit_doc['UnitPersonalities'] = []
image_unit_doc['UnitPeriod'] = []
image_unit_doc['Exhibitions'] = []
if not image_unit_doc.has_key('Pictures'):
image_unit_doc['Pictures'] = []
for key, val in doc.items():
if key in ('IsPreviewPreview', 'PrevPictureId'):
image_unit_doc['PreviewPics'] = make_subdocument_array(image_unit_doc['PreviewPics'], key, val)
elif key in ('PersonalityId', 'PersonalityType', 'EnPersonalityTypeDesc', 'HePersonalityTypeDesc', 'PerformerType', 'EnPerformerTypeDesc', 'HePerformerTypeDesc', 'OrderBy'):
image_unit_doc['UnitPersonalities'] = make_subdocument_array(image_unit_doc['UnitPersonalities'], key, val)
elif key in ('PicId', 'OldPictureNumber', 'PictureTypeCode', 'EnPictureTypeDesc', 'HePictureTypeDesc', 'Resolution', 'NegativeNumber', 'PictureLocation', 'LocationCode', 'ToScan', 'ForDisplay', 'IsLandscape'):
image_unit_doc['Pictures'] = make_subdocument_array(image_unit_doc['Pictures'], key, val)
elif key in ('PeriodNum', 'PeriodTypeCode', 'EnPeriodTypeDesc', 'HePeriodTypeDesc', 'PeriodDateTypeCode', 'EnPeriodDateTypeDesc', 'HePeriodDateTypeDesc', 'PeriodStartDate', 'PeriodEndDate', 'EnPeriodDesc', 'HePeriodDesc'):
image_unit_doc['UnitPeriod'] = make_subdocument_array(image_unit_doc['UnitPeriod'], key, val)
elif key in ('ExhibitionId', 'ExhibitionIsPreview'):
image_unit_doc['Exhibitions'] = make_subdocument_array(image_unit_doc['Exhibitions'], key, val)
elif key in ('AttachmentFileName', 'AttachmentPath', 'AttachmentNum'):
image_unit_doc['Attachments'] = make_subdocument_array(image_unit_doc['Attachments'], key, val)
elif key in ('SourceIds', 'PIctureReceived'):
# REALLY PIctureReceived?!
image_unit_doc[key] = make_array(val)
return image_unit_doc
def parse_image(doc):
image_doc = doc.copy()
# create thumbnail and attach to document
thumb_binary = create_thumb(image_doc, conf.photos_mount_point)
if thumb_binary:
image_doc['bin'] = thumb_binary
return image_doc
def parse_person(doc):
indi_doc = {}
for key, val in doc.items():
if key in ('BIRT_PLAC', 'MARR_PLAC', 'DEAT_PLAC'):
indi_doc[key] = val
if val:
indi_doc[key + '_lc'] = val.lower()
else:
indi_doc[key + '_lc'] = val
elif key in ['MSD', 'MED']:
indi_doc[key] = make_array(val, to_int=True)
elif key =='name':
indi_doc[key] = val
indi_doc['name_lc'] = map(unicode.lower, val)
indi_doc['name_S'] = map(phonetic.get_bhp_soundex, val)
else:
indi_doc[key] = val
if key in ('BIRT_PLAC', 'MARR_PLAC', 'DEAT_PLAC'):
indi_doc[key + '_S'] = phonetic.get_bhp_soundex(val)
return indi_doc
def parse_identity(doc):
return doc
def parse_synonym(doc):
parsed = {}
parsed['_id'] = doc['SynonymKey']
if doc['LanguageCode'] == 0:
parsed['lang'] = 'En'
else:
parsed['lang'] = 'He'
parsed['s_group'] = doc['Num']
parsed['str'] = doc['Synonym']
parsed['str_lc'] = doc['Synonym'].lower()
return parsed
def parse_doc(doc, collection_name):
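''' Dispatch a raw SQL row to the parser matching its collection: e.g.
'photoUnits' rows go through parse_image_unit, while 'places', 'familyNames',
'lexicon', 'personalities' and 'movies' all share the generic parse_common.
'''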
collection_procedure_map = {
'places': parse_common,
'familyNames': parse_common,
'lexicon': parse_common,
'photoUnits': parse_image_unit,
'photos': parse_image,
'persons': parse_person,
'synonyms': parse_synonym,
'personalities': parse_common,
'movies': parse_common,
}
return collection_procedure_map[collection_name](doc)
def parse_n_update(row, collection_name, dryrun=False):
doc = parse_doc(row, collection_name)
id_field = get_collection_id_field(collection_name)
logger.info('{}:Updating {}: {}, updated {}'.format(
collection_name, id_field, doc[id_field],
doc.get('UpdateDate', '?')))
if not dryrun:
update_row.delay(doc, collection_name)
return doc
def get_file_descriptors(tree, gedcom_path):
''' returns both the file_id and the full file name of the gedcom file '''
if not gedcom_path:
gedcom_path = tree['GenTreePath']
file_id = os.path.split(gedcom_path)[-1].split('.')[0]
file_name = os.path.join(conf.gentree_mount_point,
gedcom_path)
return file_id, file_name
def migrate_trees(cursor, only_process_treenum=None, gedcom_path=None, on_save=None, dryrun=False):
    ''' iterates over the given sql cursor of family trees and initiates
        update_row celery tasks for the people in each tree.
        returns how many trees were migrated
    '''
collection_name = "persons"
row_number = 0
filtered_rows = filter(lambda row: not only_process_treenum or row['GenTreeNumber'] == only_process_treenum, cursor)
for row_number, row in enumerate(filtered_rows, start=1):
file_id, file_name = get_file_descriptors(row, gedcom_path)
try:
gedcom_fd = open(file_name)
except IOError, e:
logger.error('failed to open gedocm file tree number {}, path {}: {}'.format(row['GenTreeNumber'], file_name, str(e)))
else:
try:
g = Gedcom(fd=gedcom_fd)
except (SyntaxError, GedcomParseError) as e:
logger.error('failed to parse tree number {}, path {}: {}'.format(row['GenTreeNumber'], file_name, str(e)))
else:
logger.info('>>> migrating tree {}, path {}'.format(row['GenTreeNumber'], file_name))
if on_save and dryrun:
raise Exception("dryrun is not supported with on_save")
else:
on_save = partial(parse_n_update, collection_name=collection_name, dryrun=dryrun) if not on_save else on_save
Gedcom2Persons(g, row['GenTreeNumber'], file_id, on_save)
logger.info('<<< migrated tree {}, path {}'.format(row['GenTreeNumber'], file_name))
return row_number
if __name__ == '__main__':
args = parse_args()
until = int(args.until)
since_file = None
if not args.since:
if args.lasthours:
past = datetime.datetime.now() -\
datetime.timedelta(hours=int(args.lasthours))
since = calendar.timegm(past.timetuple())
else:
try:
since_file = open('/var/run/bhs/last_update', 'r+')
since = since_file.read()
since = int(since) + 1
except IOError:
since_file = None
since = 0
else:
since = int(args.since)
collection = args.collection
queries = get_queries(collection)
logger.info('looking for changed items in {}-{}'.format(since, until))
photos_to_update = []
for collection_name, query in queries.items():
if collection_name == 'genTrees':
# the family trees get special treatment
# TODO: don't give them special treatment..
# this is called "persons" collection in mongo / ES
# TODO: have all places refer to it as "persons" instead of variations on genTrees / ftrees etc..
tree_nums = [args.unitid] if args.unitid else None
sql_cursor = sqlClient.execute(query, since=since, until=until)
count = migrate_trees(sql_cursor, args.unitid, args.gedcom_path, dryrun=args.dryrun)
if not count:
logger.info('{}:Skipping'.format(collection_name))
else:
if args.unitid:
sql_cursor = sqlClient.execute(query, unit_ids=[args.unitid])
else:
sql_cursor = sqlClient.execute(query, since=since, until=until)
if sql_cursor:
for row in sql_cursor:
doc = parse_n_update(row, collection_name, dryrun=args.dryrun)
# collect all the photos
pictures = doc.get('Pictures', None)
if pictures:
for pic in pictures:
if 'PictureId' in pic:
photos_to_update.append(pic['PictureId'])
else:
                logger.warn('failed getting updated units for {}'
                            .format(collection_name))
# TODO:
# rsync_media(collection_name)
# update photos
if len(photos_to_update) > 0:
photos_query = get_queries('photos')['photos']
photos_cursor = sqlClient.execute(photos_query,
unit_ids=photos_to_update,
)
for row in photos_cursor:
upload_photo(row, conf, dryrun=args.dryrun)
if since_file and not args.dryrun:
since_file.seek(0)
since_file.write(str(until))
since_file.close()
logger.info("closing sql connection...")
sqlClient.close_connections()
| agpl-3.0 | -3,581,223,935,101,060,000 | 38.26555 | 230 | 0.570767 | false |
ppries/tensorflow | tensorflow/python/kernel_tests/large_concat_op_test.py | 1 | 1511 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Concat Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class LargeConcatOpTest(tf.test.TestCase):
"""Tests that belong in concat_op_test.py, but run over large tensors."""
def testConcatLargeTensors(self):
# CPU-only test, because it fails on GPUs with <= 4GB memory.
with tf.device("/cpu:0"):
a = tf.ones([2**31 + 6], dtype=tf.int8)
b = tf.zeros([1024], dtype=tf.int8)
onezeros = tf.concat_v2([a, b], 0)
with self.test_session(use_gpu=False):
# TODO(dga): Add more depth to this test to validate correctness,
# not just non-crashingness, once other large tensor fixes have gone in.
_ = onezeros.eval()
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | -2,734,721,287,931,328,500 | 37.74359 | 80 | 0.667108 | false |
mick-d/nipype | nipype/interfaces/bids_utils.py | 3 | 5108 | # -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" Set of interfaces that allow interaction with BIDS data. Currently
available interfaces are:
BIDSDataGrabber: Query data from BIDS dataset using pybids grabbids.
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data'))
>>> os.chdir(datadir)
"""
from os.path import join, dirname
import json
from .. import logging
from .base import (traits,
DynamicTraitedSpec,
Directory,
BaseInterface,
isdefined,
Str,
Undefined)
have_pybids = True
try:
from bids import grabbids as gb
except ImportError:
have_pybids = False
LOGGER = logging.getLogger('workflows')
class BIDSDataGrabberInputSpec(DynamicTraitedSpec):
base_dir = Directory(exists=True,
desc='Path to BIDS Directory.',
mandatory=True)
output_query = traits.Dict(key_trait=Str,
value_trait=traits.Dict,
desc='Queries for outfield outputs')
raise_on_empty = traits.Bool(True, usedefault=True,
desc='Generate exception if list is empty '
'for a given field')
return_type = traits.Enum('file', 'namedtuple', usedefault=True)
class BIDSDataGrabber(BaseInterface):
""" BIDS datagrabber module that wraps around pybids to allow arbitrary
querying of BIDS datasets.
Examples
--------
By default, the BIDSDataGrabber fetches anatomical and functional images
from a project, and makes BIDS entities (e.g. subject) available for
filtering outputs.
>>> bg = BIDSDataGrabber()
>>> bg.inputs.base_dir = 'ds005/'
>>> bg.inputs.subject = '01'
>>> results = bg.run() # doctest: +SKIP
Dynamically created, user-defined output fields can also be defined to
return different types of outputs from the same project. All outputs
are filtered on common entities, which can be explicitly defined as
infields.
>>> bg = BIDSDataGrabber(infields = ['subject'], outfields = ['dwi'])
>>> bg.inputs.base_dir = 'ds005/'
>>> bg.inputs.subject = '01'
>>> bg.inputs.output_query['dwi'] = dict(modality='dwi')
>>> results = bg.run() # doctest: +SKIP
"""
input_spec = BIDSDataGrabberInputSpec
output_spec = DynamicTraitedSpec
_always_run = True
def __init__(self, infields=None, **kwargs):
"""
Parameters
----------
infields : list of str
Indicates the input fields to be dynamically created
outfields: list of str
Indicates output fields to be dynamically created.
If no matching items, returns Undefined.
"""
super(BIDSDataGrabber, self).__init__(**kwargs)
if not isdefined(self.inputs.output_query):
self.inputs.output_query = {"func": {"modality": "func"},
"anat": {"modality": "anat"}}
# If infields is empty, use all BIDS entities
        if infields is not None and have_pybids:
bids_config = join(dirname(gb.__file__), 'config', 'bids.json')
bids_config = json.load(open(bids_config, 'r'))
infields = [i['name'] for i in bids_config['entities']]
self._infields = infields or []
# used for mandatory inputs check
undefined_traits = {}
for key in self._infields:
self.inputs.add_trait(key, traits.Any)
undefined_traits[key] = kwargs[key] if key in kwargs else Undefined
self.inputs.trait_set(trait_change_notify=False, **undefined_traits)
def _run_interface(self, runtime):
if not have_pybids:
raise ImportError(
"The BIDSEventsGrabber interface requires pybids."
" Please make sure it is installed.")
return runtime
def _list_outputs(self):
layout = gb.BIDSLayout(self.inputs.base_dir)
# If infield is not given nm input value, silently ignore
filters = {}
for key in self._infields:
value = getattr(self.inputs, key)
if isdefined(value):
filters[key] = value
outputs = {}
for key, query in self.inputs.output_query.items():
args = query.copy()
args.update(filters)
filelist = layout.get(return_type=self.inputs.return_type, **args)
if len(filelist) == 0:
msg = 'Output key: %s returned no files' % key
if self.inputs.raise_on_empty:
raise IOError(msg)
else:
LOGGER.warning(msg)
filelist = Undefined
outputs[key] = filelist
return outputs
| bsd-3-clause | -78,829,929,764,361,940 | 33.748299 | 79 | 0.584573 | false |
bool-/btcbot | jsonrpc/authproxy.py | 1 | 3903 |
"""
Copyright 2011 Jeff Garzik
AuthServiceProxy has the following improvements over python-jsonrpc's
ServiceProxy class:
- HTTP connections persist for the life of the AuthServiceProxy object
(if server supports HTTP/1.1)
- sends protocol 'version', per JSON-RPC 1.1
- sends proper, incrementing 'id'
- sends Basic HTTP authentication headers
- parses all JSON numbers that look like floats as Decimal
- uses standard Python json lib
Previous copyright, from python-jsonrpc/jsonrpc/proxy.py:
Copyright (c) 2007 Jan-Klaas Kollhof
This file is part of jsonrpc.
jsonrpc is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this software; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
try:
import http.client as httplib
except ImportError:
import httplib
import base64
import json
import decimal
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
USER_AGENT = "AuthServiceProxy/0.1"
HTTP_TIMEOUT = 30
class JSONRPCException(Exception):
def __init__(self, rpcError):
Exception.__init__(self)
self.error = rpcError
class AuthServiceProxy(object):
def __init__(self, serviceURL, serviceName=None):
self.__serviceURL = serviceURL
self.__serviceName = serviceName
self.__url = urlparse.urlparse(serviceURL)
if self.__url.port is None:
port = 80
else:
port = self.__url.port
self.__idcnt = 0
authpair = "%s:%s" % (self.__url.username, self.__url.password)
authpair = authpair.encode('utf8')
self.__authhdr = "Basic ".encode('utf8') + base64.b64encode(authpair)
if self.__url.scheme == 'https':
self.__conn = httplib.HTTPSConnection(self.__url.hostname, port, None, None,False,
HTTP_TIMEOUT)
else:
self.__conn = httplib.HTTPConnection(self.__url.hostname, port, False,
HTTP_TIMEOUT)
def __getattr__(self, name):
if self.__serviceName != None:
name = "%s.%s" % (self.__serviceName, name)
return AuthServiceProxy(self.__serviceURL, name)
def __call__(self, *args):
self.__idcnt += 1
postdata = json.dumps({
'version': '1.1',
'method': self.__serviceName,
'params': args,
'id': self.__idcnt})
self.__conn.request('POST', self.__url.path, postdata,
{ 'Host' : self.__url.hostname,
'User-Agent' : USER_AGENT,
'Authorization' : self.__authhdr,
'Content-type' : 'application/json' })
httpresp = self.__conn.getresponse()
if httpresp is None:
raise JSONRPCException({
'code' : -342, 'message' : 'missing HTTP response from server'})
resp = httpresp.read()
resp = resp.decode('utf8')
resp = json.loads(resp)
if resp['error'] != None:
raise JSONRPCException(resp['error'])
elif 'result' not in resp:
raise JSONRPCException({
'code' : -343, 'message' : 'missing JSON-RPC result'})
else:
return resp['result']
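# Minimal usage sketch (added for illustration; the URL, credentials and RPC
# method names below are assumptions -- any method exposed by the JSON-RPC
# server can be called the same way):
#
#   access = AuthServiceProxy("http://rpcuser:rpcpassword@127.0.0.1:8332")
#   count = access.getblockcount()
#   info = access.getinfo()
#
# Attribute access only records the dotted method name; the actual HTTP POST
# with Basic auth happens when the proxy object is called.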
| gpl-3.0 | 3,396,568,396,845,373,400 | 33.539823 | 94 | 0.607994 | false |
account-login/dnsagent | dnsagent/resolver/dual.py | 1 | 1713 | from typing import Mapping, Union, Sequence
from twisted.names.error import ResolverError
from twisted.python.failure import Failure
from dnsagent.resolver.cn import CnResolver
from dnsagent.resolver.parallel import PoliciedParallelResolver, BaseParalledResolverPolicy
__all__ = ('DualResolver',)
class PolicyError(ResolverError):
pass
class NoSuchRule(PolicyError):
pass
class SuccessFailStatePolicy(BaseParalledResolverPolicy):
SUCC = 'S'
FAIL = 'F'
WAIT = 'W'
def __init__(self, rules: Mapping[Sequence[str], Union[str, int]]):
super().__init__()
self.rules = rules
def _convert(self, result):
if result is None:
return self.WAIT
elif isinstance(result, Failure):
return self.FAIL
else:
return self.SUCC
def for_results(self, results: Sequence):
states = tuple(self._convert(x) for x in results)
try:
action = self.rules[states]
except KeyError:
raise NoSuchRule(states)
if action == self.FAIL:
raise PolicyError
elif action == self.WAIT:
return None
else:
assert isinstance(action, int)
return action
_cn_ab_policy = SuccessFailStatePolicy({
# Cn Ab
('W', 'W'): 'W',
('W', 'S'): 'W',
('W', 'F'): 'W',
('S', 'W'): 0,
('S', 'S'): 0,
('S', 'F'): 0,
('F', 'W'): 'W',
('F', 'S'): 1,
('F', 'F'): 'F',
})
class DualResolver(PoliciedParallelResolver):
def __init__(self, cn_resolver, ab_resolver, policy=_cn_ab_policy):
resolvers = [ CnResolver(cn_resolver), ab_resolver ]
super().__init__(resolvers, policy)
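# Illustrative sketch of how DualResolver is meant to be wired up (the concrete
# cn_resolver / ab_resolver instances are assumptions -- any resolver objects
# used elsewhere in this package should work):
#
#   resolver = DualResolver(cn_resolver, ab_resolver)
#
# Both resolvers are queried in parallel; per _cn_ab_policy the China-side
# (CnResolver-wrapped) answer wins whenever it succeeds, the abroad answer is
# used only after the China-side lookup fails, and otherwise the query waits.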
| mit | -6,318,335,503,835,662,000 | 23.126761 | 91 | 0.582604 | false |
jzinner/utils | samples/python_samples/filewalk.py | 1 | 1158 | #!/usr/bin/python
# import os for the os.walk() function
# os.walk yields a 3-tuple (dirpath, dirnames, filenames) for each directory it visits
import os
import sys
def ParseArgs():
Error1 = """ # USAGE #
# filesystemWalk.py [directory]
# filesystemWalk.py /Users/mcohoon/Devel/PythonPractice """
if len(sys.argv) != 2:
print Error1
    elif not os.path.isdir(sys.argv[1]):
print Error1
else:
start = sys.argv[1]
filesystemWalk(start)
def filesystemWalk(start):
path = os.path.abspath(start)
print "path = " +path
for dirpath, dirnames, filenames in os.walk(path):
print "Found the initial directory " + dirpath
for file in filenames:
print "Found the file ", os.path.join(dirpath, file)
for dir in dirnames:
print "Found the directory ", os.path.join(dirpath, dir)
ParseArgs()
#start = "/Users/mcohoon/Devel/PythonPractice"
#start = "."
#filesystemWalk(start)
#os.path to take a string and make it into a full directory path
#os.walk gives you the path to the directory as the first value in the loop
#use os.path.join() to create full filename:
| mit | -6,329,629,763,930,400,000 | 28.692308 | 75 | 0.659758 | false |
ahaldane/numpy | numpy/lib/tests/test_recfunctions.py | 1 | 39118 | from __future__ import division, absolute_import, print_function
import pytest
import numpy as np
import numpy.ma as ma
from numpy.ma.mrecords import MaskedRecords
from numpy.ma.testutils import assert_equal
from numpy.testing import assert_, assert_raises
from numpy.lib.recfunctions import (
drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields,
find_duplicates, merge_arrays, append_fields, stack_arrays, join_by,
repack_fields, unstructured_to_structured, structured_to_unstructured,
apply_along_fields, require_fields, assign_fields_by_name)
get_fieldspec = np.lib.recfunctions._get_fieldspec
get_names = np.lib.recfunctions.get_names
get_names_flat = np.lib.recfunctions.get_names_flat
zip_descr = np.lib.recfunctions._zip_descr
zip_dtype = np.lib.recfunctions._zip_dtype
class TestRecFunctions(object):
# Misc tests
def setup(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array([('A', 1.), ('B', 2.)],
dtype=[('A', '|S3'), ('B', float)])
w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
self.data = (w, x, y, z)
def test_zip_descr(self):
# Test zip_descr
(w, x, y, z) = self.data
# Std array
test = zip_descr((x, x), flatten=True)
assert_equal(test,
np.dtype([('', int), ('', int)]))
test = zip_descr((x, x), flatten=False)
assert_equal(test,
np.dtype([('', int), ('', int)]))
# Std & flexible-dtype
test = zip_descr((x, z), flatten=True)
assert_equal(test,
np.dtype([('', int), ('A', '|S3'), ('B', float)]))
test = zip_descr((x, z), flatten=False)
assert_equal(test,
np.dtype([('', int),
('', [('A', '|S3'), ('B', float)])]))
# Standard & nested dtype
test = zip_descr((x, w), flatten=True)
assert_equal(test,
np.dtype([('', int),
('a', int),
('ba', float), ('bb', int)]))
test = zip_descr((x, w), flatten=False)
assert_equal(test,
np.dtype([('', int),
('', [('a', int),
('b', [('ba', float), ('bb', int)])])]))
def test_drop_fields(self):
# Test drop_fields
a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
# A basic field
test = drop_fields(a, 'a')
control = np.array([((2, 3.0),), ((5, 6.0),)],
dtype=[('b', [('ba', float), ('bb', int)])])
assert_equal(test, control)
# Another basic field (but nesting two fields)
test = drop_fields(a, 'b')
control = np.array([(1,), (4,)], dtype=[('a', int)])
assert_equal(test, control)
# A nested sub-field
test = drop_fields(a, ['ba', ])
control = np.array([(1, (3.0,)), (4, (6.0,))],
dtype=[('a', int), ('b', [('bb', int)])])
assert_equal(test, control)
# All the nested sub-field from a field: zap that field
test = drop_fields(a, ['ba', 'bb'])
control = np.array([(1,), (4,)], dtype=[('a', int)])
assert_equal(test, control)
test = drop_fields(a, ['a', 'b'])
assert_(test is None)
def test_rename_fields(self):
# Test rename fields
a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
dtype=[('a', int),
('b', [('ba', float), ('bb', (float, 2))])])
test = rename_fields(a, {'a': 'A', 'bb': 'BB'})
newdtype = [('A', int), ('b', [('ba', float), ('BB', (float, 2))])]
control = a.view(newdtype)
assert_equal(test.dtype, newdtype)
assert_equal(test, control)
def test_get_names(self):
# Test get_names
ndtype = np.dtype([('A', '|S3'), ('B', float)])
test = get_names(ndtype)
assert_equal(test, ('A', 'B'))
ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])
test = get_names(ndtype)
assert_equal(test, ('a', ('b', ('ba', 'bb'))))
def test_get_names_flat(self):
# Test get_names_flat
ndtype = np.dtype([('A', '|S3'), ('B', float)])
test = get_names_flat(ndtype)
assert_equal(test, ('A', 'B'))
ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])
test = get_names_flat(ndtype)
assert_equal(test, ('a', 'b', 'ba', 'bb'))
def test_get_fieldstructure(self):
# Test get_fieldstructure
# No nested fields
ndtype = np.dtype([('A', '|S3'), ('B', float)])
test = get_fieldstructure(ndtype)
assert_equal(test, {'A': [], 'B': []})
# One 1-nested field
ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])])
test = get_fieldstructure(ndtype)
assert_equal(test, {'A': [], 'B': [], 'BA': ['B', ], 'BB': ['B']})
# One 2-nested fields
ndtype = np.dtype([('A', int),
('B', [('BA', int),
('BB', [('BBA', int), ('BBB', int)])])])
test = get_fieldstructure(ndtype)
control = {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'],
'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
assert_equal(test, control)
def test_find_duplicates(self):
# Test find_duplicates
a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')),
(1, (1., 'B')), (2, (2., 'B')), (2, (2., 'C'))],
mask=[(0, (0, 0)), (0, (0, 0)), (0, (0, 0)),
(0, (0, 0)), (1, (0, 0)), (0, (1, 0))],
dtype=[('A', int), ('B', [('BA', float), ('BB', '|S1')])])
test = find_duplicates(a, ignoremask=False, return_index=True)
control = [0, 2]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
test = find_duplicates(a, key='A', return_index=True)
control = [0, 1, 2, 3, 5]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
test = find_duplicates(a, key='B', return_index=True)
control = [0, 1, 2, 4]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
test = find_duplicates(a, key='BA', return_index=True)
control = [0, 1, 2, 4]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
test = find_duplicates(a, key='BB', return_index=True)
control = [0, 1, 2, 3, 4]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
def test_find_duplicates_ignoremask(self):
# Test the ignoremask option of find_duplicates
ndtype = [('a', int)]
a = ma.array([1, 1, 1, 2, 2, 3, 3],
mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
test = find_duplicates(a, ignoremask=True, return_index=True)
control = [0, 1, 3, 4]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
test = find_duplicates(a, ignoremask=False, return_index=True)
control = [0, 1, 2, 3, 4, 6]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
def test_repack_fields(self):
dt = np.dtype('u1,f4,i8', align=True)
a = np.zeros(2, dtype=dt)
assert_equal(repack_fields(dt), np.dtype('u1,f4,i8'))
assert_equal(repack_fields(a).itemsize, 13)
assert_equal(repack_fields(repack_fields(dt), align=True), dt)
# make sure type is preserved
dt = np.dtype((np.record, dt))
assert_(repack_fields(dt).type is np.record)
def test_structured_to_unstructured(self):
a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
out = structured_to_unstructured(a)
assert_equal(out, np.zeros((4,5), dtype='f8'))
b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
out = np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1)
assert_equal(out, np.array([ 3. , 5.5, 9. , 11. ]))
out = np.mean(structured_to_unstructured(b[['x']]), axis=-1)
assert_equal(out, np.array([ 1. , 4. , 7. , 10. ]))
c = np.arange(20).reshape((4,5))
out = unstructured_to_structured(c, a.dtype)
want = np.array([( 0, ( 1., 2), [ 3., 4.]),
( 5, ( 6., 7), [ 8., 9.]),
(10, (11., 12), [13., 14.]),
(15, (16., 17), [18., 19.])],
dtype=[('a', 'i4'),
('b', [('f0', 'f4'), ('f1', 'u2')]),
('c', 'f4', (2,))])
assert_equal(out, want)
d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
assert_equal(apply_along_fields(np.mean, d),
np.array([ 8.0/3, 16.0/3, 26.0/3, 11. ]))
assert_equal(apply_along_fields(np.mean, d[['x', 'z']]),
np.array([ 3. , 5.5, 9. , 11. ]))
# check that for uniform field dtypes we get a view, not a copy:
d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'i4')])
dd = structured_to_unstructured(d)
ddd = unstructured_to_structured(dd, d.dtype)
assert_(dd.base is d)
assert_(ddd.base is d)
# including uniform fields with subarrays unpacked
d = np.array([(1, [2, 3], [[ 4, 5], [ 6, 7]]),
(8, [9, 10], [[11, 12], [13, 14]])],
dtype=[('x0', 'i4'), ('x1', ('i4', 2)), ('x2', ('i4', (2, 2)))])
dd = structured_to_unstructured(d)
ddd = unstructured_to_structured(dd, d.dtype)
assert_(dd.base is d)
assert_(ddd.base is d)
# test that nested fields with identical names don't break anything
point = np.dtype([('x', int), ('y', int)])
triangle = np.dtype([('a', point), ('b', point), ('c', point)])
arr = np.zeros(10, triangle)
res = structured_to_unstructured(arr, dtype=int)
assert_equal(res, np.zeros((10, 6), dtype=int))
def test_field_assignment_by_name(self):
a = np.ones(2, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
newdt = [('b', 'f4'), ('c', 'u1')]
assert_equal(require_fields(a, newdt), np.ones(2, newdt))
b = np.array([(1,2), (3,4)], dtype=newdt)
assign_fields_by_name(a, b, zero_unassigned=False)
assert_equal(a, np.array([(1,1,2),(1,3,4)], dtype=a.dtype))
assign_fields_by_name(a, b)
assert_equal(a, np.array([(0,1,2),(0,3,4)], dtype=a.dtype))
# test nested fields
a = np.ones(2, dtype=[('a', [('b', 'f8'), ('c', 'u1')])])
newdt = [('a', [('c', 'u1')])]
assert_equal(require_fields(a, newdt), np.ones(2, newdt))
b = np.array([((2,),), ((3,),)], dtype=newdt)
assign_fields_by_name(a, b, zero_unassigned=False)
assert_equal(a, np.array([((1,2),), ((1,3),)], dtype=a.dtype))
assign_fields_by_name(a, b)
assert_equal(a, np.array([((0,2),), ((0,3),)], dtype=a.dtype))
# test unstructured code path for 0d arrays
a, b = np.array(3), np.array(0)
assign_fields_by_name(b, a)
assert_equal(b[()], 3)
class TestRecursiveFillFields(object):
# Test recursive_fill_fields.
def test_simple_flexible(self):
# Test recursive_fill_fields on flexible-array
a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
b = np.zeros((3,), dtype=a.dtype)
test = recursive_fill_fields(a, b)
control = np.array([(1, 10.), (2, 20.), (0, 0.)],
dtype=[('A', int), ('B', float)])
assert_equal(test, control)
def test_masked_flexible(self):
# Test recursive_fill_fields on masked flexible-array
a = ma.array([(1, 10.), (2, 20.)], mask=[(0, 1), (1, 0)],
dtype=[('A', int), ('B', float)])
b = ma.zeros((3,), dtype=a.dtype)
test = recursive_fill_fields(a, b)
control = ma.array([(1, 10.), (2, 20.), (0, 0.)],
mask=[(0, 1), (1, 0), (0, 0)],
dtype=[('A', int), ('B', float)])
assert_equal(test, control)
class TestMergeArrays(object):
# Test merge_arrays
def setup(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array(
[('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
w = np.array(
[(1, (2, 3.0)), (4, (5, 6.0))],
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
self.data = (w, x, y, z)
def test_solo(self):
# Test merge_arrays on a single array.
(_, x, _, z) = self.data
test = merge_arrays(x)
control = np.array([(1,), (2,)], dtype=[('f0', int)])
assert_equal(test, control)
test = merge_arrays((x,))
assert_equal(test, control)
test = merge_arrays(z, flatten=False)
assert_equal(test, z)
test = merge_arrays(z, flatten=True)
assert_equal(test, z)
def test_solo_w_flatten(self):
# Test merge_arrays on a single array w & w/o flattening
w = self.data[0]
test = merge_arrays(w, flatten=False)
assert_equal(test, w)
test = merge_arrays(w, flatten=True)
control = np.array([(1, 2, 3.0), (4, 5, 6.0)],
dtype=[('a', int), ('ba', float), ('bb', int)])
assert_equal(test, control)
def test_standard(self):
# Test standard & standard
# Test merge arrays
(_, x, y, _) = self.data
test = merge_arrays((x, y), usemask=False)
control = np.array([(1, 10), (2, 20), (-1, 30)],
dtype=[('f0', int), ('f1', int)])
assert_equal(test, control)
test = merge_arrays((x, y), usemask=True)
control = ma.array([(1, 10), (2, 20), (-1, 30)],
mask=[(0, 0), (0, 0), (1, 0)],
dtype=[('f0', int), ('f1', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_flatten(self):
# Test standard & flexible
(_, x, _, z) = self.data
test = merge_arrays((x, z), flatten=True)
control = np.array([(1, 'A', 1.), (2, 'B', 2.)],
dtype=[('f0', int), ('A', '|S3'), ('B', float)])
assert_equal(test, control)
test = merge_arrays((x, z), flatten=False)
control = np.array([(1, ('A', 1.)), (2, ('B', 2.))],
dtype=[('f0', int),
('f1', [('A', '|S3'), ('B', float)])])
assert_equal(test, control)
def test_flatten_wflexible(self):
# Test flatten standard & nested
(w, x, _, _) = self.data
test = merge_arrays((x, w), flatten=True)
control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)],
dtype=[('f0', int),
('a', int), ('ba', float), ('bb', int)])
assert_equal(test, control)
test = merge_arrays((x, w), flatten=False)
controldtype = [('f0', int),
('f1', [('a', int),
('b', [('ba', float), ('bb', int)])])]
control = np.array([(1., (1, (2, 3.0))), (2, (4, (5, 6.0)))],
dtype=controldtype)
assert_equal(test, control)
def test_wmasked_arrays(self):
# Test merge_arrays masked arrays
(_, x, _, _) = self.data
mx = ma.array([1, 2, 3], mask=[1, 0, 0])
test = merge_arrays((x, mx), usemask=True)
control = ma.array([(1, 1), (2, 2), (-1, 3)],
mask=[(0, 1), (0, 0), (1, 0)],
dtype=[('f0', int), ('f1', int)])
assert_equal(test, control)
test = merge_arrays((x, mx), usemask=True, asrecarray=True)
assert_equal(test, control)
assert_(isinstance(test, MaskedRecords))
def test_w_singlefield(self):
# Test single field
test = merge_arrays((np.array([1, 2]).view([('a', int)]),
np.array([10., 20., 30.])),)
control = ma.array([(1, 10.), (2, 20.), (-1, 30.)],
mask=[(0, 0), (0, 0), (1, 0)],
dtype=[('a', int), ('f1', float)])
assert_equal(test, control)
def test_w_shorter_flex(self):
# Test merge_arrays w/ a shorter flexndarray.
z = self.data[-1]
# Fixme, this test looks incomplete and broken
#test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)])))
#control = np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)],
# dtype=[('A', '|S3'), ('B', float), ('C', int)])
#assert_equal(test, control)
# Hack to avoid pyflakes warnings about unused variables
merge_arrays((z, np.array([10, 20, 30]).view([('C', int)])))
np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)],
dtype=[('A', '|S3'), ('B', float), ('C', int)])
def test_singlerecord(self):
(_, x, y, z) = self.data
test = merge_arrays((x[0], y[0], z[0]), usemask=False)
control = np.array([(1, 10, ('A', 1))],
dtype=[('f0', int),
('f1', int),
('f2', [('A', '|S3'), ('B', float)])])
assert_equal(test, control)
class TestAppendFields(object):
# Test append_fields
def setup(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array(
[('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
self.data = (w, x, y, z)
def test_append_single(self):
# Test simple case
(_, x, _, _) = self.data
test = append_fields(x, 'A', data=[10, 20, 30])
control = ma.array([(1, 10), (2, 20), (-1, 30)],
mask=[(0, 0), (0, 0), (1, 0)],
dtype=[('f0', int), ('A', int)],)
assert_equal(test, control)
def test_append_double(self):
# Test simple case
(_, x, _, _) = self.data
test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]])
control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)],
mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)],
dtype=[('f0', int), ('A', int), ('B', int)],)
assert_equal(test, control)
def test_append_on_flex(self):
# Test append_fields on flexible type arrays
z = self.data[-1]
test = append_fields(z, 'C', data=[10, 20, 30])
control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)],
mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)],
dtype=[('A', '|S3'), ('B', float), ('C', int)],)
assert_equal(test, control)
def test_append_on_nested(self):
# Test append_fields on nested fields
w = self.data[0]
test = append_fields(w, 'C', data=[10, 20, 30])
control = ma.array([(1, (2, 3.0), 10),
(4, (5, 6.0), 20),
(-1, (-1, -1.), 30)],
mask=[(
0, (0, 0), 0), (0, (0, 0), 0), (1, (1, 1), 0)],
dtype=[('a', int),
('b', [('ba', float), ('bb', int)]),
('C', int)],)
assert_equal(test, control)
class TestStackArrays(object):
# Test stack_arrays
def setup(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array(
[('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
self.data = (w, x, y, z)
def test_solo(self):
# Test stack_arrays on single arrays
(_, x, _, _) = self.data
test = stack_arrays((x,))
assert_equal(test, x)
assert_(test is x)
test = stack_arrays(x)
assert_equal(test, x)
assert_(test is x)
def test_unnamed_fields(self):
# Tests combinations of arrays w/o named fields
(_, x, y, _) = self.data
test = stack_arrays((x, x), usemask=False)
control = np.array([1, 2, 1, 2])
assert_equal(test, control)
test = stack_arrays((x, y), usemask=False)
control = np.array([1, 2, 10, 20, 30])
assert_equal(test, control)
test = stack_arrays((y, x), usemask=False)
control = np.array([10, 20, 30, 1, 2])
assert_equal(test, control)
def test_unnamed_and_named_fields(self):
# Test combination of arrays w/ & w/o named fields
(_, x, _, z) = self.data
test = stack_arrays((x, z))
control = ma.array([(1, -1, -1), (2, -1, -1),
(-1, 'A', 1), (-1, 'B', 2)],
mask=[(0, 1, 1), (0, 1, 1),
(1, 0, 0), (1, 0, 0)],
dtype=[('f0', int), ('A', '|S3'), ('B', float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
test = stack_arrays((z, x))
control = ma.array([('A', 1, -1), ('B', 2, -1),
(-1, -1, 1), (-1, -1, 2), ],
mask=[(0, 0, 1), (0, 0, 1),
(1, 1, 0), (1, 1, 0)],
dtype=[('A', '|S3'), ('B', float), ('f2', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
test = stack_arrays((z, z, x))
control = ma.array([('A', 1, -1), ('B', 2, -1),
('A', 1, -1), ('B', 2, -1),
(-1, -1, 1), (-1, -1, 2), ],
mask=[(0, 0, 1), (0, 0, 1),
(0, 0, 1), (0, 0, 1),
(1, 1, 0), (1, 1, 0)],
dtype=[('A', '|S3'), ('B', float), ('f2', int)])
assert_equal(test, control)
def test_matching_named_fields(self):
# Test combination of arrays w/ matching field names
(_, x, _, z) = self.data
zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
dtype=[('A', '|S3'), ('B', float), ('C', float)])
test = stack_arrays((z, zz))
control = ma.array([('A', 1, -1), ('B', 2, -1),
(
'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
dtype=[('A', '|S3'), ('B', float), ('C', float)],
mask=[(0, 0, 1), (0, 0, 1),
(0, 0, 0), (0, 0, 0), (0, 0, 0)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
test = stack_arrays((z, zz, x))
ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)]
control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1),
('a', 10., 100., -1), ('b', 20., 200., -1),
('c', 30., 300., -1),
(-1, -1, -1, 1), (-1, -1, -1, 2)],
dtype=ndtype,
mask=[(0, 0, 1, 1), (0, 0, 1, 1),
(0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1),
(1, 1, 1, 0), (1, 1, 1, 0)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_defaults(self):
# Test defaults: no exception raised if keys of defaults are not fields.
(_, _, _, z) = self.data
zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
dtype=[('A', '|S3'), ('B', float), ('C', float)])
defaults = {'A': '???', 'B': -999., 'C': -9999., 'D': -99999.}
test = stack_arrays((z, zz), defaults=defaults)
control = ma.array([('A', 1, -9999.), ('B', 2, -9999.),
(
'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
dtype=[('A', '|S3'), ('B', float), ('C', float)],
mask=[(0, 0, 1), (0, 0, 1),
(0, 0, 0), (0, 0, 0), (0, 0, 0)])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
def test_autoconversion(self):
# Tests autoconversion
adtype = [('A', int), ('B', bool), ('C', float)]
a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)
bdtype = [('A', int), ('B', float), ('C', float)]
b = ma.array([(4, 5, 6)], dtype=bdtype)
control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],
dtype=bdtype)
test = stack_arrays((a, b), autoconvert=True)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
with assert_raises(TypeError):
stack_arrays((a, b), autoconvert=False)
def test_checktitles(self):
# Test using titles in the field names
adtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]
a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)
bdtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]
b = ma.array([(4, 5, 6)], dtype=bdtype)
test = stack_arrays((a, b))
control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],
dtype=bdtype)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_subdtype(self):
z = np.array([
('A', 1), ('B', 2)
], dtype=[('A', '|S3'), ('B', float, (1,))])
zz = np.array([
('a', [10.], 100.), ('b', [20.], 200.), ('c', [30.], 300.)
], dtype=[('A', '|S3'), ('B', float, (1,)), ('C', float)])
res = stack_arrays((z, zz))
expected = ma.array(
data=[
(b'A', [1.0], 0),
(b'B', [2.0], 0),
(b'a', [10.0], 100.0),
(b'b', [20.0], 200.0),
(b'c', [30.0], 300.0)],
mask=[
(False, [False], True),
(False, [False], True),
(False, [False], False),
(False, [False], False),
(False, [False], False)
],
dtype=zz.dtype
)
assert_equal(res.dtype, expected.dtype)
assert_equal(res, expected)
assert_equal(res.mask, expected.mask)
class TestJoinBy(object):
def setup(self):
self.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
np.arange(100, 110))),
dtype=[('a', int), ('b', int), ('c', int)])
self.b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75),
np.arange(100, 110))),
dtype=[('a', int), ('b', int), ('d', int)])
def test_inner_join(self):
# Basic test of join_by
a, b = self.a, self.b
test = join_by('a', a, b, jointype='inner')
control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101),
(7, 57, 67, 107, 102), (8, 58, 68, 108, 103),
(9, 59, 69, 109, 104)],
dtype=[('a', int), ('b1', int), ('b2', int),
('c', int), ('d', int)])
assert_equal(test, control)
def test_join(self):
a, b = self.a, self.b
# Fixme, this test is broken
#test = join_by(('a', 'b'), a, b)
#control = np.array([(5, 55, 105, 100), (6, 56, 106, 101),
# (7, 57, 107, 102), (8, 58, 108, 103),
# (9, 59, 109, 104)],
# dtype=[('a', int), ('b', int),
# ('c', int), ('d', int)])
#assert_equal(test, control)
# Hack to avoid pyflakes unused variable warnings
join_by(('a', 'b'), a, b)
np.array([(5, 55, 105, 100), (6, 56, 106, 101),
(7, 57, 107, 102), (8, 58, 108, 103),
(9, 59, 109, 104)],
dtype=[('a', int), ('b', int),
('c', int), ('d', int)])
def test_join_subdtype(self):
# tests the bug in https://stackoverflow.com/q/44769632/102441
from numpy.lib import recfunctions as rfn
foo = np.array([(1,)],
dtype=[('key', int)])
bar = np.array([(1, np.array([1,2,3]))],
dtype=[('key', int), ('value', 'uint16', 3)])
res = join_by('key', foo, bar)
assert_equal(res, bar.view(ma.MaskedArray))
def test_outer_join(self):
a, b = self.a, self.b
test = join_by(('a', 'b'), a, b, 'outer')
control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1),
(2, 52, 102, -1), (3, 53, 103, -1),
(4, 54, 104, -1), (5, 55, 105, -1),
(5, 65, -1, 100), (6, 56, 106, -1),
(6, 66, -1, 101), (7, 57, 107, -1),
(7, 67, -1, 102), (8, 58, 108, -1),
(8, 68, -1, 103), (9, 59, 109, -1),
(9, 69, -1, 104), (10, 70, -1, 105),
(11, 71, -1, 106), (12, 72, -1, 107),
(13, 73, -1, 108), (14, 74, -1, 109)],
mask=[(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 1, 0), (0, 0, 0, 1),
(0, 0, 1, 0), (0, 0, 0, 1),
(0, 0, 1, 0), (0, 0, 0, 1),
(0, 0, 1, 0), (0, 0, 0, 1),
(0, 0, 1, 0), (0, 0, 1, 0),
(0, 0, 1, 0), (0, 0, 1, 0),
(0, 0, 1, 0), (0, 0, 1, 0)],
dtype=[('a', int), ('b', int),
('c', int), ('d', int)])
assert_equal(test, control)
def test_leftouter_join(self):
a, b = self.a, self.b
test = join_by(('a', 'b'), a, b, 'leftouter')
control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1),
(2, 52, 102, -1), (3, 53, 103, -1),
(4, 54, 104, -1), (5, 55, 105, -1),
(6, 56, 106, -1), (7, 57, 107, -1),
(8, 58, 108, -1), (9, 59, 109, -1)],
mask=[(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1)],
dtype=[('a', int), ('b', int), ('c', int), ('d', int)])
assert_equal(test, control)
def test_different_field_order(self):
# gh-8940
a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])
b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
# this should not give a FutureWarning:
j = join_by(['c', 'b'], a, b, jointype='inner', usemask=False)
assert_equal(j.dtype.names, ['b', 'c', 'a1', 'a2'])
def test_duplicate_keys(self):
a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])
b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
assert_raises(ValueError, join_by, ['a', 'b', 'b'], a, b)
@pytest.mark.xfail(reason="See comment at gh-9343")
def test_same_name_different_dtypes_key(self):
a_dtype = np.dtype([('key', 'S5'), ('value', '<f4')])
b_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
expected_dtype = np.dtype([
('key', 'S10'), ('value1', '<f4'), ('value2', '<f4')])
a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)
b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)
res = join_by('key', a, b)
assert_equal(res.dtype, expected_dtype)
def test_same_name_different_dtypes(self):
# gh-9338
a_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
b_dtype = np.dtype([('key', 'S10'), ('value', '<f8')])
expected_dtype = np.dtype([
('key', '|S10'), ('value1', '<f4'), ('value2', '<f8')])
a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)
b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)
res = join_by('key', a, b)
assert_equal(res.dtype, expected_dtype)
def test_subarray_key(self):
a_dtype = np.dtype([('pos', int, 3), ('f', '<f4')])
a = np.array([([1, 1, 1], np.pi), ([1, 2, 3], 0.0)], dtype=a_dtype)
b_dtype = np.dtype([('pos', int, 3), ('g', '<f4')])
b = np.array([([1, 1, 1], 3), ([3, 2, 1], 0.0)], dtype=b_dtype)
expected_dtype = np.dtype([('pos', int, 3), ('f', '<f4'), ('g', '<f4')])
expected = np.array([([1, 1, 1], np.pi, 3)], dtype=expected_dtype)
res = join_by('pos', a, b)
assert_equal(res.dtype, expected_dtype)
assert_equal(res, expected)
def test_padded_dtype(self):
dt = np.dtype('i1,f4', align=True)
dt.names = ('k', 'v')
        assert_equal(len(dt.descr), 3)  # padding field is inserted
a = np.array([(1, 3), (3, 2)], dt)
b = np.array([(1, 1), (2, 2)], dt)
res = join_by('k', a, b)
# no padding fields remain
expected_dtype = np.dtype([
('k', 'i1'), ('v1', 'f4'), ('v2', 'f4')
])
assert_equal(res.dtype, expected_dtype)
class TestJoinBy2(object):
@classmethod
def setup(cls):
cls.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
np.arange(100, 110))),
dtype=[('a', int), ('b', int), ('c', int)])
cls.b = np.array(list(zip(np.arange(10), np.arange(65, 75),
np.arange(100, 110))),
dtype=[('a', int), ('b', int), ('d', int)])
def test_no_r1postfix(self):
# Basic test of join_by no_r1postfix
a, b = self.a, self.b
test = join_by(
'a', a, b, r1postfix='', r2postfix='2', jointype='inner')
control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101),
(2, 52, 67, 102, 102), (3, 53, 68, 103, 103),
(4, 54, 69, 104, 104), (5, 55, 70, 105, 105),
(6, 56, 71, 106, 106), (7, 57, 72, 107, 107),
(8, 58, 73, 108, 108), (9, 59, 74, 109, 109)],
dtype=[('a', int), ('b', int), ('b2', int),
('c', int), ('d', int)])
assert_equal(test, control)
def test_no_postfix(self):
assert_raises(ValueError, join_by, 'a', self.a, self.b,
r1postfix='', r2postfix='')
def test_no_r2postfix(self):
# Basic test of join_by no_r2postfix
a, b = self.a, self.b
test = join_by(
'a', a, b, r1postfix='1', r2postfix='', jointype='inner')
control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101),
(2, 52, 67, 102, 102), (3, 53, 68, 103, 103),
(4, 54, 69, 104, 104), (5, 55, 70, 105, 105),
(6, 56, 71, 106, 106), (7, 57, 72, 107, 107),
(8, 58, 73, 108, 108), (9, 59, 74, 109, 109)],
dtype=[('a', int), ('b1', int), ('b', int),
('c', int), ('d', int)])
assert_equal(test, control)
def test_two_keys_two_vars(self):
a = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
np.arange(50, 60), np.arange(10, 20))),
dtype=[('k', int), ('a', int), ('b', int), ('c', int)])
b = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
np.arange(65, 75), np.arange(0, 10))),
dtype=[('k', int), ('a', int), ('b', int), ('c', int)])
control = np.array([(10, 0, 50, 65, 10, 0), (11, 0, 51, 66, 11, 1),
(10, 1, 52, 67, 12, 2), (11, 1, 53, 68, 13, 3),
(10, 2, 54, 69, 14, 4), (11, 2, 55, 70, 15, 5),
(10, 3, 56, 71, 16, 6), (11, 3, 57, 72, 17, 7),
(10, 4, 58, 73, 18, 8), (11, 4, 59, 74, 19, 9)],
dtype=[('k', int), ('a', int), ('b1', int),
('b2', int), ('c1', int), ('c2', int)])
test = join_by(
['a', 'k'], a, b, r1postfix='1', r2postfix='2', jointype='inner')
assert_equal(test.dtype, control.dtype)
assert_equal(test, control)
class TestAppendFieldsObj(object):
"""
Test append_fields with arrays containing objects
"""
# https://github.com/numpy/numpy/issues/2346
def setup(self):
from datetime import date
self.data = dict(obj=date(2000, 1, 1))
def test_append_to_objects(self):
"Test append_fields when the base array contains objects"
obj = self.data['obj']
x = np.array([(obj, 1.), (obj, 2.)],
dtype=[('A', object), ('B', float)])
y = np.array([10, 20], dtype=int)
test = append_fields(x, 'C', data=y, usemask=False)
control = np.array([(obj, 1.0, 10), (obj, 2.0, 20)],
dtype=[('A', object), ('B', float), ('C', int)])
assert_equal(test, control)
| bsd-3-clause | -8,832,885,361,606,307,000 | 41.335498 | 85 | 0.417557 | false |
sciencefreak500/Open_Intelligence | Python/Eyesight/camshift_test.py | 1 | 1347 | import numpy as np
import cv2
cap = cv2.VideoCapture(0)
# take first frame of the video
ret,frame = cap.read()
# setup initial location of window
r,h,c,w = 250,90,400,125 # simply hardcoded the values
track_window = (c,r,w,h)
# set up the ROI for tracking
roi = frame[r:r+h, c:c+w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((0., 60.,32.)), np.array((180.,255.,255.)))
roi_hist = cv2.calcHist([hsv_roi],[0],mask,[180],[0,180])
cv2.normalize(roi_hist,roi_hist,0,255,cv2.NORM_MINMAX)
# Setup the termination criteria, either 10 iteration or move by atleast 1 pt
term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
while(1):
ret ,frame = cap.read()
if ret == True:
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
dst = cv2.calcBackProject([hsv],[0],roi_hist,[0,180],1)
        # apply CamShift to get the new location
ret, track_window = cv2.CamShift(dst, track_window, term_crit)
# Draw it on image
pts = cv2.boxPoints(ret)
pts = np.int0(pts)
img2 = cv2.polylines(frame,[pts],True, 255,2)
cv2.imshow('img2',dst)
k = cv2.waitKey(60) & 0xff
if k == 27:
break
else:
cv2.imwrite(chr(k)+".jpg",img2)
else:
break
cv2.destroyAllWindows()
cap.release()
| gpl-2.0 | -696,926,444,738,673,900 | 26.489796 | 80 | 0.620638 | false |
jdvelasq/cashflows | cashflows/inflation.py | 1 | 4674 | """
Constant dollar transformations
===============================================================================
Overview
-------------------------------------------------------------------------------
The function ``const2curr`` computes the equivalent generic cashflow in current
dollars from a generic cashflow in constant dollars of the date given by
``base_date``. ``inflation`` is the inflation rate per compounding period.
``curr2const`` computes the inverse transformation.
Functions in this module
-------------------------------------------------------------------------------
"""
import pandas as pd
# cashflows.
from cashflows.timeseries import *
from cashflows.rate import *
from cashflows.common import *
def const2curr(cflo, inflation, base_date=0):
"""Converts a cashflow of constant dollars to current dollars
of the time `base_date`.
Args:
cflo (pandas.Series): Generic cashflow.
inflation (pandas.Series): Inflation rate per compounding period.
base_date (int, str): base date.
Returns:
A cashflow in current money (pandas.Series)
**Examples.**
>>> cflo=cashflow(const_value=[100] * 5, start='2000', freq='A')
>>> inflation=interest_rate(const_value=[10, 10, 20, 20, 20], start='2000', freq='A')
>>> const2curr(cflo=cflo, inflation=inflation) # doctest: +NORMALIZE_WHITESPACE
2000 100.00
2001 110.00
2002 132.00
2003 158.40
2004 190.08
Freq: A-DEC, dtype: float64
>>> const2curr(cflo=cflo, inflation=inflation, base_date=0) # doctest: +NORMALIZE_WHITESPACE
2000 100.00
2001 110.00
2002 132.00
2003 158.40
2004 190.08
Freq: A-DEC, dtype: float64
>>> const2curr(cflo=cflo, inflation=inflation, base_date='2000') # doctest: +NORMALIZE_WHITESPACE
2000 100.00
2001 110.00
2002 132.00
2003 158.40
2004 190.08
Freq: A-DEC, dtype: float64
>>> const2curr(cflo=cflo, inflation=inflation, base_date=4) # doctest: +NORMALIZE_WHITESPACE
2000 52.609428
2001 57.870370
2002 69.444444
2003 83.333333
2004 100.000000
Freq: A-DEC, dtype: float64
>>> const2curr(cflo=cflo, inflation=inflation, base_date='2004') # doctest: +NORMALIZE_WHITESPACE
2000 52.609428
2001 57.870370
2002 69.444444
2003 83.333333
2004 100.000000
Freq: A-DEC, dtype: float64
"""
if not isinstance(cflo, pd.Series):
raise TypeError("cflo must be a TimeSeries object")
if not isinstance(inflation, pd.Series):
raise TypeError("inflation must be a TimeSeries object")
verify_period_range([cflo, inflation])
factor = to_compound_factor(prate=inflation, base_date=base_date)
result = cflo.copy()
for time, _ in enumerate(result):
result[time] *= factor[time]
return result
def curr2const(cflo, inflation, base_date=0):
"""Converts a cashflow of current dollars to constant dollars of
the date `base_date`.
Args:
cflo (pandas.Series): Generic cashflow.
inflation_rate (float, pandas.Series): Inflation rate per compounding period.
base_date (int): base time..
Returns:
A cashflow in constant dollars
>>> cflo = cashflow(const_value=[100] * 5, start='2015', freq='A')
>>> inflation = interest_rate(const_value=[10, 10, 20, 20, 20], start='2015', freq='A')
>>> curr2const(cflo=cflo, inflation=inflation) # doctest: +NORMALIZE_WHITESPACE
2015 100.000000
2016 90.909091
2017 75.757576
2018 63.131313
2019 52.609428
Freq: A-DEC, dtype: float64
>>> curr2const(cflo=cflo, inflation=inflation, base_date=4) # doctest: +NORMALIZE_WHITESPACE
2015 190.08
2016 172.80
2017 144.00
2018 120.00
2019 100.00
Freq: A-DEC, dtype: float64
>>> curr2const(cflo=cflo, inflation=inflation, base_date='2017') # doctest: +NORMALIZE_WHITESPACE
2015 132.000000
2016 120.000000
2017 100.000000
2018 83.333333
2019 69.444444
Freq: A-DEC, dtype: float64
"""
if not isinstance(cflo, pd.Series):
raise TypeError("cflo must be a TimeSeries object")
if not isinstance(inflation, pd.Series):
raise TypeError("inflation must be a TimeSeries object")
verify_period_range([cflo, inflation])
factor = to_discount_factor(prate=inflation, base_date=base_date)
result = cflo.copy()
for time, _ in enumerate(result):
result[time] *= factor[time]
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| mit | -2,808,769,870,040,584,700 | 29.54902 | 101 | 0.613179 | false |
reingart/suscripciones | controllers/checkout.py | 1 | 1037 | # coding: utf8
# try something like
def index():
form = SQLFORM.factory(
Field("descripcion", "string", default="Barrilete multicolor"),
Field("cantidad", "integer", default=1),
Field("precio", "float", default=1.00),
)
if form.accepts(request.vars, session):
preference_data = {
"items": [
{
"title": form.vars.descripcion,
"quantity": int(form.vars.cantidad),
"currency_id": "ARS",
"unit_price": float(form.vars.precio),
}
]
}
preference = mp.create_preference(preference_data)
#return str(preference)
return """<!DOCTYPE html>
<html>
<head>
<title>Pagar</title>
</head>
<body>
<a href="%s">Pagar</a>
</body>
</html>""" % preference['response']['init_point']
else:
response.view = "generic.html"
return {'form': form}
| agpl-3.0 | -8,104,248,428,180,297,000 | 27.027027 | 71 | 0.47541 | false |
larsbutler/oq-hazardlib | openquake/hazardlib/tests/calc/disagg_test.py | 1 | 23162 | # The Hazard Library
# Copyright (C) 2012-2014, GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import mock
import unittest
import warnings
import numpy
from openquake.hazardlib.calc import disagg
from openquake.hazardlib.calc import filters
from openquake.hazardlib.tom import PoissonTOM
from openquake.hazardlib.geo import Point, Mesh
from openquake.hazardlib.site import Site
class _BaseDisaggTestCase(unittest.TestCase):
class FakeSurface(object):
def __init__(self, distance, lon, lat):
self.distance = distance
self.lon = lon
self.lat = lat
def get_joyner_boore_distance(self, sites):
assert len(sites) == 1
return numpy.array([self.distance], float)
def get_closest_points(self, sites):
assert len(sites) == 1
return Mesh(numpy.array([self.lon], float),
numpy.array([self.lat], float),
depths=None)
class FakeRupture(object):
def __init__(self, mag, probability, distance, lon, lat):
self.mag = mag
self.probability = probability
self.surface = _BaseDisaggTestCase.FakeSurface(distance, lon, lat)
def get_probability_no_exceedance(self, poe):
return (1 - self.probability) ** poe
class FakeSource(object):
def __init__(self, source_id, ruptures, tom, tectonic_region_type):
self.source_id = source_id
self.ruptures = ruptures
self.tom = tom
self.tectonic_region_type = tectonic_region_type
def iter_ruptures(self):
return iter(self.ruptures)
class FailSource(FakeSource):
def iter_ruptures(self):
raise ValueError('Something bad happened')
class FakeGSIM(object):
def __init__(self, iml, imt, truncation_level, n_epsilons,
disaggregated_poes):
self.disaggregated_poes = disaggregated_poes
self.n_epsilons = n_epsilons
self.iml = iml
self.imt = imt
self.truncation_level = truncation_level
self.dists = object()
def make_contexts(self, sites, rupture):
return (sites, rupture, self.dists)
def disaggregate_poe(self, sctx, rctx, dctx, imt, iml,
truncation_level, n_epsilons):
assert truncation_level is self.truncation_level
assert dctx is self.dists
assert imt is self.imt
assert iml is self.iml
assert n_epsilons is self.n_epsilons
assert len(sctx) == 1
return numpy.array([self.disaggregated_poes[rctx]])
def setUp(self):
self.ruptures_and_poes1 = [
([0, 0, 0], self.FakeRupture(5, 0.1, 3, 22, 44)),
([0.1, 0.2, 0.1], self.FakeRupture(5, 0.2, 11, 22, 44)),
([0, 0, 0.3], self.FakeRupture(5, 0.01, 12, 22, 45)),
([0, 0.05, 0.001], self.FakeRupture(5, 0.33, 13, 22, 45)),
([0, 0, 0], self.FakeRupture(9, 0.4, 14, 21, 44)),
([0, 0, 0.02], self.FakeRupture(5, 0.05, 11, 21, 44)),
([0.04, 0.1, 0.04], self.FakeRupture(5, 0.53, 11, 21, 45)),
([0.2, 0.3, 0.2], self.FakeRupture(5, 0.066, 10, 21, 45)),
([0.3, 0.4, 0.3], self.FakeRupture(6, 0.1, 12, 22, 44)),
([0, 0, 0.1], self.FakeRupture(6, 0.1, 12, 21, 44)),
([0, 0, 0], self.FakeRupture(6, 0.1, 11, 22, 45)),
]
self.ruptures_and_poes2 = [
([0, 0.1, 0.04], self.FakeRupture(8, 0.04, 5, 11, 45)),
([0.1, 0.5, 0.1], self.FakeRupture(7, 0.03, 5, 11, 46))
]
self.time_span = 10
self.tom = PoissonTOM(time_span=10)
self.source1 = self.FakeSource(
1, [rupture for poes, rupture in self.ruptures_and_poes1],
self.tom, 'trt1'
)
self.source2 = self.FakeSource(
2, [rupture for poes, rupture in self.ruptures_and_poes2],
self.tom, 'trt2'
)
self.disagreggated_poes = dict(
(rupture, poes) for (poes, rupture) in self.ruptures_and_poes1
+ self.ruptures_and_poes2
)
self.site = Site(Point(0, 0), 2, False, 4, 5)
self.iml, self.imt, self.truncation_level = (
object(), object(), object())
gsim = self.FakeGSIM(self.iml, self.imt, self.truncation_level,
n_epsilons=3,
disaggregated_poes=self.disagreggated_poes)
self.gsim = gsim
self.gsims = {'trt1': gsim, 'trt2': gsim}
self.sources = [self.source1, self.source2]
class CollectBinsDataTestCase(_BaseDisaggTestCase):
def test_no_filters(self):
(mags, dists, lons, lats, trts, trt_bins, probs_no_exceed) = \
disagg._collect_bins_data(
self.sources, self.site, self.imt, self.iml, self.gsims,
self.truncation_level, n_epsilons=3,
source_site_filter=filters.source_site_noop_filter,
rupture_site_filter=filters.rupture_site_noop_filter
)
aae = numpy.testing.assert_array_equal
aae(mags, [5, 5, 5, 5, 9, 5, 5, 5, 6, 6, 6, 8, 7])
aae(dists, [3, 11, 12, 13, 14, 11, 11, 10, 12, 12, 11, 5, 5])
aae(lons, [22, 22, 22, 22, 21, 21, 21, 21, 22, 21, 22, 11, 11])
aae(lats, [44, 44, 45, 45, 44, 44, 45, 45, 44, 44, 45, 45, 46])
aae(trts, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1])
poe = numpy.array([
[0, 0, 0],
[0.1, 0.2, 0.1],
[0, 0, 0.3],
[0, 0.05, 0.001],
[0, 0, 0],
[0, 0, 0.02],
[0.04, 0.1, 0.04],
[0.2, 0.3, 0.2],
[0.3, 0.4, 0.3],
[0, 0, 0.1],
[0, 0, 0],
[0, 0.1, 0.04],
[0.1, 0.5, 0.1],
])
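        # The FakeRupture mock defines get_probability_no_exceedance(poe) as
        # (1 - probability) ** poe, so the expected values are rebuilt here
        # with the same formula.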
p_one_more = numpy.array(
[0.1, 0.2, 0.01, 0.33, 0.4, 0.05, 0.53, 0.066,
0.1, 0.1, 0.1, 0.04, 0.03]
).reshape(13, 1)
exp_p_ne = (1 - p_one_more) ** poe
aae(probs_no_exceed, exp_p_ne)
self.assertEqual(trt_bins, ['trt1', 'trt2'])
def test_filters(self):
def source_site_filter(sources_sites):
for source, sites in sources_sites:
if source is self.source2:
continue
yield source, sites
def rupture_site_filter(rupture_sites):
for rupture, sites in rupture_sites:
if rupture.mag < 6:
continue
yield rupture, sites
(mags, dists, lons, lats, trts, trt_bins, probs_no_exceed) = \
disagg._collect_bins_data(
self.sources, self.site, self.imt, self.iml, self.gsims,
self.truncation_level, n_epsilons=3,
source_site_filter=source_site_filter,
rupture_site_filter=rupture_site_filter
)
aae = numpy.testing.assert_array_equal
aae(mags, [9, 6, 6, 6])
aae(dists, [14, 12, 12, 11])
aae(lons, [21, 22, 21, 22])
aae(lats, [44, 44, 44, 45])
aae(trts, [0, 0, 0, 0])
poe = numpy.array([
[0, 0, 0],
[0.3, 0.4, 0.3],
[0, 0, 0.1],
[0, 0, 0],
])
p_one_more = numpy.array(
[0.4, 0.1, 0.1, 0.1]
).reshape(4, 1)
exp_p_ne = (1 - p_one_more) ** poe
aae(probs_no_exceed, exp_p_ne)
self.assertEqual(trt_bins, ['trt1'])
class DefineBinsTestCase(unittest.TestCase):
def test(self):
mags = numpy.array([4.4, 5, 3.2, 7, 5.9])
dists = numpy.array([4, 1.2, 3.5, 52.1, 17])
lats = numpy.array([-25, -10, 0.6, -20, -15])
lons = numpy.array([179, -179, 176.4, -179.55, 180])
trts = [0, 1, 2, 2, 1]
trt_bins = ['foo', 'bar', 'baz']
# This is ignored by _define_bins, but it is returned by
# _collect_bins_data so we need to maintain that contract
probs_no_exceed = None
bins_data = (mags, dists, lons, lats, trts, trt_bins,
probs_no_exceed)
(mag_bins, dist_bins, lon_bins, lat_bins, eps_bins, trt_bins_
) = disagg._define_bins(
bins_data, mag_bin_width=1, dist_bin_width=4.2,
coord_bin_width=1.2, truncation_level=1, n_epsilons=4
)
aae = numpy.testing.assert_array_equal
aaae = numpy.testing.assert_array_almost_equal
aae(mag_bins, [3, 4, 5, 6, 7])
aaae(dist_bins, [0., 4.2, 8.4, 12.6, 16.8, 21., 25.2, 29.4, 33.6,
37.8, 42., 46.2, 50.4, 54.6])
aaae(lon_bins, [176.4, 177.6, 178.8, -180., -178.8])
aaae(lat_bins, [-25.2, -24., -22.8, -21.6, -20.4, -19.2, -18., -16.8,
-15.6, -14.4, -13.2, -12., -10.8, -9.6, -8.4, -7.2,
-6., -4.8, -3.6, -2.4, -1.2, 0., 1.2])
aae(eps_bins, [-1., -0.5, 0., 0.5, 1.])
self.assertIs(trt_bins, trt_bins_)
class ArangeDataInBinsTestCase(unittest.TestCase):
def test(self):
mags = numpy.array([5, 5], float)
dists = numpy.array([6, 6], float)
lons = numpy.array([19, 19], float)
lats = numpy.array([41.5, 41.5], float)
trts = numpy.array([0, 0], int)
trt_bins = ['trt1', 'trt2']
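        # Fixture: both (identical) ruptures have a 0.1 probability of one or
        # more occurrences and 0.1 disaggregated PoEs per epsilon bin, so the
        # no-exceedance probabilities passed in are (1 - 0.1) ** 0.1.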
probs_one_or_more = numpy.array([0.1] * len(mags)).reshape(2, 1)
probs_exceed_given_rup = numpy.ones((len(mags), 2)) * 0.1
probs_no_exceed = (1 - probs_one_or_more) ** probs_exceed_given_rup
bins_data = (mags, dists, lons, lats, trts, trt_bins,
probs_no_exceed)
mag_bins = numpy.array([4, 6, 7], float)
dist_bins = numpy.array([0, 4, 8], float)
lon_bins = numpy.array([18, 20, 21], float)
lat_bins = numpy.array([40, 41, 42], float)
eps_bins = numpy.array([-2, 0, 2], float)
bin_edges = mag_bins, dist_bins, lon_bins, lat_bins, eps_bins, trt_bins
diss_matrix = disagg._arrange_data_in_bins(bins_data, bin_edges)
self.assertEqual(diss_matrix.shape, (2, 2, 2, 2, 2, 2))
for idx, value in [((0, 1, 0, 1, 0, 0), 0.02085163763902309),
((0, 1, 0, 1, 1, 0), 0.02085163763902309)]:
self.assertAlmostEqual(diss_matrix[idx], value)
diss_matrix[idx] = 0
self.assertEqual(diss_matrix.sum(), 0)
class DisaggregateTestCase(_BaseDisaggTestCase):
def test(self):
self.gsim.truncation_level = self.truncation_level = 1
bin_edges, matrix = disagg.disaggregation(
self.sources, self.site, self.imt, self.iml, self.gsims,
self.truncation_level, n_epsilons=3,
mag_bin_width=3, dist_bin_width=4, coord_bin_width=2.4
)
mag_bins, dist_bins, lon_bins, lat_bins, eps_bins, trt_bins = bin_edges
aaae = numpy.testing.assert_array_almost_equal
aaae(mag_bins, [3, 6, 9])
aaae(dist_bins, [0, 4, 8, 12, 16])
aaae(lon_bins, [9.6, 12., 14.4, 16.8, 19.2, 21.6, 24.])
aaae(lat_bins, [43.2, 45.6, 48.])
aaae(eps_bins, [-1, -0.3333333, 0.3333333, 1])
self.assertEqual(trt_bins, ['trt1', 'trt2'])
for idx, value in [((0, 2, 5, 0, 0, 0), 0.022067231457071457),
((0, 2, 5, 0, 1, 0), 0.043647500209963),
((0, 2, 5, 0, 2, 0), 0.022067231457071457),
((0, 3, 5, 0, 1, 0), 0.01982473192105061),
((0, 3, 5, 0, 2, 0), 0.003409751870464106),
((0, 2, 4, 0, 0, 0), 0.04290887394265486),
((0, 2, 4, 0, 1, 0), 0.09152318417708383),
((0, 2, 4, 0, 2, 0), 0.0438902176307755),
((1, 3, 5, 0, 0, 0), 0.03111383880273666),
((1, 3, 5, 0, 1, 0), 0.041268484485817325),
((1, 3, 5, 0, 2, 0), 0.03111383880273666),
((1, 3, 4, 0, 2, 0), 0.010480741793785553),
((1, 1, 0, 0, 1, 1), 0.004073878602149361),
((1, 1, 0, 0, 2, 1), 0.0016315473579483486),
((1, 1, 0, 1, 0, 1), 0.003041286638106211),
((1, 1, 0, 1, 1, 1), 0.015114219820389518),
((1, 1, 0, 1, 2, 1), 0.003041286638106211)]:
self.assertAlmostEqual(matrix[idx], value)
matrix[idx] = 0
self.assertEqual(matrix.sum(), 0)
def test_cross_idl(self):
# test disaggregation with source generating ruptures crossing
        # international date line
ruptures_and_poes = [
([0, 0.2, 0.3], self.FakeRupture(5.5, 0.04, 55, -179.5, 45.5)),
([0.4, 0.5, 0.6], self.FakeRupture(7.5, 0.03, 75, 179.5, 46.5))
]
source = self.FakeSource(
1, [rupture for poes, rupture in ruptures_and_poes],
self.tom, 'trt1'
)
disagreggated_poes = dict(
(rupture, poes) for (poes, rupture) in ruptures_and_poes
)
gsim = self.FakeGSIM(self.iml, self.imt, truncation_level=1,
n_epsilons=3,
disaggregated_poes=disagreggated_poes)
bin_edges, matrix = disagg.disaggregation(
[source], self.site, self.imt, self.iml, {'trt1': gsim},
truncation_level=1, n_epsilons=3,
mag_bin_width=1, dist_bin_width=10, coord_bin_width=1.0
)
mag_bins, dist_bins, lon_bins, lat_bins, eps_bins, trt_bins = bin_edges
aaae = numpy.testing.assert_array_almost_equal
aaae(mag_bins, [5, 6, 7, 8])
aaae(dist_bins, [50, 60, 70, 80])
aaae(lon_bins, [179., -180, -179.])
aaae(lat_bins, [45., 46., 47.])
aaae(eps_bins, [-1, -0.3333333, 0.3333333, 1])
self.assertEqual(trt_bins, ['trt1'])
for idx, value in [((0, 0, 1, 0, 0, 0), 0),
((0, 0, 1, 0, 1, 0), 0.008131160717433694),
((0, 0, 1, 0, 2, 0), 0.012171913957925717),
((2, 2, 0, 1, 0, 0), 0.012109762440985716),
((2, 2, 0, 1, 1, 0), 0.015114219820389518),
((2, 2, 0, 1, 2, 0), 0.01810953978371055)]:
self.assertAlmostEqual(matrix[idx], value)
matrix[idx] = 0
self.assertEqual(matrix.sum(), 0)
def test_source_errors(self):
# exercise the case where an error occurs while computing on a given
# seismic source; in this case, we expect an error to be raised which
# signals the id of the source in question
fail_source = self.FailSource(self.source2.source_id,
self.source2.ruptures,
self.source2.tom,
self.source2.tectonic_region_type)
sources = iter([self.source1, fail_source])
with self.assertRaises(ValueError) as ae:
bin_edges, matrix = disagg.disaggregation(
sources, self.site, self.imt, self.iml, self.gsims,
self.truncation_level, n_epsilons=3,
mag_bin_width=3, dist_bin_width=4, coord_bin_width=2.4
)
expected_error = (
'An error occurred with source id=2. Error: Something bad happened'
)
self.assertEqual(expected_error, ae.exception.message)
def test_no_contributions_from_ruptures(self):
# Test that the `disaggregation` function returns `None, None` if no
# ruptures contribute to the hazard level.
array = numpy.array
float64 = numpy.float64
int64 = numpy.int64
# This is the result we get if the sources produce no ruptures:
fake_bins_data = (array([], dtype=float64), array([], dtype=float64),
array([], dtype=float64), array([], dtype=float64),
array([], dtype=float64), array([], dtype=int64), [])
with mock.patch(
'openquake.hazardlib.calc.disagg._collect_bins_data'
) as cbd:
with warnings.catch_warnings(record=True) as w:
cbd.return_value = fake_bins_data
self.gsim.truncation_level = self.truncation_level = 1
bin_edges, matrix = disagg.disaggregation(
self.sources, self.site, self.imt, self.iml, self.gsims,
self.truncation_level, n_epsilons=3,
mag_bin_width=3, dist_bin_width=4, coord_bin_width=2.4,
)
# We expect to get back 2 `None` values:
self.assertIsNone(bin_edges)
self.assertIsNone(matrix)
# Also check for the warning that should be raised:
expected_warning_msg = (
'No ruptures have contributed to the hazard at site '
'<Location=<Latitude=0.000000, Longitude=0.000000, '
'Depth=0.0000>, Vs30=2.0000, Vs30Measured=False, '
'Depth1.0km=4.0000, Depth2.5km=5.0000, Backarc=False>'
)
self.assertEqual(1, len(w))
[warning] = list(w)
self.assertEqual(expected_warning_msg, warning.message.message)
class PMFExtractorsTestCase(unittest.TestCase):
def setUp(self):
super(PMFExtractorsTestCase, self).setUp()
self.aae = numpy.testing.assert_almost_equal
# test matrix is not normalized, but that's fine for test
self.matrix = numpy.array(
[ # magnitude
[ # distance
[ # longitude
[ # latitude
[ # epsilon
[0.00, 0.20, 0.50], # trt
[0.33, 0.44, 0.55],
[0.10, 0.11, 0.12]],
[
[0.60, 0.30, 0.20],
[0.50, 0.50, 0.30],
[0.00, 0.10, 0.20]]],
[
[
[0.10, 0.50, 0.78],
[0.15, 0.31, 0.21],
[0.74, 0.20, 0.95]],
[
[0.05, 0.82, 0.99],
[0.55, 0.02, 0.63],
[0.52, 0.49, 0.21]]]],
[
[
[
[0.98, 0.59, 0.13],
[0.72, 0.40, 0.12],
[0.16, 0.61, 0.53]],
[
[0.04, 0.94, 0.84],
[0.13, 0.03, 0.31],
[0.95, 0.34, 0.31]]],
[
[
[0.25, 0.46, 0.34],
[0.79, 0.71, 0.17],
[0.5, 0.61, 0.7]],
[
[0.79, 0.15, 0.29],
[0.79, 0.14, 0.72],
[0.40, 0.84, 0.24]]]]],
[
[
[
[
[0.49, 0.73, 0.79],
[0.54, 0.20, 0.04],
[0.40, 0.32, 0.06]],
[
[0.73, 0.04, 0.60],
[0.53, 0.65, 0.71],
[0.47, 0.93, 0.70]]],
[
[
[0.32, 0.78, 0.97],
[0.75, 0.07, 0.59],
[0.03, 0.94, 0.12]],
[
[0.12, 0.15, 0.47],
[0.12, 0.62, 0.02],
[0.93, 0.13, 0.23]]]],
[
[
[
[0.17, 0.14, 1.00],
[0.34, 0.27, 0.08],
[0.11, 0.85, 0.85]],
[
[0.76, 0.03, 0.86],
[0.97, 0.30, 0.80],
[0.67, 0.84, 0.41]]],
[
[
[0.27, 0.36, 0.96],
[0.52, 0.77, 0.35],
[0.39, 0.88, 0.20]],
[
[0.86, 0.17, 0.07],
[0.48, 0.44, 0.69],
[0.14, 0.61, 0.67]]]]]])
def test_mag(self):
pmf = disagg.mag_pmf(self.matrix)
self.aae(pmf, [1.0, 1.0])
def test_dist(self):
pmf = disagg.dist_pmf(self.matrix)
self.aae(pmf, [1.0, 1.0])
def test_trt(self):
pmf = disagg.trt_pmf(self.matrix)
self.aae(pmf, [1.0, 1.0, 1.0])
def test_mag_dist(self):
pmf = disagg.mag_dist_pmf(self.matrix)
self.aae(pmf, [[0.999999999965, 1.0],
[1.0, 1.0]])
def test_mag_dist_eps(self):
pmf = disagg.mag_dist_eps_pmf(self.matrix)
self.aae(pmf, [[[0.999984831616, 0.997766176716, 0.998979249671],
[0.999997772739, 0.999779959211, 0.999985036077]],
[[0.999994665686, 0.999473519718, 0.999989748277],
[1.0, 0.999987940219, 0.999995956695]]])
def test_lon_Lat(self):
pmf = disagg.lon_lat_pmf(self.matrix)
self.aae(pmf, [[1.0, 1.0],
[1.0, 1.0]])
def test_mag_lon_lat(self):
pmf = disagg.mag_lon_lat_pmf(self.matrix)
self.aae(pmf, [[[0.999992269326, 0.999996551231],
[0.999999622937, 0.999999974769]],
[[1.0, 0.999999999765],
[0.999999998279, 0.999993421953]]])
def test_lon_lat_trt(self):
pmf = disagg.lon_lat_trt_pmf(self.matrix)
self.aae(pmf, [[[0.999805340359, 0.999470893656, 1.0],
[0.999998665328, 0.999969082487, 0.999980380612]],
[[0.999447922645, 0.999996344798, 0.999999678475],
[0.999981572755, 0.999464007617, 0.999983196102]]])
| agpl-3.0 | -8,118,343,141,972,529,000 | 40.286988 | 79 | 0.474311 | false |
0xffea/keystone | tests/test_versions.py | 1 | 5296 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keystone import config
from keystone.openstack.common import jsonutils
from keystone import test
CONF = config.CONF
class VersionTestCase(test.TestCase):
def setUp(self):
super(VersionTestCase, self).setUp()
self.load_backends()
self.public_app = self.loadapp('keystone', 'main')
self.admin_app = self.loadapp('keystone', 'admin')
self.public_server = self.serveapp('keystone', name='main')
self.admin_server = self.serveapp('keystone', name='admin')
def test_public_versions(self):
client = self.client(self.public_app)
resp = client.get('/')
self.assertEqual(resp.status_int, 300)
data = jsonutils.loads(resp.body)
expected = {
"versions": {
"values": [
{
"id": "v2.0",
"status": "stable",
"updated": '2013-03-06T00:00:00Z',
"links": [
{
"rel": "self",
"href": "http://localhost:%s/v2.0/" %
CONF.public_port,
}, {
"rel": "describedby",
"type": "text/html",
"href": "http://docs.openstack.org/api/"
"openstack-identity-service/2.0/"
"content/"
}, {
"rel": "describedby",
"type": "application/pdf",
"href": "http://docs.openstack.org/api/"
"openstack-identity-service/2.0/"
"identity-dev-guide-2.0.pdf"
}
],
"media-types": [
{
"base": "application/json",
"type": "application/"
"vnd.openstack.identity-v2.0+json"
}, {
"base": "application/xml",
"type": "application/"
"vnd.openstack.identity-v2.0+xml"
}
]
}
]
}
}
self.assertEqual(data, expected)
def test_admin_versions(self):
client = self.client(self.admin_app)
resp = client.get('/')
self.assertEqual(resp.status_int, 300)
data = jsonutils.loads(resp.body)
expected = {
"versions": {
"values": [
{
"id": "v2.0",
"status": "stable",
"updated": '2013-03-06T00:00:00Z',
"links": [
{
"rel": "self",
"href": "http://localhost:%s/v2.0/" %
CONF.admin_port,
}, {
"rel": "describedby",
"type": "text/html",
"href": "http://docs.openstack.org/api/"
"openstack-identity-service/2.0/"
"content/"
}, {
"rel": "describedby",
"type": "application/pdf",
"href": "http://docs.openstack.org/api/"
"openstack-identity-service/2.0/"
"identity-dev-guide-2.0.pdf"
}
],
"media-types": [
{
"base": "application/json",
"type": "application/"
"vnd.openstack.identity-v2.0+json"
}, {
"base": "application/xml",
"type": "application/"
"vnd.openstack.identity-v2.0+xml"
}
]
}
]
}
}
self.assertEqual(data, expected)
| apache-2.0 | -3,539,829,722,123,488,000 | 39.738462 | 74 | 0.375189 | false |
egabancho/invenio | invenio/modules/knowledge/api.py | 1 | 26116 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011, 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Provide API-callable functions for knowledge base management."""
import json
import os
import re
import warnings
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import NoResultFound
from invenio.base.globals import cfg
from invenio.ext.sqlalchemy import db
from invenio.ext.sqlalchemy.utils import session_manager
from invenio.modules.search.models import Collection
from invenio.utils.memoise import Memoise
from . import models
processor_type = 0
try:
from lxml import etree
processor_type = 1
except ImportError:
try:
import libxml2
import libxslt
processor_type = 2
except ImportError:
pass
def get_kb_by_id(kb_id):
"""Return the knwKB object with given id.
    :raises: :exc:`~sqlalchemy.orm.exc.NoResultFound` if it does not exist.
"""
return models.KnwKB.query.filter_by(id=kb_id).one()
def get_kb_id(kb_name):
"""Get the id by name.
:param kb_name: knowledge base name
"""
warnings.warn("The method get_kb_id(kb_name) is deprecated! "
"Use instead get_kb_by_id()'",
DeprecationWarning)
return get_kb_by_id(kb_name).id
def get_kb_by_name(kb_name):
"""Return the knwKB object with given name.
:raises: :exc:`~sqlalchemy.orm.exc.NoResultFound` in case not exist.
"""
return models.KnwKB.query.filter_by(name=kb_name).one()
def get_all_kb_names():
"""Return all knowledge base names.
:return: list of names
"""
return [row.name for row in models.KnwKB.query.all()]
get_kb_by_name_memoised = Memoise(get_kb_by_name)
def query_kb_mappings(kbid, sortby="to", key="", value="",
match_type="s"):
"""Return a list of all mappings from the given kb, ordered by key.
If key given, give only those with left side (mapFrom) = key.
If value given, give only those with right side (mapTo) = value.
:param kb_name: knowledge base name. if "", return all
:param sortby: the sorting criteria ('from' or 'to')
:param key: return only entries where key matches this
:param value: return only entries where value matches this
:param match_type: s=substring, e=exact, sw=startswith
"""
return models.KnwKBRVAL.query_kb_mappings(kbid, sortby, key,
value, match_type)
def get_kb_mappings(kb_name="", key="", value="", match_type="s", sortby="to",
limit=None):
"""Return a list of all mappings from the given kb, ordered by key.
If key given, give only those with left side (mapFrom) = key.
If value given, give only those with right side (mapTo) = value.
:param kb_name: knowledge base name. if "", return all
:param sortby: the sorting criteria ('from' or 'to')
:param key: return only entries where key matches this
:param value: return only entries where value matches this
    :param limit: return only this number of entries
"""
# query
query = db.session.query(models.KnwKBRVAL).join(models.KnwKB)
# filter
if kb_name:
query = query.filter(models.KnwKB.name == kb_name)
if len(key) > 0:
if match_type == "s":
key = "%"+key+"%"
else:
key = '%'
if len(value) > 0:
if match_type == "s":
value = "%"+value+"%"
else:
value = '%'
query = query.filter(
models.KnwKBRVAL.m_key.like(key),
models.KnwKBRVAL.m_value.like(value))
# order by
if sortby == "from":
query = query.order_by(models.KnwKBRVAL.m_key)
else:
query = query.order_by(models.KnwKBRVAL.m_value)
if limit:
query = query.limit(limit)
# return results
return [kbv.to_dict() for (kbv) in query.all()]
def get_kb_mapping(kb_name="", key="", value="", match_type="e", default="",
limit=None):
"""Get one unique mapping. If not found, return default.
:param kb_name: the name of the kb
:param key: include only lines matching this on left side in the results
:param value: include only lines matching this on right side in the results
:param match_type: s = substring match, e = exact match
:param default: default value if no mapping is found
:return: a mapping
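    Illustrative example (assumes a kb named 'journals' exists, with a
    mapping 'PRL' -> 'Phys. Rev. Lett.'):
        get_kb_mapping('journals', key='PRL')
        # returns a dict with at least 'key' and 'value' entries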
"""
mappings = get_kb_mappings(kb_name, key=key, value=value,
match_type=match_type, limit=limit)
if len(mappings) == 0:
return default
else:
return mappings[0]
@session_manager
def add_kb_mapping(kb_name, key, value=""):
"""Add a new mapping to given kb.
:param kb_name: the name of the kb where to insert the new value
:param key: the key of the mapping
:param value: the value of the mapping
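    Illustrative example (assumes a kb named 'journals' already exists):
        add_kb_mapping('journals', 'PRL', 'Phys. Rev. Lett.')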
"""
kb = get_kb_by_name(kb_name)
if key in kb.kbrvals:
# update
kb.kbrvals[key].m_value = value
else:
# insert
kb.kbrvals.set(models.KnwKBRVAL(m_key=key, m_value=value))
@session_manager
def remove_kb_mapping(kb_name, key):
"""Delete an existing kb mapping in kb.
:param kb_name: the name of the kb where to insert the new value
:param key: the key of the mapping
"""
kb = get_kb_by_name(kb_name)
del kb.kbrvals[key]
def update_kb_mapping(kb_name, old_key, key, value):
"""Update an existing kb mapping with key old_key with a new key and value.
:param kb_name: the name of the kb where to insert the new value
:param old_key: the key of the mapping in the kb
:param key: the new key of the mapping
:param value: the new value of the mapping
"""
db.session.query(models.KnwKBRVAL).join(models.KnwKB) \
.filter(models.KnwKB.name == kb_name,
models.KnwKBRVAL.m_key == old_key) \
.update({"m_key": key, "m_value": value})
def get_kb_mappings_json(kb_name="", key="", value="", match_type="s",
limit=None):
"""Get leftside/rightside mappings from kb kb_name formatted as json dict.
If key given, give only those with left side (mapFrom) = key.
If value given, give only those with right side (mapTo) = value.
:param kb_name: the name of the kb
:param key: include only lines matching this on left side in the results
:param value: include only lines matching this on right side in the results
:param match_type: s = substring match, e = exact match
:param limit: maximum number of results to return (are ALL if set to None)
:return: a list of mappings
"""
mappings = get_kb_mappings(kb_name, key, value, match_type)
ret = []
if limit is None:
limit = len(mappings)
for m in mappings[:limit]:
label = m['value'] or m['key']
value = m['key'] or m['value']
ret.append({'label': label, 'value': value})
return json.dumps(ret)
def get_kb_mappings_embedded_json(kb_name="", key="", value="",
match_type="s", limit=None):
"""Get leftside/rightside mappings from kb kb_name formatted as json dict.
The rightside is actually considered as a json string and hence embedded
within the final result.
If key given, give only those with left side (mapFrom) = key.
If value given, give only those with right side (mapTo) = value.
:param kb_name: the name of the kb
:param key: include only lines matching this on left side in the results
:param value: include only lines matching this on right side in the results
:param match_type: s = substring match, e = exact match
:param limit: maximum number of results to return (are ALL if set to None)
:return: a list of mappings
"""
mappings = get_kb_mappings(kb_name, key, value, match_type)
ret = []
if limit is None:
limit = len(mappings)
for m in mappings[:limit]:
label = m['value'] or m['key']
value = m['key'] or m['value']
ret.append({'label': label, 'value': json.loads(value)})
return json.dumps(ret)
def kb_exists(kb_name):
"""Return True if a kb with the given name exists.
:param kb_name: the name of the knowledge base
"""
return db.session.query(
models.KnwKB.query.filter(
models.KnwKB.name.like(kb_name)).exists()).scalar()
def get_kb_name(kb_id):
"""Return the name of the kb given by id.
:param kb_id: the id of the knowledge base
"""
return get_kb_by_id(kb_id).name
@session_manager
def update_kb_attributes(kb_name, new_name, new_description=''):
"""Update kb kb_name with a new name and (optionally) description.
:param kb_name: the name of the kb to update
:param new_name: the new name for the kb
:param new_description: the new description for the kb
"""
models.KnwKB.query.filter_by(name=kb_name) \
.update({"name": new_name, "description": new_description})
def add_kb(kb_name="Untitled", kb_type=None, tries=10):
"""Add a new kb in database, return the id.
    Add a new kb to the database and return its id.
    The kb name will be kb_name (by default 'Untitled'), suffixed with a
    number if needed so that it is unique.
:param kb_name: the name of the kb
:param kb_type: the type of the kb, incl 'taxonomy' and 'dynamic'.
None for typical (leftside-rightside).
:param tries: exit after <n> retry
:return: the id of the newly created kb
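    Illustrative example:
        kb_id = add_kb('my kb')  # plain left/right-side kb
        dyn_id = add_kb('my dynamic kb', kb_type='dynamic')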
"""
created = False
name = kb_name
i = 0
while(i < tries and created is False):
try:
kb = models.KnwKB(name=name, description="", kbtype=kb_type)
created = True
db.session.add(kb)
db.session.commit()
except IntegrityError:
db.session.rollback()
# get the highest id to calculate the new name
result = db.session.execute(
db.select([models.KnwKB.id])
.order_by(db.desc(models.KnwKB.id))
.limit(1)).first()
index = result[0] + 1 if result is not None else 1
name = kb_name + " " + str(index)
i = i + 1
created = False
except:
db.session.rollback()
raise
if created is False:
# TODO raise the right exception
raise Exception("Can't create kb \"{0}\".\n" +
"Probabily the server is busy! " +
"Try again later.".format(kb_name))
return kb.id
def add_dynamic_kb(kbname, tag, collection="", searchwith=""):
"""A convenience method."""
kb_id = add_kb(kb_name=kbname, kb_type='dynamic')
save_kb_dyn_config(kb_id, tag, searchwith, collection)
return kb_id
def save_kb_dyn_config(kb_id, field, expression, collection=None):
"""Save a dynamic knowledge base configuration.
:param kb_id: the id
:param field: the field where values are extracted
:param expression: ..using this expression
:param collection: ..in a certain collection (default is all)
"""
# check that collection exists
if collection:
collection = Collection.query.filter_by(name=collection).one()
kb = get_kb_by_id(kb_id)
kb.set_dyn_config(field, expression, collection)
def kb_mapping_exists(kb_name, key):
"""Return the information if a mapping exists.
:param kb_name: knowledge base name
:param key: left side (mapFrom)
"""
try:
kb = get_kb_by_name(kb_name)
except NoResultFound:
return False
return key in kb.kbrvals
@session_manager
def delete_kb(kb_name):
"""Delete given kb from database.
:param kb_name: knowledge base name
"""
db.session.delete(models.KnwKB.query.filter_by(
name=kb_name).one())
# Knowledge Bases Dependencies
##
def get_elements_that_use_kb(name):
# FIXME remove the obsolete function
"""Return a list of elements that call given kb.
WARNING: this routine is obsolete.
[ {'filename':"filename_1.py"
'name': "a name"
},
...
]
:return: elements sorted by name
"""
warnings.warn("The method 'get_elements_that_use_kb(name) is obsolete!'",
DeprecationWarning)
format_elements = {}
# Retrieve all elements in files
from invenio.modules.formatter.engine \
import TEMPLATE_CONTEXT_FUNCTIONS_CACHE
for element in TEMPLATE_CONTEXT_FUNCTIONS_CACHE \
.bibformat_elements().values():
path = element.__file__
filename = os.path.basename(element.__file__)
if filename.endswith(".py"):
formatf = open(path, 'r')
code = formatf.read()
formatf.close()
# Search for use of kb inside code
kb_pattern = re.compile('''
(bfo.kb)\s* #Function call
\(\s* #Opening parenthesis
[\'"]+ #Single or double quote
(?P<kb>%s) #kb
[\'"]+\s* #Single or double quote
, #comma
''' % name, re.VERBOSE | re.MULTILINE | re.IGNORECASE)
result = kb_pattern.search(code)
if result is not None:
name = ("".join(filename.split(".")[:-1])).lower()
if name.startswith("bfe_"):
name = name[4:]
format_elements[name] = {'filename': filename,
'name': name}
keys = format_elements.keys()
keys.sort()
return map(format_elements.get, keys)
### kb functions for export
def get_kbs_info(kbtype="", searchkbname=""):
"""A convenience method.
:param kbtype: type of kb -- get only kb's of this type
:param searchkbname: get only kb's where this sting appears in the name
"""
# query + order by
query = models.KnwKB.query.order_by(
models.KnwKB.name)
# filters
if kbtype:
query = query.filter_by(kbtype=kbtype)
if searchkbname:
query = query.filter_by(name=searchkbname)
return [row.to_dict() for row in query.all()]
def get_kba_values(kb_name, searchname="", searchtype="s"):
"""Return an array of values "authority file" type = just values.
:param kb_name: name of kb
:param searchname: get these values, according to searchtype
:param searchtype: s=substring, e=exact, , sw=startswith
"""
if searchtype == 's' and searchname:
searchname = '%'+searchname+'%'
if searchtype == 'sw' and searchname: # startswith
searchname = searchname+'%'
if not searchname:
searchname = '%'
query = db.session.query(models.KnwKBRVAL).join(models.KnwKB) \
.filter(models.KnwKBRVAL.m_value.like(searchname),
models.KnwKB.name.like(kb_name))
return [(k.m_value,) for k in query.all()]
def get_kbr_keys(kb_name, searchkey="", searchvalue="", searchtype='s'):
"""Return an array of keys.
:param kb_name: the name of the knowledge base
:param searchkey: search using this key
:param searchvalue: search using this value
:param searchtype: s = substring, e=exact
"""
if searchtype == 's' and searchkey:
searchkey = '%'+searchkey+'%'
if searchtype == 's' and searchvalue:
searchvalue = '%'+searchvalue+'%'
if searchtype == 'sw' and searchvalue: # startswith
searchvalue = searchvalue+'%'
if not searchvalue:
searchvalue = '%'
if not searchkey:
searchkey = '%'
query = db.session.query(models.KnwKBRVAL).join(models.KnwKB) \
.filter(models.KnwKBRVAL.m_key.like(searchkey),
models.KnwKBRVAL.m_value.like(searchvalue),
models.KnwKB.name.like(kb_name))
return [(k.m_key,) for k in query.all()]
def get_kbr_values(kb_name, searchkey="", searchvalue="", searchtype='s',
use_memoise=False):
"""Return a tuple of values from key-value mapping kb.
:param kb_name: the name of the knowledge base
:param searchkey: search using this key
:param searchvalue: search using this value
:param searchtype: s=substring; e=exact
:param use_memoise: can we memoise while doing lookups?
:type use_memoise: bool
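    Illustrative example (assumes a kb named 'journals' exists):
        get_kbr_values('journals', searchkey='Phys', searchtype='s')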
"""
try:
if use_memoise:
kb = get_kb_by_name_memoised(kb_name)
else:
kb = get_kb_by_name(kb_name)
except NoResultFound:
return []
return list(kb.get_kbr_values(searchkey, searchvalue, searchtype))
def get_kbr_items(kb_name, searchkey="", searchvalue="", searchtype='s'):
"""Return a list of dictionaries that match the search.
:param kb_name: the name of the knowledge base
:param searchkey: search using this key
:param searchvalue: search using this value
:param searchtype: s = substring, e=exact
:return a list of dictionaries [{'key'=>x, 'value'=>y},..]
"""
kb = get_kb_by_name(kb_name)
return kb.get_kbr_items(searchkey, searchvalue, searchtype)
def get_kbd_values(kbname, searchwith=""):
"""Return a list of values by searching a dynamic kb.
:param kbname: name of the knowledge base
:param searchwith: a term to search with
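    Illustrative example (assumes a dynamic kb named 'dyn_authors',
    configured on an author field, exists):
        get_kbd_values('dyn_authors', 'Ellis')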
"""
from invenio.legacy import search_engine
# first check that the kb in question is dynamic
kb = get_kb_by_name(kbname)
kbid = kb.id
if not kbid:
return []
kbtype = kb.kbtype
if not kbtype:
return []
if kbtype != 'd':
return []
# get the configuration so that we see what the field is
confdict = kb.kbdefs.to_dict()
if not confdict:
return []
if 'field' not in confdict:
return []
field = confdict['field']
expression = confdict['expression']
collection = ""
if 'collection' in confdict:
collection = confdict['collection']
reclist = [] # return this
if searchwith and expression:
if (expression.count('%') > 0):
expression = expression.replace("%", searchwith)
reclist = search_engine.perform_request_search(p=expression,
cc=collection)
else:
# no %.. just make a combination
expression = expression + " and " + searchwith
reclist = search_engine.perform_request_search(p=expression,
cc=collection)
else: # either no expr or no searchwith.. but never mind about searchwith
if expression: # in this case: only expression
reclist = search_engine.perform_request_search(p=expression,
cc=collection)
else:
# make a fake expression so that only records that have this field
# will be returned
fake_exp = "/.*/"
if searchwith:
fake_exp = searchwith
reclist = search_engine.perform_request_search(f=field, p=fake_exp,
cc=collection)
if reclist:
return [val for (val, dummy) in
search_engine.get_most_popular_field_values(reclist, field)]
return [] # in case nothing worked
def get_kbd_values_json(kbname, searchwith=""):
"""Return values from searching a dynamic kb as a json-formatted string.
This IS probably the method you want.
:param kbname: name of the knowledge base
:param searchwith: a term to search with
"""
res = get_kbd_values(kbname, searchwith)
return json.dumps(res)
def get_kbd_values_for_bibedit(tag, collection="", searchwith="",
expression=""):
"""Dynamically create a dynamic KB for a specific search; then destroy it.
This probably isn't the method you want.
Example1: tag=100__a : return values of 100__a
Example2: tag=100__a, searchwith=Jill: return values of 100__a that match
with Jill
    Example3: tag=100__a, searchwith=Ellis, expression="700__a:*%*":
    return values of 100__a for which Ellis matches some 700__a
    Note: the performance of this function is acceptable compared to a plain
    perform_request_search / get_most_popular_field_values pair.
    The overhead is about 5% with large record sets;
    the lookups are the expensive part.
:param tag: the tag like 100__a
:param collection: collection id
:param searchwith: the string to search. If empty, match all.
    :param expression: the search expression for perform_request_search;
           if present, '%' is substituted with /searchwith/.
If absent, /searchwith/ is searched for in /tag/.
"""
dkbname = "tmp_dynamic_"+tag+'_'+expression
kb_id = add_kb(kb_name=dkbname, kb_type='dynamic')
# get the kb name since it may be catenated by a number
# in case there are concurrent calls.
kb_name = get_kb_name(kb_id)
add_kb_mapping(kb_name, tag, expression, collection)
# now, get stuff
myvalues = get_kbd_values(kb_name, searchwith)
# the tmp dyn kb is now useless, delete it
delete_kb(kb_name)
return myvalues
def get_kbt_items(taxonomyfilename, templatefilename, searchwith=""):
"""
Get items from taxonomy file using a templatefile.
If searchwith is defined, return only items that match with it.
:param taxonomyfilename: full path+name of the RDF file
:param templatefile: full path+name of the XSLT file
:param searchwith: a term to search with
"""
if processor_type == 1:
# lxml
doc = etree.XML(taxonomyfilename)
styledoc = etree.XML(templatefilename)
style = etree.XSLT(styledoc)
result = style(doc)
strres = str(result)
del result
del style
del styledoc
del doc
elif processor_type == 2:
# libxml2 & libxslt
styledoc = libxml2.parseFile(templatefilename)
style = libxslt.parseStylesheetDoc(styledoc)
doc = libxml2.parseFile(taxonomyfilename)
result = style.applyStylesheet(doc, None)
strres = style.saveResultToString(result)
style.freeStylesheet()
doc.freeDoc()
result.freeDoc()
else:
# no xml parser found
strres = ""
ritems = []
if len(strres) == 0:
return []
else:
lines = strres.split("\n")
for line in lines:
if searchwith:
if line.count(searchwith) > 0:
ritems.append(line)
else:
if len(line) > 0:
ritems.append(line)
return ritems
def get_kbt_items_for_bibedit(kbtname, tag="", searchwith=""):
"""A simplifield, customized version of the function get_kbt_items.
Traverses an RDF document. By default returns all leaves. If
tag defined returns the content of that tag.
If searchwith defined, returns leaves that match it.
Warning! In order to make this faster, the matching field values
cannot be multi-line!
:param kbtname: name of the taxonony kb
:param tag: name of tag whose content
:param searchwith: a term to search with
"""
# get the actual file based on the kbt name
kb = get_kb_by_name(kbtname)
kb_id = kb.id
if not kb_id:
return []
# get the rdf file..
rdfname = cfg['CFG_WEBDIR'] + "/kbfiles/" + str(kb_id) + ".rdf"
if not os.path.exists(rdfname):
return []
xsl = """\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#">
<xsl:output method="xml" standalone="yes"
omit-xml-declaration="yes" indent="no"/>
<xsl:template match="rdf:RDF">
<foo><!--just having some tag here speeds up output by 10x-->
<xsl:apply-templates />
</foo>
</xsl:template>
<xsl:template match="*">
<!--hi><xsl:value-of select="local-name()"/></hi-->
<xsl:if test="local-name()='"""+tag+"""'">
<myout><xsl:value-of select="normalize-space(.)"/></myout>
</xsl:if>
<!--traverse down in tree!-->
<xsl:text>
</xsl:text>
<xsl:apply-templates />
</xsl:template>
</xsl:stylesheet>"""
if processor_type == 1:
styledoc = etree.XML(xsl)
style = etree.XSLT(styledoc)
doc = etree.parse(open(rdfname, 'r'))
strres = str(style(doc))
elif processor_type == 2:
styledoc = libxml2.parseDoc(xsl)
style = libxslt.parseStylesheetDoc(styledoc)
doc = libxml2.parseFile(rdfname)
result = style.applyStylesheet(doc, None)
strres = style.saveResultToString(result)
style.freeStylesheet()
doc.freeDoc()
result.freeDoc()
else:
# no xml parser found
strres = ""
ritems = []
if len(strres) == 0:
return []
else:
lines = strres.split("\n")
for line in lines:
# take only those with myout..
if line.count("<myout>") > 0:
# remove the myout tag..
line = line[9:]
line = line[:-8]
if searchwith:
if line.count(searchwith) > 0:
ritems.append(line)
else:
ritems.append(line)
return ritems
if __name__ == "__main__":
pass
| gpl-2.0 | -6,103,226,367,035,997,000 | 32.142132 | 79 | 0.607559 | false |
jlettvin/Unicode | py2/test/test_Array.py | 1 | 3301 | # -*- coding: utf8 -*-
__module__ = "test_Array.py"
__author__ = "Jonathan D. Lettvin"
__copyright__ = "\
Copyright(C) 2016 Jonathan D. Lettvin, All Rights Reserved"
__credits__ = [ "Jonathan D. Lettvin" ]
__license__ = "GPLv3"
__version__ = "0.0.1"
__maintainer__ = "Jonathan D. Lettvin"
__email__ = "[email protected]"
__contact__ = "[email protected]"
__status__ = "Demonstration"
__date__ = "20161107"
import unittest2
import inspect
import sys
sys.path.append('.')
sys.path.append('..')
from Self import ( Self )
from Array import ( Array )
class ArrayTestCase(unittest2.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_1x1(self):
"""
Construct a zero-initialized one member array.
Check for expected initialization.
"""
data = [[0]]
expect = {'shape': (1, 1), 'data': data, 'size': 1}
uniarray = Array((1,1), 0)
self.assertEquals(uniarray, expect, Self.doc())
def test_1x2(self):
"""
Construct a -1 initialized two member array.
Check for expected initialization.
"""
data = [[-1, -1]]
expect = {'shape': (1, 2), 'data': data, 'size': 2}
uniarray = Array((1,2), -1)
self.assertEquals(uniarray, expect, Self.doc())
def test_2x2x2(self):
"""
        Construct an 8-initialized eight member array.
Check for expected initialization.
"""
data = [[[8, 8], [8, 8]], [[8, 8], [8, 8]]]
expect = {'shape': (2, 2, 2), 'data': data, 'size': 8}
uniarray = Array((2, 2, 2), 8)
self.assertEquals(uniarray, expect, Self.doc())
def test_2x2x2_modify_element(self):
"""
        Construct an 8-initialized eight member array.
        Modify the last element by index, setting it to 9.
"""
data = [[[8, 8], [8, 8]], [[8, 8], [8, 9]]]
expect = {'shape': (2, 2, 2), 'data': data, 'size': 8}
uniarray = Array((2, 2, 2), 8)
uniarray[1,1,1] = 9
self.assertEquals(uniarray, expect, Self.doc())
def test_2x2x2_modify_pair(self):
"""
Construct an 8 initialized eight member array.
Modify a pair.
"""
data = [[[8, 8], [8, 8]], [[8, 8], [7, 9]]]
expect = {'shape': (2, 2, 2), 'data': data, 'size': 8}
uniarray = Array((2, 2, 2), 8)
uniarray[1,1] = [7, 9]
self.assertEquals(uniarray, expect, Self.doc())
def test_2x2x2_modify_block(self):
"""
Construct a 0 initialized eight member array.
Modify a top-level block.
"""
data = [[[0, 0], [0, 0]], [[5, 6], [7, 8]]]
expect = {'shape': (2, 2, 2), 'data': data, 'size': 8}
uniarray = Array((2, 2, 2), 0)
#uniarray[1] = [[5, 6], [7, 8]]
#self.assertEquals(uniarray, expect, Self.doc())
def test_2x3_convert_4x1(self):
"""
        Construct a 2-initialized six member (2x3) array.
        Convert it to a 4x1 array and check the result.
"""
before = [[2, 2, 2], [2, 2, 2]]
after = [[5], [5], [5], [5]]
expect = {'shape': (4, 1), 'data': after, 'size': 4}
uniarray = Array((2,3), 2)
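        # The test expects calling the instance with the new shape/data/size
        # (Array.__call__) to reconfigure it in place before the comparison.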
uniarray(**expect)
self.assertEquals(uniarray, expect, Self.doc())
| gpl-3.0 | 5,949,441,183,369,446,000 | 29.284404 | 62 | 0.515601 | false |
Azure/azure-documentdb-python | test/aggregate_tests.py | 1 | 10858 | # The MIT License (MIT)
# Copyright (c) 2017 Microsoft Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import unittest
import uuid
import pytest
from six import with_metaclass
from six.moves import xrange
import azure.cosmos.cosmos_client as cosmos_client
import azure.cosmos.documents as documents
import test.test_config as test_config
from azure.cosmos.errors import HTTPFailure
class _config:
host = test_config._test_config.host
master_key = test_config._test_config.masterKey
connection_policy = test_config._test_config.connectionPolicy
PARTITION_KEY = 'key'
UNIQUE_PARTITION_KEY = 'uniquePartitionKey'
FIELD = 'field'
DOCUMENTS_COUNT = 400
DOCS_WITH_SAME_PARTITION_KEY = 200
docs_with_numeric_id = 0
sum = 0
class AggregateQueryTestSequenceMeta(type):
def __new__(mcs, name, bases, dict):
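        # The metaclass generates one test method per aggregate query
        # configuration assembled by _generate_test_configs() below.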
def _run_one(query, expected_result):
def test(self):
self._execute_query_and_validate_results(mcs.client, mcs.collection_link, query, expected_result)
return test
def _setup():
if (not _config.master_key or not _config.host):
raise Exception(
"You must specify your Azure Cosmos account values for "
"'masterKey' and 'host' at the top of this class to run the "
"tests.")
mcs.client = cosmos_client.CosmosClient(_config.host,
{'masterKey': _config.master_key}, _config.connection_policy)
created_db = test_config._test_config.create_database_if_not_exist(mcs.client)
created_collection = _create_collection(mcs.client, created_db)
mcs.collection_link = _get_collection_link(created_db, created_collection)
# test documents
document_definitions = []
values = [None, False, True, "abc", "cdfg", "opqrs", "ttttttt", "xyz", "oo", "ppp"]
for value in values:
d = {_config.PARTITION_KEY: value}
document_definitions.append(d)
for i in xrange(_config.DOCS_WITH_SAME_PARTITION_KEY):
d = {_config.PARTITION_KEY: _config.UNIQUE_PARTITION_KEY,
'resourceId': i,
_config.FIELD: i + 1}
document_definitions.append(d)
_config.docs_with_numeric_id = \
_config.DOCUMENTS_COUNT - len(values) - _config.DOCS_WITH_SAME_PARTITION_KEY
for i in xrange(_config.docs_with_numeric_id):
d = {_config.PARTITION_KEY: i + 1}
document_definitions.append(d)
_config.sum = _config.docs_with_numeric_id \
* (_config.docs_with_numeric_id + 1) / 2.0
_insert_doc(mcs.collection_link, document_definitions, mcs.client)
def _generate_test_configs():
aggregate_query_format = 'SELECT VALUE {}(r.{}) FROM r WHERE {}'
aggregate_orderby_query_format = 'SELECT VALUE {}(r.{}) FROM r WHERE {} ORDER BY r.{}'
aggregate_configs = [
['AVG', _config.sum / _config.docs_with_numeric_id,
'IS_NUMBER(r.{})'.format(_config.PARTITION_KEY)],
['AVG', None, 'true'],
['COUNT', _config.DOCUMENTS_COUNT, 'true'],
['MAX', 'xyz', 'true'],
['MIN', None, 'true'],
['SUM', _config.sum, 'IS_NUMBER(r.{})'.format(_config.PARTITION_KEY)],
['SUM', None, 'true']
]
for operator, expected, condition in aggregate_configs:
_all_tests.append([
'{} {}'.format(operator, condition),
aggregate_query_format.format(operator, _config.PARTITION_KEY, condition),
expected])
_all_tests.append([
'{} {} OrderBy'.format(operator, condition),
aggregate_orderby_query_format.format(operator, _config.PARTITION_KEY, condition,
_config.PARTITION_KEY),
expected])
aggregate_single_partition_format = 'SELECT VALUE {}(r.{}) FROM r WHERE r.{} = \'{}\''
aggregate_orderby_single_partition_format = 'SELECT {}(r.{}) FROM r WHERE r.{} = \'{}\''
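            # Documents sharing the unique partition key carry 'field' values
            # 1..DOCS_WITH_SAME_PARTITION_KEY, so their sum is n * (n + 1) / 2.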
same_partiton_sum = _config.DOCS_WITH_SAME_PARTITION_KEY * (_config.DOCS_WITH_SAME_PARTITION_KEY + 1) / 2.0
aggregate_single_partition_configs = [
['AVG', same_partiton_sum / _config.DOCS_WITH_SAME_PARTITION_KEY],
['COUNT', _config.DOCS_WITH_SAME_PARTITION_KEY],
['MAX', _config.DOCS_WITH_SAME_PARTITION_KEY],
['MIN', 1],
['SUM', same_partiton_sum]
]
for operator, expected in aggregate_single_partition_configs:
_all_tests.append([
'{} SinglePartition {}'.format(operator, 'SELECT VALUE'),
aggregate_single_partition_format.format(
operator, _config.FIELD, _config.PARTITION_KEY, _config.UNIQUE_PARTITION_KEY), expected])
_all_tests.append([
'{} SinglePartition {}'.format(operator, 'SELECT'),
aggregate_orderby_single_partition_format.format(
operator, _config.FIELD, _config.PARTITION_KEY, _config.UNIQUE_PARTITION_KEY),
Exception()])
def _run_all():
for test_name, query, expected_result in _all_tests:
test_name = "test_%s" % test_name
dict[test_name] = _run_one(query, expected_result)
def _create_collection(client, created_db):
collection_definition = {
'id': 'aggregate tests collection ' + str(uuid.uuid4()),
'indexingPolicy': {
'includedPaths': [
{
'path': '/',
'indexes': [
{
'kind': 'Range',
'dataType': 'Number'
},
{
'kind': 'Range',
'dataType': 'String'
}
]
}
]
},
'partitionKey': {
'paths': [
'/{}'.format(_config.PARTITION_KEY)
],
'kind': documents.PartitionKind.Hash
}
}
collection_options = {'offerThroughput': 10100}
created_collection = client.CreateContainer(_get_database_link(created_db),
collection_definition,
collection_options)
return created_collection
def _insert_doc(collection_link, document_definitions, client):
created_docs = []
for d in document_definitions:
created_doc = client.CreateItem(collection_link, d)
created_docs.append(created_doc)
return created_docs
def _get_database_link(database, is_name_based=True):
if is_name_based:
return 'dbs/' + database['id']
else:
return database['_self']
def _get_collection_link(database, document_collection, is_name_based=True):
if is_name_based:
return _get_database_link(database) + '/colls/' + document_collection['id']
else:
return document_collection['_self']
_all_tests = []
_setup()
_generate_test_configs()
_run_all()
return type.__new__(mcs, name, bases, dict)
@pytest.mark.usefixtures("teardown")
class AggregationQueryTest(with_metaclass(AggregateQueryTestSequenceMeta, unittest.TestCase)):
def _execute_query_and_validate_results(self, client, collection_link, query, expected):
print('Running test with query: ' + query)
# executes the query and validates the results against the expected results
options = {'enableCrossPartitionQuery': 'true'}
result_iterable = client.QueryItems(collection_link, query, options)
def _verify_result():
######################################
# test next() behavior
######################################
it = result_iterable.__iter__()
def invokeNext():
return next(it)
# validate that invocations of next() produces the same results as expected
item = invokeNext()
self.assertEqual(item, expected)
# after the result set is exhausted, invoking next must raise a StopIteration exception
self.assertRaises(StopIteration, invokeNext)
######################################
# test fetch_next_block() behavior
######################################
fetched_res = result_iterable.fetch_next_block()
fetched_size = len(fetched_res)
self.assertEqual(fetched_size, 1)
self.assertEqual(fetched_res[0], expected)
# no more results will be returned
self.assertEqual(result_iterable.fetch_next_block(), [])
if isinstance(expected, Exception):
self.assertRaises(HTTPFailure, _verify_result)
else:
_verify_result()
if __name__ == "__main__":
unittest.main()
| mit | -9,061,135,112,470,184,000 | 41.916996 | 119 | 0.54172 | false |
jopohl/urh | src/urh/models/ProtocolTableModel.py | 1 | 2539 | from collections import defaultdict
from PyQt5.QtCore import pyqtSignal, QModelIndex, Qt
from urh import settings
from urh.models.TableModel import TableModel
from urh.signalprocessing.ProtocolAnalyzer import ProtocolAnalyzer
from urh.ui.actions.DeleteBitsAndPauses import DeleteBitsAndPauses
class ProtocolTableModel(TableModel):
ref_index_changed = pyqtSignal(int)
def __init__(self, proto_analyzer: ProtocolAnalyzer, participants, controller, parent=None):
super().__init__(participants=participants, parent=parent)
self.controller = controller # type: urh.controller.CompareFrameController.CompareFrameController
self.protocol = proto_analyzer
self.active_group_ids = [0]
@property
def diff_columns(self) -> defaultdict(set):
return self._diffs
@property
def refindex(self):
return self._refindex
@refindex.setter
def refindex(self, refindex):
if refindex != self._refindex:
self._refindex = refindex
self.update()
self.ref_index_changed.emit(self._refindex)
def refresh_fonts(self):
self.bold_fonts.clear()
self.text_colors.clear()
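        # Cells differing from the reference message are drawn bold in the
        # difference color; the reference row gets the selected-row color.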
for i in self._diffs.keys():
for j in self._diffs[i]:
self.bold_fonts[i, j] = True
self.text_colors[i, j] = settings.DIFFERENCE_CELL_COLOR
if self._refindex >= 0:
for j in range(self.col_count):
self.text_colors[self._refindex, j] = settings.SELECTED_ROW_COLOR
def delete_range(self, min_row: int, max_row: int, start: int, end: int):
if not self.is_writeable:
return
del_action = DeleteBitsAndPauses(proto_analyzer=self.protocol, start_message=min_row, end_message=max_row,
start=start, end=end, view=self.proto_view, decoded=True,
subprotos=self.controller.protocol_list, update_label_ranges=False)
self.undo_stack.push(del_action)
def flags(self, index: QModelIndex):
if index.isValid():
alignment_offset = self.get_alignment_offset_at(index.row())
if index.column() < alignment_offset:
return Qt.ItemIsSelectable | Qt.ItemIsEnabled
if self.is_writeable:
return Qt.ItemIsEnabled | Qt.ItemIsEditable | Qt.ItemIsSelectable
else:
return Qt.ItemIsEnabled | Qt.ItemIsSelectable
else:
return Qt.NoItemFlags
| gpl-3.0 | 7,597,071,940,439,484,000 | 35.797101 | 114 | 0.634108 | false |
dsparrow27/vortex | src/ds/vortex/nodes/conversion/toArray.py | 1 | 1063 | from ds.vortex.core import baseNode
from ds.vortex.core import plug as plugs
class ToArray(baseNode.BaseNode):
def __init__(self, name):
"""
:param name: str, the name of the node
"""
baseNode.BaseNode.__init__(self, name)
def initialize(self):
baseNode.BaseNode.initialize(self)
self.output = plugs.OutputPlug("output", self)
self.valuePlug_ = plugs.InputPlug("value", self, value=[])
self.addPlug(self.output, clean=True)
self.addPlug(self.valuePlug_, clean=True)
self.plugAffects(self.valuePlug_, self.output)
def compute(self, requestPlug):
baseNode.BaseNode.compute(self, requestPlug=requestPlug)
if not requestPlug == self.output:
return None
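        # The conversion itself: wrap the single input value in a list.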
result = [self.valuePlug_.value]
requestPlug.value = result
requestPlug.dirty = False
return result
def getNode():
"""General function that returns our node, used to get create our node via Ui etc
:return: Node instance
"""
return ToArray
| mit | 5,942,302,473,169,311,000 | 29.371429 | 85 | 0.638758 | false |
mozilla/olympia | src/olympia/bandwagon/tests/test_admin.py | 1 | 14684 | from django.conf import settings
from django.urls import reverse
from pyquery import PyQuery as pq
from olympia import amo
from olympia.amo.tests import TestCase, addon_factory, formset, user_factory
from olympia.bandwagon.models import Collection, CollectionAddon
class TestCollectionAdmin(TestCase):
def setUp(self):
self.admin_home_url = reverse('admin:index')
self.list_url = reverse('admin:bandwagon_collection_changelist')
def test_can_see_bandwagon_module_in_admin_with_collections_edit(self):
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Collections:Edit')
self.client.login(email=user.email)
response = self.client.get(self.admin_home_url, follow=True)
assert response.status_code == 200
doc = pq(response.content)
modules = [x.text for x in doc('a.section')]
assert modules == ['Bandwagon']
def test_can_see_bandwagon_module_in_admin_with_admin_curation(self):
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Admin:Curation')
self.client.login(email=user.email)
response = self.client.get(self.admin_home_url, follow=True)
assert response.status_code == 200
doc = pq(response.content)
modules = [x.text for x in doc('a.section')]
# Curators can also see the Addons module, to edit addon replacements.
assert modules == ['Addons', 'Bandwagon']
def test_can_not_see_bandwagon_module_in_admin_without_permissions(self):
user = user_factory(email='[email protected]')
self.client.login(email=user.email)
response = self.client.get(self.admin_home_url, follow=True)
assert response.status_code == 200
doc = pq(response.content)
modules = [x.text for x in doc('a.section')]
assert modules == []
def test_can_list_with_collections_edit_permission(self):
collection = Collection.objects.create(slug='floob')
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Collections:Edit')
self.client.login(email=user.email)
response = self.client.get(self.list_url, follow=True)
assert response.status_code == 200
assert collection.slug in response.content.decode('utf-8')
def test_can_list_with_admin_curation_permission(self):
collection = Collection.objects.create(slug='floob')
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Admin:Curation')
self.client.login(email=user.email)
response = self.client.get(self.list_url, follow=True)
assert response.status_code == 200
assert collection.slug in response.content.decode('utf-8')
def test_cant_list_without_special_permission(self):
collection = Collection.objects.create(slug='floob')
user = user_factory(email='[email protected]')
self.client.login(email=user.email)
response = self.client.get(self.list_url, follow=True)
assert response.status_code == 403
assert collection.slug not in response.content.decode('utf-8')
def test_can_edit_with_collections_edit_permission(self):
collection = Collection.objects.create(slug='floob')
addon = addon_factory()
addon2 = addon_factory()
collection_addon = CollectionAddon.objects.create(
addon=addon, collection=collection
)
self.detail_url = reverse(
'admin:bandwagon_collection_change', args=(collection.pk,)
)
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Collections:Edit')
self.client.login(email=user.email)
response = self.client.get(self.detail_url, follow=True)
assert response.status_code == 200
content = response.content.decode('utf-8')
assert collection.slug in content
assert str(addon.name) in content
post_data = {
# Django wants the whole form to be submitted, unfortunately.
'application': amo.FIREFOX.id,
'type': collection.type,
'default_locale': collection.default_locale,
'author': user.pk,
}
post_data['slug'] = 'bar'
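        # formset() (from olympia.amo.tests) builds the inline-formset POST
        # fields (management form plus per-row data) the admin change view
        # expects for the CollectionAddon inline.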
post_data.update(
formset(
{
'addon': addon2.pk,
'id': collection_addon.pk,
'collection': collection.pk,
'ordering': 1,
},
prefix='collectionaddon_set',
)
)
response = self.client.post(self.detail_url, post_data, follow=True)
assert response.status_code == 200
collection.reload()
collection_addon.reload()
assert collection.slug == 'bar'
assert collection_addon.addon == addon2
assert collection_addon.collection == collection
assert CollectionAddon.objects.count() == 1
def test_can_not_list_without_collections_edit_permission(self):
collection = Collection.objects.create(slug='floob')
user = user_factory(email='[email protected]')
self.client.login(email=user.email)
response = self.client.get(self.list_url, follow=True)
assert response.status_code == 403
assert collection.slug not in response.content.decode('utf-8')
def test_can_not_edit_without_collections_edit_permission(self):
collection = Collection.objects.create(slug='floob')
self.detail_url = reverse(
'admin:bandwagon_collection_change', args=(collection.pk,)
)
user = user_factory(email='[email protected]')
self.client.login(email=user.email)
response = self.client.get(self.detail_url, follow=True)
assert response.status_code == 403
assert collection.slug not in response.content.decode('utf-8')
post_data = {
# Django wants the whole form to be submitted, unfortunately.
'application': amo.FIREFOX.id,
'type': collection.type,
'default_locale': collection.default_locale,
'author': user.pk,
}
post_data['slug'] = 'bar'
response = self.client.post(self.detail_url, post_data, follow=True)
assert response.status_code == 403
collection.reload()
assert collection.slug == 'floob'
def test_can_do_limited_editing_with_admin_curation_permission(self):
collection = Collection.objects.create(slug='floob')
addon = addon_factory()
addon2 = addon_factory()
collection_addon = CollectionAddon.objects.create(
addon=addon, collection=collection
)
self.detail_url = reverse(
'admin:bandwagon_collection_change', args=(collection.pk,)
)
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Admin:Curation')
self.client.login(email=user.email)
response = self.client.get(self.detail_url, follow=True)
assert response.status_code == 403
assert collection.slug not in response.content.decode('utf-8')
post_data = {
# Django wants the whole form to be submitted, unfortunately.
'application': amo.FIREFOX.id,
'type': collection.type,
'default_locale': collection.default_locale,
'author': user.pk,
}
post_data['slug'] = 'bar'
post_data.update(
formset(
{
'addon': addon2.pk,
'id': collection_addon.pk,
'collection': collection.pk,
'ordering': 1,
},
prefix='collectionaddon_set',
)
)
response = self.client.post(self.detail_url, post_data, follow=True)
assert response.status_code == 403
collection.reload()
collection_addon.reload()
assert collection.slug == 'floob'
assert collection_addon.addon == addon
# Now, if it's a mozilla collection, you can edit it.
mozilla = user_factory(username='mozilla', id=settings.TASK_USER_ID)
collection.update(author=mozilla)
response = self.client.get(self.detail_url, follow=True)
assert response.status_code == 200
content = response.content.decode('utf-8')
assert collection.slug in content
assert str(addon.name) in content
post_data = {
# Django wants the whole form to be submitted, unfortunately.
'application': amo.FIREFOX.id,
'type': collection.type,
'default_locale': collection.default_locale,
'author': mozilla.pk,
}
post_data['slug'] = 'bar'
post_data.update(
formset(
{
'addon': addon2.pk,
'id': collection_addon.pk,
'collection': collection.pk,
'ordering': 1,
},
prefix='collectionaddon_set',
)
)
response = self.client.post(self.detail_url, post_data, follow=True)
assert response.status_code == 200
collection.reload()
assert collection.slug == 'bar'
assert collection.author.pk == mozilla.pk
collection_addon.reload()
assert collection_addon.addon == addon2 # Editing the addon worked.
# You can also edit it if it's your own (allowing you, amongst other
# things, to transfer it to mozilla)
collection.update(author=user)
response = self.client.get(self.detail_url, follow=True)
assert response.status_code == 200
content = response.content.decode('utf-8')
assert collection.slug in content
assert str(addon2.name) in content
assert CollectionAddon.objects.filter(collection=collection).count() == 1
post_data = {
# Django wants the whole form to be submitted, unfortunately.
'application': amo.FIREFOX.id,
'type': collection.type,
'default_locale': collection.default_locale,
'author': mozilla.pk,
}
post_data['slug'] = 'fox'
post_data.update(
formset(
{
'addon': addon2.pk,
'id': collection_addon.pk,
'collection': collection.pk,
'ordering': 1,
},
{
'addon': addon.pk,
'id': '', # Addition, no existing id.
'collection': collection.pk,
'ordering': 2,
},
prefix='collectionaddon_set',
initial_count=1,
)
)
response = self.client.post(self.detail_url, post_data, follow=True)
assert response.status_code == 200
collection.reload()
assert collection.slug == 'fox'
assert collection.author.pk == mozilla.pk
assert (
CollectionAddon.objects.filter(collection=collection).count() == 2
) # Adding the addon worked.
# Delete the first collection addon. We need to alter INITIAL-FORMS and
# the id of the second one, now that this second CollectionAddon
# instance was created.
post_data['collectionaddon_set-INITIAL_FORMS'] = 2
post_data['collectionaddon_set-0-DELETE'] = 'on'
post_data['collectionaddon_set-1-id'] = (
CollectionAddon.objects.filter(collection=collection, addon=addon).get().pk
)
response = self.client.post(self.detail_url, post_data, follow=True)
assert response.status_code == 200
assert CollectionAddon.objects.filter(collection=collection).count() == 1
assert (
CollectionAddon.objects.filter(collection=collection, addon=addon).count()
== 1
)
assert (
CollectionAddon.objects.filter(collection=collection, addon=addon2).count()
== 0
)
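    # Illustrative note, not part of the original test module: the formset()
    # helper used throughout these tests is assumed to build the POST payload
    # for a Django inline formset, i.e. the row fields plus the management
    # form. For a single collectionaddon_set row it would produce roughly:
    #
    #     {
    #         'collectionaddon_set-TOTAL_FORMS': '1',
    #         'collectionaddon_set-INITIAL_FORMS': '1',
    #         'collectionaddon_set-MIN_NUM_FORMS': '0',
    #         'collectionaddon_set-MAX_NUM_FORMS': '1000',
    #         'collectionaddon_set-0-addon': str(addon2.pk),
    #         'collectionaddon_set-0-id': str(collection_addon.pk),
    #         'collectionaddon_set-0-collection': str(collection.pk),
    #         'collectionaddon_set-0-ordering': '1',
    #     }
    #
    # The exact keys depend on the project's formset() implementation; the
    # test above only layers INITIAL_FORMS, a -0-DELETE flag and the new row
    # id on top of this structure when exercising deletion.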
def test_can_not_delete_with_collections_edit_permission(self):
collection = Collection.objects.create(slug='floob')
self.delete_url = reverse(
'admin:bandwagon_collection_delete', args=(collection.pk,)
)
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Collections:Edit')
self.client.login(email=user.email)
response = self.client.get(self.delete_url, follow=True)
assert response.status_code == 403
response = self.client.post(self.delete_url, data={'post': 'yes'}, follow=True)
assert response.status_code == 403
assert Collection.objects.filter(pk=collection.pk).exists()
def test_can_not_delete_with_admin_curation_permission(self):
collection = Collection.objects.create(slug='floob')
self.delete_url = reverse(
'admin:bandwagon_collection_delete', args=(collection.pk,)
)
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Admin:Curation')
self.client.login(email=user.email)
response = self.client.get(self.delete_url, follow=True)
assert response.status_code == 403
response = self.client.post(self.delete_url, data={'post': 'yes'}, follow=True)
assert response.status_code == 403
assert Collection.objects.filter(pk=collection.pk).exists()
# Even a mozilla one.
mozilla = user_factory(username='mozilla', id=settings.TASK_USER_ID)
collection.update(author=mozilla)
response = self.client.get(self.delete_url, follow=True)
assert response.status_code == 403
response = self.client.post(self.delete_url, data={'post': 'yes'}, follow=True)
assert response.status_code == 403
assert Collection.objects.filter(pk=collection.pk).exists()
def test_can_delete_with_admin_advanced_permission(self):
collection = Collection.objects.create(slug='floob')
self.delete_url = reverse(
'admin:bandwagon_collection_delete', args=(collection.pk,)
)
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Admin:Advanced')
self.client.login(email=user.email)
response = self.client.post(self.delete_url, data={'post': 'yes'}, follow=True)
assert response.status_code == 200
assert not Collection.objects.filter(pk=collection.pk).exists()
| bsd-3-clause | -8,593,084,798,155,228,000 | 41.810496 | 87 | 0.60869 | false |
cbertinato/pandas | pandas/tests/io/test_gcs.py | 1 | 2310 | from io import StringIO
import numpy as np
import pytest
from pandas import DataFrame, date_range, read_csv
from pandas.util import _test_decorators as td
from pandas.util.testing import assert_frame_equal
from pandas.io.common import is_gcs_url
def test_is_gcs_url():
assert is_gcs_url("gcs://pandas/somethingelse.com")
assert is_gcs_url("gs://pandas/somethingelse.com")
assert not is_gcs_url("s3://pandas/somethingelse.com")
@td.skip_if_no('gcsfs')
def test_read_csv_gcs(monkeypatch):
df1 = DataFrame({'int': [1, 3], 'float': [2.0, np.nan], 'str': ['t', 's'],
'dt': date_range('2018-06-18', periods=2)})
class MockGCSFileSystem:
def open(*args):
return StringIO(df1.to_csv(index=False))
monkeypatch.setattr('gcsfs.GCSFileSystem', MockGCSFileSystem)
df2 = read_csv('gs://test/test.csv', parse_dates=['dt'])
assert_frame_equal(df1, df2)
@td.skip_if_no('gcsfs')
def test_to_csv_gcs(monkeypatch):
df1 = DataFrame({'int': [1, 3], 'float': [2.0, np.nan], 'str': ['t', 's'],
'dt': date_range('2018-06-18', periods=2)})
s = StringIO()
class MockGCSFileSystem:
def open(*args):
return s
monkeypatch.setattr('gcsfs.GCSFileSystem', MockGCSFileSystem)
df1.to_csv('gs://test/test.csv', index=True)
df2 = read_csv(StringIO(s.getvalue()), parse_dates=['dt'], index_col=0)
assert_frame_equal(df1, df2)
@td.skip_if_no('gcsfs')
def test_gcs_get_filepath_or_buffer(monkeypatch):
df1 = DataFrame({'int': [1, 3], 'float': [2.0, np.nan], 'str': ['t', 's'],
'dt': date_range('2018-06-18', periods=2)})
def mock_get_filepath_or_buffer(*args, **kwargs):
return (StringIO(df1.to_csv(index=False)),
None, None, False)
monkeypatch.setattr('pandas.io.gcs.get_filepath_or_buffer',
mock_get_filepath_or_buffer)
df2 = read_csv('gs://test/test.csv', parse_dates=['dt'])
assert_frame_equal(df1, df2)
@pytest.mark.skipif(td.safe_import('gcsfs'),
reason='Only check when gcsfs not installed')
def test_gcs_not_present_exception():
with pytest.raises(ImportError) as e:
read_csv('gs://test/test.csv')
assert 'gcsfs library is required' in str(e.value)
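# Illustrative note, not part of the upstream test module: every test above
# monkeypatches the object pandas resolves at call time (gcsfs.GCSFileSystem
# or the internal get_filepath_or_buffer hook), so no real GCS bucket or
# network access is needed. A further test following the same pattern could
# look roughly like this (bucket and key names are placeholders):
#
#   @td.skip_if_no('gcsfs')
#   def test_read_csv_gcs_simple(monkeypatch):
#       df1 = DataFrame({'a': [1, 2]})
#
#       class MockGCSFileSystem:
#           def open(*args):
#               return StringIO(df1.to_csv(index=False))
#
#       monkeypatch.setattr('gcsfs.GCSFileSystem', MockGCSFileSystem)
#       assert_frame_equal(df1, read_csv('gs://bucket/key.csv'))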
| bsd-3-clause | 3,071,893,179,517,933,000 | 31.083333 | 78 | 0.618615 | false |
kuldat/pyramid_oauthlib | pyramid_oauthlib/provider/oauth2/interfaces.py | 1 | 17421 | from zope.interface import Interface, Attribute
class IOAuth2Provider(Interface):
server = Attribute("""oauthlib.oauth2.Server instance""")
class IOAuth2DataProvider(Interface):
def get_client(client_id, *args, **kw):
"""Client getter function.
Accepts one parameter `client_id`, and returns
a client object with at least these information:
- client_id: A random string
- client_secret: A random string
- client_type: A string represents if it is `confidential`
- redirect_uris: A list of redirect uris
- default_redirect_uri: One of the redirect uris
- default_scopes: Default scopes of the client
The client may contain more information, which is suggested:
- allowed_grant_types: A list of grant types
- allowed_response_types: A list of response types
- validate_scopes: A function to validate scopes
"""
def get_user(username, password, *args, **kw):
"""User getter function.
Required for password credential authorization
"""
def get_token(access_token=None, refresh_token=None):
"""Token getter function.
Accepts an `access_token` or `refresh_token` parameters,
and returns a token object with at least these information:
- access_token: A string token
- refresh_token: A string token
- client_id: ID of the client
- scopes: A list of scopes
- expires: A `datetime.datetime` object
- user: The user object
"""
def set_token(token, request):
"""Save the bearer token.
Accepts two parameters at least, one is token,
the other is request::
@oauth.tokensetter
def set_token(token, request, *args, **kwargs):
save_token(token, request.client, request.user)
The parameter token is a dict, that looks like::
{
u'access_token': u'6JwgO77PApxsFCU8Quz0pnL9s23016',
u'token_type': u'Bearer',
u'expires_in': 3600,
u'scope': u'email address'
}
The request is an object, that contains an user object and a
client object.
"""
def get_grant(client_id, code, *args, **kw):
"""Grant getter function.
The function accepts `client_id`, `code` and more
It returns a grant object with at least these information:
- delete: A function to delete itself
"""
def set_grant(client_id, code, request, *args, **kwargs):
"""Grant setter function.
The function accepts `client_id`, `code`, `request` and more::
"""
class IOAuth2Validator(Interface):
def authenticate_client(self, request, *args, **kwargs):
"""Authenticate client through means outside the OAuth 2 spec.
Means of authentication is negotiated beforehand and may for example
be `HTTP Basic Authentication Scheme`_ which utilizes the Authorization
header.
        Headers may be accessed through request.headers and parameters found in
both body and query can be obtained by direct attribute access, i.e.
request.client_id for client_id in the URL query.
OBS! Certain grant types rely on this authentication, possibly with
other fallbacks, and for them to recognize this authorization please
set the client attribute on the request (request.client). Note that
preferably this client object should have a client_id attribute of
unicode type (request.client.client_id).
:param request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Resource Owner Password Credentials Grant (may be disabled)
- Client Credentials Grant
- Refresh Token Grant
.. _`HTTP Basic Authentication Scheme`: http://tools.ietf.org/html/rfc1945#section-11.1
"""
def authenticate_client_id(self, client_id, request, *args, **kwargs):
"""Ensure client_id belong to a non-confidential client.
A non-confidential client is one that is not required to authenticate
through other means, such as using HTTP Basic.
Note, while not strictly necessary it can often be very convenient
to set request.client to the client object associated with the
given client_id.
:param request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
"""
def confirm_redirect_uri(self, client_id, code, redirect_uri, client,
request, *args, **kwargs):
"""Ensure client is authorized to redirect to the redirect_uri requested.
If the client specifies a redirect_uri when obtaining code then
that redirect URI must be bound to the code and verified equal
in this method.
All clients should register the absolute URIs of all URIs they intend
to redirect to. The registration is outside of the scope of oauthlib.
:param client_id: Unicode client identifier
:param code: Unicode authorization_code.
:param redirect_uri: Unicode absolute URI
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Authorization Code Grant (during token request)
"""
def get_default_redirect_uri(self, client_id, request, *args, **kwargs):
"""Get the default redirect URI for the client.
:param client_id: Unicode client identifier
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: The default redirect URI for the client
Method is used by:
- Authorization Code Grant
- Implicit Grant
"""
def get_default_scopes(self, client_id, request, *args, **kwargs):
"""Get the default scopes for the client.
:param client_id: Unicode client identifier
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: List of default scopes
Method is used by all core grant types:
- Authorization Code Grant
- Implicit Grant
- Resource Owner Password Credentials Grant
- Client Credentials grant
"""
def get_original_scopes(self, refresh_token, request, *args, **kwargs):
"""Get the list of scopes associated with the refresh token.
:param refresh_token: Unicode refresh token
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: List of scopes.
Method is used by:
- Refresh token grant
"""
def invalidate_authorization_code(self, client_id, code, request, *args, **kwargs):
"""Invalidate an authorization code after use.
:param client_id: Unicode client identifier
:param code: The authorization code grant (request.code).
:param request: The HTTP Request (oauthlib.common.Request)
Method is used by:
- Authorization Code Grant
"""
def save_authorization_code(self, client_id, code, request, *args, **kwargs):
"""Persist the authorization_code.
The code should at minimum be associated with:
        - a client and its client_id
- the redirect URI used (request.redirect_uri)
- whether the redirect URI used is the client default or not
- a resource owner / user (request.user)
- authorized scopes (request.scopes)
The authorization code grant dict (code) holds at least the key 'code'::
{'code': 'sdf345jsdf0934f'}
:param client_id: Unicode client identifier
:param code: A dict of the authorization code grant.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: The default redirect URI for the client
Method is used by:
- Authorization Code Grant
"""
def save_bearer_token(self, token, request, *args, **kwargs):
"""Persist the Bearer token.
The Bearer token should at minimum be associated with:
        - a client and its client_id, if available
- a resource owner / user (request.user)
- authorized scopes (request.scopes)
- an expiration time
- a refresh token, if issued
The Bearer token dict may hold a number of items::
{
'token_type': 'Bearer',
'access_token': 'askfjh234as9sd8',
'expires_in': 3600,
'scope': 'string of space separated authorized scopes',
'refresh_token': '23sdf876234', # if issued
'state': 'given_by_client', # if supplied by client
}
Note that while "scope" is a string-separated list of authorized scopes,
the original list is still available in request.scopes
:param client_id: Unicode client identifier
:param token: A Bearer token dict
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: The default redirect URI for the client
Method is used by all core grant types issuing Bearer tokens:
- Authorization Code Grant
- Implicit Grant
- Resource Owner Password Credentials Grant (might not associate a client)
- Client Credentials grant
"""
def validate_bearer_token(self, token, scopes, request):
"""Ensure the Bearer token is valid and authorized access to scopes.
:param token: A string of random characters.
:param scopes: A list of scopes associated with the protected resource.
:param request: The HTTP Request (oauthlib.common.Request)
A key to OAuth 2 security and restricting impact of leaked tokens is
the short expiration time of tokens, *always ensure the token has not
expired!*.
Two different approaches to scope validation:
1) all(scopes). The token must be authorized access to all scopes
associated with the resource. For example, the
token has access to ``read-only`` and ``images``,
thus the client can view images but not upload new.
Allows for fine grained access control through
combining various scopes.
2) any(scopes). The token must be authorized access to one of the
scopes associated with the resource. For example,
token has access to ``read-only-images``.
Allows for fine grained, although arguably less
convenient, access control.
A powerful way to use scopes would mimic UNIX ACLs and see a scope
as a group with certain privileges. For a restful API these might
map to HTTP verbs instead of read, write and execute.
Note, the request.user attribute can be set to the resource owner
associated with this token. Similarly the request.client and
request.scopes attribute can be set to associated client object
and authorized scopes. If you then use a decorator such as the
one provided for django these attributes will be made available
in all protected views as keyword arguments.
:param token: Unicode Bearer token
:param scopes: List of scopes (defined by you)
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is indirectly used by all core Bearer token issuing grant types:
- Authorization Code Grant
- Implicit Grant
- Resource Owner Password Credentials Grant
- Client Credentials Grant
"""
def validate_client_id(self, client_id, request, *args, **kwargs):
"""Ensure client_id belong to a valid and active client.
Note, while not strictly necessary it can often be very convenient
to set request.client to the client object associated with the
given client_id.
:param request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Implicit Grant
"""
def validate_code(self, client_id, code, client, request, *args, **kwargs):
"""Ensure the authorization_code is valid and assigned to client.
OBS! The request.user attribute should be set to the resource owner
associated with this authorization code. Similarly request.scopes and
request.state must also be set.
:param client_id: Unicode client identifier
:param code: Unicode authorization code
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Authorization Code Grant
"""
def validate_grant_type(self, client_id, grant_type, client, request, *args, **kwargs):
"""Ensure client is authorized to use the grant_type requested.
:param client_id: Unicode client identifier
:param grant_type: Unicode grant type, i.e. authorization_code, password.
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Resource Owner Password Credentials Grant
- Client Credentials Grant
- Refresh Token Grant
"""
def validate_redirect_uri(self, client_id, redirect_uri, request, *args, **kwargs):
"""Ensure client is authorized to redirect to the redirect_uri requested.
All clients should register the absolute URIs of all URIs they intend
to redirect to. The registration is outside of the scope of oauthlib.
:param client_id: Unicode client identifier
:param redirect_uri: Unicode absolute URI
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Implicit Grant
"""
def validate_refresh_token(self, refresh_token, client, request, *args, **kwargs):
"""Ensure the Bearer token is valid and authorized access to scopes.
OBS! The request.user attribute should be set to the resource owner
associated with this refresh token.
:param refresh_token: Unicode refresh token
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Authorization Code Grant (indirectly by issuing refresh tokens)
- Resource Owner Password Credentials Grant (also indirectly)
- Refresh Token Grant
"""
def validate_response_type(self, client_id, response_type, client, request, *args, **kwargs):
"""Ensure client is authorized to use the response_type requested.
:param client_id: Unicode client identifier
:param response_type: Unicode response type, i.e. code, token.
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Implicit Grant
"""
def validate_scopes(self, client_id, scopes, client, request, *args, **kwargs):
"""Ensure the client is authorized access to requested scopes.
:param client_id: Unicode client identifier
:param scopes: List of scopes (defined by you)
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by all core grant types:
- Authorization Code Grant
- Implicit Grant
- Resource Owner Password Credentials Grant
- Client Credentials Grant
"""
def validate_user(self, username, password, client, request, *args, **kwargs):
"""Ensure the username and password is valid.
OBS! The validation should also set the user attribute of the request
to a valid resource owner, i.e. request.user = username or similar. If
not set you will be unable to associate a token with a user in the
        persistence method used (commonly, save_bearer_token).
:param username: Unicode username
:param password: Unicode password
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Resource Owner Password Credentials Grant
""" | mit | 2,059,025,845,611,090,400 | 38.41629 | 97 | 0.631824 | false |
nataddrho/DigiCue-USB | Python3/src/venv/Lib/site-packages/pip/_internal/commands/list.py | 1 | 11430 | import json
import logging
from pip._internal.cli import cmdoptions
from pip._internal.cli.req_command import IndexGroupCommand
from pip._internal.cli.status_codes import SUCCESS
from pip._internal.exceptions import CommandError
from pip._internal.index.collector import LinkCollector
from pip._internal.index.package_finder import PackageFinder
from pip._internal.models.selection_prefs import SelectionPreferences
from pip._internal.utils.compat import stdlib_pkgs
from pip._internal.utils.misc import (
dist_is_editable,
get_installed_distributions,
tabulate,
write_output,
)
from pip._internal.utils.packaging import get_installer
from pip._internal.utils.parallel import map_multithread
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from optparse import Values
from typing import Iterator, List, Set, Tuple
from pip._vendor.pkg_resources import Distribution
from pip._internal.network.session import PipSession
logger = logging.getLogger(__name__)
class ListCommand(IndexGroupCommand):
"""
List installed packages, including editables.
Packages are listed in a case-insensitive sorted order.
"""
ignore_require_venv = True
usage = """
%prog [options]"""
def add_options(self):
# type: () -> None
self.cmd_opts.add_option(
'-o', '--outdated',
action='store_true',
default=False,
help='List outdated packages')
self.cmd_opts.add_option(
'-u', '--uptodate',
action='store_true',
default=False,
help='List uptodate packages')
self.cmd_opts.add_option(
'-e', '--editable',
action='store_true',
default=False,
help='List editable projects.')
self.cmd_opts.add_option(
'-l', '--local',
action='store_true',
default=False,
help=('If in a virtualenv that has global access, do not list '
'globally-installed packages.'),
)
self.cmd_opts.add_option(
'--user',
dest='user',
action='store_true',
default=False,
help='Only output packages installed in user-site.')
self.cmd_opts.add_option(cmdoptions.list_path())
self.cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help=("Include pre-release and development versions. By default, "
"pip only finds stable versions."),
)
self.cmd_opts.add_option(
'--format',
action='store',
dest='list_format',
default="columns",
choices=('columns', 'freeze', 'json'),
help="Select the output format among: columns (default), freeze, "
"or json",
)
self.cmd_opts.add_option(
'--not-required',
action='store_true',
dest='not_required',
help="List packages that are not dependencies of "
"installed packages.",
)
self.cmd_opts.add_option(
'--exclude-editable',
action='store_false',
dest='include_editable',
help='Exclude editable package from output.',
)
self.cmd_opts.add_option(
'--include-editable',
action='store_true',
dest='include_editable',
help='Include editable package from output.',
default=True,
)
self.cmd_opts.add_option(cmdoptions.list_exclude())
index_opts = cmdoptions.make_option_group(
cmdoptions.index_group, self.parser
)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, self.cmd_opts)
def _build_package_finder(self, options, session):
# type: (Values, PipSession) -> PackageFinder
"""
Create a package finder appropriate to this list command.
"""
link_collector = LinkCollector.create(session, options=options)
# Pass allow_yanked=False to ignore yanked versions.
selection_prefs = SelectionPreferences(
allow_yanked=False,
allow_all_prereleases=options.pre,
)
return PackageFinder.create(
link_collector=link_collector,
selection_prefs=selection_prefs,
)
def run(self, options, args):
# type: (Values, List[str]) -> int
if options.outdated and options.uptodate:
raise CommandError(
"Options --outdated and --uptodate cannot be combined.")
cmdoptions.check_list_path_option(options)
skip = set(stdlib_pkgs)
if options.excludes:
skip.update(options.excludes)
packages = get_installed_distributions(
local_only=options.local,
user_only=options.user,
editables_only=options.editable,
include_editables=options.include_editable,
paths=options.path,
skip=skip,
)
# get_not_required must be called firstly in order to find and
# filter out all dependencies correctly. Otherwise a package
# can't be identified as requirement because some parent packages
# could be filtered out before.
if options.not_required:
packages = self.get_not_required(packages, options)
if options.outdated:
packages = self.get_outdated(packages, options)
elif options.uptodate:
packages = self.get_uptodate(packages, options)
self.output_package_listing(packages, options)
return SUCCESS
def get_outdated(self, packages, options):
# type: (List[Distribution], Values) -> List[Distribution]
return [
dist for dist in self.iter_packages_latest_infos(packages, options)
if dist.latest_version > dist.parsed_version
]
def get_uptodate(self, packages, options):
# type: (List[Distribution], Values) -> List[Distribution]
return [
dist for dist in self.iter_packages_latest_infos(packages, options)
if dist.latest_version == dist.parsed_version
]
def get_not_required(self, packages, options):
# type: (List[Distribution], Values) -> List[Distribution]
dep_keys = set() # type: Set[Distribution]
for dist in packages:
dep_keys.update(requirement.key for requirement in dist.requires())
# Create a set to remove duplicate packages, and cast it to a list
# to keep the return type consistent with get_outdated and
# get_uptodate
return list({pkg for pkg in packages if pkg.key not in dep_keys})
def iter_packages_latest_infos(self, packages, options):
# type: (List[Distribution], Values) -> Iterator[Distribution]
with self._build_session(options) as session:
finder = self._build_package_finder(options, session)
def latest_info(dist):
# type: (Distribution) -> Distribution
all_candidates = finder.find_all_candidates(dist.key)
if not options.pre:
# Remove prereleases
all_candidates = [candidate for candidate in all_candidates
if not candidate.version.is_prerelease]
evaluator = finder.make_candidate_evaluator(
project_name=dist.project_name,
)
best_candidate = evaluator.sort_best_candidate(all_candidates)
if best_candidate is None:
return None
remote_version = best_candidate.version
if best_candidate.link.is_wheel:
typ = 'wheel'
else:
typ = 'sdist'
# This is dirty but makes the rest of the code much cleaner
dist.latest_version = remote_version
dist.latest_filetype = typ
return dist
for dist in map_multithread(latest_info, packages):
if dist is not None:
yield dist
def output_package_listing(self, packages, options):
# type: (List[Distribution], Values) -> None
packages = sorted(
packages,
key=lambda dist: dist.project_name.lower(),
)
if options.list_format == 'columns' and packages:
data, header = format_for_columns(packages, options)
self.output_package_listing_columns(data, header)
elif options.list_format == 'freeze':
for dist in packages:
if options.verbose >= 1:
write_output("%s==%s (%s)", dist.project_name,
dist.version, dist.location)
else:
write_output("%s==%s", dist.project_name, dist.version)
elif options.list_format == 'json':
write_output(format_for_json(packages, options))
def output_package_listing_columns(self, data, header):
# type: (List[List[str]], List[str]) -> None
# insert the header first: we need to know the size of column names
if len(data) > 0:
data.insert(0, header)
pkg_strings, sizes = tabulate(data)
# Create and add a separator.
if len(data) > 0:
pkg_strings.insert(1, " ".join(map(lambda x: '-' * x, sizes)))
for val in pkg_strings:
write_output(val)
def format_for_columns(pkgs, options):
# type: (List[Distribution], Values) -> Tuple[List[List[str]], List[str]]
"""
Convert the package data into something usable
by output_package_listing_columns.
"""
running_outdated = options.outdated
# Adjust the header for the `pip list --outdated` case.
if running_outdated:
header = ["Package", "Version", "Latest", "Type"]
else:
header = ["Package", "Version"]
data = []
if options.verbose >= 1 or any(dist_is_editable(x) for x in pkgs):
header.append("Location")
if options.verbose >= 1:
header.append("Installer")
for proj in pkgs:
# if we're working on the 'outdated' list, separate out the
# latest_version and type
row = [proj.project_name, proj.version]
if running_outdated:
row.append(proj.latest_version)
row.append(proj.latest_filetype)
if options.verbose >= 1 or dist_is_editable(proj):
row.append(proj.location)
if options.verbose >= 1:
row.append(get_installer(proj))
data.append(row)
return data, header
def format_for_json(packages, options):
# type: (List[Distribution], Values) -> str
data = []
for dist in packages:
info = {
'name': dist.project_name,
'version': str(dist.version),
}
if options.verbose >= 1:
info['location'] = dist.location
info['installer'] = get_installer(dist)
if options.outdated:
info['latest_version'] = str(dist.latest_version)
info['latest_filetype'] = dist.latest_filetype
data.append(info)
return json.dumps(data)
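# Illustrative note, not part of upstream pip: for two installed
# distributions, format_for_columns() returns a (data, header) pair such as
#
#   header = ['Package', 'Version']
#   data = [['pip', '20.3.4'], ['setuptools', '52.0.0']]
#
# which output_package_listing_columns() renders as
#
#   Package    Version
#   ---------- -------
#   pip        20.3.4
#   setuptools 52.0.0
#
# while format_for_json() yields
#
#   [{"name": "pip", "version": "20.3.4"},
#    {"name": "setuptools", "version": "52.0.0"}]
#
# The package names and versions shown are placeholders.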
| mit | 2,567,889,296,724,694,000 | 34.386997 | 79 | 0.584164 | false |
SCM-NV/qmworks-namd | nanoqm/workflows/schemas.py | 1 | 9939 | """Schemas to validate user input.
Index
-----
.. currentmodule:: nanoqm.workflows.schemas
.. autosummary::
{autosummary}
API
---
{autodata}
"""
__all__ = [
'schema_cp2k_general_settings',
'schema_derivative_couplings',
'schema_single_points',
'schema_distribute_absorption_spectrum',
'schema_distribute_derivative_couplings',
'schema_distribute_single_points',
'schema_absorption_spectrum',
'schema_ipr',
'schema_coop']
import os
from numbers import Real
import pkg_resources as pkg
from schema import And, Optional, Or, Schema, Use
from typing import Any, Dict, Iterable
def equal_lambda(name: str) -> And:
"""Create an schema checking that the keyword matches the expected value."""
return And(
str, Use(str.lower), lambda s: s == name)
def any_lambda(array: Iterable[str]) -> And:
"""Create an schema checking that the keyword matches one of the expected values."""
return And(
str, Use(str.lower), lambda s: s in array)
def merge(d1: Dict[str, Any], d2: Dict[str, Any]) -> Dict[str, Any]:
"""Merge two dictionaries using without modifying the original."""
x = d1.copy()
x.update(d2)
return x
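# Illustrative usage of the helpers above (not part of the original module;
# the values are arbitrary examples):
#
#   >>> equal_lambda("slurm").validate("SLURM")
#   'slurm'
#   >>> any_lambda(("x", "y", "z")).validate("Y")
#   'y'
#   >>> merge({"a": 1}, {"b": 2})
#   {'a': 1, 'b': 2}
#
# The And/Use combination lower-cases the keyword before the membership
# check, which is why mixed-case input validates.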
#: Schema to validate the CP2K general settings
schema_cp2k_general_settings = Schema({
# "Basis set to carry out the quantum chemistry simulation"
"basis": str,
# "Pseudo-potential to carry out the quantum chemistry simulation"
"potential": str,
# Charge of the system
Optional("charge", default=0): int,
# Multiplicity
Optional("multiplicity", default=1): int,
# Specify the Cartesian components for the cell vector
"cell_parameters": Or(
Real,
lambda xs: len(xs) == 3 and isinstance(xs, list),
lambda xs: len(xs) == 3 and all(len(r) == 3 for r in xs)),
# Type of periodicity
"periodic": any_lambda(("none", "x", "y", "z", "xy", "xy", "yz", "xyz")),
# Specify the angles between the vectors defining the unit cell
Optional("cell_angles"): list,
# Path to the folder containing the basis set specifications
Optional("path_basis", default=pkg.resource_filename("nanoqm", "basis")): os.path.isdir,
# Settings describing the input of the quantum package
"cp2k_settings_main": object,
# Settings describing the input of the quantum package
# to compute the guess wavefunction"
"cp2k_settings_guess": object,
# Restart File Name
Optional("wfn_restart_file_name", default=None): Or(str, None),
# File containing the Parameters of the cell if those
# parameters change during the MD simulation.
Optional("file_cell_parameters", default=None): Or(str, None),
    # Quality of the auxiliary basis cFIT
Optional("aux_fit", default="verygood"):
any_lambda(("low", "medium", "good", "verygood", "excellent")),
# executable name
Optional("executable", default="cp2k.popt"): any_lambda(
[f"cp2k.{ext}" for ext in (
# Serial single core testing and debugging
"sdbg",
# Serial general single core usage
"sopt",
# Parallel (only OpenMP), single node, multi core
"ssmp",
# Parallel (only MPI) multi-node testing and debugging
"pdbg",
# Parallel (only MPI) general usage, no threads
"popt",
# parallel (MPI + OpenMP) general usage, threading might improve scalability and memory usage
"psmp"
)])
})
#: Dictionary with the options common to all workflows
dict_general_options = {
# Number of occupied/virtual orbitals to use
Optional('active_space', default=[10, 10]): And(list, lambda xs: len(xs) == 2),
# Index of the HOMO
Optional("nHOMO"): int,
# Index of the orbitals to compute the couplings
Optional("mo_index_range"): tuple,
# "default quantum package used"
Optional("package_name", default="cp2k"): str,
# project
Optional("project_name", default="namd"): str,
# Working directory
Optional("scratch_path", default=None): Or(None, str),
# path to the HDF5 to store the results
Optional("path_hdf5", default="quantum.hdf5"): str,
# path to xyz trajectory of the Molecular dynamics
"path_traj_xyz": os.path.exists,
    # Number from which to start enumerating the folders created for each point
# in the MD
Optional("enumerate_from", default=0): int,
# Ignore the warning issues by the quantum package and keep computing
Optional("ignore_warnings", default=False): bool,
# Calculate the guess wave function in either the first point of the
# trajectory or in all
Optional("calculate_guesses", default="first"):
any_lambda(("first", "all")),
# Units of the molecular geometry on the MD file
Optional("geometry_units", default="angstrom"):
any_lambda(("angstrom", "au")),
# Integration time step used for the MD (femtoseconds)
Optional("dt", default=1): Real,
# Deactivate the computation of the orbitals for debugging purposes
Optional("compute_orbitals", default=True): bool,
# General settings
"cp2k_general_settings": schema_cp2k_general_settings
}
#: Dict with input options to run a derivate coupling workflow
dict_derivative_couplings = {
# Name of the workflow to run
"workflow": equal_lambda("derivative_couplings"),
# Algorithm used to compute the derivative couplings
Optional("algorithm", default="levine"):
any_lambda(("levine", "3points")),
# Use MPI to compute the couplings
Optional("mpi", default=False): bool,
# Track the crossing between states
Optional("tracking", default=True): bool,
# Write the overlaps in ascii
Optional("write_overlaps", default=False): bool,
    # Compute the overlap between molecular geometries using dephasing
Optional("overlaps_deph", default=False): bool
}
dict_merged_derivative_couplings = merge(
dict_general_options, dict_derivative_couplings)
#: Schema to validate the input for a derivative coupling calculation
schema_derivative_couplings = Schema(
dict_merged_derivative_couplings)
#: Schema to validate the input for a job scheduler
schema_job_scheduler = Schema({
Optional("scheduler", default="slurm"):
any_lambda(("slurm", "pbs")),
Optional("nodes", default=1): int,
Optional("tasks", default=1): int,
Optional("wall_time", default="01:00:00"): str,
Optional("job_name", default="namd"): str,
Optional("queue_name", default="short"): str,
Optional("load_modules", default=""): str,
Optional("free_format", default=""): str
})
#: Input options to distribute a job
dict_distribute = {
Optional("workdir", default=os.getcwd()): str,
# Number of chunks to split the trajectory
"blocks": int,
# Resource manager configuration
"job_scheduler": schema_job_scheduler,
# General settings
"cp2k_general_settings": schema_cp2k_general_settings,
}
#: input to distribute a derivative coupling job
dict_distribute_derivative_couplings = {
# Name of the workflow to run
"workflow": equal_lambda("distribute_derivative_couplings")
}
#: Schema to validate the input to distribute a derivate coupling calculation
schema_distribute_derivative_couplings = Schema(
merge(
dict_distribute,
merge(
dict_merged_derivative_couplings,
dict_distribute_derivative_couplings)))
#: Input for an absorption spectrum calculation
dict_absorption_spectrum = {
# Name of the workflow to run
"workflow": equal_lambda("absorption_spectrum"),
# Type of TDDFT calculations. Available: sing_orb, stda, stddft
Optional("tddft", default="stda"): And(
str, Use(str.lower), lambda s: s in ("sing_orb", "stda", "stdft")),
    # Interval between MD points where the oscillators are computed
Optional("stride", default=1): int,
# description: Exchange-correlation functional used in the DFT
# calculations,
Optional("xc_dft", default="pbe"): str
}
dict_merged_absorption_spectrum = merge(
dict_general_options, dict_absorption_spectrum)
#: Schema to validate the input for an absorption spectrum calculation
schema_absorption_spectrum = Schema(dict_merged_absorption_spectrum)
dict_distribute_absorption_spectrum = {
# Name of the workflow to run
"workflow": equal_lambda("distribute_absorption_spectrum")
}
schema_distribute_absorption_spectrum = Schema(
merge(dict_distribute, merge(
dict_merged_absorption_spectrum, dict_distribute_absorption_spectrum)))
dict_single_points = {
# Name of the workflow to run
"workflow": any_lambda(("single_points", "ipr_calculation", "coop_calculation")),
# General settings
"cp2k_general_settings": schema_cp2k_general_settings
}
#: input to distribute single point calculations
dict_distribute_single_points = {
# Name of the workflow to run
"workflow": equal_lambda("distribute_single_points")
}
#: Input for a Crystal Orbital Overlap Population calculation
dict_coop = {
# List of the two elements to calculate the COOP for
"coop_elements": list}
dict_merged_single_points = merge(dict_general_options, dict_single_points)
#: Schema to validate the input of a single pointe calculation
schema_single_points = Schema(dict_merged_single_points)
#: Schema to validate the input for a Inverse Participation Ratio calculation
schema_ipr = schema_single_points
#: Input for a Crystal Orbital Overlap Population calculation
dict_merged_coop = merge(dict_merged_single_points, dict_coop)
#: Schema to validate the input for a Crystal Orbital Overlap Population calculation
schema_coop = Schema(dict_merged_coop)
#: Schema to validate the input to distribute a single point calculation
schema_distribute_single_points = Schema(
merge(dict_distribute, merge(
dict_merged_single_points, dict_distribute_single_points)))
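def _example_validate_cp2k_general_settings():
    """Illustrative helper, not part of the original module.

    Shows how one of the schemas above is meant to be used; the basis set,
    potential and cell size are placeholder values.
    """
    user_input = {
        "basis": "DZVP-MOLOPT-SR-GTH",
        "potential": "GTH-PBE",
        "cell_parameters": 28.0,
        "periodic": "xyz",
        "cp2k_settings_main": {},
        "cp2k_settings_guess": {},
    }
    # validate() returns the input with every Optional key filled in with its
    # default (charge=0, multiplicity=1, aux_fit="verygood", ...).
    return schema_cp2k_general_settings.validate(user_input)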
| mit | 6,000,909,809,273,182,000 | 29.770898 | 105 | 0.683268 | false |
ray-project/ray | rllib/agents/marwil/marwil.py | 1 | 5753 | from typing import Optional, Type
from ray.rllib.agents.trainer import with_common_config
from ray.rllib.agents.trainer_template import build_trainer
from ray.rllib.agents.marwil.marwil_tf_policy import MARWILTFPolicy
from ray.rllib.execution.replay_ops import Replay, StoreToReplayBuffer
from ray.rllib.execution.replay_buffer import LocalReplayBuffer
from ray.rllib.execution.rollout_ops import ParallelRollouts, ConcatBatches
from ray.rllib.execution.concurrency_ops import Concurrently
from ray.rllib.execution.train_ops import TrainOneStep
from ray.rllib.execution.metric_ops import StandardMetricsReporting
from ray.rllib.utils.typing import TrainerConfigDict
from ray.rllib.evaluation.worker_set import WorkerSet
from ray.util.iter import LocalIterator
from ray.rllib.policy.policy import Policy
# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
# === Input settings ===
# You should override this to point to an offline dataset
# (see trainer.py).
# The dataset may have an arbitrary number of timesteps
# (and even episodes) per line.
# However, each line must only contain consecutive timesteps in
# order for MARWIL to be able to calculate accumulated
# discounted returns. It is ok, though, to have multiple episodes in
# the same line.
"input": "sampler",
# Use importance sampling estimators for reward.
"input_evaluation": ["is", "wis"],
# === Postprocessing/accum., discounted return calculation ===
# If true, use the Generalized Advantage Estimator (GAE)
# with a value function, see https://arxiv.org/pdf/1506.02438.pdf in
# case an input line ends with a non-terminal timestep.
"use_gae": True,
# Whether to calculate cumulative rewards. Must be True.
"postprocess_inputs": True,
# === Training ===
# Scaling of advantages in exponential terms.
# When beta is 0.0, MARWIL is reduced to behavior cloning
# (imitation learning); see bc.py algorithm in this same directory.
"beta": 1.0,
# Balancing value estimation loss and policy optimization loss.
"vf_coeff": 1.0,
# If specified, clip the global norm of gradients by this amount.
"grad_clip": None,
# Learning rate for Adam optimizer.
"lr": 1e-4,
# The squared moving avg. advantage norm (c^2) update rate
# (1e-8 in the paper).
"moving_average_sqd_adv_norm_update_rate": 1e-8,
# Starting value for the squared moving avg. advantage norm (c^2).
"moving_average_sqd_adv_norm_start": 100.0,
# Number of (independent) timesteps pushed through the loss
# each SGD round.
"train_batch_size": 2000,
# Size of the replay buffer in (single and independent) timesteps.
# The buffer gets filled by reading from the input files line-by-line
# and adding all timesteps on one line at once. We then sample
# uniformly from the buffer (`train_batch_size` samples) for
# each training step.
"replay_buffer_size": 10000,
# Number of steps to read before learning starts.
"learning_starts": 0,
# === Parallelism ===
"num_workers": 0,
})
# __sphinx_doc_end__
# yapf: enable
def get_policy_class(config: TrainerConfigDict) -> Optional[Type[Policy]]:
"""Policy class picker function. Class is chosen based on DL-framework.
MARWIL/BC have both TF and Torch policy support.
Args:
config (TrainerConfigDict): The trainer's configuration dict.
Returns:
        Optional[Type[Policy]]: The Policy class to use with MARWILTrainer.
If None, use `default_policy` provided in build_trainer().
"""
if config["framework"] == "torch":
from ray.rllib.agents.marwil.marwil_torch_policy import \
MARWILTorchPolicy
return MARWILTorchPolicy
def execution_plan(workers: WorkerSet,
config: TrainerConfigDict) -> LocalIterator[dict]:
"""Execution plan of the MARWIL/BC algorithm. Defines the distributed
dataflow.
Args:
workers (WorkerSet): The WorkerSet for training the Polic(y/ies)
of the Trainer.
config (TrainerConfigDict): The trainer's configuration dict.
Returns:
LocalIterator[dict]: A local iterator over training metrics.
"""
rollouts = ParallelRollouts(workers, mode="bulk_sync")
replay_buffer = LocalReplayBuffer(
learning_starts=config["learning_starts"],
buffer_size=config["replay_buffer_size"],
replay_batch_size=config["train_batch_size"],
replay_sequence_length=1,
)
store_op = rollouts \
.for_each(StoreToReplayBuffer(local_buffer=replay_buffer))
replay_op = Replay(local_buffer=replay_buffer) \
.combine(
ConcatBatches(
min_batch_size=config["train_batch_size"],
count_steps_by=config["multiagent"]["count_steps_by"],
)) \
.for_each(TrainOneStep(workers))
train_op = Concurrently(
[store_op, replay_op], mode="round_robin", output_indexes=[1])
return StandardMetricsReporting(train_op, workers, config)
def validate_config(config: TrainerConfigDict) -> None:
"""Checks and updates the config based on settings."""
if config["num_gpus"] > 1:
raise ValueError("`num_gpus` > 1 not yet supported for MARWIL!")
if config["postprocess_inputs"] is False and config["beta"] > 0.0:
raise ValueError("`postprocess_inputs` must be True for MARWIL (to "
"calculate accum., discounted returns)!")
MARWILTrainer = build_trainer(
name="MARWIL",
default_config=DEFAULT_CONFIG,
default_policy=MARWILTFPolicy,
get_policy_class=get_policy_class,
validate_config=validate_config,
execution_plan=execution_plan)
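# Illustrative usage sketch, not part of the original module; the path and
# numbers below are placeholders:
#
#   config = DEFAULT_CONFIG.copy()
#   config["input"] = "/tmp/cartpole-out"  # directory with offline JSON batches
#   config["beta"] = 1.0                   # 0.0 would reduce MARWIL to plain BC
#   trainer = MARWILTrainer(config=config, env="CartPole-v0")
#   for _ in range(10):
#       result = trainer.train()
#
# With the default "input": "sampler", MARWIL instead trains on samples
# collected from the environment rather than an offline dataset.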
| apache-2.0 | -1,755,914,939,628,925,200 | 38.136054 | 76 | 0.690075 | false |
dekked/dynamodb-mock | tests/functional/pyramid/test_scan.py | 1 | 4232 | # -*- coding: utf-8 -*-
import unittest, json
TABLE_NAME1 = 'Table-1'
TABLE_RT = 45
TABLE_WT = 123
TABLE_NAME = 'Table-HR'
TABLE_RT = 45
TABLE_WT = 123
TABLE_RT2 = 10
TABLE_WT2 = 10
TABLE_HK_NAME = u'hash_key'
TABLE_HK_TYPE = u'N'
TABLE_RK_NAME = u'range_key'
TABLE_RK_TYPE = u'S'
HK_VALUE1 = u'123'
HK_VALUE2 = u'456'
HK_VALUE3 = u'789'
RK_VALUE1 = u'Waldo-1'
RK_VALUE2 = u'Waldo-2'
RK_VALUE3 = u'Waldo-3'
RK_VALUE4 = u'Waldo-4'
RK_VALUE5 = u'Waldo-5'
ITEM1 = {
TABLE_HK_NAME: {TABLE_HK_TYPE: HK_VALUE1},
TABLE_RK_NAME: {TABLE_RK_TYPE: RK_VALUE1},
u'relevant_data': {u'S': u'tata'},
}
ITEM2 = {
TABLE_HK_NAME: {TABLE_HK_TYPE: HK_VALUE1},
TABLE_RK_NAME: {TABLE_RK_TYPE: RK_VALUE2},
u'relevant_data': {u'S': u'tete'},
}
ITEM3 = {
TABLE_HK_NAME: {TABLE_HK_TYPE: HK_VALUE2},
TABLE_RK_NAME: {TABLE_RK_TYPE: RK_VALUE3},
u'relevant_data': {u'S': u'titi'},
}
ITEM4 = {
TABLE_HK_NAME: {TABLE_HK_TYPE: HK_VALUE3},
TABLE_RK_NAME: {TABLE_RK_TYPE: RK_VALUE4},
u'relevant_data': {u'S': u'toto'},
}
ITEM5 = {
TABLE_HK_NAME: {TABLE_HK_TYPE: HK_VALUE3},
TABLE_RK_NAME: {TABLE_RK_TYPE: RK_VALUE5},
u'relevant_data': {u'S': u'tutu'},
}
HEADERS = {
'x-amz-target': 'dynamodb_20111205.Scan',
'content-type': 'application/x-amz-json-1.0',
}
# Goal here is not to test the full API, this is done by the Boto tests
class TestScan(unittest.TestCase):
def setUp(self):
from ddbmock.database.db import dynamodb
from ddbmock.database.table import Table
        from ddbmock.database.key import PrimaryKey
        from ddbmock import main
app = main({})
from webtest import TestApp
self.app = TestApp(app)
dynamodb.hard_reset()
hash_key = PrimaryKey(TABLE_HK_NAME, TABLE_HK_TYPE)
range_key = PrimaryKey(TABLE_RK_NAME, TABLE_RK_TYPE)
self.t1 = Table(TABLE_NAME, TABLE_RT, TABLE_WT, hash_key, range_key)
dynamodb.data[TABLE_NAME] = self.t1
self.t1.put(ITEM1, {})
self.t1.put(ITEM2, {})
self.t1.put(ITEM3, {})
self.t1.put(ITEM4, {})
self.t1.put(ITEM5, {})
def tearDown(self):
from ddbmock.database.db import dynamodb
dynamodb.hard_reset()
def test_scan_condition_filter_fields(self):
from ddbmock.database.db import dynamodb
request = {
"TableName": TABLE_NAME,
"ScanFilter": {
"relevant_data": {
"AttributeValueList": [{"S":"toto"},{"S":"titi"},{"S":"tata"}],
"ComparisonOperator": "IN",
},
},
"AttributesToGet": [u'relevant_data'],
}
expected = {
u"Count": 3,
u"ScannedCount": 5,
u"Items": [
{u"relevant_data": {u"S": u"tata"}},
{u"relevant_data": {u"S": u"toto"}},
{u"relevant_data": {u"S": u"titi"}},
],
u"ConsumedCapacityUnits": 0.5,
}
# Protocol check
res = self.app.post_json('/', request, HEADERS, status=200)
self.assertEqual(expected, json.loads(res.body))
self.assertEqual('application/x-amz-json-1.0; charset=UTF-8', res.headers['Content-Type'])
def test_scan_count_and_attrs_to_get_fails(self):
from ddbmock.database.db import dynamodb
request = {
"TableName": TABLE_NAME,
"ScanFilter": {
"relevant_data": {
"AttributeValueList": [{"S":"toto"},{"S":"titi"},{"S":"tata"}],
"ComparisonOperator": "IN",
},
},
"AttributesToGet": [u'relevant_data'],
"Count": True,
}
expected = {
u'__type': u'com.amazonaws.dynamodb.v20111205#ValidationException',
u'message': u'Can not filter fields when only count is requested'
}
# Protocol check
res = self.app.post_json('/', request, HEADERS, status=400)
self.assertEqual(expected, json.loads(res.body))
self.assertEqual('application/x-amz-json-1.0; charset=UTF-8', res.headers['Content-Type'])
| lgpl-3.0 | -856,757,890,835,981,000 | 27.986301 | 98 | 0.559074 | false |
yancz1989/cancer | utilities.py | 1 | 4491 | import SimpleITK as sitk
import numpy as np
import csv
import os
import json
from PIL import Image
import matplotlib.pyplot as plt
from cv2 import imread, imwrite
def load_itk_image(filename):
itkimage = sitk.ReadImage(filename)
numpyImage = sitk.GetArrayFromImage(itkimage)
numpyOrigin = np.array(list(reversed(itkimage.GetOrigin())))
numpySpacing = np.array(list(reversed(itkimage.GetSpacing())))
return numpyImage, numpyOrigin, numpySpacing
def readCSV(filename):
lines = []
with open(filename, "rb") as f:
csvreader = csv.reader(f)
for line in csvreader:
lines.append(line)
return lines
def voxel_2_world(voxel_coord, itkimage):
world_coord = list(reversed(
itkimage.TransformContinuousIndexToPhysicalPoint(list(reversed(voxel_coord)))))
return world_coord
def voxelCoordToWorld(voxelCoord, origin, spacing):
stretchedVoxelCoord = voxelCoord * spacing
worldCoord = stretchedVoxelCoord + origin
return worldCoord
def worldToVoxelCoord(worldCoord, origin, spacing):
stretchedVoxelCoord = np.absolute(worldCoord - origin)
voxelCoord = stretchedVoxelCoord / spacing
return voxelCoord
def normalizePlanes(npzarray):
maxHU = 400.
minHU = -1000.
npzarray = (npzarray - minHU) / (maxHU - minHU)
npzarray[npzarray > 1] = 1.
npzarray[npzarray < 0] = 0.
return npzarray
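def example_world_voxel_roundtrip(path='example.mhd'):
  """Illustrative helper, not part of the original module.

  Shows how the coordinate helpers above fit together; the file name and the
  voxel index below are placeholders.
  """
  image, origin, spacing = load_itk_image(path)
  voxel = np.array([60, 256, 256], dtype=float) # (z, y, x) index
  world = voxelCoordToWorld(voxel, origin, spacing)
  # converting back recovers the voxel index (up to floating point error)
  assert np.allclose(worldToVoxelCoord(world, origin, spacing), voxel)
  # normalizePlanes maps HU values into [0, 1] for display or model input
  return normalizePlanes(image[int(voxel[0])])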
def readFileNameMap(map_filename):
file_map = {}
with open(map_filename) as map_file:
file_name_list = json.load(map_file)
for it in file_name_list:
file_map[it['ID_name']] = it['long_name']
return file_map
def parse_image_file(filename):
cols = filename.split("-")
subset = cols[0]
key = cols[1]
z_axis = int(cols[2])
return key, subset[:subset.index('/')], z_axis
def filterBoxes(boxes, threshold):
filtered_boxes = []
for box in boxes:
if box[4] >= threshold:
filtered_boxes.append(box)
return filtered_boxes
def readResultMap(result_filename, file_map, threshold):
result_map = {}
with open(result_filename) as result_file:
result_list = json.load(result_file)
for it in result_list:
filename = it['file']
key = file_map[filename]
key = os.path.splitext(key)[0]
boxes = it['box']
boxes = filterBoxes(boxes, threshold)
if not result_map.get(key):
result_map[key] = []
cols = filename.split('_')
index = int(cols[2])
result_map[key].append((index, boxes))
for key, val in result_map.iteritems():
val.sort()
return result_map
def readImageMap(filename):
lines = readCSV(filename)
result = {}
for line in lines[1:]:
worldCoord = np.asarray(
[float(line[3]), float(line[2]), float(line[1])])
radius = float(line[4]) / 2.0 + 1.0
if not result.get(line[0]):
result[line[0]] = []
result[line[0]].append((worldCoord, radius))
return result
def trans(boxes, H, confs, thr = -1.0):
gw = H['grid_width']
gh = H['grid_height']
cell_pix_size = H['region_size']
rnnl = H['rnn_len']
ncls = H['num_classes']
boxes = np.reshape(boxes, (-1, gh, gw, rnnl, 4))
confs = np.reshape(confs, (-1, gh, gw, rnnl, ncls))
ret = []
for i in range(rnnl):
for y in range(gh):
for x in range(gw):
if np.max(confs[0, y, x, i, 1:]) > thr:
box = boxes[0, y, x, i, :]
abs_cx = int(box[0]) + cell_pix_size/2 + cell_pix_size * x
abs_cy = int(box[1]) + cell_pix_size/2 + cell_pix_size * y
w = box[2]
h = box[3]
ret.append([abs_cx, abs_cy, w, h, np.max(confs[0, y, x, i, 1: ])])
return np.array(ret)
def split(meta_root, samples):
np.random.seed(2012310818)
l = len(samples)
idxes = np.random.permutation(np.arange(l))
  train = [samples[i] for i in idxes[0 : int(l * 0.7)]]
  vals = [samples[i] for i in idxes[int(l * 0.7) : ]]
with open(meta_root + 'train.json', 'w') as g:
json.dump(train, g)
with open(meta_root + 'vals.json', 'w') as g:
json.dump(vals, g)
def writeCSV(filename, lines):
with open(filename, "wb") as f:
csvwriter = csv.writer(f)
csvwriter.writerows(lines)
def tryFloat(value):
try:
value = float(value)
except:
value = value
return value
def getColumn(lines, columnid, elementType=''):
column = []
for line in lines:
try:
value = line[columnid]
except:
continue
if elementType == 'float':
value = tryFloat(value)
column.append(value)
return column
def mkdir(d):
if not os.path.exists(d):
os.mkdir(d)
| mit | 5,996,328,860,083,202,000 | 25.417647 | 83 | 0.643732 | false |
go-bears/Final-Project | weblogo-3.4_rd/corebio/secstruc/__init__.py | 1 | 1920 |
""" Protein secondary structure and associated tools and data.
Constants:
- secstruc_alphabet
-- Secondary structure alphabet: 'HGIEBbTSC _-L?X'
Contains a complete set of secondary structure codes generated by both
STRIDE and DSSP
- secstruc_ehl_alphabet
-- Standard 3 state secondary structure alphabet: EHLX
E : Extended strand
H : Helix
L : Loop
X : Unknown
There are two common ways of reducing the full secondary structure alphabet to
the simpler three letter EHL alphabet. The EHL reduction converts 3/10 and pi
helixes to H (helix) and beta-bridges to strands (E), whereas the FA reduction
converts all non-canonical helixes and strands to L (loop). The FA reduction is
more predictable.
- fa_reduce_secstruc_to_ehl
- reduce_secstruc_to_ehl
Usage :
>>> from corebio.secstruc import *
>>> record = dssp.DsspRecord( open('test_corebio/data/1crn.dssp') )
>>> record.secondary()
' EE SSHHHHHHHHHHHTTT HHHHHHHHS EE SSS GGG '
>>> fa_reduce_secstruc_to_ehl(record.secondary())
'LEELLLHHHHHHHHHHHLLLLLHHHHHHHHLLEELLLLLLLLLLLL'
"""
from __future__ import absolute_import
__all__ = ['dssp', 'stride','secstruc_alphabet','secstruc_ehl_alphabet',
'fa_reduce_secstruc_to_ehl', 'ehl_reduce_secstruc_to_ehl']
from ..seq import Alphabet, Seq
from ..transform import Transform
# ------------------- SECONDARY STRUCTURE ALPHABETS -------------------
secstruc_alphabet = Alphabet("HGIEBbTSC _-L?X")
secstruc_ehl_alphabet = Alphabet("EHLX")
fa_reduce_secstruc_to_ehl = \
Transform( Seq("HGIEBbTSC _-L?X", secstruc_alphabet),
Seq("HLLELLLLLLLLLXX", secstruc_ehl_alphabet) )
ehl_reduce_secstruc_to_ehl = \
Transform( Seq("HGIEBbTSC _-L?X", secstruc_alphabet),
Seq("HHHEEELLLLLLLXX", secstruc_ehl_alphabet) )
| mit | -110,092,327,526,944,400 | 32.684211 | 79 | 0.655729 | false |
hasgeek/funnel | migrations/versions/daeb6753652a_add_profile_protected_and_verified_flags.py | 1 | 1186 | """Add profile protected and verified flags.
Revision ID: daeb6753652a
Revises: 8b46a8a8ca17
Create Date: 2020-11-06 02:57:05.891627
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'daeb6753652a'
down_revision = '8b46a8a8ca17'
branch_labels = None
depends_on = None
def upgrade():
op.add_column(
'profile',
sa.Column(
'is_protected',
sa.Boolean(),
nullable=False,
server_default=sa.sql.expression.false(),
),
)
op.alter_column('profile', 'is_protected', server_default=None)
op.add_column(
'profile',
sa.Column(
'is_verified',
sa.Boolean(),
nullable=False,
server_default=sa.sql.expression.false(),
),
)
op.alter_column('profile', 'is_verified', server_default=None)
op.create_index(
op.f('ix_profile_is_verified'), 'profile', ['is_verified'], unique=False
)
def downgrade():
op.drop_index(op.f('ix_profile_is_verified'), table_name='profile')
op.drop_column('profile', 'is_verified')
op.drop_column('profile', 'is_protected')
| agpl-3.0 | -5,126,532,979,559,593,000 | 23.708333 | 80 | 0.607926 | false |
xen0l/ansible | lib/ansible/cli/__init__.py | 1 | 40009 | # (c) 2012-2014, Michael DeHaan <[email protected]>
# (c) 2016, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import getpass
import operator
import optparse
import os
import subprocess
import re
import sys
import time
import yaml
from abc import ABCMeta, abstractmethod
import ansible
from ansible import constants as C
from ansible.errors import AnsibleOptionsError, AnsibleError
from ansible.inventory.manager import InventoryManager
from ansible.module_utils.six import with_metaclass, string_types
from ansible.module_utils._text import to_bytes, to_text
from ansible.parsing.dataloader import DataLoader
from ansible.release import __version__
from ansible.utils.path import unfrackpath
from ansible.utils.vars import load_extra_vars, load_options_vars
from ansible.vars.manager import VariableManager
from ansible.parsing.vault import PromptVaultSecret, get_file_vault_secret
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class SortedOptParser(optparse.OptionParser):
'''Optparser which sorts the options by opt before outputting --help'''
def format_help(self, formatter=None, epilog=None):
self.option_list.sort(key=operator.methodcaller('get_opt_string'))
return optparse.OptionParser.format_help(self, formatter=None)
# Note: Inherit from SortedOptParser so that we get our format_help method
class InvalidOptsParser(SortedOptParser):
'''Ignore invalid options.
Meant for the special case where we need to take care of help and version
but may not know the full range of options yet. (See it in use in set_action)
'''
def __init__(self, parser):
# Since this is special purposed to just handle help and version, we
# take a pre-existing option parser here and set our options from
# that. This allows us to give accurate help based on the given
# option parser.
SortedOptParser.__init__(self, usage=parser.usage,
option_list=parser.option_list,
option_class=parser.option_class,
conflict_handler=parser.conflict_handler,
description=parser.description,
formatter=parser.formatter,
add_help_option=False,
prog=parser.prog,
epilog=parser.epilog)
self.version = parser.version
def _process_long_opt(self, rargs, values):
try:
optparse.OptionParser._process_long_opt(self, rargs, values)
except optparse.BadOptionError:
pass
def _process_short_opts(self, rargs, values):
try:
optparse.OptionParser._process_short_opts(self, rargs, values)
except optparse.BadOptionError:
pass
class CLI(with_metaclass(ABCMeta, object)):
''' code behind bin/ansible* programs '''
VALID_ACTIONS = []
_ITALIC = re.compile(r"I\(([^)]+)\)")
_BOLD = re.compile(r"B\(([^)]+)\)")
_MODULE = re.compile(r"M\(([^)]+)\)")
_URL = re.compile(r"U\(([^)]+)\)")
_CONST = re.compile(r"C\(([^)]+)\)")
PAGER = 'less'
# -F (quit-if-one-screen) -R (allow raw ansi control chars)
# -S (chop long lines) -X (disable termcap init and de-init)
LESS_OPTS = 'FRSX'
SKIP_INVENTORY_DEFAULTS = False
def __init__(self, args, callback=None):
"""
Base init method for all command line programs
"""
self.args = args
self.options = None
self.parser = None
self.action = None
self.callback = callback
def set_action(self):
"""
Get the action the user wants to execute from the sys argv list.
"""
for i in range(0, len(self.args)):
arg = self.args[i]
if arg in self.VALID_ACTIONS:
self.action = arg
del self.args[i]
break
if not self.action:
# if we're asked for help or version, we don't need an action.
# have to use a special purpose Option Parser to figure that out as
# the standard OptionParser throws an error for unknown options and
# without knowing action, we only know of a subset of the options
# that could be legal for this command
tmp_parser = InvalidOptsParser(self.parser)
tmp_options, tmp_args = tmp_parser.parse_args(self.args)
            if not ((hasattr(tmp_options, 'help') and tmp_options.help) or (hasattr(tmp_options, 'version') and tmp_options.version)):
raise AnsibleOptionsError("Missing required action")
def execute(self):
"""
Actually runs a child defined method using the execute_<action> pattern
"""
fn = getattr(self, "execute_%s" % self.action)
fn()
@abstractmethod
def run(self):
"""Run the ansible command
Subclasses must implement this method. It does the actual work of
running an Ansible command.
"""
display.vv(to_text(self.parser.get_version()))
if C.CONFIG_FILE:
display.v(u"Using %s as config file" % to_text(C.CONFIG_FILE))
else:
display.v(u"No config file found; using defaults")
# warn about deprecated config options
for deprecated in C.config.DEPRECATED:
name = deprecated[0]
why = deprecated[1]['why']
if 'alternatives' in deprecated[1]:
alt = ', use %s instead' % deprecated[1]['alternatives']
else:
alt = ''
ver = deprecated[1]['version']
display.deprecated("%s option, %s %s" % (name, why, alt), version=ver)
@staticmethod
def split_vault_id(vault_id):
# return (before_@, after_@)
# if no @, return whole string as after_
if '@' not in vault_id:
return (None, vault_id)
parts = vault_id.split('@', 1)
ret = tuple(parts)
return ret
@staticmethod
def build_vault_ids(vault_ids, vault_password_files=None,
ask_vault_pass=None, create_new_password=None,
auto_prompt=True):
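        """Normalize all vault password sources into a list of 'label@source' vault-id slugs.

        --vault-password-file values and --ask-vault-pass are folded into vault_ids so that
        later code only needs to handle a single representation.
        """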
vault_password_files = vault_password_files or []
vault_ids = vault_ids or []
# convert vault_password_files into vault_ids slugs
for password_file in vault_password_files:
id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, password_file)
            # note this makes --vault-id higher precedence than --vault-password-file
# if we want to intertwingle them in order probably need a cli callback to populate vault_ids
# used by --vault-id and --vault-password-file
vault_ids.append(id_slug)
        # if an action needs an encrypt password (create_new_password=True) and we don't
        # have other secrets setup, then automatically add a password prompt as well.
        # prompts can't/shouldn't work without a tty, so don't add prompt secrets
if ask_vault_pass or (not vault_ids and auto_prompt):
id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, u'prompt_ask_vault_pass')
vault_ids.append(id_slug)
return vault_ids
# TODO: remove the now unused args
@staticmethod
def setup_vault_secrets(loader, vault_ids, vault_password_files=None,
ask_vault_pass=None, create_new_password=False,
auto_prompt=True):
# list of tuples
vault_secrets = []
# Depending on the vault_id value (including how --ask-vault-pass / --vault-password-file create a vault_id)
# we need to show different prompts. This is for compat with older Towers that expect a
        # certain vault password prompt format, so 'prompt_ask_vault_pass' vault_id gets the old format.
prompt_formats = {}
# If there are configured default vault identities, they are considered 'first'
# so we prepend them to vault_ids (from cli) here
vault_password_files = vault_password_files or []
if C.DEFAULT_VAULT_PASSWORD_FILE:
vault_password_files.append(C.DEFAULT_VAULT_PASSWORD_FILE)
if create_new_password:
            prompt_formats['prompt'] = ['New vault password (%(vault_id)s): ',
                                        'Confirm new vault password (%(vault_id)s): ']
# 2.3 format prompts for --ask-vault-pass
prompt_formats['prompt_ask_vault_pass'] = ['New Vault password: ',
'Confirm New Vault password: ']
else:
prompt_formats['prompt'] = ['Vault password (%(vault_id)s): ']
# The format when we use just --ask-vault-pass needs to match 'Vault password:\s*?$'
prompt_formats['prompt_ask_vault_pass'] = ['Vault password: ']
vault_ids = CLI.build_vault_ids(vault_ids,
vault_password_files,
ask_vault_pass,
create_new_password,
auto_prompt=auto_prompt)
for vault_id_slug in vault_ids:
vault_id_name, vault_id_value = CLI.split_vault_id(vault_id_slug)
if vault_id_value in ['prompt', 'prompt_ask_vault_pass']:
# --vault-id some_name@prompt_ask_vault_pass --vault-id other_name@prompt_ask_vault_pass will be a little
# confusing since it will use the old format without the vault id in the prompt
built_vault_id = vault_id_name or C.DEFAULT_VAULT_IDENTITY
# choose the prompt based on --vault-id=prompt or --ask-vault-pass. --ask-vault-pass
# always gets the old format for Tower compatibility.
# ie, we used --ask-vault-pass, so we need to use the old vault password prompt
# format since Tower needs to match on that format.
prompted_vault_secret = PromptVaultSecret(prompt_formats=prompt_formats[vault_id_value],
vault_id=built_vault_id)
                # an empty or invalid password from the prompt will warn and continue to the next
                # without erroring globally
try:
prompted_vault_secret.load()
except AnsibleError as exc:
display.warning('Error in vault password prompt (%s): %s' % (vault_id_name, exc))
raise
vault_secrets.append((built_vault_id, prompted_vault_secret))
# update loader with new secrets incrementally, so we can load a vault password
# that is encrypted with a vault secret provided earlier
loader.set_vault_secrets(vault_secrets)
continue
# assuming anything else is a password file
display.vvvvv('Reading vault password file: %s' % vault_id_value)
# read vault_pass from a file
file_vault_secret = get_file_vault_secret(filename=vault_id_value,
vault_id=vault_id_name,
loader=loader)
# an invalid password file will error globally
try:
file_vault_secret.load()
except AnsibleError as exc:
display.warning('Error in vault password file loading (%s): %s' % (vault_id_name, exc))
raise
if vault_id_name:
vault_secrets.append((vault_id_name, file_vault_secret))
else:
vault_secrets.append((C.DEFAULT_VAULT_IDENTITY, file_vault_secret))
# update loader with as-yet-known vault secrets
loader.set_vault_secrets(vault_secrets)
return vault_secrets
def ask_passwords(self):
''' prompt for connection and become passwords if needed '''
op = self.options
sshpass = None
becomepass = None
become_prompt = ''
become_prompt_method = "BECOME" if C.AGNOSTIC_BECOME_PROMPT else op.become_method.upper()
try:
if op.ask_pass:
sshpass = getpass.getpass(prompt="SSH password: ")
become_prompt = "%s password[defaults to SSH password]: " % become_prompt_method
if sshpass:
sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr')
else:
become_prompt = "%s password: " % become_prompt_method
if op.become_ask_pass:
becomepass = getpass.getpass(prompt=become_prompt)
if op.ask_pass and becomepass == '':
becomepass = sshpass
if becomepass:
becomepass = to_bytes(becomepass)
except EOFError:
pass
return (sshpass, becomepass)
def normalize_become_options(self):
''' this keeps backwards compatibility with sudo/su self.options '''
self.options.become_ask_pass = self.options.become_ask_pass or self.options.ask_sudo_pass or self.options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS
self.options.become_user = self.options.become_user or self.options.sudo_user or self.options.su_user or C.DEFAULT_BECOME_USER
def _dep(which):
display.deprecated('The %s command line option has been deprecated in favor of the "become" command line arguments' % which, '2.6')
if self.options.become:
pass
elif self.options.sudo:
self.options.become = True
self.options.become_method = 'sudo'
_dep('sudo')
elif self.options.su:
self.options.become = True
self.options.become_method = 'su'
_dep('su')
# other deprecations:
if self.options.ask_sudo_pass or self.options.sudo_user:
_dep('sudo')
if self.options.ask_su_pass or self.options.su_user:
_dep('su')
def validate_conflicts(self, vault_opts=False, runas_opts=False, fork_opts=False, vault_rekey_opts=False):
''' check for conflicting options '''
op = self.options
if vault_opts:
# Check for vault related conflicts
if (op.ask_vault_pass and op.vault_password_files):
self.parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")
if vault_rekey_opts:
if (op.new_vault_id and op.new_vault_password_file):
self.parser.error("--new-vault-password-file and --new-vault-id are mutually exclusive")
if runas_opts:
# Check for privilege escalation conflicts
if ((op.su or op.su_user) and (op.sudo or op.sudo_user) or
(op.su or op.su_user) and (op.become or op.become_user) or
(op.sudo or op.sudo_user) and (op.become or op.become_user)):
self.parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') and su arguments ('--su', '--su-user', and '--ask-su-pass') "
"and become arguments ('--become', '--become-user', and '--ask-become-pass') are exclusive of each other")
if fork_opts:
if op.forks < 1:
self.parser.error("The number of processes (--forks) must be >= 1")
@staticmethod
def unfrack_paths(option, opt, value, parser):
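        '''optparse callback: expand each os.pathsep-separated path and prepend the results to the option's current list'''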
paths = getattr(parser.values, option.dest)
if paths is None:
paths = []
if isinstance(value, string_types):
paths[:0] = [unfrackpath(x) for x in value.split(os.pathsep) if x]
elif isinstance(value, list):
paths[:0] = [unfrackpath(x) for x in value if x]
else:
pass # FIXME: should we raise options error?
setattr(parser.values, option.dest, paths)
@staticmethod
def unfrack_path(option, opt, value, parser):
if value != '-':
setattr(parser.values, option.dest, unfrackpath(value))
else:
setattr(parser.values, option.dest, value)
@staticmethod
def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, runtask_opts=False, vault_opts=False, module_opts=False,
async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, inventory_opts=False, epilog=None, fork_opts=False,
runas_prompt_opts=False, desc=None, basedir_opts=False, vault_rekey_opts=False):
''' create an options parser for most ansible scripts '''
# base opts
parser = SortedOptParser(usage, version=CLI.version("%prog"), description=desc, epilog=epilog)
parser.add_option('-v', '--verbose', dest='verbosity', default=C.DEFAULT_VERBOSITY, action="count",
help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")
if inventory_opts:
parser.add_option('-i', '--inventory', '--inventory-file', dest='inventory', action="append",
help="specify inventory host path or comma separated host list. --inventory-file is deprecated")
parser.add_option('--list-hosts', dest='listhosts', action='store_true',
help='outputs a list of matching hosts; does not execute anything else')
parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset',
help='further limit selected hosts to an additional pattern')
if module_opts:
parser.add_option('-M', '--module-path', dest='module_path', default=None,
help="prepend colon-separated path(s) to module library (default=%s)" % C.DEFAULT_MODULE_PATH,
action="callback", callback=CLI.unfrack_paths, type='str')
if runtask_opts:
parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
help="set additional variables as key=value or YAML/JSON, if filename prepend with @", default=[])
if fork_opts:
parser.add_option('-f', '--forks', dest='forks', default=C.DEFAULT_FORKS, type='int',
help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS)
if vault_opts:
parser.add_option('--ask-vault-pass', default=C.DEFAULT_ASK_VAULT_PASS, dest='ask_vault_pass', action='store_true',
help='ask for vault password')
parser.add_option('--vault-password-file', default=[], dest='vault_password_files',
help="vault password file", action="callback", callback=CLI.unfrack_paths, type='string')
parser.add_option('--vault-id', default=[], dest='vault_ids', action='append', type='string',
help='the vault identity to use')
if vault_rekey_opts:
parser.add_option('--new-vault-password-file', default=None, dest='new_vault_password_file',
help="new vault password file for rekey", action="callback", callback=CLI.unfrack_path, type='string')
parser.add_option('--new-vault-id', default=None, dest='new_vault_id', type='string',
help='the new vault identity to use for rekey')
if subset_opts:
parser.add_option('-t', '--tags', dest='tags', default=C.TAGS_RUN, action='append',
help="only run plays and tasks tagged with these values")
parser.add_option('--skip-tags', dest='skip_tags', default=C.TAGS_SKIP, action='append',
help="only run plays and tasks whose tags do not match these values")
if output_opts:
parser.add_option('-o', '--one-line', dest='one_line', action='store_true',
help='condense output')
parser.add_option('-t', '--tree', dest='tree', default=None,
help='log output to this directory')
if connect_opts:
connect_group = optparse.OptionGroup(parser, "Connection Options", "control as whom and how to connect to hosts")
connect_group.add_option('-k', '--ask-pass', default=C.DEFAULT_ASK_PASS, dest='ask_pass', action='store_true',
help='ask for connection password')
connect_group.add_option('--private-key', '--key-file', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
help='use this file to authenticate the connection', action="callback", callback=CLI.unfrack_path, type='string')
connect_group.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user',
help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER)
connect_group.add_option('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT,
help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT)
connect_group.add_option('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type='int', dest='timeout',
help="override the connection timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT)
connect_group.add_option('--ssh-common-args', default='', dest='ssh_common_args',
help="specify common arguments to pass to sftp/scp/ssh (e.g. ProxyCommand)")
connect_group.add_option('--sftp-extra-args', default='', dest='sftp_extra_args',
help="specify extra arguments to pass to sftp only (e.g. -f, -l)")
connect_group.add_option('--scp-extra-args', default='', dest='scp_extra_args',
help="specify extra arguments to pass to scp only (e.g. -l)")
connect_group.add_option('--ssh-extra-args', default='', dest='ssh_extra_args',
help="specify extra arguments to pass to ssh only (e.g. -R)")
parser.add_option_group(connect_group)
runas_group = None
rg = optparse.OptionGroup(parser, "Privilege Escalation Options", "control how and which user you become as on target hosts")
if runas_opts:
runas_group = rg
# priv user defaults to root later on to enable detecting when this option was given here
runas_group.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", dest='sudo',
help="run operations with sudo (nopasswd) (deprecated, use become)")
runas_group.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
help='desired sudo user (default=root) (deprecated, use become)')
runas_group.add_option('-S', '--su', default=C.DEFAULT_SU, action='store_true',
help='run operations with su (deprecated, use become)')
runas_group.add_option('-R', '--su-user', default=None,
help='run operations with su as this user (default=%s) (deprecated, use become)' % C.DEFAULT_SU_USER)
# consolidated privilege escalation (become)
runas_group.add_option("-b", "--become", default=C.DEFAULT_BECOME, action="store_true", dest='become',
help="run operations with become (does not imply password prompting)")
runas_group.add_option('--become-method', dest='become_method', default=C.DEFAULT_BECOME_METHOD, type='choice', choices=C.BECOME_METHODS,
help="privilege escalation method to use (default=%s), valid choices: [ %s ]" %
(C.DEFAULT_BECOME_METHOD, ' | '.join(C.BECOME_METHODS)))
runas_group.add_option('--become-user', default=None, dest='become_user', type='string',
help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER)
if runas_opts or runas_prompt_opts:
if not runas_group:
runas_group = rg
runas_group.add_option('--ask-sudo-pass', default=C.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true',
help='ask for sudo password (deprecated, use become)')
runas_group.add_option('--ask-su-pass', default=C.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true',
help='ask for su password (deprecated, use become)')
runas_group.add_option('-K', '--ask-become-pass', default=False, dest='become_ask_pass', action='store_true',
help='ask for privilege escalation password')
if runas_group:
parser.add_option_group(runas_group)
if async_opts:
parser.add_option('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type='int', dest='poll_interval',
help="set the poll interval if using -B (default=%s)" % C.DEFAULT_POLL_INTERVAL)
parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
help='run asynchronously, failing after X seconds (default=N/A)')
if check_opts:
parser.add_option("-C", "--check", default=False, dest='check', action='store_true',
help="don't make any changes; instead, try to predict some of the changes that may occur")
parser.add_option('--syntax-check', dest='syntax', action='store_true',
help="perform a syntax check on the playbook, but do not execute it")
parser.add_option("-D", "--diff", default=C.DIFF_ALWAYS, dest='diff', action='store_true',
help="when changing (small) files and templates, show the differences in those files; works great with --check")
if meta_opts:
parser.add_option('--force-handlers', default=C.DEFAULT_FORCE_HANDLERS, dest='force_handlers', action='store_true',
help="run handlers even if a task fails")
parser.add_option('--flush-cache', dest='flush_cache', action='store_true',
help="clear the fact cache for every host in inventory")
if basedir_opts:
parser.add_option('--playbook-dir', default=None, dest='basedir', action='store',
help="Since this tool does not use playbooks, use this as a subsitute playbook directory."
"This sets the relative path for many features including roles/ group_vars/ etc.")
return parser
@abstractmethod
def parse(self):
"""Parse the command line args
This method parses the command line arguments. It uses the parser
stored in the self.parser attribute and saves the args and options in
self.args and self.options respectively.
Subclasses need to implement this method. They will usually create
a base_parser, add their own options to the base_parser, and then call
this method to do the actual parsing. An implementation will look
something like this::
def parse(self):
parser = super(MyCLI, self).base_parser(usage="My Ansible CLI", inventory_opts=True)
parser.add_option('--my-option', dest='my_option', action='store')
self.parser = parser
super(MyCLI, self).parse()
# If some additional transformations are needed for the
# arguments and options, do it here.
"""
self.options, self.args = self.parser.parse_args(self.args[1:])
# process tags
if hasattr(self.options, 'tags') and not self.options.tags:
# optparse defaults does not do what's expected
self.options.tags = ['all']
if hasattr(self.options, 'tags') and self.options.tags:
if not C.MERGE_MULTIPLE_CLI_TAGS:
if len(self.options.tags) > 1:
display.deprecated('Specifying --tags multiple times on the command line currently uses the last specified value. '
'In 2.4, values will be merged instead. Set merge_multiple_cli_tags=True in ansible.cfg to get this behavior now.',
version=2.5, removed=False)
self.options.tags = [self.options.tags[-1]]
tags = set()
for tag_set in self.options.tags:
for tag in tag_set.split(u','):
tags.add(tag.strip())
self.options.tags = list(tags)
# process skip_tags
if hasattr(self.options, 'skip_tags') and self.options.skip_tags:
if not C.MERGE_MULTIPLE_CLI_TAGS:
if len(self.options.skip_tags) > 1:
display.deprecated('Specifying --skip-tags multiple times on the command line currently uses the last specified value. '
'In 2.4, values will be merged instead. Set merge_multiple_cli_tags=True in ansible.cfg to get this behavior now.',
version=2.5, removed=False)
self.options.skip_tags = [self.options.skip_tags[-1]]
skip_tags = set()
for tag_set in self.options.skip_tags:
for tag in tag_set.split(u','):
skip_tags.add(tag.strip())
self.options.skip_tags = list(skip_tags)
# process inventory options except for CLIs that require their own processing
if hasattr(self.options, 'inventory') and not self.SKIP_INVENTORY_DEFAULTS:
if self.options.inventory:
# should always be list
if isinstance(self.options.inventory, string_types):
self.options.inventory = [self.options.inventory]
# Ensure full paths when needed
self.options.inventory = [unfrackpath(opt, follow=False) if ',' not in opt else opt for opt in self.options.inventory]
else:
self.options.inventory = C.DEFAULT_HOST_LIST
@staticmethod
def version(prog):
''' return ansible version '''
result = "{0} {1}".format(prog, __version__)
gitinfo = CLI._gitinfo()
if gitinfo:
result = result + " {0}".format(gitinfo)
result += "\n config file = %s" % C.CONFIG_FILE
if C.DEFAULT_MODULE_PATH is None:
cpath = "Default w/o overrides"
else:
cpath = C.DEFAULT_MODULE_PATH
result = result + "\n configured module search path = %s" % cpath
result = result + "\n ansible python module location = %s" % ':'.join(ansible.__path__)
result = result + "\n executable location = %s" % sys.argv[0]
result = result + "\n python version = %s" % ''.join(sys.version.splitlines())
return result
@staticmethod
def version_info(gitinfo=False):
''' return full ansible version info '''
if gitinfo:
            # expensive call, use with care
ansible_version_string = CLI.version('')
else:
ansible_version_string = __version__
ansible_version = ansible_version_string.split()[0]
ansible_versions = ansible_version.split('.')
for counter in range(len(ansible_versions)):
if ansible_versions[counter] == "":
ansible_versions[counter] = 0
try:
ansible_versions[counter] = int(ansible_versions[counter])
except Exception:
pass
if len(ansible_versions) < 3:
for counter in range(len(ansible_versions), 3):
ansible_versions.append(0)
return {'string': ansible_version_string.strip(),
'full': ansible_version,
'major': ansible_versions[0],
'minor': ansible_versions[1],
'revision': ansible_versions[2]}
@staticmethod
def _git_repo_info(repo_path):
''' returns a string containing git branch, commit id and commit date '''
result = None
if os.path.exists(repo_path):
# Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
if os.path.isfile(repo_path):
try:
gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
                    # It is possible for the .git file to contain an absolute path.
if os.path.isabs(gitdir):
repo_path = gitdir
else:
repo_path = os.path.join(repo_path[:-4], gitdir)
except (IOError, AttributeError):
return ''
f = open(os.path.join(repo_path, "HEAD"))
line = f.readline().rstrip("\n")
if line.startswith("ref:"):
branch_path = os.path.join(repo_path, line[5:])
else:
branch_path = None
f.close()
if branch_path and os.path.exists(branch_path):
branch = '/'.join(line.split('/')[2:])
f = open(branch_path)
commit = f.readline()[:10]
f.close()
else:
# detached HEAD
commit = line[:10]
branch = 'detached HEAD'
branch_path = os.path.join(repo_path, "HEAD")
date = time.localtime(os.stat(branch_path).st_mtime)
if time.daylight == 0:
offset = time.timezone
else:
offset = time.altzone
result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit, time.strftime("%Y/%m/%d %H:%M:%S", date), int(offset / -36))
else:
result = ''
return result
@staticmethod
def _gitinfo():
basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
repo_path = os.path.join(basedir, '.git')
result = CLI._git_repo_info(repo_path)
submodules = os.path.join(basedir, '.gitmodules')
if not os.path.exists(submodules):
return result
f = open(submodules)
for line in f:
tokens = line.strip().split(' ')
if tokens[0] == 'path':
submodule_path = tokens[2]
submodule_info = CLI._git_repo_info(os.path.join(basedir, submodule_path, '.git'))
if not submodule_info:
submodule_info = ' not found - use git submodule update --init ' + submodule_path
result += "\n {0}: {1}".format(submodule_path, submodule_info)
f.close()
return result
def pager(self, text):
''' find reasonable way to display text '''
# this is a much simpler form of what is in pydoc.py
if not sys.stdout.isatty():
display.display(text, screen_only=True)
elif 'PAGER' in os.environ:
if sys.platform == 'win32':
display.display(text, screen_only=True)
else:
self.pager_pipe(text, os.environ['PAGER'])
else:
p = subprocess.Popen('less --version', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
if p.returncode == 0:
self.pager_pipe(text, 'less')
else:
display.display(text, screen_only=True)
@staticmethod
def pager_pipe(text, cmd):
''' pipe text through a pager '''
if 'LESS' not in os.environ:
os.environ['LESS'] = CLI.LESS_OPTS
try:
cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout)
cmd.communicate(input=to_bytes(text))
except IOError:
pass
except KeyboardInterrupt:
pass
@classmethod
def tty_ify(cls, text):
t = cls._ITALIC.sub("`" + r"\1" + "'", text) # I(word) => `word'
t = cls._BOLD.sub("*" + r"\1" + "*", t) # B(word) => *word*
t = cls._MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word]
t = cls._URL.sub(r"\1", t) # U(word) => word
t = cls._CONST.sub("`" + r"\1" + "'", t) # C(word) => `word'
return t
@staticmethod
def _play_prereqs(options):
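        '''create the DataLoader, InventoryManager and VariableManager shared by play-based CLIs'''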
# all needs loader
loader = DataLoader()
basedir = getattr(options, 'basedir', False)
if basedir:
loader.set_basedir(basedir)
vault_ids = options.vault_ids
default_vault_ids = C.DEFAULT_VAULT_IDENTITY_LIST
vault_ids = default_vault_ids + vault_ids
vault_secrets = CLI.setup_vault_secrets(loader,
vault_ids=vault_ids,
vault_password_files=options.vault_password_files,
ask_vault_pass=options.ask_vault_pass,
auto_prompt=False)
loader.set_vault_secrets(vault_secrets)
# create the inventory, and filter it based on the subset specified (if any)
inventory = InventoryManager(loader=loader, sources=options.inventory)
# create the variable manager, which will be shared throughout
# the code, ensuring a consistent view of global variables
variable_manager = VariableManager(loader=loader, inventory=inventory)
if hasattr(options, 'basedir'):
if options.basedir:
variable_manager.safe_basedir = True
else:
variable_manager.safe_basedir = True
# load vars from cli options
variable_manager.extra_vars = load_extra_vars(loader=loader, options=options)
variable_manager.options_vars = load_options_vars(options, CLI.version_info(gitinfo=False))
return loader, inventory, variable_manager
@staticmethod
def get_host_list(inventory, subset, pattern='all'):
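        '''apply the --limit subset and return hosts matching pattern, raising an error if a non-empty inventory yields no matches'''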
no_hosts = False
if len(inventory.list_hosts()) == 0:
# Empty inventory
if C.LOCALHOST_WARNING:
display.warning("provided hosts list is empty, only localhost is available. Note that the implicit localhost does not match 'all'")
no_hosts = True
inventory.subset(subset)
hosts = inventory.list_hosts(pattern)
if len(hosts) == 0 and no_hosts is False:
raise AnsibleError("Specified hosts and/or --limit does not match any hosts")
return hosts
| gpl-3.0 | -3,480,482,311,501,332,000 | 46.972422 | 160 | 0.573721 | false |
3people/dropship_project | code/Dmok.py | 1 | 5356 | import os
class Board:
SIZE = 15
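    # 15 x 15 is the standard omok (five-in-a-row) board size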
def __init__(self):
self.board = [['+' for _ in range(self.SIZE)] for _ in range(self.SIZE)]
        self.player1 = input("Enter player1's name: ")
        self.player2 = input("Enter player2's name: ")
self.startgame()
def printboard(self):
"""Print board
"""
os.system("clear")
print(" "+"".join(str(x+1).center(3) for x in range(self.SIZE)))
for x in range(self.SIZE):
print(str(x+1).rjust(2), end = " ")
for y in range(self.SIZE):
print(str(self.board[x][y]).center(3), end="" if y < self.SIZE-1 else "\n")
def startgame(self):
self.board[self.SIZE//2][self.SIZE//2] = "●"
self.printboard()
while True:
while True:
                y = int(input(self.player2 + ", enter the x coordinate: "))
                x = int(input(self.player2 + ", enter the y coordinate: "))
if x in range(1,self.SIZE+1) and y in range(1,self.SIZE+1) and self.board[x-1][y-1] == "+":
break
else :
print("다시입력하세요")
self.board[x-1][y-1] = "○"
self.printboard()
if self.check(x-1,y-1,"○"):
                print(self.player2+" wins")
break;
while True:
                y = int(input(self.player1 + ", enter the x coordinate: "))
                x = int(input(self.player1 + ", enter the y coordinate: "))
if self.check33(x-1, y-1) :
print("쌍삼입니다\n다시입력하세요")
elif x in range(1,self.SIZE+1) and y in range(1,self.SIZE+1) and self.board[x-1][y-1] == "+":
break
else :
print("다시입력하세요")
self.board[x-1][y-1] = "●"
self.printboard()
if self.check(x-1,y-1,"●"):
                print(self.player1+" wins")
break;
def check33(self, x, y):
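        """Return True if placing black at (x, y) would create a double three or
        double four, which is forbidden for player1 under renju-style rules.
        """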
a = []
for n in range(1,5):
a.append(eval("self.check"+str(n)+"("+str(x)+","+str(y)+",\"●\")"))
if a.count(3) >= 2 or a.count(4) >= 2:
return True
else :
return False
def check(self, x, y, mark):
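        """Return True if the stone at (x, y) completes a winning line for mark:
        exactly five in a row, or six or more when mark is "○".
        """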
a = []
for n in range(1,5):
a.append(eval("self.check"+str(n)+"("+str(x)+","+str(y)+",\""+mark+"\")"))
if 5 in a or (mark == "○" and True in [x >= 6 for x in a]):
return True
else :
return False
def check1(self, x, y, mark, d = 0):
"""Check row direction.
"""
if x in range(self.SIZE) and y in range(self.SIZE):
if d == 0:
return 1 + self.check1(x-1, y, mark, 1) + self.check1(x+1, y, mark, -1)
elif d == 1:
if self.board[x][y] == mark:
return 1 + self.check1(x-1, y, mark, 1)
else :
return 0
elif d == -1:
if self.board[x][y] == mark:
return 1 + self.check1(x+1, y, mark, -1)
else :
return 0
else :
return 0
def check2(self, x, y, mark, d = 0):
"""Check column diretion.
"""
if x in range(self.SIZE) and y in range(self.SIZE):
if d == 0:
return 1 + self.check2(x, y+1, mark, 1) + self.check2(x, y-1, mark, -1)
elif d == 1:
if self.board[x][y] == mark:
return 1 + self.check2(x, y+1, mark, 1)
else :
return 0
elif d == -1:
if self.board[x][y] == mark:
return 1 + self.check2(x, y-1, mark, -1)
else :
return 0
else :
return 0
def check3(self, x, y, mark, d = 0):
"""Check left diagonal direction.
"""
if x in range(self.SIZE) and y in range(self.SIZE):
if d == 0:
return 1 + self.check3(x-1, y-1, mark, 1) + self.check3(x+1, y+1, mark, -1)
elif d == 1:
if self.board[x][y] == mark:
return 1 + self.check3(x-1, y-1, mark, 1)
else :
return 0
elif d == -1:
if self.board[x][y] == mark:
return 1 + self.check3(x+1, y+1, mark, -1)
else :
return 0
else :
return 0
def check4(self, x, y, mark, d = 0):
"""Check right diagonal direction.
"""
if x in range(self.SIZE) and y in range(self.SIZE):
if d == 0:
return 1 + self.check4(x-1, y+1, mark, 1) + self.check4(x+1, y-1, mark, -1)
elif d == 1:
if self.board[x][y] == mark:
return 1 + self.check4(x-1, y+1, mark, 1)
else :
return 0
elif d == -1:
if self.board[x][y] == mark:
return 1 + self.check4(x+1, y-1, mark, -1)
else :
return 0
else :
return 0
b = Board()
| mit | -7,690,093,707,582,379,000 | 34.013514 | 109 | 0.412582 | false |
eirmag/weboob | modules/ebonics/backend.py | 1 | 2047 | # -*- coding: utf-8 -*-
# Copyright(C) 2012 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import urllib
from weboob.capabilities.translate import ICapTranslate, Translation, TranslationFail, LanguageNotSupported
from weboob.tools.backend import BaseBackend
from weboob.tools.browser import StandardBrowser
__all__ = ['EbonicsBackend']
class EbonicsBackend(BaseBackend, ICapTranslate):
NAME = 'ebonics'
MAINTAINER = u'Romain Bignon'
EMAIL = '[email protected]'
VERSION = '0.e'
LICENSE = 'AGPLv3+'
DESCRIPTION = u'English to Ebonics translation service'
BROWSER = StandardBrowser
def translate(self, lan_from, lan_to, text):
if lan_from != 'English' or lan_to != 'Nigger!':
raise LanguageNotSupported()
with self.browser:
data = {'English': text.encode('utf-8')}
doc = self.browser.location('http://joel.net/EBONICS/Translator', urllib.urlencode(data))
try:
text = doc.getroot().cssselect('div.translateform div.bubble1 div.bubblemid')[0].text
except IndexError:
raise TranslationFail()
if text is None:
raise TranslationFail()
translation = Translation(0)
translation.lang_src = unicode(lan_from)
translation.lang_dst = unicode(lan_to)
translation.text = unicode(text).strip()
return translation
| agpl-3.0 | 4,084,087,417,472,269,000 | 33.116667 | 107 | 0.686859 | false |
sserrot/champion_relationships | venv/Lib/site-packages/networkx/algorithms/shortest_paths/tests/test_unweighted.py | 1 | 4670 | #!/usr/bin/env python
import networkx as nx
def validate_grid_path(r, c, s, t, p):
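    """Assert that p is a shortest path from s to t in an r-by-c grid graph whose
    nodes are labelled 1..r*c in row-major order.
    """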
assert isinstance(p, list)
assert p[0] == s
assert p[-1] == t
s = ((s - 1) // c, (s - 1) % c)
t = ((t - 1) // c, (t - 1) % c)
assert len(p) == abs(t[0] - s[0]) + abs(t[1] - s[1]) + 1
p = [((u - 1) // c, (u - 1) % c) for u in p]
for u in p:
assert 0 <= u[0] < r
assert 0 <= u[1] < c
for u, v in zip(p[:-1], p[1:]):
assert (abs(v[0] - u[0]), abs(v[1] - u[1])) in [(0, 1), (1, 0)]
class TestUnweightedPath:
@classmethod
def setup_class(cls):
from networkx import convert_node_labels_to_integers as cnlti
cls.grid = cnlti(nx.grid_2d_graph(4, 4), first_label=1, ordering="sorted")
cls.cycle = nx.cycle_graph(7)
cls.directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph())
def test_bidirectional_shortest_path(self):
assert (nx.bidirectional_shortest_path(self.cycle, 0, 3) ==
[0, 1, 2, 3])
assert (nx.bidirectional_shortest_path(self.cycle, 0, 4) ==
[0, 6, 5, 4])
validate_grid_path(4, 4, 1, 12, nx.bidirectional_shortest_path(self.grid, 1, 12))
assert (nx.bidirectional_shortest_path(self.directed_cycle, 0, 3) ==
[0, 1, 2, 3])
def test_shortest_path_length(self):
assert nx.shortest_path_length(self.cycle, 0, 3) == 3
assert nx.shortest_path_length(self.grid, 1, 12) == 5
assert nx.shortest_path_length(self.directed_cycle, 0, 4) == 4
# now with weights
assert nx.shortest_path_length(self.cycle, 0, 3, weight=True) == 3
assert nx.shortest_path_length(self.grid, 1, 12, weight=True) == 5
assert nx.shortest_path_length(self.directed_cycle, 0, 4, weight=True) == 4
def test_single_source_shortest_path(self):
p = nx.single_source_shortest_path(self.directed_cycle, 3)
assert p[0] == [3, 4, 5, 6, 0]
p = nx.single_source_shortest_path(self.cycle, 0)
assert p[3] == [0, 1, 2, 3]
p = nx.single_source_shortest_path(self.cycle, 0, cutoff=0)
assert p == {0: [0]}
def test_single_source_shortest_path_length(self):
pl = nx.single_source_shortest_path_length
lengths = {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
assert dict(pl(self.cycle, 0)) == lengths
lengths = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6}
assert dict(pl(self.directed_cycle, 0)) == lengths
def test_single_target_shortest_path(self):
p = nx.single_target_shortest_path(self.directed_cycle, 0)
assert p[3] == [3, 4, 5, 6, 0]
p = nx.single_target_shortest_path(self.cycle, 0)
assert p[3] == [3, 2, 1, 0]
p = nx.single_target_shortest_path(self.cycle, 0, cutoff=0)
assert p == {0: [0]}
def test_single_target_shortest_path_length(self):
pl = nx.single_target_shortest_path_length
lengths = {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
assert dict(pl(self.cycle, 0)) == lengths
lengths = {0: 0, 1: 6, 2: 5, 3: 4, 4: 3, 5: 2, 6: 1}
assert dict(pl(self.directed_cycle, 0)) == lengths
def test_all_pairs_shortest_path(self):
p = dict(nx.all_pairs_shortest_path(self.cycle))
assert p[0][3] == [0, 1, 2, 3]
p = dict(nx.all_pairs_shortest_path(self.grid))
validate_grid_path(4, 4, 1, 12, p[1][12])
def test_all_pairs_shortest_path_length(self):
l = dict(nx.all_pairs_shortest_path_length(self.cycle))
assert l[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
l = dict(nx.all_pairs_shortest_path_length(self.grid))
assert l[1][16] == 6
def test_predecessor_path(self):
G = nx.path_graph(4)
assert nx.predecessor(G, 0) == {0: [], 1: [0], 2: [1], 3: [2]}
assert nx.predecessor(G, 0, 3) == [2]
def test_predecessor_cycle(self):
G = nx.cycle_graph(4)
pred = nx.predecessor(G, 0)
assert pred[0] == []
assert pred[1] == [0]
assert pred[2] in [[1, 3], [3, 1]]
assert pred[3] == [0]
def test_predecessor_cutoff(self):
G = nx.path_graph(4)
p = nx.predecessor(G, 0, 3)
        assert 4 not in p
def test_predecessor_target(self):
G = nx.path_graph(4)
p = nx.predecessor(G, 0, 3)
assert p == [2]
p = nx.predecessor(G, 0, 3, cutoff=2)
assert p == []
p, s = nx.predecessor(G, 0, 3, return_seen=True)
assert p == [2]
assert s == 3
p, s = nx.predecessor(G, 0, 3, cutoff=2, return_seen=True)
assert p == []
assert s == -1
| mit | 7,503,324,317,036,150,000 | 38.576271 | 89 | 0.540685 | false |
jdereus/labman | labcontrol/gui/handlers/study.py | 1 | 2270 | # ----------------------------------------------------------------------------
# Copyright (c) 2017-, LabControl development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from tornado.web import authenticated, HTTPError
from tornado.escape import json_encode
from labcontrol.gui.handlers.base import BaseHandler
from labcontrol.db.study import Study
from labcontrol.db.exceptions import LabControlUnknownIdError
class StudyListingHandler(BaseHandler):
@authenticated
def get(self):
self.render('study_list.html')
class StudyListHandler(BaseHandler):
@authenticated
def get(self):
        # Build the response in the JSON structure that DataTables expects
res = {"data": [
[s['study_id'], s['study_title'], s['study_alias'], s['owner'],
s['num_samples']] for s in Study.list_studies()]}
self.write(res)
self.finish()
class StudyHandler(BaseHandler):
@authenticated
def get(self, study_id):
try:
study = Study(int(study_id))
self.write({'study_id': study.id,
'study_title': study.title,
'total_samples': study.num_samples})
except LabControlUnknownIdError:
self.set_status(404)
self.finish()
class StudySamplesHandler(BaseHandler):
@authenticated
def get(self, study_id):
try:
study = Study(int(study_id))
term = self.get_argument('term', None)
res = list(study.samples(term, limit=20))
self.write(json_encode(res))
except LabControlUnknownIdError:
self.set_status(404)
self.finish()
class StudySummaryHandler(BaseHandler):
@authenticated
def get(self, study_id):
try:
study = Study(int(study_id))
except LabControlUnknownIdError:
raise HTTPError(404, reason="Study %s doesn't exist" % study_id)
study_numbers = study.sample_numbers_summary
self.render('study.html', study_id=study.id,
study_title=study.title, study_numbers=study_numbers)
| bsd-3-clause | -1,625,448,089,754,203,400 | 31.428571 | 78 | 0.586784 | false |
eBay/cronus-agent | agent/agent/lib/security/agentauth.py | 1 | 5600 | #pylint: disable=E1121,W0105
'''
Copyright 2014 eBay Software Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
Created on Feb 21, 2014
@author: biyu
'''
from agent.lib.security import UnauthorizedException, invalidAuthHandler
from pylons import request, config
import logging
import re
import base64
from decorator import decorator
from agent.lib import configutil, manifestutil
import os
import traceback
LOG = logging.getLogger(__name__)
def authorize():
'''
    decorator for authorize
    @parameter inSecurity: bool indicating whether incoming security needs to be checked
'''
def validate(func, self, *args, **kwargs):
        ''' function that calls authorizing function'''
isAuthEnabled = True
isPkiEnabled = False
authPassed = False
try:
appGlobal = config['pylons.app_globals']
isAuthEnabled = configutil.getConfigAsBool('basicauth.local')
isPkiEnabled = (appGlobal.encryptedtokens and configutil.getConfigAsBool('pkiauth_enabled'))
except BaseException as excep:
LOG.error('Error loading auth config %s - %s' % (str(excep), traceback.format_exc(2)))
if isAuthEnabled:
if 'Authorization' not in request.headers and 'authorization' not in request.headers:
return invalidAuthHandler('Authorization header missing', {})
message = None
result = {}
# base authentication
if not isPkiEnabled:
token = ('%s:%s' % (configutil.getConfig('username.local'), configutil.getConfig('password.local')))
try:
isAuthenticated(token)
authPassed = True
except UnauthorizedException:
message = 'Please provide valid username and password'
result['scheme'] = 'base'
if not authPassed:
# pki authentication
token = appGlobal.authztoken
try:
isAuthenticated(token)
authPassed = True
except UnauthorizedException:
if isPkiEnabled:
result['scheme'] = 'pki'
user = request.headers['AuthorizationUser'] if 'AuthorizationUser' in request.headers else 'agent'
pubKey = '%s.cert' % user
if pubKey in appGlobal.encryptedtokens:
message = appGlobal.encryptedtokens[pubKey]
result['key'] = appGlobal.encryptedtokens[pubKey]
else:
                            message = 'Unknown AuthorizationUser %s' % user
return invalidAuthHandler(message, result)
return func(self, *args, **kwargs)
return decorator(validate)
def isAuthenticated(token):
''' check whether user name and password are right '''
message = 'Please provide valid username and password'
inHeader = None
try:
if 'authorization' in request.headers:
inHeader = request.headers['authorization']
elif 'Authorization' in request.headers:
inHeader = request.headers['Authorization']
if inHeader is not None:
base64string = base64.encodestring(token)[:-1]
match = re.match(r'\s*Basic\s*(?P<auth>\S*)$', inHeader)
if match is not None and match.group('auth') == base64string:
return True
raise UnauthorizedException(message + " Header:" + str(request.headers))
except:
raise UnauthorizedException(message + " Header:" + str(request.headers))
def buildTokenCache(authztoken):
""" build in memory cache for security tokens """
# find all pub keys in agent and encrypt the security token with them
appGlobal = config['pylons.app_globals']
pubKeyDir = os.path.join(manifestutil.manifestPath('agent'), 'agent', 'cronus', 'keys')
LOG.info('key directory %s' % pubKeyDir)
if os.path.exists(pubKeyDir):
try:
import pki
from M2Crypto import X509
pubKeyFiles = [f for f in os.listdir(pubKeyDir) if re.match(r'.*\.cert', f)]
LOG.info('key files %s' % pubKeyFiles)
for pubKeyFile in pubKeyFiles:
# reload the certs from disk
certf = open(os.path.join(pubKeyDir, pubKeyFile), 'r')
ca_cert_content = certf.read()
certf.close()
cert = X509.load_cert_string(ca_cert_content)
# pub = RSA.load_pub_key(os.path.join(pubKeyDir, pubKeyFile))
encryptedToken = pki.encrypt(cert.get_pubkey(), authztoken)
appGlobal.encryptedtokens[pubKeyFile] = encryptedToken
LOG.info('token %s=%s' % (pubKeyFile, encryptedToken))
except BaseException as excep:
LOG.error('Error loading pki keys %s - %s' % (str(excep), traceback.format_exc(2)))
| apache-2.0 | -3,431,015,109,863,174,000 | 37.895833 | 124 | 0.601964 | false |
Andproject/tools_protobuf | python/google/protobuf/internal/reflection_test.py | 1 | 69911 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -*- coding: utf-8 -*-
#
# Copyright 2007 Google Inc. All Rights Reserved.
"""Unittest for reflection.py, which also indirectly tests the output of the
pure-Python protocol compiler.
"""
__author__ = '[email protected] (Will Robinson)'
import operator
import unittest
# TODO(robinson): When we split this test in two, only some of these imports
# will be necessary in each test.
from google.protobuf import unittest_import_pb2
from google.protobuf import unittest_mset_pb2
from google.protobuf import unittest_pb2
from google.protobuf import descriptor_pb2
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf.internal import more_extensions_pb2
from google.protobuf.internal import more_messages_pb2
from google.protobuf.internal import wire_format
from google.protobuf.internal import test_util
from google.protobuf.internal import decoder
class ReflectionTest(unittest.TestCase):
def testSimpleHasBits(self):
# Test a scalar.
proto = unittest_pb2.TestAllTypes()
self.assertTrue(not proto.HasField('optional_int32'))
self.assertEqual(0, proto.optional_int32)
# HasField() shouldn't be true if all we've done is
# read the default value.
self.assertTrue(not proto.HasField('optional_int32'))
proto.optional_int32 = 1
# Setting a value however *should* set the "has" bit.
self.assertTrue(proto.HasField('optional_int32'))
proto.ClearField('optional_int32')
# And clearing that value should unset the "has" bit.
self.assertTrue(not proto.HasField('optional_int32'))
def testHasBitsWithSinglyNestedScalar(self):
# Helper used to test foreign messages and groups.
#
# composite_field_name should be the name of a non-repeated
# composite (i.e., foreign or group) field in TestAllTypes,
# and scalar_field_name should be the name of an integer-valued
# scalar field within that composite.
#
# I never thought I'd miss C++ macros and templates so much. :(
# This helper is semantically just:
#
# assert proto.composite_field.scalar_field == 0
# assert not proto.composite_field.HasField('scalar_field')
# assert not proto.HasField('composite_field')
#
# proto.composite_field.scalar_field = 10
# old_composite_field = proto.composite_field
#
# assert proto.composite_field.scalar_field == 10
# assert proto.composite_field.HasField('scalar_field')
# assert proto.HasField('composite_field')
#
# proto.ClearField('composite_field')
#
# assert not proto.composite_field.HasField('scalar_field')
# assert not proto.HasField('composite_field')
# assert proto.composite_field.scalar_field == 0
#
# # Now ensure that ClearField('composite_field') disconnected
# # the old field object from the object tree...
# assert old_composite_field is not proto.composite_field
# old_composite_field.scalar_field = 20
# assert not proto.composite_field.HasField('scalar_field')
# assert not proto.HasField('composite_field')
def TestCompositeHasBits(composite_field_name, scalar_field_name):
proto = unittest_pb2.TestAllTypes()
# First, check that we can get the scalar value, and see that it's the
      # default (0), but that proto.HasField('composite') and
# proto.composite.HasField('scalar') will still return False.
composite_field = getattr(proto, composite_field_name)
original_scalar_value = getattr(composite_field, scalar_field_name)
self.assertEqual(0, original_scalar_value)
# Assert that the composite object does not "have" the scalar.
self.assertTrue(not composite_field.HasField(scalar_field_name))
# Assert that proto does not "have" the composite field.
self.assertTrue(not proto.HasField(composite_field_name))
# Now set the scalar within the composite field. Ensure that the setting
# is reflected, and that proto.HasField('composite') and
# proto.composite.HasField('scalar') now both return True.
new_val = 20
setattr(composite_field, scalar_field_name, new_val)
self.assertEqual(new_val, getattr(composite_field, scalar_field_name))
# Hold on to a reference to the current composite_field object.
old_composite_field = composite_field
# Assert that the has methods now return true.
self.assertTrue(composite_field.HasField(scalar_field_name))
self.assertTrue(proto.HasField(composite_field_name))
# Now call the clear method...
proto.ClearField(composite_field_name)
# ...and ensure that the "has" bits are all back to False...
composite_field = getattr(proto, composite_field_name)
self.assertTrue(not composite_field.HasField(scalar_field_name))
self.assertTrue(not proto.HasField(composite_field_name))
# ...and ensure that the scalar field has returned to its default.
self.assertEqual(0, getattr(composite_field, scalar_field_name))
# Finally, ensure that modifications to the old composite field object
# don't have any effect on the parent.
#
# (NOTE that when we clear the composite field in the parent, we actually
# don't recursively clear down the tree. Instead, we just disconnect the
# cleared composite from the tree.)
self.assertTrue(old_composite_field is not composite_field)
setattr(old_composite_field, scalar_field_name, new_val)
self.assertTrue(not composite_field.HasField(scalar_field_name))
self.assertTrue(not proto.HasField(composite_field_name))
self.assertEqual(0, getattr(composite_field, scalar_field_name))
# Test simple, single-level nesting when we set a scalar.
TestCompositeHasBits('optionalgroup', 'a')
TestCompositeHasBits('optional_nested_message', 'bb')
TestCompositeHasBits('optional_foreign_message', 'c')
TestCompositeHasBits('optional_import_message', 'd')
def testReferencesToNestedMessage(self):
proto = unittest_pb2.TestAllTypes()
nested = proto.optional_nested_message
del proto
# A previous version had a bug where this would raise an exception when
# hitting a now-dead weak reference.
nested.bb = 23
def testDisconnectingNestedMessageBeforeSettingField(self):
proto = unittest_pb2.TestAllTypes()
nested = proto.optional_nested_message
proto.ClearField('optional_nested_message') # Should disconnect from parent
self.assertTrue(nested is not proto.optional_nested_message)
nested.bb = 23
self.assertTrue(not proto.HasField('optional_nested_message'))
self.assertEqual(0, proto.optional_nested_message.bb)
def testHasBitsWhenModifyingRepeatedFields(self):
# Test nesting when we add an element to a repeated field in a submessage.
proto = unittest_pb2.TestNestedMessageHasBits()
proto.optional_nested_message.nestedmessage_repeated_int32.append(5)
self.assertEqual(
[5], proto.optional_nested_message.nestedmessage_repeated_int32)
self.assertTrue(proto.HasField('optional_nested_message'))
# Do the same test, but with a repeated composite field within the
# submessage.
proto.ClearField('optional_nested_message')
self.assertTrue(not proto.HasField('optional_nested_message'))
proto.optional_nested_message.nestedmessage_repeated_foreignmessage.add()
self.assertTrue(proto.HasField('optional_nested_message'))
def testHasBitsForManyLevelsOfNesting(self):
# Test nesting many levels deep.
recursive_proto = unittest_pb2.TestMutualRecursionA()
self.assertTrue(not recursive_proto.HasField('bb'))
self.assertEqual(0, recursive_proto.bb.a.bb.a.bb.optional_int32)
self.assertTrue(not recursive_proto.HasField('bb'))
recursive_proto.bb.a.bb.a.bb.optional_int32 = 5
self.assertEqual(5, recursive_proto.bb.a.bb.a.bb.optional_int32)
self.assertTrue(recursive_proto.HasField('bb'))
self.assertTrue(recursive_proto.bb.HasField('a'))
self.assertTrue(recursive_proto.bb.a.HasField('bb'))
self.assertTrue(recursive_proto.bb.a.bb.HasField('a'))
self.assertTrue(recursive_proto.bb.a.bb.a.HasField('bb'))
self.assertTrue(not recursive_proto.bb.a.bb.a.bb.HasField('a'))
self.assertTrue(recursive_proto.bb.a.bb.a.bb.HasField('optional_int32'))
def testSingularListFields(self):
proto = unittest_pb2.TestAllTypes()
proto.optional_fixed32 = 1
proto.optional_int32 = 5
proto.optional_string = 'foo'
self.assertEqual(
[ (proto.DESCRIPTOR.fields_by_name['optional_int32' ], 5),
(proto.DESCRIPTOR.fields_by_name['optional_fixed32'], 1),
(proto.DESCRIPTOR.fields_by_name['optional_string' ], 'foo') ],
proto.ListFields())
def testRepeatedListFields(self):
proto = unittest_pb2.TestAllTypes()
proto.repeated_fixed32.append(1)
proto.repeated_int32.append(5)
proto.repeated_int32.append(11)
proto.repeated_string.append('foo')
proto.repeated_string.append('bar')
proto.repeated_string.append('baz')
proto.optional_int32 = 21
self.assertEqual(
[ (proto.DESCRIPTOR.fields_by_name['optional_int32' ], 21),
(proto.DESCRIPTOR.fields_by_name['repeated_int32' ], [5, 11]),
(proto.DESCRIPTOR.fields_by_name['repeated_fixed32'], [1]),
(proto.DESCRIPTOR.fields_by_name['repeated_string' ],
['foo', 'bar', 'baz']) ],
proto.ListFields())
def testSingularListExtensions(self):
proto = unittest_pb2.TestAllExtensions()
proto.Extensions[unittest_pb2.optional_fixed32_extension] = 1
proto.Extensions[unittest_pb2.optional_int32_extension ] = 5
proto.Extensions[unittest_pb2.optional_string_extension ] = 'foo'
self.assertEqual(
[ (unittest_pb2.optional_int32_extension , 5),
(unittest_pb2.optional_fixed32_extension, 1),
(unittest_pb2.optional_string_extension , 'foo') ],
proto.ListFields())
def testRepeatedListExtensions(self):
proto = unittest_pb2.TestAllExtensions()
proto.Extensions[unittest_pb2.repeated_fixed32_extension].append(1)
proto.Extensions[unittest_pb2.repeated_int32_extension ].append(5)
proto.Extensions[unittest_pb2.repeated_int32_extension ].append(11)
proto.Extensions[unittest_pb2.repeated_string_extension ].append('foo')
proto.Extensions[unittest_pb2.repeated_string_extension ].append('bar')
proto.Extensions[unittest_pb2.repeated_string_extension ].append('baz')
proto.Extensions[unittest_pb2.optional_int32_extension ] = 21
self.assertEqual(
[ (unittest_pb2.optional_int32_extension , 21),
(unittest_pb2.repeated_int32_extension , [5, 11]),
(unittest_pb2.repeated_fixed32_extension, [1]),
(unittest_pb2.repeated_string_extension , ['foo', 'bar', 'baz']) ],
proto.ListFields())
def testListFieldsAndExtensions(self):
proto = unittest_pb2.TestFieldOrderings()
test_util.SetAllFieldsAndExtensions(proto)
unittest_pb2.my_extension_int
self.assertEqual(
[ (proto.DESCRIPTOR.fields_by_name['my_int' ], 1),
(unittest_pb2.my_extension_int , 23),
(proto.DESCRIPTOR.fields_by_name['my_string'], 'foo'),
(unittest_pb2.my_extension_string , 'bar'),
(proto.DESCRIPTOR.fields_by_name['my_float' ], 1.0) ],
proto.ListFields())
def testDefaultValues(self):
proto = unittest_pb2.TestAllTypes()
self.assertEqual(0, proto.optional_int32)
self.assertEqual(0, proto.optional_int64)
self.assertEqual(0, proto.optional_uint32)
self.assertEqual(0, proto.optional_uint64)
self.assertEqual(0, proto.optional_sint32)
self.assertEqual(0, proto.optional_sint64)
self.assertEqual(0, proto.optional_fixed32)
self.assertEqual(0, proto.optional_fixed64)
self.assertEqual(0, proto.optional_sfixed32)
self.assertEqual(0, proto.optional_sfixed64)
self.assertEqual(0.0, proto.optional_float)
self.assertEqual(0.0, proto.optional_double)
self.assertEqual(False, proto.optional_bool)
self.assertEqual('', proto.optional_string)
self.assertEqual('', proto.optional_bytes)
self.assertEqual(41, proto.default_int32)
self.assertEqual(42, proto.default_int64)
self.assertEqual(43, proto.default_uint32)
self.assertEqual(44, proto.default_uint64)
self.assertEqual(-45, proto.default_sint32)
self.assertEqual(46, proto.default_sint64)
self.assertEqual(47, proto.default_fixed32)
self.assertEqual(48, proto.default_fixed64)
self.assertEqual(49, proto.default_sfixed32)
self.assertEqual(-50, proto.default_sfixed64)
self.assertEqual(51.5, proto.default_float)
self.assertEqual(52e3, proto.default_double)
self.assertEqual(True, proto.default_bool)
self.assertEqual('hello', proto.default_string)
self.assertEqual('world', proto.default_bytes)
self.assertEqual(unittest_pb2.TestAllTypes.BAR, proto.default_nested_enum)
self.assertEqual(unittest_pb2.FOREIGN_BAR, proto.default_foreign_enum)
self.assertEqual(unittest_import_pb2.IMPORT_BAR,
proto.default_import_enum)
proto = unittest_pb2.TestExtremeDefaultValues()
self.assertEqual(u'\u1234', proto.utf8_string)
def testHasFieldWithUnknownFieldName(self):
proto = unittest_pb2.TestAllTypes()
self.assertRaises(ValueError, proto.HasField, 'nonexistent_field')
def testClearFieldWithUnknownFieldName(self):
proto = unittest_pb2.TestAllTypes()
self.assertRaises(ValueError, proto.ClearField, 'nonexistent_field')
def testDisallowedAssignments(self):
# It's illegal to assign values directly to repeated fields
# or to nonrepeated composite fields. Ensure that this fails.
proto = unittest_pb2.TestAllTypes()
# Repeated fields.
self.assertRaises(AttributeError, setattr, proto, 'repeated_int32', 10)
# Lists shouldn't work, either.
self.assertRaises(AttributeError, setattr, proto, 'repeated_int32', [10])
# Composite fields.
self.assertRaises(AttributeError, setattr, proto,
'optional_nested_message', 23)
# Assignment to a repeated nested message field without specifying
# the index in the array of nested messages.
self.assertRaises(AttributeError, setattr, proto.repeated_nested_message,
'bb', 34)
# Assignment to an attribute of a repeated field.
self.assertRaises(AttributeError, setattr, proto.repeated_float,
'some_attribute', 34)
# proto.nonexistent_field = 23 should fail as well.
self.assertRaises(AttributeError, setattr, proto, 'nonexistent_field', 23)
# TODO(robinson): Add type-safety check for enums.
def testSingleScalarTypeSafety(self):
proto = unittest_pb2.TestAllTypes()
self.assertRaises(TypeError, setattr, proto, 'optional_int32', 1.1)
self.assertRaises(TypeError, setattr, proto, 'optional_int32', 'foo')
self.assertRaises(TypeError, setattr, proto, 'optional_string', 10)
self.assertRaises(TypeError, setattr, proto, 'optional_bytes', 10)
def testSingleScalarBoundsChecking(self):
def TestMinAndMaxIntegers(field_name, expected_min, expected_max):
pb = unittest_pb2.TestAllTypes()
setattr(pb, field_name, expected_min)
setattr(pb, field_name, expected_max)
self.assertRaises(ValueError, setattr, pb, field_name, expected_min - 1)
self.assertRaises(ValueError, setattr, pb, field_name, expected_max + 1)
TestMinAndMaxIntegers('optional_int32', -(1 << 31), (1 << 31) - 1)
TestMinAndMaxIntegers('optional_uint32', 0, 0xffffffff)
TestMinAndMaxIntegers('optional_int64', -(1 << 63), (1 << 63) - 1)
TestMinAndMaxIntegers('optional_uint64', 0, 0xffffffffffffffff)
TestMinAndMaxIntegers('optional_nested_enum', -(1 << 31), (1 << 31) - 1)
def testRepeatedScalarTypeSafety(self):
proto = unittest_pb2.TestAllTypes()
self.assertRaises(TypeError, proto.repeated_int32.append, 1.1)
self.assertRaises(TypeError, proto.repeated_int32.append, 'foo')
self.assertRaises(TypeError, proto.repeated_string, 10)
self.assertRaises(TypeError, proto.repeated_bytes, 10)
proto.repeated_int32.append(10)
proto.repeated_int32[0] = 23
self.assertRaises(IndexError, proto.repeated_int32.__setitem__, 500, 23)
self.assertRaises(TypeError, proto.repeated_int32.__setitem__, 0, 'abc')
def testSingleScalarGettersAndSetters(self):
proto = unittest_pb2.TestAllTypes()
self.assertEqual(0, proto.optional_int32)
proto.optional_int32 = 1
self.assertEqual(1, proto.optional_int32)
# TODO(robinson): Test all other scalar field types.
def testSingleScalarClearField(self):
proto = unittest_pb2.TestAllTypes()
# Should be allowed to clear something that's not there (a no-op).
proto.ClearField('optional_int32')
proto.optional_int32 = 1
self.assertTrue(proto.HasField('optional_int32'))
proto.ClearField('optional_int32')
self.assertEqual(0, proto.optional_int32)
self.assertTrue(not proto.HasField('optional_int32'))
# TODO(robinson): Test all other scalar field types.
def testEnums(self):
proto = unittest_pb2.TestAllTypes()
self.assertEqual(1, proto.FOO)
self.assertEqual(1, unittest_pb2.TestAllTypes.FOO)
self.assertEqual(2, proto.BAR)
self.assertEqual(2, unittest_pb2.TestAllTypes.BAR)
self.assertEqual(3, proto.BAZ)
self.assertEqual(3, unittest_pb2.TestAllTypes.BAZ)
def testRepeatedScalars(self):
proto = unittest_pb2.TestAllTypes()
self.assertTrue(not proto.repeated_int32)
self.assertEqual(0, len(proto.repeated_int32))
proto.repeated_int32.append(5)
proto.repeated_int32.append(10)
self.assertTrue(proto.repeated_int32)
self.assertEqual(2, len(proto.repeated_int32))
self.assertEqual([5, 10], proto.repeated_int32)
self.assertEqual(5, proto.repeated_int32[0])
self.assertEqual(10, proto.repeated_int32[-1])
# Test out-of-bounds indices.
self.assertRaises(IndexError, proto.repeated_int32.__getitem__, 1234)
self.assertRaises(IndexError, proto.repeated_int32.__getitem__, -1234)
# Test incorrect types passed to __getitem__.
self.assertRaises(TypeError, proto.repeated_int32.__getitem__, 'foo')
self.assertRaises(TypeError, proto.repeated_int32.__getitem__, None)
# Test that we can use the field as an iterator.
result = []
for i in proto.repeated_int32:
result.append(i)
self.assertEqual([5, 10], result)
# Test clearing.
proto.ClearField('repeated_int32')
self.assertTrue(not proto.repeated_int32)
self.assertEqual(0, len(proto.repeated_int32))
def testRepeatedScalarsRemove(self):
proto = unittest_pb2.TestAllTypes()
self.assertTrue(not proto.repeated_int32)
self.assertEqual(0, len(proto.repeated_int32))
proto.repeated_int32.append(5)
proto.repeated_int32.append(10)
proto.repeated_int32.append(5)
proto.repeated_int32.append(5)
self.assertEqual(4, len(proto.repeated_int32))
proto.repeated_int32.remove(5)
self.assertEqual(3, len(proto.repeated_int32))
self.assertEqual(10, proto.repeated_int32[0])
self.assertEqual(5, proto.repeated_int32[1])
self.assertEqual(5, proto.repeated_int32[2])
proto.repeated_int32.remove(5)
self.assertEqual(2, len(proto.repeated_int32))
self.assertEqual(10, proto.repeated_int32[0])
self.assertEqual(5, proto.repeated_int32[1])
proto.repeated_int32.remove(10)
self.assertEqual(1, len(proto.repeated_int32))
self.assertEqual(5, proto.repeated_int32[0])
# Remove a non-existent element.
self.assertRaises(ValueError, proto.repeated_int32.remove, 123)
def testRepeatedComposites(self):
proto = unittest_pb2.TestAllTypes()
self.assertTrue(not proto.repeated_nested_message)
self.assertEqual(0, len(proto.repeated_nested_message))
m0 = proto.repeated_nested_message.add()
m1 = proto.repeated_nested_message.add()
self.assertTrue(proto.repeated_nested_message)
self.assertEqual(2, len(proto.repeated_nested_message))
self.assertTrue(m0 is proto.repeated_nested_message[0])
self.assertTrue(m1 is proto.repeated_nested_message[1])
self.assertTrue(isinstance(m0, unittest_pb2.TestAllTypes.NestedMessage))
# Test out-of-bounds indices.
self.assertRaises(IndexError, proto.repeated_nested_message.__getitem__,
1234)
self.assertRaises(IndexError, proto.repeated_nested_message.__getitem__,
-1234)
# Test incorrect types passed to __getitem__.
self.assertRaises(TypeError, proto.repeated_nested_message.__getitem__,
'foo')
self.assertRaises(TypeError, proto.repeated_nested_message.__getitem__,
None)
# Test that we can use the field as an iterator.
result = []
for i in proto.repeated_nested_message:
result.append(i)
self.assertEqual(2, len(result))
self.assertTrue(m0 is result[0])
self.assertTrue(m1 is result[1])
# Test item deletion.
del proto.repeated_nested_message[0]
self.assertEqual(1, len(proto.repeated_nested_message))
self.assertTrue(m1 is proto.repeated_nested_message[0])
# Test clearing.
proto.ClearField('repeated_nested_message')
self.assertTrue(not proto.repeated_nested_message)
self.assertEqual(0, len(proto.repeated_nested_message))
def testHandWrittenReflection(self):
# TODO(robinson): We probably need a better way to specify
# protocol types by hand. But then again, this isn't something
# we expect many people to do. Hmm.
FieldDescriptor = descriptor.FieldDescriptor
foo_field_descriptor = FieldDescriptor(
name='foo_field', full_name='MyProto.foo_field',
index=0, number=1, type=FieldDescriptor.TYPE_INT64,
cpp_type=FieldDescriptor.CPPTYPE_INT64,
label=FieldDescriptor.LABEL_OPTIONAL, default_value=0,
containing_type=None, message_type=None, enum_type=None,
is_extension=False, extension_scope=None,
options=descriptor_pb2.FieldOptions())
mydescriptor = descriptor.Descriptor(
name='MyProto', full_name='MyProto', filename='ignored',
containing_type=None, nested_types=[], enum_types=[],
fields=[foo_field_descriptor], extensions=[],
options=descriptor_pb2.MessageOptions())
class MyProtoClass(message.Message):
DESCRIPTOR = mydescriptor
__metaclass__ = reflection.GeneratedProtocolMessageType
myproto_instance = MyProtoClass()
self.assertEqual(0, myproto_instance.foo_field)
self.assertTrue(not myproto_instance.HasField('foo_field'))
myproto_instance.foo_field = 23
self.assertEqual(23, myproto_instance.foo_field)
self.assertTrue(myproto_instance.HasField('foo_field'))
def testTopLevelExtensionsForOptionalScalar(self):
extendee_proto = unittest_pb2.TestAllExtensions()
extension = unittest_pb2.optional_int32_extension
self.assertTrue(not extendee_proto.HasExtension(extension))
self.assertEqual(0, extendee_proto.Extensions[extension])
# As with normal scalar fields, just doing a read doesn't actually set the
# "has" bit.
self.assertTrue(not extendee_proto.HasExtension(extension))
# Actually set the thing.
extendee_proto.Extensions[extension] = 23
self.assertEqual(23, extendee_proto.Extensions[extension])
self.assertTrue(extendee_proto.HasExtension(extension))
# Ensure that clearing works as well.
extendee_proto.ClearExtension(extension)
self.assertEqual(0, extendee_proto.Extensions[extension])
self.assertTrue(not extendee_proto.HasExtension(extension))
def testTopLevelExtensionsForRepeatedScalar(self):
extendee_proto = unittest_pb2.TestAllExtensions()
extension = unittest_pb2.repeated_string_extension
self.assertEqual(0, len(extendee_proto.Extensions[extension]))
extendee_proto.Extensions[extension].append('foo')
self.assertEqual(['foo'], extendee_proto.Extensions[extension])
string_list = extendee_proto.Extensions[extension]
extendee_proto.ClearExtension(extension)
self.assertEqual(0, len(extendee_proto.Extensions[extension]))
self.assertTrue(string_list is not extendee_proto.Extensions[extension])
# Shouldn't be allowed to do Extensions[extension] = 'a'
self.assertRaises(TypeError, operator.setitem, extendee_proto.Extensions,
extension, 'a')
def testTopLevelExtensionsForOptionalMessage(self):
extendee_proto = unittest_pb2.TestAllExtensions()
extension = unittest_pb2.optional_foreign_message_extension
self.assertTrue(not extendee_proto.HasExtension(extension))
self.assertEqual(0, extendee_proto.Extensions[extension].c)
# As with normal (non-extension) fields, merely reading from the
# thing shouldn't set the "has" bit.
self.assertTrue(not extendee_proto.HasExtension(extension))
extendee_proto.Extensions[extension].c = 23
self.assertEqual(23, extendee_proto.Extensions[extension].c)
self.assertTrue(extendee_proto.HasExtension(extension))
# Save a reference here.
foreign_message = extendee_proto.Extensions[extension]
extendee_proto.ClearExtension(extension)
self.assertTrue(foreign_message is not extendee_proto.Extensions[extension])
# Setting a field on foreign_message now shouldn't set
# any "has" bits on extendee_proto.
foreign_message.c = 42
self.assertEqual(42, foreign_message.c)
self.assertTrue(foreign_message.HasField('c'))
self.assertTrue(not extendee_proto.HasExtension(extension))
# Shouldn't be allowed to do Extensions[extension] = 'a'
self.assertRaises(TypeError, operator.setitem, extendee_proto.Extensions,
extension, 'a')
def testTopLevelExtensionsForRepeatedMessage(self):
extendee_proto = unittest_pb2.TestAllExtensions()
extension = unittest_pb2.repeatedgroup_extension
self.assertEqual(0, len(extendee_proto.Extensions[extension]))
group = extendee_proto.Extensions[extension].add()
group.a = 23
self.assertEqual(23, extendee_proto.Extensions[extension][0].a)
group.a = 42
self.assertEqual(42, extendee_proto.Extensions[extension][0].a)
group_list = extendee_proto.Extensions[extension]
extendee_proto.ClearExtension(extension)
self.assertEqual(0, len(extendee_proto.Extensions[extension]))
self.assertTrue(group_list is not extendee_proto.Extensions[extension])
# Shouldn't be allowed to do Extensions[extension] = 'a'
self.assertRaises(TypeError, operator.setitem, extendee_proto.Extensions,
extension, 'a')
def testNestedExtensions(self):
extendee_proto = unittest_pb2.TestAllExtensions()
extension = unittest_pb2.TestRequired.single
# We just test the non-repeated case.
self.assertTrue(not extendee_proto.HasExtension(extension))
required = extendee_proto.Extensions[extension]
self.assertEqual(0, required.a)
self.assertTrue(not extendee_proto.HasExtension(extension))
required.a = 23
self.assertEqual(23, extendee_proto.Extensions[extension].a)
self.assertTrue(extendee_proto.HasExtension(extension))
extendee_proto.ClearExtension(extension)
self.assertTrue(required is not extendee_proto.Extensions[extension])
self.assertTrue(not extendee_proto.HasExtension(extension))
# If message A directly contains message B, and
# a.HasField('b') is currently False, then mutating any
# extension in B should change a.HasField('b') to True
# (and so on up the object tree).
def testHasBitsForAncestorsOfExtendedMessage(self):
# Optional scalar extension.
toplevel = more_extensions_pb2.TopLevelMessage()
self.assertTrue(not toplevel.HasField('submessage'))
self.assertEqual(0, toplevel.submessage.Extensions[
more_extensions_pb2.optional_int_extension])
self.assertTrue(not toplevel.HasField('submessage'))
toplevel.submessage.Extensions[
more_extensions_pb2.optional_int_extension] = 23
self.assertEqual(23, toplevel.submessage.Extensions[
more_extensions_pb2.optional_int_extension])
self.assertTrue(toplevel.HasField('submessage'))
# Repeated scalar extension.
toplevel = more_extensions_pb2.TopLevelMessage()
self.assertTrue(not toplevel.HasField('submessage'))
self.assertEqual([], toplevel.submessage.Extensions[
more_extensions_pb2.repeated_int_extension])
self.assertTrue(not toplevel.HasField('submessage'))
toplevel.submessage.Extensions[
more_extensions_pb2.repeated_int_extension].append(23)
self.assertEqual([23], toplevel.submessage.Extensions[
more_extensions_pb2.repeated_int_extension])
self.assertTrue(toplevel.HasField('submessage'))
# Optional message extension.
toplevel = more_extensions_pb2.TopLevelMessage()
self.assertTrue(not toplevel.HasField('submessage'))
self.assertEqual(0, toplevel.submessage.Extensions[
more_extensions_pb2.optional_message_extension].foreign_message_int)
self.assertTrue(not toplevel.HasField('submessage'))
toplevel.submessage.Extensions[
more_extensions_pb2.optional_message_extension].foreign_message_int = 23
self.assertEqual(23, toplevel.submessage.Extensions[
more_extensions_pb2.optional_message_extension].foreign_message_int)
self.assertTrue(toplevel.HasField('submessage'))
# Repeated message extension.
toplevel = more_extensions_pb2.TopLevelMessage()
self.assertTrue(not toplevel.HasField('submessage'))
self.assertEqual(0, len(toplevel.submessage.Extensions[
more_extensions_pb2.repeated_message_extension]))
self.assertTrue(not toplevel.HasField('submessage'))
foreign = toplevel.submessage.Extensions[
more_extensions_pb2.repeated_message_extension].add()
self.assertTrue(foreign is toplevel.submessage.Extensions[
more_extensions_pb2.repeated_message_extension][0])
self.assertTrue(toplevel.HasField('submessage'))
def testDisconnectionAfterClearingEmptyMessage(self):
toplevel = more_extensions_pb2.TopLevelMessage()
extendee_proto = toplevel.submessage
extension = more_extensions_pb2.optional_message_extension
extension_proto = extendee_proto.Extensions[extension]
extendee_proto.ClearExtension(extension)
extension_proto.foreign_message_int = 23
self.assertTrue(not toplevel.HasField('submessage'))
self.assertTrue(extension_proto is not extendee_proto.Extensions[extension])
def testExtensionFailureModes(self):
extendee_proto = unittest_pb2.TestAllExtensions()
# Try non-extension-handle arguments to HasExtension,
# ClearExtension(), and Extensions[]...
self.assertRaises(KeyError, extendee_proto.HasExtension, 1234)
self.assertRaises(KeyError, extendee_proto.ClearExtension, 1234)
self.assertRaises(KeyError, extendee_proto.Extensions.__getitem__, 1234)
self.assertRaises(KeyError, extendee_proto.Extensions.__setitem__, 1234, 5)
# Try something that *is* an extension handle, just not for
# this message...
unknown_handle = more_extensions_pb2.optional_int_extension
self.assertRaises(KeyError, extendee_proto.HasExtension,
unknown_handle)
self.assertRaises(KeyError, extendee_proto.ClearExtension,
unknown_handle)
self.assertRaises(KeyError, extendee_proto.Extensions.__getitem__,
unknown_handle)
self.assertRaises(KeyError, extendee_proto.Extensions.__setitem__,
unknown_handle, 5)
# Try call HasExtension() with a valid handle, but for a
# *repeated* field. (Just as with non-extension repeated
# fields, Has*() isn't supported for extension repeated fields).
self.assertRaises(KeyError, extendee_proto.HasExtension,
unittest_pb2.repeated_string_extension)
def testMergeFromSingularField(self):
# Test merge with just a singular field.
proto1 = unittest_pb2.TestAllTypes()
proto1.optional_int32 = 1
proto2 = unittest_pb2.TestAllTypes()
# This shouldn't get overwritten.
proto2.optional_string = 'value'
proto2.MergeFrom(proto1)
self.assertEqual(1, proto2.optional_int32)
self.assertEqual('value', proto2.optional_string)
def testMergeFromRepeatedField(self):
# Test merge with just a repeated field.
proto1 = unittest_pb2.TestAllTypes()
proto1.repeated_int32.append(1)
proto1.repeated_int32.append(2)
proto2 = unittest_pb2.TestAllTypes()
proto2.repeated_int32.append(0)
proto2.MergeFrom(proto1)
self.assertEqual(0, proto2.repeated_int32[0])
self.assertEqual(1, proto2.repeated_int32[1])
self.assertEqual(2, proto2.repeated_int32[2])
def testMergeFromOptionalGroup(self):
# Test merge with an optional group.
proto1 = unittest_pb2.TestAllTypes()
proto1.optionalgroup.a = 12
proto2 = unittest_pb2.TestAllTypes()
proto2.MergeFrom(proto1)
self.assertEqual(12, proto2.optionalgroup.a)
def testMergeFromRepeatedNestedMessage(self):
# Test merge with a repeated nested message.
proto1 = unittest_pb2.TestAllTypes()
m = proto1.repeated_nested_message.add()
m.bb = 123
m = proto1.repeated_nested_message.add()
m.bb = 321
proto2 = unittest_pb2.TestAllTypes()
m = proto2.repeated_nested_message.add()
m.bb = 999
proto2.MergeFrom(proto1)
self.assertEqual(999, proto2.repeated_nested_message[0].bb)
self.assertEqual(123, proto2.repeated_nested_message[1].bb)
self.assertEqual(321, proto2.repeated_nested_message[2].bb)
def testMergeFromAllFields(self):
# With all fields set.
proto1 = unittest_pb2.TestAllTypes()
test_util.SetAllFields(proto1)
proto2 = unittest_pb2.TestAllTypes()
proto2.MergeFrom(proto1)
# Messages should be equal.
self.assertEqual(proto2, proto1)
# Serialized string should be equal too.
string1 = proto1.SerializeToString()
string2 = proto2.SerializeToString()
self.assertEqual(string1, string2)
def testMergeFromExtensionsSingular(self):
proto1 = unittest_pb2.TestAllExtensions()
proto1.Extensions[unittest_pb2.optional_int32_extension] = 1
proto2 = unittest_pb2.TestAllExtensions()
proto2.MergeFrom(proto1)
self.assertEqual(
1, proto2.Extensions[unittest_pb2.optional_int32_extension])
def testMergeFromExtensionsRepeated(self):
proto1 = unittest_pb2.TestAllExtensions()
proto1.Extensions[unittest_pb2.repeated_int32_extension].append(1)
proto1.Extensions[unittest_pb2.repeated_int32_extension].append(2)
proto2 = unittest_pb2.TestAllExtensions()
proto2.Extensions[unittest_pb2.repeated_int32_extension].append(0)
proto2.MergeFrom(proto1)
self.assertEqual(
3, len(proto2.Extensions[unittest_pb2.repeated_int32_extension]))
self.assertEqual(
0, proto2.Extensions[unittest_pb2.repeated_int32_extension][0])
self.assertEqual(
1, proto2.Extensions[unittest_pb2.repeated_int32_extension][1])
self.assertEqual(
2, proto2.Extensions[unittest_pb2.repeated_int32_extension][2])
def testMergeFromExtensionsNestedMessage(self):
proto1 = unittest_pb2.TestAllExtensions()
ext1 = proto1.Extensions[
unittest_pb2.repeated_nested_message_extension]
m = ext1.add()
m.bb = 222
m = ext1.add()
m.bb = 333
proto2 = unittest_pb2.TestAllExtensions()
ext2 = proto2.Extensions[
unittest_pb2.repeated_nested_message_extension]
m = ext2.add()
m.bb = 111
proto2.MergeFrom(proto1)
ext2 = proto2.Extensions[
unittest_pb2.repeated_nested_message_extension]
self.assertEqual(3, len(ext2))
self.assertEqual(111, ext2[0].bb)
self.assertEqual(222, ext2[1].bb)
self.assertEqual(333, ext2[2].bb)
def testCopyFromSingularField(self):
# Test copy with just a singular field.
proto1 = unittest_pb2.TestAllTypes()
proto1.optional_int32 = 1
proto1.optional_string = 'important-text'
proto2 = unittest_pb2.TestAllTypes()
proto2.optional_string = 'value'
proto2.CopyFrom(proto1)
self.assertEqual(1, proto2.optional_int32)
self.assertEqual('important-text', proto2.optional_string)
def testCopyFromRepeatedField(self):
# Test copy with a repeated field.
proto1 = unittest_pb2.TestAllTypes()
proto1.repeated_int32.append(1)
proto1.repeated_int32.append(2)
proto2 = unittest_pb2.TestAllTypes()
proto2.repeated_int32.append(0)
proto2.CopyFrom(proto1)
self.assertEqual(1, proto2.repeated_int32[0])
self.assertEqual(2, proto2.repeated_int32[1])
def testCopyFromAllFields(self):
# With all fields set.
proto1 = unittest_pb2.TestAllTypes()
test_util.SetAllFields(proto1)
proto2 = unittest_pb2.TestAllTypes()
proto2.CopyFrom(proto1)
# Messages should be equal.
self.assertEqual(proto2, proto1)
# Serialized string should be equal too.
string1 = proto1.SerializeToString()
string2 = proto2.SerializeToString()
self.assertEqual(string1, string2)
def testCopyFromSelf(self):
proto1 = unittest_pb2.TestAllTypes()
proto1.repeated_int32.append(1)
proto1.optional_int32 = 2
proto1.optional_string = 'important-text'
proto1.CopyFrom(proto1)
self.assertEqual(1, proto1.repeated_int32[0])
self.assertEqual(2, proto1.optional_int32)
self.assertEqual('important-text', proto1.optional_string)
def testClear(self):
proto = unittest_pb2.TestAllTypes()
test_util.SetAllFields(proto)
# Clear the message.
proto.Clear()
self.assertEquals(proto.ByteSize(), 0)
empty_proto = unittest_pb2.TestAllTypes()
self.assertEquals(proto, empty_proto)
# Test if extensions which were set are cleared.
proto = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(proto)
# Clear the message.
proto.Clear()
self.assertEquals(proto.ByteSize(), 0)
empty_proto = unittest_pb2.TestAllExtensions()
self.assertEquals(proto, empty_proto)
def testIsInitialized(self):
# Trivial cases - all optional fields and extensions.
proto = unittest_pb2.TestAllTypes()
self.assertTrue(proto.IsInitialized())
proto = unittest_pb2.TestAllExtensions()
self.assertTrue(proto.IsInitialized())
# The case of uninitialized required fields.
proto = unittest_pb2.TestRequired()
self.assertFalse(proto.IsInitialized())
proto.a = proto.b = proto.c = 2
self.assertTrue(proto.IsInitialized())
# The case of uninitialized submessage.
proto = unittest_pb2.TestRequiredForeign()
self.assertTrue(proto.IsInitialized())
proto.optional_message.a = 1
self.assertFalse(proto.IsInitialized())
proto.optional_message.b = 0
proto.optional_message.c = 0
self.assertTrue(proto.IsInitialized())
# Uninitialized repeated submessage.
message1 = proto.repeated_message.add()
self.assertFalse(proto.IsInitialized())
message1.a = message1.b = message1.c = 0
self.assertTrue(proto.IsInitialized())
# Uninitialized repeated group in an extension.
proto = unittest_pb2.TestAllExtensions()
extension = unittest_pb2.TestRequired.multi
message1 = proto.Extensions[extension].add()
message2 = proto.Extensions[extension].add()
self.assertFalse(proto.IsInitialized())
message1.a = 1
message1.b = 1
message1.c = 1
self.assertFalse(proto.IsInitialized())
message2.a = 2
message2.b = 2
message2.c = 2
self.assertTrue(proto.IsInitialized())
# Uninitialized nonrepeated message in an extension.
proto = unittest_pb2.TestAllExtensions()
extension = unittest_pb2.TestRequired.single
proto.Extensions[extension].a = 1
self.assertFalse(proto.IsInitialized())
proto.Extensions[extension].b = 2
proto.Extensions[extension].c = 3
self.assertTrue(proto.IsInitialized())
def testStringUTF8Encoding(self):
proto = unittest_pb2.TestAllTypes()
# Assignment of a unicode object to a field of type 'bytes' is not allowed.
self.assertRaises(TypeError,
setattr, proto, 'optional_bytes', u'unicode object')
# Check that the default value is of python's 'unicode' type.
self.assertEqual(type(proto.optional_string), unicode)
proto.optional_string = unicode('Testing')
self.assertEqual(proto.optional_string, str('Testing'))
# Assign a value of type 'str' which can be encoded in UTF-8.
proto.optional_string = str('Testing')
self.assertEqual(proto.optional_string, unicode('Testing'))
# Values of type 'str' are also accepted as long as they can be encoded in
# UTF-8.
self.assertEqual(type(proto.optional_string), str)
# Try to assign a 'str' value which contains bytes that aren't 7-bit ASCII.
self.assertRaises(ValueError,
setattr, proto, 'optional_string', str('a\x80a'))
# Assign a 'str' object which contains a UTF-8 encoded string.
self.assertRaises(ValueError,
setattr, proto, 'optional_string', 'Тест')
# No exception thrown.
proto.optional_string = 'abc'
def testStringUTF8Serialization(self):
proto = unittest_mset_pb2.TestMessageSet()
extension_message = unittest_mset_pb2.TestMessageSetExtension2
extension = extension_message.message_set_extension
test_utf8 = u'Тест'
test_utf8_bytes = test_utf8.encode('utf-8')
# 'Test' in another language, using UTF-8 charset.
proto.Extensions[extension].str = test_utf8
# Serialize using the MessageSet wire format (this is specified in the
# .proto file).
serialized = proto.SerializeToString()
# Check byte size.
self.assertEqual(proto.ByteSize(), len(serialized))
raw = unittest_mset_pb2.RawMessageSet()
raw.MergeFromString(serialized)
message2 = unittest_mset_pb2.TestMessageSetExtension2()
self.assertEqual(1, len(raw.item))
# Check that the type_id is the same as the tag ID in the .proto file.
self.assertEqual(raw.item[0].type_id, 1547769)
# Check the actual bytes on the wire.
self.assertTrue(
raw.item[0].message.endswith(test_utf8_bytes))
message2.MergeFromString(raw.item[0].message)
self.assertEqual(type(message2.str), unicode)
self.assertEqual(message2.str, test_utf8)
# How about if the bytes on the wire aren't a valid UTF-8 encoded string.
bytes = raw.item[0].message.replace(
test_utf8_bytes, len(test_utf8_bytes) * '\xff')
self.assertRaises(UnicodeDecodeError, message2.MergeFromString, bytes)
# Since we had so many tests for protocol buffer equality, we broke these out
# into separate TestCase classes.
class TestAllTypesEqualityTest(unittest.TestCase):
def setUp(self):
self.first_proto = unittest_pb2.TestAllTypes()
self.second_proto = unittest_pb2.TestAllTypes()
def testSelfEquality(self):
self.assertEqual(self.first_proto, self.first_proto)
def testEmptyProtosEqual(self):
self.assertEqual(self.first_proto, self.second_proto)
class FullProtosEqualityTest(unittest.TestCase):
"""Equality tests using completely-full protos as a starting point."""
def setUp(self):
self.first_proto = unittest_pb2.TestAllTypes()
self.second_proto = unittest_pb2.TestAllTypes()
test_util.SetAllFields(self.first_proto)
test_util.SetAllFields(self.second_proto)
def testAllFieldsFilledEquality(self):
self.assertEqual(self.first_proto, self.second_proto)
def testNonRepeatedScalar(self):
# Nonrepeated scalar field change should cause inequality.
self.first_proto.optional_int32 += 1
self.assertNotEqual(self.first_proto, self.second_proto)
# ...as should clearing a field.
self.first_proto.ClearField('optional_int32')
self.assertNotEqual(self.first_proto, self.second_proto)
def testNonRepeatedComposite(self):
# Change a nonrepeated composite field.
self.first_proto.optional_nested_message.bb += 1
self.assertNotEqual(self.first_proto, self.second_proto)
self.first_proto.optional_nested_message.bb -= 1
self.assertEqual(self.first_proto, self.second_proto)
# Clear a field in the nested message.
self.first_proto.optional_nested_message.ClearField('bb')
self.assertNotEqual(self.first_proto, self.second_proto)
self.first_proto.optional_nested_message.bb = (
self.second_proto.optional_nested_message.bb)
self.assertEqual(self.first_proto, self.second_proto)
# Remove the nested message entirely.
self.first_proto.ClearField('optional_nested_message')
self.assertNotEqual(self.first_proto, self.second_proto)
def testRepeatedScalar(self):
# Change a repeated scalar field.
self.first_proto.repeated_int32.append(5)
self.assertNotEqual(self.first_proto, self.second_proto)
self.first_proto.ClearField('repeated_int32')
self.assertNotEqual(self.first_proto, self.second_proto)
def testRepeatedComposite(self):
# Change value within a repeated composite field.
self.first_proto.repeated_nested_message[0].bb += 1
self.assertNotEqual(self.first_proto, self.second_proto)
self.first_proto.repeated_nested_message[0].bb -= 1
self.assertEqual(self.first_proto, self.second_proto)
# Add a value to a repeated composite field.
self.first_proto.repeated_nested_message.add()
self.assertNotEqual(self.first_proto, self.second_proto)
self.second_proto.repeated_nested_message.add()
self.assertEqual(self.first_proto, self.second_proto)
def testNonRepeatedScalarHasBits(self):
# Ensure that we test "has" bits as well as value for
# nonrepeated scalar field.
self.first_proto.ClearField('optional_int32')
self.second_proto.optional_int32 = 0
self.assertNotEqual(self.first_proto, self.second_proto)
def testNonRepeatedCompositeHasBits(self):
# Ensure that we test "has" bits as well as value for
# nonrepeated composite field.
self.first_proto.ClearField('optional_nested_message')
self.second_proto.optional_nested_message.ClearField('bb')
self.assertNotEqual(self.first_proto, self.second_proto)
# TODO(robinson): Replace next two lines with method
# to set the "has" bit without changing the value,
# if/when such a method exists.
self.first_proto.optional_nested_message.bb = 0
self.first_proto.optional_nested_message.ClearField('bb')
self.assertEqual(self.first_proto, self.second_proto)
class ExtensionEqualityTest(unittest.TestCase):
def testExtensionEquality(self):
first_proto = unittest_pb2.TestAllExtensions()
second_proto = unittest_pb2.TestAllExtensions()
self.assertEqual(first_proto, second_proto)
test_util.SetAllExtensions(first_proto)
self.assertNotEqual(first_proto, second_proto)
test_util.SetAllExtensions(second_proto)
self.assertEqual(first_proto, second_proto)
# Ensure that we check value equality.
first_proto.Extensions[unittest_pb2.optional_int32_extension] += 1
self.assertNotEqual(first_proto, second_proto)
first_proto.Extensions[unittest_pb2.optional_int32_extension] -= 1
self.assertEqual(first_proto, second_proto)
# Ensure that we also look at "has" bits.
first_proto.ClearExtension(unittest_pb2.optional_int32_extension)
second_proto.Extensions[unittest_pb2.optional_int32_extension] = 0
self.assertNotEqual(first_proto, second_proto)
first_proto.Extensions[unittest_pb2.optional_int32_extension] = 0
self.assertEqual(first_proto, second_proto)
# Ensure that differences in cached values
# don't matter if "has" bits are both false.
first_proto = unittest_pb2.TestAllExtensions()
second_proto = unittest_pb2.TestAllExtensions()
self.assertEqual(
0, first_proto.Extensions[unittest_pb2.optional_int32_extension])
self.assertEqual(first_proto, second_proto)
class MutualRecursionEqualityTest(unittest.TestCase):
def testEqualityWithMutualRecursion(self):
first_proto = unittest_pb2.TestMutualRecursionA()
second_proto = unittest_pb2.TestMutualRecursionA()
self.assertEqual(first_proto, second_proto)
first_proto.bb.a.bb.optional_int32 = 23
self.assertNotEqual(first_proto, second_proto)
second_proto.bb.a.bb.optional_int32 = 23
self.assertEqual(first_proto, second_proto)
class ByteSizeTest(unittest.TestCase):
def setUp(self):
self.proto = unittest_pb2.TestAllTypes()
self.extended_proto = more_extensions_pb2.ExtendedMessage()
def Size(self):
return self.proto.ByteSize()
def testEmptyMessage(self):
self.assertEqual(0, self.proto.ByteSize())
def testVarints(self):
def Test(i, expected_varint_size):
self.proto.Clear()
self.proto.optional_int64 = i
# Add one to the varint size for the tag info
# for tag 1.
self.assertEqual(expected_varint_size + 1, self.Size())
Test(0, 1)
Test(1, 1)
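# A varint stores 7 payload bits per byte, so (1 << i) - 1 needs i/7 bytes; the zip
# below pairs i = 7, 14, ..., 56 with the expected sizes 1..8 (zip truncates at the
# shorter sequence, so the 10000 upper bound is irrelevant).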
for i, num_bytes in zip(range(7, 63, 7), range(1, 10000)):
Test((1 << i) - 1, num_bytes)
Test(-1, 10)
Test(-2, 10)
Test(-(1 << 63), 10)
def testStrings(self):
self.proto.optional_string = ''
# Need one byte for tag info (tag #14), and one byte for length.
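# (The tag byte is (field_number << 3) | wire_type = (14 << 3) | 2 = 114, which fits in one byte.)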
self.assertEqual(2, self.Size())
self.proto.optional_string = 'abc'
# Need one byte for tag info (tag #14), and one byte for length.
self.assertEqual(2 + len(self.proto.optional_string), self.Size())
self.proto.optional_string = 'x' * 128
# Need one byte for tag info (tag #14), and TWO bytes for length.
self.assertEqual(3 + len(self.proto.optional_string), self.Size())
def testOtherNumerics(self):
self.proto.optional_fixed32 = 1234
# One byte for tag and 4 bytes for fixed32.
self.assertEqual(5, self.Size())
self.proto = unittest_pb2.TestAllTypes()
self.proto.optional_fixed64 = 1234
# One byte for tag and 8 bytes for fixed64.
self.assertEqual(9, self.Size())
self.proto = unittest_pb2.TestAllTypes()
self.proto.optional_float = 1.234
# One byte for tag and 4 bytes for float.
self.assertEqual(5, self.Size())
self.proto = unittest_pb2.TestAllTypes()
self.proto.optional_double = 1.234
# One byte for tag and 8 bytes for float.
self.assertEqual(9, self.Size())
self.proto = unittest_pb2.TestAllTypes()
self.proto.optional_sint32 = 64
# One byte for tag and 2 bytes for zig-zag-encoded 64.
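# (ZigZag encoding maps 64 to (64 << 1) ^ (64 >> 31) = 128, and 128 needs two varint bytes.)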
self.assertEqual(3, self.Size())
self.proto = unittest_pb2.TestAllTypes()
def testComposites(self):
# 3 bytes.
self.proto.optional_nested_message.bb = (1 << 14)
# Plus one byte for bb tag.
# Plus 1 byte for optional_nested_message serialized size.
# Plus two bytes for optional_nested_message tag.
self.assertEqual(3 + 1 + 1 + 2, self.Size())
def testGroups(self):
# 4 bytes.
self.proto.optionalgroup.a = (1 << 21)
# Plus two bytes for |a| tag.
# Plus 2 * two bytes for START_GROUP and END_GROUP tags.
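# (Tags for field numbers 16 and above encode to values >= 128, hence two varint bytes each.)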
self.assertEqual(4 + 2 + 2*2, self.Size())
def testRepeatedScalars(self):
self.proto.repeated_int32.append(10) # 1 byte.
self.proto.repeated_int32.append(128) # 2 bytes.
# Also need 2 bytes for each entry for tag.
self.assertEqual(1 + 2 + 2*2, self.Size())
def testRepeatedScalarsRemove(self):
self.proto.repeated_int32.append(10) # 1 byte.
self.proto.repeated_int32.append(128) # 2 bytes.
# Also need 2 bytes for each entry for tag.
self.assertEqual(1 + 2 + 2*2, self.Size())
self.proto.repeated_int32.remove(128)
self.assertEqual(1 + 2, self.Size())
def testRepeatedComposites(self):
# Empty message. 2 bytes tag plus 1 byte length.
foreign_message_0 = self.proto.repeated_nested_message.add()
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
foreign_message_1 = self.proto.repeated_nested_message.add()
foreign_message_1.bb = 7
self.assertEqual(2 + 1 + 2 + 1 + 1 + 1, self.Size())
def testRepeatedCompositesDelete(self):
# Empty message. 2 bytes tag plus 1 byte length.
foreign_message_0 = self.proto.repeated_nested_message.add()
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
foreign_message_1 = self.proto.repeated_nested_message.add()
foreign_message_1.bb = 9
self.assertEqual(2 + 1 + 2 + 1 + 1 + 1, self.Size())
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
del self.proto.repeated_nested_message[0]
self.assertEqual(2 + 1 + 1 + 1, self.Size())
# Now add a new message.
foreign_message_2 = self.proto.repeated_nested_message.add()
foreign_message_2.bb = 12
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
self.assertEqual(2 + 1 + 1 + 1 + 2 + 1 + 1 + 1, self.Size())
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
del self.proto.repeated_nested_message[1]
self.assertEqual(2 + 1 + 1 + 1, self.Size())
del self.proto.repeated_nested_message[0]
self.assertEqual(0, self.Size())
def testRepeatedGroups(self):
# 2-byte START_GROUP plus 2-byte END_GROUP.
group_0 = self.proto.repeatedgroup.add()
# 2-byte START_GROUP plus 2-byte |a| tag + 1-byte |a|
# plus 2-byte END_GROUP.
group_1 = self.proto.repeatedgroup.add()
group_1.a = 7
self.assertEqual(2 + 2 + 2 + 2 + 1 + 2, self.Size())
def testExtensions(self):
proto = unittest_pb2.TestAllExtensions()
self.assertEqual(0, proto.ByteSize())
extension = unittest_pb2.optional_int32_extension # Field #1, 1 byte.
proto.Extensions[extension] = 23
# 1 byte for tag, 1 byte for value.
self.assertEqual(2, proto.ByteSize())
def testCacheInvalidationForNonrepeatedScalar(self):
# Test non-extension.
self.proto.optional_int32 = 1
self.assertEqual(2, self.proto.ByteSize())
self.proto.optional_int32 = 128
self.assertEqual(3, self.proto.ByteSize())
self.proto.ClearField('optional_int32')
self.assertEqual(0, self.proto.ByteSize())
# Test within extension.
extension = more_extensions_pb2.optional_int_extension
self.extended_proto.Extensions[extension] = 1
self.assertEqual(2, self.extended_proto.ByteSize())
self.extended_proto.Extensions[extension] = 128
self.assertEqual(3, self.extended_proto.ByteSize())
self.extended_proto.ClearExtension(extension)
self.assertEqual(0, self.extended_proto.ByteSize())
def testCacheInvalidationForRepeatedScalar(self):
# Test non-extension.
self.proto.repeated_int32.append(1)
self.assertEqual(3, self.proto.ByteSize())
self.proto.repeated_int32.append(1)
self.assertEqual(6, self.proto.ByteSize())
self.proto.repeated_int32[1] = 128
self.assertEqual(7, self.proto.ByteSize())
self.proto.ClearField('repeated_int32')
self.assertEqual(0, self.proto.ByteSize())
# Test within extension.
extension = more_extensions_pb2.repeated_int_extension
repeated = self.extended_proto.Extensions[extension]
repeated.append(1)
self.assertEqual(2, self.extended_proto.ByteSize())
repeated.append(1)
self.assertEqual(4, self.extended_proto.ByteSize())
repeated[1] = 128
self.assertEqual(5, self.extended_proto.ByteSize())
self.extended_proto.ClearExtension(extension)
self.assertEqual(0, self.extended_proto.ByteSize())
def testCacheInvalidationForNonrepeatedMessage(self):
# Test non-extension.
self.proto.optional_foreign_message.c = 1
self.assertEqual(5, self.proto.ByteSize())
self.proto.optional_foreign_message.c = 128
self.assertEqual(6, self.proto.ByteSize())
self.proto.optional_foreign_message.ClearField('c')
self.assertEqual(3, self.proto.ByteSize())
self.proto.ClearField('optional_foreign_message')
self.assertEqual(0, self.proto.ByteSize())
child = self.proto.optional_foreign_message
self.proto.ClearField('optional_foreign_message')
child.c = 128
self.assertEqual(0, self.proto.ByteSize())
# Test within extension.
extension = more_extensions_pb2.optional_message_extension
child = self.extended_proto.Extensions[extension]
self.assertEqual(0, self.extended_proto.ByteSize())
child.foreign_message_int = 1
self.assertEqual(4, self.extended_proto.ByteSize())
child.foreign_message_int = 128
self.assertEqual(5, self.extended_proto.ByteSize())
self.extended_proto.ClearExtension(extension)
self.assertEqual(0, self.extended_proto.ByteSize())
def testCacheInvalidationForRepeatedMessage(self):
# Test non-extension.
child0 = self.proto.repeated_foreign_message.add()
self.assertEqual(3, self.proto.ByteSize())
self.proto.repeated_foreign_message.add()
self.assertEqual(6, self.proto.ByteSize())
child0.c = 1
self.assertEqual(8, self.proto.ByteSize())
self.proto.ClearField('repeated_foreign_message')
self.assertEqual(0, self.proto.ByteSize())
# Test within extension.
extension = more_extensions_pb2.repeated_message_extension
child_list = self.extended_proto.Extensions[extension]
child0 = child_list.add()
self.assertEqual(2, self.extended_proto.ByteSize())
child_list.add()
self.assertEqual(4, self.extended_proto.ByteSize())
child0.foreign_message_int = 1
self.assertEqual(6, self.extended_proto.ByteSize())
child0.ClearField('foreign_message_int')
self.assertEqual(4, self.extended_proto.ByteSize())
self.extended_proto.ClearExtension(extension)
self.assertEqual(0, self.extended_proto.ByteSize())
# TODO(robinson): We need cross-language serialization consistency tests.
# Issues to be sure to cover include:
# * Handling of unrecognized tags ("uninterpreted_bytes").
# * Handling of MessageSets.
# * Consistent ordering of tags in the wire format,
# including ordering between extensions and non-extension
# fields.
# * Consistent serialization of negative numbers, especially
# negative int32s.
# * Handling of empty submessages (with and without "has"
# bits set).
class SerializationTest(unittest.TestCase):
def testSerializeEmtpyMessage(self):
first_proto = unittest_pb2.TestAllTypes()
second_proto = unittest_pb2.TestAllTypes()
serialized = first_proto.SerializeToString()
self.assertEqual(first_proto.ByteSize(), len(serialized))
second_proto.MergeFromString(serialized)
self.assertEqual(first_proto, second_proto)
def testSerializeAllFields(self):
first_proto = unittest_pb2.TestAllTypes()
second_proto = unittest_pb2.TestAllTypes()
test_util.SetAllFields(first_proto)
serialized = first_proto.SerializeToString()
self.assertEqual(first_proto.ByteSize(), len(serialized))
second_proto.MergeFromString(serialized)
self.assertEqual(first_proto, second_proto)
def testSerializeAllExtensions(self):
first_proto = unittest_pb2.TestAllExtensions()
second_proto = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(first_proto)
serialized = first_proto.SerializeToString()
second_proto.MergeFromString(serialized)
self.assertEqual(first_proto, second_proto)
def testCanonicalSerializationOrder(self):
proto = more_messages_pb2.OutOfOrderFields()
# These are also their tag numbers. Even though we're setting these in
# reverse-tag order AND they're listed in reverse tag-order in the .proto
# file, they should nonetheless be serialized in tag order.
proto.optional_sint32 = 5
proto.Extensions[more_messages_pb2.optional_uint64] = 4
proto.optional_uint32 = 3
proto.Extensions[more_messages_pb2.optional_int64] = 2
proto.optional_int32 = 1
serialized = proto.SerializeToString()
self.assertEqual(proto.ByteSize(), len(serialized))
d = decoder.Decoder(serialized)
ReadTag = d.ReadFieldNumberAndWireType
self.assertEqual((1, wire_format.WIRETYPE_VARINT), ReadTag())
self.assertEqual(1, d.ReadInt32())
self.assertEqual((2, wire_format.WIRETYPE_VARINT), ReadTag())
self.assertEqual(2, d.ReadInt64())
self.assertEqual((3, wire_format.WIRETYPE_VARINT), ReadTag())
self.assertEqual(3, d.ReadUInt32())
self.assertEqual((4, wire_format.WIRETYPE_VARINT), ReadTag())
self.assertEqual(4, d.ReadUInt64())
self.assertEqual((5, wire_format.WIRETYPE_VARINT), ReadTag())
self.assertEqual(5, d.ReadSInt32())
def testCanonicalSerializationOrderSameAsCpp(self):
# Copy of the same test we use for C++.
proto = unittest_pb2.TestFieldOrderings()
test_util.SetAllFieldsAndExtensions(proto)
serialized = proto.SerializeToString()
test_util.ExpectAllFieldsAndExtensionsInOrder(serialized)
def testMergeFromStringWhenFieldsAlreadySet(self):
first_proto = unittest_pb2.TestAllTypes()
first_proto.repeated_string.append('foobar')
first_proto.optional_int32 = 23
first_proto.optional_nested_message.bb = 42
serialized = first_proto.SerializeToString()
second_proto = unittest_pb2.TestAllTypes()
second_proto.repeated_string.append('baz')
second_proto.optional_int32 = 100
second_proto.optional_nested_message.bb = 999
second_proto.MergeFromString(serialized)
# Ensure that we append to repeated fields.
self.assertEqual(['baz', 'foobar'], list(second_proto.repeated_string))
# Ensure that we overwrite nonrepeated scalars.
self.assertEqual(23, second_proto.optional_int32)
# Ensure that we recursively call MergeFromString() on
# submessages.
self.assertEqual(42, second_proto.optional_nested_message.bb)
def testMessageSetWireFormat(self):
proto = unittest_mset_pb2.TestMessageSet()
extension_message1 = unittest_mset_pb2.TestMessageSetExtension1
extension_message2 = unittest_mset_pb2.TestMessageSetExtension2
extension1 = extension_message1.message_set_extension
extension2 = extension_message2.message_set_extension
proto.Extensions[extension1].i = 123
proto.Extensions[extension2].str = 'foo'
# Serialize using the MessageSet wire format (this is specified in the
# .proto file).
serialized = proto.SerializeToString()
raw = unittest_mset_pb2.RawMessageSet()
self.assertEqual(False,
raw.DESCRIPTOR.GetOptions().message_set_wire_format)
raw.MergeFromString(serialized)
self.assertEqual(2, len(raw.item))
message1 = unittest_mset_pb2.TestMessageSetExtension1()
message1.MergeFromString(raw.item[0].message)
self.assertEqual(123, message1.i)
message2 = unittest_mset_pb2.TestMessageSetExtension2()
message2.MergeFromString(raw.item[1].message)
self.assertEqual('foo', message2.str)
# Deserialize using the MessageSet wire format.
proto2 = unittest_mset_pb2.TestMessageSet()
proto2.MergeFromString(serialized)
self.assertEqual(123, proto2.Extensions[extension1].i)
self.assertEqual('foo', proto2.Extensions[extension2].str)
# Check byte size.
self.assertEqual(proto2.ByteSize(), len(serialized))
self.assertEqual(proto.ByteSize(), len(serialized))
def testMessageSetWireFormatUnknownExtension(self):
# Create a message using the message set wire format with an unknown
# message.
raw = unittest_mset_pb2.RawMessageSet()
# Add an item.
item = raw.item.add()
item.type_id = 1545008
extension_message1 = unittest_mset_pb2.TestMessageSetExtension1
message1 = unittest_mset_pb2.TestMessageSetExtension1()
message1.i = 12345
item.message = message1.SerializeToString()
# Add a second, unknown extension.
item = raw.item.add()
item.type_id = 1545009
extension_message1 = unittest_mset_pb2.TestMessageSetExtension1
message1 = unittest_mset_pb2.TestMessageSetExtension1()
message1.i = 12346
item.message = message1.SerializeToString()
# Add another unknown extension.
item = raw.item.add()
item.type_id = 1545010
message1 = unittest_mset_pb2.TestMessageSetExtension2()
message1.str = 'foo'
item.message = message1.SerializeToString()
serialized = raw.SerializeToString()
# Parse message using the message set wire format.
proto = unittest_mset_pb2.TestMessageSet()
proto.MergeFromString(serialized)
# Check that the message parsed well.
extension_message1 = unittest_mset_pb2.TestMessageSetExtension1
extension1 = extension_message1.message_set_extension
self.assertEquals(12345, proto.Extensions[extension1].i)
def testUnknownFields(self):
proto = unittest_pb2.TestAllTypes()
test_util.SetAllFields(proto)
serialized = proto.SerializeToString()
# The empty message should be parsable with all of the fields
# unknown.
proto2 = unittest_pb2.TestEmptyMessage()
# Parsing this message should succeed.
proto2.MergeFromString(serialized)
# Now test with a int64 field set.
proto = unittest_pb2.TestAllTypes()
proto.optional_int64 = 0x0fffffffffffffff
serialized = proto.SerializeToString()
# The empty message should be parsable with all of the fields
# unknown.
proto2 = unittest_pb2.TestEmptyMessage()
# Parsing this message should succeed.
proto2.MergeFromString(serialized)
def _CheckRaises(self, exc_class, callable_obj, exception):
"""This method checks if the excpetion type and message are as expected."""
try:
callable_obj()
except exc_class, ex:
# Check if the exception message is the right one.
self.assertEqual(exception, str(ex))
return
else:
raise self.failureException('%s not raised' % str(exc_class))
def testSerializeUninitialized(self):
proto = unittest_pb2.TestRequired()
self._CheckRaises(
message.EncodeError,
proto.SerializeToString,
'Required field protobuf_unittest.TestRequired.a is not set.')
# Shouldn't raise exceptions.
partial = proto.SerializePartialToString()
proto.a = 1
self._CheckRaises(
message.EncodeError,
proto.SerializeToString,
'Required field protobuf_unittest.TestRequired.b is not set.')
# Shouldn't raise exceptions.
partial = proto.SerializePartialToString()
proto.b = 2
self._CheckRaises(
message.EncodeError,
proto.SerializeToString,
'Required field protobuf_unittest.TestRequired.c is not set.')
# Shouldn't raise exceptions.
partial = proto.SerializePartialToString()
proto.c = 3
serialized = proto.SerializeToString()
# Shouldn't raise exceptions.
partial = proto.SerializePartialToString()
proto2 = unittest_pb2.TestRequired()
proto2.MergeFromString(serialized)
self.assertEqual(1, proto2.a)
self.assertEqual(2, proto2.b)
self.assertEqual(3, proto2.c)
proto2.ParseFromString(partial)
self.assertEqual(1, proto2.a)
self.assertEqual(2, proto2.b)
self.assertEqual(3, proto2.c)
class OptionsTest(unittest.TestCase):
def testMessageOptions(self):
proto = unittest_mset_pb2.TestMessageSet()
self.assertEqual(True,
proto.DESCRIPTOR.GetOptions().message_set_wire_format)
proto = unittest_pb2.TestAllTypes()
self.assertEqual(False,
proto.DESCRIPTOR.GetOptions().message_set_wire_format)
class UtilityTest(unittest.TestCase):
def testImergeSorted(self):
ImergeSorted = reflection._ImergeSorted
# Various types of emptiness.
self.assertEqual([], list(ImergeSorted()))
self.assertEqual([], list(ImergeSorted([])))
self.assertEqual([], list(ImergeSorted([], [])))
# One nonempty list.
self.assertEqual([1, 2, 3], list(ImergeSorted([1, 2, 3])))
self.assertEqual([1, 2, 3], list(ImergeSorted([1, 2, 3], [])))
self.assertEqual([1, 2, 3], list(ImergeSorted([], [1, 2, 3])))
# Merging some nonempty lists together.
self.assertEqual([1, 2, 3], list(ImergeSorted([1, 3], [2])))
self.assertEqual([1, 2, 3], list(ImergeSorted([1], [3], [2])))
self.assertEqual([1, 2, 3], list(ImergeSorted([1], [3], [2], [])))
# Elements repeated across component iterators.
self.assertEqual([1, 2, 2, 3, 3],
list(ImergeSorted([1, 2], [3], [2, 3])))
# Elements repeated within an iterator.
self.assertEqual([1, 2, 2, 3, 3],
list(ImergeSorted([1, 2, 2], [3], [3])))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 6,969,223,157,646,569,000 | 40.338261 | 80 | 0.715735 | false |
ctjacobs/pyqso | pyqso/log.py | 1 | 15133 | #!/usr/bin/env python3
# Copyright (C) 2013-2017 Christian Thomas Jacobs.
# This file is part of PyQSO.
# PyQSO is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyQSO is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyQSO. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gtk
import logging
import sqlite3 as sqlite
from pyqso.adif import AVAILABLE_FIELD_NAMES_ORDERED
class Log(Gtk.ListStore):
""" A single log inside of the whole logbook. A Log object can store multiple records. This is """
def __init__(self, connection, name):
""" Set up a new Log object.
:arg connection: An sqlite database connection.
:arg str name: The name of the log (i.e. the database table name).
"""
# The ListStore constructor needs to know the data types of the columns.
# The index is always an integer. We will assume the fields are strings.
data_types = [int] + [str]*len(AVAILABLE_FIELD_NAMES_ORDERED)
# Call the constructor of the super class (Gtk.ListStore).
Gtk.ListStore.__init__(self, *data_types)
self.connection = connection
self.name = name
return
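# Minimal usage sketch (assumes an open sqlite3 connection and an existing table
# named "my_log" whose columns match the ADIF field names):
#   connection = sqlite.connect("logbook.db")
#   log = Log(connection, "my_log")
#   log.populate()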
def populate(self):
""" Remove everything in the Gtk.ListStore that is rendered already (via the TreeView), and start afresh. """
logging.debug("Populating '%s'..." % self.name)
self.add_missing_db_columns()
self.clear()
try:
records = self.records
for r in records:
liststore_entry = [r["id"]]
for field_name in AVAILABLE_FIELD_NAMES_ORDERED:
# Note: r may contain column names that are not in AVAILABLE_FIELD_NAMES_ORDERED,
# so we need to loop over and only select those that are, since the ListStore will
# expect a specific number of columns.
liststore_entry.append(r[field_name])
self.append(liststore_entry)
logging.debug("Finished populating '%s'." % self.name)
except sqlite.Error as e:
logging.error("Could not populate '%s' because of a database error." % self.name)
logging.exception(e)
return
def add_missing_db_columns(self):
""" Check whether each field name in AVAILABLE_FIELD_NAMES_ORDERED is in the database table. If not, add it
(with all entries being set to an empty string initially).
:raises sqlite.Error, IndexError: If the existing database column names could not be obtained, or missing column names could not be added.
"""
logging.debug("Adding any missing database columns...")
# Get all the column names in the current database table.
column_names = []
try:
with self.connection:
c = self.connection.cursor()
c.execute("PRAGMA table_info(%s)" % self.name)
result = c.fetchall()
for t in result:
column_names.append(t[1].upper())
except (sqlite.Error, IndexError) as e:
logging.exception(e)
logging.error("Could not obtain the database column names.")
return
for field_name in AVAILABLE_FIELD_NAMES_ORDERED:
if(not(field_name in column_names)):
try:
with self.connection:
c.execute("ALTER TABLE %s ADD COLUMN %s TEXT DEFAULT \"\"" % (self.name, field_name.lower()))
except sqlite.Error as e:
logging.exception(e)
logging.error("Could not add the missing database column '%s'." % field_name)
pass
logging.debug("Finished adding any missing database columns.")
return
def add_record(self, fields_and_data):
""" Add a record (or multiple records) to the log.
:arg fields_and_data: A list of dictionaries (or possibly just a single dictionary), with each dictionary representing a single QSO, to be added to the log.
"""
logging.debug("Adding record(s) to log...")
# If a dictionary is given, assume that we only have one record to add.
if isinstance(fields_and_data, dict):
fields_and_data = [fields_and_data]
with self.connection:
c = self.connection.cursor()
# Get all the column names in the current database table.
c.execute("PRAGMA table_info(%s)" % self.name)
column_names = c.fetchall()
# Get the index/rowid of the last inserted record in the database.
c.execute("SELECT max(id) FROM %s" % self.name)
last_index = c.fetchone()[0]
if last_index is None:
# Assume no records are currently present.
last_index = 0
# A list of all the database entries, to be inserted in one go into the database.
database_entries = []
# Construct the SQL query.
query = "INSERT INTO %s VALUES (NULL" % self.name
for i in range(len(column_names)-1): # -1 here because we don't want to count the database's 'id' column, since this is autoincremented.
query = query + ",?"
query = query + ")"
# Gather all the records (making sure that the entries of each record are in the correct order).
for r in range(len(fields_and_data)):
# What if the database columns are not necessarily in the same order as (or even exist in) AVAILABLE_FIELD_NAMES_ORDERED?
# PyQSO handles this here, but needs a separate list (called database_entry) to successfully perform the SQL query.
database_entry = []
for t in column_names:
column_name = str(t[1]) # 't' here is a tuple
if((column_name.upper() in AVAILABLE_FIELD_NAMES_ORDERED) and (column_name.upper() in list(fields_and_data[r].keys()))):
database_entry.append(fields_and_data[r][column_name.upper()])
else:
if(column_name != "id"): # Ignore the index/rowid field. This is a special case since it's not in AVAILABLE_FIELD_NAMES_ORDERED.
database_entry.append("")
database_entries.append(database_entry)
# Insert records in the database.
with self.connection:
c = self.connection.cursor()
c.executemany(query, database_entries)
# Get the indices/rowids of the newly-inserted records.
query = "SELECT id FROM %s WHERE id > %s ORDER BY id ASC" % (self.name, last_index)
c.execute(query)
inserted = c.fetchall()
# Check that the number of records we wanted to insert is the same as the number of records successfully inserted.
assert(len(inserted) == len(database_entries))
# Add the records to the ListStore as well.
for r in range(len(fields_and_data)):
liststore_entry = [inserted[r]["id"]] # Add the record's index.
field_names = AVAILABLE_FIELD_NAMES_ORDERED
for i in range(0, len(field_names)):
if(field_names[i] in list(fields_and_data[r].keys())):
liststore_entry.append(fields_and_data[r][field_names[i]])
else:
liststore_entry.append("")
self.append(liststore_entry)
logging.debug("Successfully added the record(s) to the log.")
return
def delete_record(self, index, iter=None):
""" Delete a specified record from the log. The corresponding record is also deleted from the Gtk.ListStore data structure.
:arg int index: The index of the record in the SQL database.
:arg iter: The iterator pointing to the record to be deleted in the Gtk.ListStore. If the default value of None is used, only the database entry is deleted and the corresponding Gtk.ListStore is left alone.
:raises sqlite.Error, IndexError: If the record could not be deleted.
"""
logging.debug("Deleting record from log...")
# Delete the selected row in database.
with self.connection:
c = self.connection.cursor()
query = "DELETE FROM %s" % self.name
c.execute(query+" WHERE id=?", [index])
# Delete the selected row in the Gtk.ListStore.
if(iter is not None):
self.remove(iter)
logging.debug("Successfully deleted the record from the log.")
return
def edit_record(self, index, field_name, data, iter=None, column_index=None):
""" Edit a specified record by replacing the current data in a specified field with the data provided.
:arg int index: The index of the record in the SQL database.
:arg str field_name: The name of the field whose data should be modified.
:arg str data: The data that should replace the current data in the field.
:arg iter: The iterator pointing to the record to be edited in the Gtk.ListStore. If the default value of None is used, only the database entry is edited and the corresponding Gtk.ListStore is left alone.
:arg column_index: The index of the column in the Gtk.ListStore to be edited. If the default value of None is used, only the database entry is edited and the corresponding Gtk.ListStore is left alone.
:raises sqlite.Error, IndexError: If the record could not be edited.
"""
logging.debug("Editing field '%s' in record %d..." % (field_name, index))
with self.connection:
c = self.connection.cursor()
query = "UPDATE %s SET %s" % (self.name, field_name)
query = query + "=? WHERE id=?"
c.execute(query, [data, index]) # First update the SQL database...
if(iter is not None and column_index is not None):
self.set(iter, column_index, data) # ...and then the ListStore.
logging.debug("Successfully edited field '%s' in record %d in the log." % (field_name, index))
return
def remove_duplicates(self):
""" Remove any duplicate records from the log.
:returns: The total number of duplicates, and the number of duplicates that were successfully removed. Hopefully these will be the same.
:rtype: tuple
"""
duplicates = self.get_duplicates()
if(len(duplicates) == 0):
return (0, 0) # Nothing to do here.
removed = 0 # Count the number of records that are removed. Hopefully this will be the same as len(duplicates).
iter = self.get_iter_first() # Start with the first row in the log.
prev = iter # Keep track of the previous iter (initially this will be the same as the first row in the log).
while iter is not None:
row_index = self.get_value(iter, 0) # Get the index.
if(row_index in duplicates): # Is this a duplicate row? If so, delete it.
self.delete_record(row_index, iter)
removed += 1
iter = prev # Go back to the iter before the record that was just removed and continue from there.
continue
prev = iter
iter = self.iter_next(iter) # Move on to the next row, until iter_next returns None.
return (len(duplicates), removed)
def rename(self, new_name):
""" Rename the log.
:arg str new_name: The new name for the log.
:returns: True if the renaming process is successful. Otherwise returns False.
:rtype: bool
"""
try:
with self.connection:
# First try to alter the table name in the database.
c = self.connection.cursor()
query = "ALTER TABLE %s RENAME TO %s" % (self.name, new_name)
c.execute(query)
# If the table name change was successful, then change the name attribute of the Log object too.
self.name = new_name
success = True
except sqlite.Error as e:
logging.exception(e)
success = False
return success
def get_duplicates(self):
""" Find the duplicates in the log, based on the CALL, QSO_DATE, and TIME_ON fields.
:returns: A list of indices/ids corresponding to the duplicate records.
:rtype: list
"""
duplicates = []
try:
with self.connection:
c = self.connection.cursor()
c.execute(
"""SELECT id FROM %s WHERE id NOT IN
(
SELECT MIN(id) FROM %s GROUP BY call, qso_date, time_on
)""" % (self.name, self.name))
result = c.fetchall()
for index in result:
duplicates.append(index[0]) # Get the integer from inside the tuple.
                duplicates.sort() # These indices should be monotonically increasing, but let's sort the list just in case.
except (sqlite.Error, IndexError) as e:
logging.exception(e)
return duplicates
def get_record_by_index(self, index):
""" Return a record with a given index in the log.
:arg int index: The index of the record in the SQL database.
:returns: The desired record, represented by a dictionary of field-value pairs.
:rtype: dict
:raises sqlite.Error: If the record could not be retrieved from the database.
"""
with self.connection:
c = self.connection.cursor()
query = "SELECT * FROM %s WHERE id=?" % self.name
c.execute(query, [index])
return c.fetchone()
@property
def records(self):
""" Return a list of all the records in the log.
:returns: A list of all the records in the log. Each record is represented by a dictionary.
        :rtype: list
:raises sqlite.Error: If the records could not be retrieved from the database.
"""
with self.connection:
c = self.connection.cursor()
c.execute("SELECT * FROM %s" % self.name)
return c.fetchall()
@property
def record_count(self):
""" Return the total number of records in the log.
:returns: The total number of records in the log.
:rtype: int
:raises sqlite.Error: If the record count could not be determined due to a database error.
"""
with self.connection:
c = self.connection.cursor()
c.execute("SELECT Count(*) FROM %s" % self.name)
return c.fetchone()[0]
| gpl-3.0 | -7,163,945,879,374,536,000 | 44.308383 | 214 | 0.606489 | false |
kloper/pato | python/test/__init__.py | 1 | 2750 | # -*- python -*-
"""@file
@brief Common test stuff
Copyright (c) 2014-2015 Dimitry Kloper <[email protected]>.
All rights reserved.
@page License
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation
are those of the authors and should not be interpreted as representing
official policies, either expressed or implied, of the Pato Project.
"""
import os
import sys
import datetime
import logging
import logging.config
logging.config.dictConfig(
{ 'version': 1,
'disable_existing_loggers': True,
'formatters': {
'standard': {
'format': '%(asctime)s:%(levelname)s:'
'%(filename)s:%(lineno)d: %(message)s'
},
},
'handlers': {
'default': {
'level': 'DEBUG',
'class': "logging.StreamHandler",
'stream': sys.stdout,
'formatter': 'standard'
},
'file': {
'level': 'DEBUG',
'class': "logging.FileHandler",
'filename': "{}-{}.log".\
format( __file__, datetime.datetime.now().\
strftime("%d%m%Y-%H%M%S")),
'formatter': 'standard'
},
},
'loggers': {
'default': {
'handlers': ['default', 'file'],
'level': 'DEBUG',
'propagate': False
},
},
}
)
logger = logging.getLogger('default')
| bsd-2-clause | 4,933,677,889,533,100,000 | 32.536585 | 70 | 0.637455 | false |
jalr/privacyidea | privacyidea/lib/tokens/yubicotoken.py | 1 | 7718 | # -*- coding: utf-8 -*-
#
# privacyIDEA is a fork of LinOTP
# May 08, 2014 Cornelius Kölbel
# License: AGPLv3
# contact: http://www.privacyidea.org
#
# 2016-04-04 Cornelius Kölbel <[email protected]>
# Use central yubico_api_signature function
# 2015-01-28 Rewrite during flask migration
# Change to use requests module
# Cornelius Kölbel <[email protected]>
#
#
# Copyright (C) 2010 - 2014 LSE Leading Security Experts GmbH
# License: LSE
# contact: http://www.linotp.org
# http://www.lsexperts.de
# [email protected]
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__doc__ = """
This is the implementation of the yubico token type.
Authentication requests are forwarded to the Yubico Cloud service YubiCloud.
The code is tested in tests/test_lib_tokens_yubico
"""
import logging
from privacyidea.lib.decorators import check_token_locked
import traceback
import requests
from privacyidea.api.lib.utils import getParam
from privacyidea.lib.config import get_from_config
from privacyidea.lib.log import log_with
from privacyidea.lib.tokenclass import TokenClass
from privacyidea.lib.tokens.yubikeytoken import (yubico_check_api_signature,
yubico_api_signature)
import os
import binascii
YUBICO_LEN_ID = 12
YUBICO_LEN_OTP = 44
YUBICO_URL = "https://api.yubico.com/wsapi/2.0/verify"
DEFAULT_CLIENT_ID = 20771
DEFAULT_API_KEY = "9iE9DRkPHQDJbAFFC31/dum5I54="
optional = True
required = False
log = logging.getLogger(__name__)
class YubicoTokenClass(TokenClass):
def __init__(self, db_token):
TokenClass.__init__(self, db_token)
self.set_type(u"yubico")
self.tokenid = ""
@staticmethod
def get_class_type():
return "yubico"
@staticmethod
def get_class_prefix():
return "UBCM"
@staticmethod
@log_with(log)
def get_class_info(key=None, ret='all'):
"""
:param key: subsection identifier
:type key: string
:param ret: default return value, if nothing is found
:type ret: user defined
:return: subsection if key exists or user defined
:rtype: dict or string
"""
res = {'type': 'yubico',
'title': 'Yubico Token',
'description': 'Yubikey Cloud mode: Forward authentication '
'request to YubiCloud.',
'init': {'page': {'html': 'yubicotoken.mako',
'scope': 'enroll', },
'title': {'html': 'yubicotoken.mako',
'scope': 'enroll.title'}
},
'config': {'page': {'html': 'yubicotoken.mako',
'scope': 'config'},
'title': {'html': 'yubicotoken.mako',
'scope': 'config.title'}
},
'user': ['enroll'],
# This tokentype is enrollable in the UI for...
'ui_enroll': ["admin", "user"],
'policy' : {},
}
if key is not None and key in res:
ret = res.get(key)
else:
if ret == 'all':
ret = res
return ret
def update(self, param):
tokenid = getParam(param, "yubico.tokenid", required)
if len(tokenid) < YUBICO_LEN_ID:
log.error("The tokenid needs to be {0:d} characters long!".format(YUBICO_LEN_ID))
raise Exception("The Yubikey token ID needs to be {0:d} characters long!".format(YUBICO_LEN_ID))
if len(tokenid) > YUBICO_LEN_ID:
tokenid = tokenid[:YUBICO_LEN_ID]
self.tokenid = tokenid
        # overwrite the possibly wrong length given at the command line
param['otplen'] = 44
TokenClass.update(self, param)
self.add_tokeninfo("yubico.tokenid", self.tokenid)
@log_with(log)
@check_token_locked
def check_otp(self, anOtpVal, counter=None, window=None, options=None):
"""
Here we contact the Yubico Cloud server to validate the OtpVal.
"""
res = -1
apiId = get_from_config("yubico.id", DEFAULT_CLIENT_ID)
apiKey = get_from_config("yubico.secret", DEFAULT_API_KEY)
yubico_url = get_from_config("yubico.url", YUBICO_URL)
if apiKey == DEFAULT_API_KEY or apiId == DEFAULT_CLIENT_ID:
log.warning("Usage of default apiKey or apiId not recommended!")
log.warning("Please register your own apiKey and apiId at "
"yubico website!")
log.warning("Configure of apiKey and apiId at the "
"privacyidea manage config menu!")
tokenid = self.get_tokeninfo("yubico.tokenid")
if len(anOtpVal) < 12:
log.warning("The otpval is too short: {0!r}".format(anOtpVal))
elif anOtpVal[:12] != tokenid:
log.warning("The tokenid in the OTP value does not match "
"the assigned token!")
else:
nonce = binascii.hexlify(os.urandom(20))
p = {'nonce': nonce,
'otp': anOtpVal,
'id': apiId}
# Also send the signature to the yubico server
p["h"] = yubico_api_signature(p, apiKey)
try:
r = requests.post(yubico_url,
data=p)
if r.status_code == requests.codes.ok:
response = r.text
elements = response.split()
data = {}
for elem in elements:
k, v = elem.split("=", 1)
data[k] = v
result = data.get("status")
return_nonce = data.get("nonce")
# check signature:
signature_valid = yubico_check_api_signature(data, apiKey)
                    if not signature_valid:
log.error("The hash of the return from the Yubico "
"Cloud server does not match the data!")
if nonce != return_nonce:
log.error("The returned nonce does not match "
"the sent nonce!")
if result == "OK":
res = 1
if nonce != return_nonce or not signature_valid:
log.warning("Nonce and Hash do not match.")
res = -2
else:
# possible results are listed here:
# https://github.com/Yubico/yubikey-val/wiki/ValidationProtocolV20
log.warning("failed with {0!r}".format(result))
except Exception as ex:
log.error("Error getting response from Yubico Cloud Server"
" (%r): %r" % (yubico_url, ex))
log.debug("{0!s}".format(traceback.format_exc()))
return res
| agpl-3.0 | -2,784,637,375,835,404,300 | 36.818627 | 108 | 0.552301 | false |
jelly/calibre | src/calibre/utils/run_tests.py | 2 | 5027 | #!/usr/bin/env python2
# vim:fileencoding=utf-8
# License: GPLv3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
from __future__ import (unicode_literals, division, absolute_import,
print_function)
import unittest, functools, os, importlib, zipfile
from calibre.utils.monotonic import monotonic
def no_endl(f):
@functools.wraps(f)
def func(*args, **kwargs):
self = f.__self__
orig = self.stream.writeln
self.stream.writeln = self.stream.write
try:
return f(*args, **kwargs)
finally:
self.stream.writeln = orig
return func
class TestResult(unittest.TextTestResult):
def __init__(self, *args, **kwargs):
super(TestResult, self).__init__(*args, **kwargs)
self.start_time = {}
for x in ('Success', 'Error', 'Failure', 'Skip', 'ExpectedFailure', 'UnexpectedSuccess'):
x = 'add' + x
setattr(self, x, no_endl(getattr(self, x)))
self.times = {}
def startTest(self, test):
self.start_time[test] = monotonic()
return super(TestResult, self).startTest(test)
def stopTest(self, test):
orig = self.stream.writeln
self.stream.writeln = self.stream.write
super(TestResult, self).stopTest(test)
elapsed = monotonic()
elapsed -= self.start_time.get(test, elapsed)
self.times[test] = elapsed
self.stream.writeln = orig
self.stream.writeln(' [%.1g s]' % elapsed)
def stopTestRun(self):
super(TestResult, self).stopTestRun()
if self.wasSuccessful():
tests = sorted(self.times, key=self.times.get, reverse=True)
slowest = ['%s [%g s]' % (t.id(), self.times[t]) for t in tests[:3]]
if len(slowest) > 1:
self.stream.writeln('\nSlowest tests: %s' % ' '.join(slowest))
def find_tests_in_dir(path, excludes=('main.py',)):
if not os.path.exists(path) and '.zip' in path:
idx = path.rfind('.zip')
zf = path[:idx+4]
prefix = os.path.relpath(path, zf).replace(os.sep, '/')
package = prefix.replace('/', '.')
with zipfile.ZipFile(zf) as f:
namelist = f.namelist()
items = [i for i in namelist if i.startswith(prefix) and i.count('/') == prefix.count('/') + 1]
else:
d = os.path.dirname
base = d(d(d(os.path.abspath(__file__))))
package = os.path.relpath(path, base).replace(os.sep, '/').replace('/', '.')
items = os.listdir(path)
suits = []
for x in items:
if x.endswith('.py') and x not in excludes:
m = importlib.import_module(package + '.' + x.partition('.')[0])
suits.append(unittest.defaultTestLoader.loadTestsFromModule(m))
return unittest.TestSuite(suits)
def itertests(suite):
stack = [suite]
while stack:
suite = stack.pop()
for test in suite:
if isinstance(test, unittest.TestSuite):
stack.append(test)
continue
if test.__class__.__name__ == 'ModuleImportFailure':
raise Exception('Failed to import a test module: %s' % test)
yield test
def init_env():
from calibre.utils.config_base import reset_tweaks_to_default
from calibre.ebooks.metadata.book.base import reset_field_metadata
reset_tweaks_to_default()
reset_field_metadata()
def filter_tests(suite, test_ok):
ans = unittest.TestSuite()
added = set()
for test in itertests(suite):
if test_ok(test) and test not in added:
ans.addTest(test)
added.add(test)
return ans
def filter_tests_by_name(suite, *names):
names = {x if x.startswith('test_') else 'test_' + x for x in names}
def q(test):
return test._testMethodName in names
return filter_tests(suite, q)
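# Illustrative usage (not in the original source): both calls select the same test
# method, mirroring the ".many_many_basic" CLI shortcut handled in run_tests() below:
#   filter_tests_by_name(suite, 'many_many_basic')
#   filter_tests_by_name(suite, 'test_many_many_basic')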
def filter_tests_by_module(suite, *names):
names = frozenset(names)
def q(test):
m = test.__class__.__module__.rpartition('.')[-1]
return m in names
return filter_tests(suite, q)
def run_tests(find_tests, verbosity=4):
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('name', nargs='?', default=None,
help='The name of the test to run, for e.g. writing.WritingTest.many_many_basic or .many_many_basic for a shortcut')
args = parser.parse_args()
tests = find_tests()
if args.name:
if args.name.startswith('.'):
tests = filter_tests_by_name(tests, args.name[1:])
else:
tests = filter_tests_by_module(tests, args.name)
if not tests._tests:
raise SystemExit('No test named %s found' % args.name)
run_cli(tests, verbosity)
def run_cli(suite, verbosity=4):
r = unittest.TextTestRunner
r.resultclass = unittest.TextTestResult if verbosity < 2 else TestResult
init_env()
result = r(verbosity=verbosity).run(suite)
if not result.wasSuccessful():
raise SystemExit(1)
| gpl-3.0 | -3,584,374,407,453,739,000 | 32.738255 | 140 | 0.597573 | false |
cachance7/BattleQuip | battlequip/util.py | 1 | 2470 | import collections
import re
def namedtuple_with_defaults(typename, field_names, default_values=[]):
T = collections.namedtuple(typename, field_names)
T.__new__.__defaults__ = (None,) * len(T._fields)
if isinstance(default_values, collections.Mapping):
prototype = T(**default_values)
else:
prototype = T(*default_values)
T.__new__.__defaults__ = tuple(prototype)
return T
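# Illustrative usage of namedtuple_with_defaults (not part of the original module):
# omitted fields fall back to None (or to whatever default_values supplies), e.g.
#   Status('in_progress')      -> Status(game_status='in_progress', my_turn=None)
#   Attack(coord=Coord(0, 0))  -> Attack(coord=Coord(row=0, col=0), hit=None, sunk=None)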
# Immutable battleship coordinate class
def _coord_as_string(self):
return chr(self.row + 65) + str(self.col + 1)
Coord = namedtuple_with_defaults('Coord', ['row', 'col'])
Coord.__str__ = _coord_as_string
Status = namedtuple_with_defaults('Status', ['game_status', 'my_turn'])
Attack = namedtuple_with_defaults('Attack', ['coord', 'hit', 'sunk'])
class InvalidCoordException(Exception):
def __init__(self, message):
super(InvalidCoordException, self).__init__()
self.message = message
class InvalidPositionException(Exception):
def __init__(self, message):
super(InvalidPositionException, self).__init__()
self.message = message
def make_coord(*raw_coord):
if len(raw_coord) == 1:
return _make_coord(raw_coord[0])
elif len(raw_coord) == 2:
return _make_coord(raw_coord)
def _make_coord(raw_coord):
if isinstance(raw_coord, Coord):
return raw_coord
elif isinstance(raw_coord, tuple):
# coord tuple must correspond to zero-based matrix (row, column)
if len(raw_coord) < 2:
raise InvalidCoordException("coord tuple must have 2 elements")
elif isinstance(raw_coord[0], str):
return make_coord(raw_coord[0] + str(raw_coord[1]))
return Coord(raw_coord[0], raw_coord[1])
elif isinstance(raw_coord, str):
# coord string is alpha & 1-based like 'B3' or 'c10'
if len(raw_coord) < 2:
raise InvalidCoordException("coord string must have 2+ elements")
row = raw_coord[0]
col = raw_coord[1:]
if re.match('[a-zA-Z]', row):
row = ord(row.upper()) - 65
else:
raise InvalidCoordException("coord elm 1 must be one alpha char")
try:
col = int(col) - 1
if col < 0:
raise Error
except:
raise InvalidCoordException("coord elm 2 must be column number >= 1")
return Coord(row, col)
else:
raise InvalidCoordException("Invalid format: " + type(raw_coord))
| mit | -7,643,095,019,502,245,000 | 33.305556 | 81 | 0.615385 | false |
m16Takahiro/kakeibo | kakeibo/settings.py | 1 | 3863 | """
Django settings for kakeibo project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import json
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
try:
with open(BASE_DIR + '/config/secret.json', 'r') as jsonfile:
dict_ = json.load(jsonfile)
SECRET_KEY = dict_['secret_key']
except FileNotFoundError:
    print('Using an insecure key; avoid running this configuration long-term.')
SECRET_KEY = 'foo'
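# The config/secret.json file read above is expected to look like this (illustrative value):
#   {"secret_key": "<a long random string>"}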
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['127.0.0.1', 'localhost']
# Application definition
INSTALLED_APPS = [
'out_view',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'kakeibo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'kakeibo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'ja-jp'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = BASE_DIR + '/static/'
LOGIN_URL = '/admin/login/'
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
CORS_ALLOW_CREDENTIALS = True
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = True
CSRF_TRUSTED_ORIGINS = ['lifelog.515hikaru.net']
CORS_REPLACE_HTTPS_REFERER = True
CSRF_COOKIE_DOMAIN = 'lifelog.515hikaru.net'
CORS_ORIGIN_WHITELIST = ('https://lifelog.515hikaru.net',
'lifelog.515hikaru.net', '515hikaru.net', )
| mit | -4,153,100,942,513,246,700 | 25.985816 | 91 | 0.684363 | false |
JulyKikuAkita/PythonPrac | cs15211/PermutationSequence.py | 1 | 9970 | __source__ = 'https://leetcode.com/problems/permutation-sequence/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/permutation-sequence.py
# Time: O(n)
# Space: O(1)
# Math
#
# Description: Leetcode # 60. Permutation Sequence
#
# The set [1,2,3,...,n] contains a total of n! unique permutations.
#
# By listing and labeling all of the permutations in order,
# We get the following sequence (ie, for n = 3):
#
# "123"
# "132"
# "213"
# "231"
# "312"
# "321"
# Given n and k, return the kth permutation sequence.
#
# Note: Given n will be between 1 and 9 inclusive.
#
# Companies
# Twitter
# Related Topics
# Backtracking Math
# Similar Questions
# Next Permutation Permutations
#
import math
import unittest
# Note:
# '''
# ord(c): Given a string of length one, return an integer representing the Unicode code point of the character
# ex: ord('a') = 97
# chr(i): Return a string of one character whose ASCII code is the integer i. For example, chr(97) returns the string 'a'.
# '''
# Cantor ordering solution
class Solution1:
# @return a string
def getPermutation(self, n, k):
seq, k, fact = "", k - 1, math.factorial(n - 1)
perm = [i for i in xrange(1, n + 1)]
for i in reversed(xrange(n)):
curr = perm[k / fact]
#print i, curr, k, fact, perm
seq += str(curr)
perm.remove(curr)
if i > 0:
k %= fact
fact /= i
return seq
def getCombination(self, n, k):
parentset = [i for i in xrange(1, n+1)]
result = []
space = 1 << n
for i in xrange(space):
k = i
index = 0
subset = []
while k:
if k & 1 > 0:
subset.append(parentset[index])
k >>= 1
index += 1
result.append(subset)
return result
#
# The idea is as follows:
#
# For permutations of n, the first (n-1)! permutations start with 1,
# next (n-1)! ones start with 2, ... and so on.
# And in each group of (n-1)! permutations, the first (n-2)! permutations start with the smallest remaining number, ...
#
# take n = 3 as an example, the first 2 (that is, (3-1)! )
# permutations start with 1, next 2 start with 2 and last 2 start with 3.
# Within those first 2 permutations (123 and 132), the first 1! permutations continue
# with 2, the smallest of the remaining numbers (2 and 3).
# So we can use a loop to check the region that the sequence number falls in and get the starting digit.
# Then we adjust the sequence number and continue.
#
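#
# Worked example (added for illustration, not in the original write-up):
# n = 3, k = 5.  Work with k-1 = 4 and the remaining digits [1, 2, 3]:
#   4 // 2! = 2 -> take '3', k becomes 4 % 2! = 0, remaining digits [1, 2]
#   0 // 1! = 0 -> take '1', k stays 0,            remaining digits [2]
#   0 // 0! = 0 -> take '2'
# giving "312", the 5th permutation of "123".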
# 24ms 74.01%
class Solution:
# @param {integer} n
# @param {integer} k
# @return {string}
def getPermutation(self, n, k):
numbers = range(1, n+1)
permutation = ''
k -= 1
while n > 0:
n -= 1
# get the index of current digit
index, k = divmod(k, math.factorial(n))
permutation += str(numbers[index])
# remove handled number
numbers.remove(numbers[index])
return permutation
class TestMethods(unittest.TestCase):
def test_Local(self):
#test
#test = SolutionOther()
#print test.getPermutation(4,2)
print Solution().getPermutation(3, 2)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought:
"Explain-like-I'm-five" Java Solution in O(n)
I'm sure somewhere can be simplified so it'd be nice if anyone can let me know. The pattern was that:
say n = 4, you have {1, 2, 3, 4}
If you were to list out all the permutations you have
1 + (permutations of 2, 3, 4)
2 + (permutations of 1, 3, 4)
3 + (permutations of 1, 2, 4)
4 + (permutations of 1, 2, 3)
We know how to calculate the number of permutations of n numbers... n!
So each of those with permutations of 3 numbers means there are 6 possible permutations.
Meaning there would be a total of 24 permutations in this particular one.
So if you were to look for the (k = 14) 14th permutation, it would be in the
3 + (permutations of 1, 2, 4) subset.
To programmatically get that, you take k = 13 (subtract 1 because of things always starting at 0)
and divide that by the 6 we got from the factorial, which would give you the index of the number you want.
In the array {1, 2, 3, 4}, k/(n-1)! = 13/(4-1)! = 13/3! = 13/6 = 2. The array {1, 2, 3, 4} has a value of 3 at index 2.
So the first number is a 3.
Then the problem repeats with less numbers.
The permutations of {1, 2, 4} would be:
1 + (permutations of 2, 4)
2 + (permutations of 1, 4)
4 + (permutations of 1, 2)
But our k is no longer the 14th, because in the previous step, we've already eliminated the 12 4-number permutations
starting with 1 and 2. So you subtract 12 from k.. which gives you 1. Programmatically that would be...
k = k - (index from previous) * (n-1)! = k - 2*(n-1)! = 13 - 2*(3)! = 1
In this second step, permutations of 2 numbers has only 2 possibilities, meaning each of the three permutations
listed above a has two possibilities, giving a total of 6. We're looking for the first one,
so that would be in the 1 + (permutations of 2, 4) subset.
Meaning: index to get number from is k / (n - 2)! = 1 / (4-2)! = 1 / 2! = 0.. from {1, 2, 4}, index 0 is 1
so the numbers we have so far is 3, 1... and then repeating without explanations.
{2, 4}
k = k - (index from previous) * (n-2)! = k - 0 * (n - 2)! = 1 - 0 = 1;
third number's index = k / (n - 3)! = 1 / (4-3)! = 1/ 1! = 1... from {2, 4}, index 1 has 4
Third number is 4
{2}
k = k - (index from previous) * (n - 3)! = k - 1 * (4 - 3)! = 1 - 1 = 0;
fourth number's index = k / (n - 4)! = 0 / (4-4)! = 0/ 1 = 0... from {2}, index 0 has 2
Fourth number is 2
Giving us 3142. If you manually list out the permutations using DFS method, it would be 3142. Done!
It really was all about pattern finding.
# 7ms 96.43%
class Solution {
public String getPermutation(int n, int k) {
int pos = 0;
List<Integer> numbers = new ArrayList<>();
int[] factorial = new int[n+1];
StringBuilder sb = new StringBuilder();
// create an array of factorial lookup
int sum = 1;
factorial[0] = 1;
for(int i=1; i<=n; i++){
sum *= i;
factorial[i] = sum;
}
// factorial[] = {1, 1, 2, 6, 24, ... n!}
// create a list of numbers to get indices
for(int i = 1; i <= n; i++){
numbers.add(i);
}
// numbers = {1, 2, 3, 4}
k--;
for(int i = 1; i <= n; i++){
int index = k / factorial[n-i];
sb.append(String.valueOf(numbers.get(index)));
numbers.remove(index);
k -= index * factorial[n-i];
}
return String.valueOf(sb);
}
}
Thought:
The logic is as follows:
for n numbers the permutations can be divided into (n-1)! groups,
for n-1 numbers they can be divided into (n-2)! groups, and so on.
Thus k/(n-1)! gives the index of the current digit, and k%(n-1)! is the remaining index within the remaining n-1 numbers.
We keep doing this until n reaches 0, at which point we have built the kth permutation of the n numbers.
# 7ms 96.43%
class Solution {
public String getPermutation(int n, int k) {
List<Integer> nums = new ArrayList<>(n);
int factor = 1;
for (int i = 0; i < n; i++) {
nums.add(i + 1);
factor *= i + 1;
}
k--;
k %= factor;
factor /= n;
StringBuilder sb = new StringBuilder();
for (int i = n - 1; i >= 0; i--) {
int curr = k / factor;
sb.append((char) ('0' + nums.get(curr)));
nums.remove(curr);
k %= factor;
factor = i == 0 ? factor : factor / i;
}
return sb.toString();
}
}
# 7ms 96.43%
class Solution {
public String getPermutation(int n, int k) {
ArrayList<Integer> temp = new ArrayList<>();
// a factorial table
int[] factorial = new int[n];
factorial[0] = 1;
for(int i = 1; i < n; i++) {
factorial[i] = factorial[i-1] * i;
}
for(int i = 1; i <= n; i++) {
temp.add(i);
}
return permutation(temp, k, factorial);
}
public String permutation(ArrayList<Integer> temp, int k, int[] factorial) {
// do until list is empty and you return nothing
if (temp.size() == 0) {
return "";
}
int number = (k-1)/factorial[temp.size()-1];
//System.out.println(number);
String s = Integer.toString(temp.get(number));
k = k - number*factorial[temp.size()-1];
temp.remove(number);
return s + permutation(temp, k, factorial);
}
}
# 9ms 70.43%
class Solution {
public String getPermutation(int n, int k) {
StringBuilder sb = new StringBuilder();
boolean[] used = new boolean[n];
k = k - 1; //idx start with 0
int factor = 1; //factor is to refer (n-1)! permutations
for (int i = 2; i < n; i++) {
factor *= i;
}
for (int i = 0; i < n; i++) {
int index = k / factor; //find index of the highest digit in the list
k = k % factor; //update k for every loop
for (int j = 0; j < n; j++) { //compute the insert index
if (used[j] == false) {
if (index == 0) { //when index == 0 j is the index to add new number
used[j] = true;
sb.append((char) ('0' + j + 1));
break;
} else {
index--;
}
}
}
if (i < n - 1) {
//(n - 1)! -> ((n - 1) - 1)! //the first digi has been added, shrink the possibitility of perm
factor = factor / (n - 1 - i);
}
}
return sb.toString();
}
}
'''
| apache-2.0 | 6,555,002,077,032,687,000 | 29.215152 | 121 | 0.559679 | false |
ltowarek/budget-supervisor | third_party/saltedge/swagger_client/models/removed_customer_response_data.py | 1 | 3772 | # coding: utf-8
"""
Salt Edge Account Information API
API Reference for services # noqa: E501
OpenAPI spec version: 5.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RemovedCustomerResponseData(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'deleted': 'bool',
'id': 'int'
}
attribute_map = {
'deleted': 'deleted',
'id': 'id'
}
def __init__(self, deleted=True, id=None): # noqa: E501
"""RemovedCustomerResponseData - a model defined in Swagger""" # noqa: E501
self._deleted = None
self._id = None
self.discriminator = None
if deleted is not None:
self.deleted = deleted
if id is not None:
self.id = id
@property
def deleted(self):
"""Gets the deleted of this RemovedCustomerResponseData. # noqa: E501
:return: The deleted of this RemovedCustomerResponseData. # noqa: E501
:rtype: bool
"""
return self._deleted
@deleted.setter
def deleted(self, deleted):
"""Sets the deleted of this RemovedCustomerResponseData.
:param deleted: The deleted of this RemovedCustomerResponseData. # noqa: E501
:type: bool
"""
self._deleted = deleted
@property
def id(self):
"""Gets the id of this RemovedCustomerResponseData. # noqa: E501
:return: The id of this RemovedCustomerResponseData. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this RemovedCustomerResponseData.
:param id: The id of this RemovedCustomerResponseData. # noqa: E501
:type: int
"""
self._id = id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(RemovedCustomerResponseData, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RemovedCustomerResponseData):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| mit | -6,301,179,382,600,332,000 | 26.735294 | 86 | 0.552227 | false |
iofun/spider | spider/system/campaigns.py | 1 | 5125 | # -*- coding: utf-8 -*-
'''
Spider campaigns system logic.
'''
# This file is part of spider.
# Distributed under the terms of the last AGPL License.
# The full license is in the file LICENCE, distributed as part of this software.
__author__ = 'Team Machine'
import arrow
import motor
import uuid
import logging
from tornado import gen
from spider.messages import campaigns
#from spider.messages import inbound
from spider.tools import clean_structure, clean_results
class Campaigns(object):
'''
Spider campaigns
'''
@gen.coroutine
def get_campaign_list(self, account, status, page_num):
'''
Get campaign list
'''
page_num = int(page_num)
page_size = self.settings.get('page_size')
campaign_list = []
find_query = {'account':account}
logging.info(status)
if status != 'all':
find_query['status'] = status
query = self.db.campaigns.find(find_query, {'_id':0})
q = query.sort([('_id', -1)]).skip(int(page_num) * page_size).limit(page_size)
try:
while (yield query.fetch_next):
campaign = q.next_object()
campaign_list.append(campaign)
except Exception, e:
logging.exception(e)
raise gen.Return(e)
finally:
raise gen.Return(campaign_list)
@gen.coroutine
def get_campaign(self, account, campaign_uuid):
'''
Get campaign
'''
message = None
try:
result = yield self.db.campaigns.find_one(
{
'account':account,
'uuid':campaign_uuid},
{'_id':0}
)
if result:
campaign = campaigns.Campaign(result)
campaign.validate()
message = clean_structure(campaign)
except Exception, e:
logging.exception(e)
raise e
finally:
raise gen.Return(message)
@gen.coroutine
def new_campaign(self, struct):
'''
New campaign
'''
try:
campaign = campaigns.Campaign(struct)
campaign.validate()
campaign = clean_structure(campaign)
except Exception, e:
logging.error(e)
raise e
try:
result = yield self.db.campaigns.insert(campaign)
message = campaign.get('uuid')
except Exception, e:
logging.error(e)
message = str(e)
raise gen.Return(message)
@gen.coroutine
def modify_campaign(self, account, campaign_uuid, struct):
'''
Modify campaign
'''
try:
campaign = campaigns.ModifyCampaign(struct)
campaign.validate()
campaign = clean_structure(campaign)
except Exception, e:
logging.error(e)
raise e
logging.error(campaign)
try:
result = yield self.db.campaigns.update(
{'account':account,
'uuid':campaign_uuid},
{'$set':campaign}
)
logging.info(result)
except Exception, e:
logging.error(e)
message = str(e)
raise gen.Return(bool(result.get('n')))
@gen.coroutine
def replace_campaign(self, account, campaign_uuid, struct):
'''
Replace campaign
'''
try:
campaign = campaigns.Campaign(struct)
campaign.validate()
campaign = clean_structure(campaign)
except Exception, e:
logging.error(e)
raise e
try:
result = yield self.db.campaigns.update(
{'account':account,
'uuid':campaign_uuid},
{'$set':campaign}
)
logging.info(result)
except Exception, e:
logging.error(e)
message = str(e)
raise gen.Return(bool(result.get('n')))
@gen.coroutine
def remove_campaign(self, account, campaign_uuid):
'''
Remove campaign
'''
message = None
try:
message = yield self.db.campaigns.remove(
{'account':account, 'uuid':campaign_uuid}
)
except Exception, e:
logging.error(e)
message = str(e)
raise gen.Return(bool(message.get('n')))
# check_exist needs some testing.
@gen.coroutine
def check_exist(self, campaign_uuid):
'''
Check if a given campaign exist
'''
try:
exist = yield self.db.campaigns.find_one(
{'uuid': campaign_uuid},
{'uuid':1,
'name':1,
'_id':0})
exist = (True if exist else False)
except Exception, e:
logging.error(e)
message = str(e)
        raise gen.Return(exist)
| agpl-3.0 | 1,334,203,877,906,721,300 | 24.758794 | 86 | 0.50478 | false |
dksr/graph_utils | weisfeiler_lehman_graph_isomorphism_test.py | 1 | 1864 | from igraph import Graph
def weisfeiler_lehman_graph_isomorphism_test(G1,G2):
""" Performs the Weisfeiler-Lehman test of Isomorphism:
Weisfeiler and Lehman: A reduction of a graph to a canonical form
and an algebra arising during this reduction,
Nauchno-Technicheskaya Informatsiya, Ser. 2, no. 9 (1968), 12-16 (in Russian).
I used this paper to implement the algorithm:
Nino Shervashidze et.al.: Weisfeiler-Lehman Graph Kernels, Journal of Machine Learning Research (2011)
"""
name_codes = {}
    MAX_ITERS = 2
    node_count = 0
    compressed_labels = {}
    for h in range(MAX_ITERS):
multi_set_labels = {}
for g in [G1, G2]:
multi_set_labels[g['name']] = {}
for node in g.vs:
neighbours = g.neighbors(node)
                labels = g.vs[neighbours]['name']
                labels.sort()
                new_node_name = node['name'] + '_'.join(labels)
multi_set_labels[g['name']][node.index] = new_node_name
if new_node_name not in compressed_labels:
compressed_labels[new_node_name] = 'c' + repr(node_count)
print new_node_name, compressed_labels[new_node_name]
node_count += 1
for g in [G1, G2]:
for node in g.vs:
node['name'] = compressed_labels[multi_set_labels[g['name']][node.index]]
if __name__ == '__main__':
G1 = Graph([(0,4), (1,4), (4,5), (3,5), (3,4), (2,5), (2,3)])
G2 = Graph([(0,4), (1,3), (4,5), (3,5), (3,4), (2,5), (2,4)])
G1.vs["name"] = ["1", "1", "2", "3", "4", "5"]
G2.vs["name"] = ["1", "2", "2", "3", "4", "5"]
G1["name"] = 'G1'
G2["name"] = 'G2'
weisfeiler_lehman_graph_isomorphism_test(G1, G2)
print G1
    print G2
| mit | -2,112,818,726,911,263,500 | 37.854167 | 106 | 0.521459 | false |
jnewland/ha-config | custom_components/senseme/config_flow.py | 1 | 2995 | """Config flow for SenseME."""
import ipaddress
import voluptuous as vol
from aiosenseme import async_get_device_by_ip_address, discover_all
from homeassistant import config_entries
from homeassistant.const import CONF_HOST
from .const import CONF_HOST_MANUAL, CONF_INFO, DOMAIN
DISCOVER_TIMEOUT = 5
class SensemeFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle SenseME discovery config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
def __init__(self) -> None:
"""Initialize the SenseME config flow."""
self._discovered_devices = None
async def _async_entry_for_device(self, device):
"""Create a config entry for a device."""
await self.async_set_unique_id(device.uuid)
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=device.name,
data={CONF_INFO: device.get_device_info},
)
async def async_step_manual(self, user_input=None):
"""Handle manual entry of an ip address."""
errors = {}
if user_input is not None:
host = user_input[CONF_HOST]
try:
ipaddress.ip_address(host)
except ValueError:
errors[CONF_HOST] = "invalid_host"
else:
device = await async_get_device_by_ip_address(host)
if device is not None:
return await self._async_entry_for_device(device)
errors[CONF_HOST] = "cannot_connect"
return self.async_show_form(
step_id="manual",
data_schema=vol.Schema({vol.Required(CONF_HOST): str}),
errors=errors,
)
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
# start discovery the first time through
if self._discovered_devices is None:
self._discovered_devices = await discover_all(DISCOVER_TIMEOUT)
current_ids = self._async_current_ids()
device_selection = [
device.name
for device in self._discovered_devices
if device.uuid not in current_ids
]
if not device_selection:
return await self.async_step_manual(user_input=None)
device_selection.append(CONF_HOST_MANUAL)
if user_input is not None:
if user_input[CONF_HOST] == CONF_HOST_MANUAL:
return await self.async_step_manual()
for device in self._discovered_devices:
if device == user_input[CONF_HOST]:
return await self._async_entry_for_device(device)
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Optional(CONF_HOST, default=device_selection[0]): vol.In(
device_selection
)
}
),
)
        )
| mit | 3,527,471,452,215,158,000 | 32.277778 | 81 | 0.582638 | false |
ssls/beetle-agent | tests/modules/teams/resources/test_modifying_teams.py | 1 | 4600 | # encoding: utf-8
# pylint: disable=missing-docstring
import json
from app.modules.teams import models
def test_new_team_creation(flask_app_client, db, regular_user):
# pylint: disable=invalid-name
team_title = "Test Team Title"
with flask_app_client.login(regular_user, auth_scopes=('teams:write', )):
response = flask_app_client.post('/api/v1/teams/', data={'title': team_title})
assert response.status_code == 200
assert response.content_type == 'application/json'
assert set(response.json.keys()) >= {'id', 'title'}
assert response.json['title'] == team_title
# Cleanup
team = models.Team.query.get(response.json['id'])
assert team.title == team_title
db.session.delete(team)
db.session.commit()
def test_new_team_creation_with_invalid_data_must_fail(flask_app_client, regular_user):
# pylint: disable=invalid-name
with flask_app_client.login(regular_user, auth_scopes=('teams:write', )):
response = flask_app_client.post('/api/v1/teams/', data={'title': ""})
assert response.status_code == 409
assert response.content_type == 'application/json'
assert set(response.json.keys()) >= {'status', 'message'}
def test_update_team_info(flask_app_client, regular_user, team_for_regular_user):
# pylint: disable=invalid-name
team_title = "Test Team Title"
with flask_app_client.login(regular_user, auth_scopes=('teams:write', )):
response = flask_app_client.patch(
'/api/v1/teams/%d' % team_for_regular_user.id,
content_type='application/json',
data=json.dumps((
{
'op': 'replace',
'path': '/title',
'value': team_title
},
))
)
assert response.status_code == 200
assert response.content_type == 'application/json'
assert set(response.json.keys()) >= {'id', 'title'}
assert response.json['id'] == team_for_regular_user.id
assert response.json['title'] == team_title
assert team_for_regular_user.title == team_title
def test_update_team_info_with_invalid_data_must_fail(
flask_app_client,
regular_user,
team_for_regular_user
):
# pylint: disable=invalid-name
with flask_app_client.login(regular_user, auth_scopes=('teams:write', )):
response = flask_app_client.patch(
'/api/v1/teams/%d' % team_for_regular_user.id,
content_type='application/json',
data=json.dumps((
{
'op': 'replace',
'path': '/title',
'value': '',
},
))
)
assert response.status_code == 409
assert response.content_type == 'application/json'
assert set(response.json.keys()) >= {'status', 'message'}
def test_team_deletion(flask_app_client, regular_user, team_for_regular_user):
with flask_app_client.login(regular_user, auth_scopes=('teams:write', )):
response = flask_app_client.delete(
'/api/v1/teams/%d' % team_for_regular_user.id
)
assert response.status_code == 200
assert response.content_type == 'application/json'
def test_add_new_team_member(flask_app_client, db, regular_user, admin_user, team_for_regular_user):
# pylint: disable=invalid-name
with flask_app_client.login(regular_user, auth_scopes=('teams:write', )):
response = flask_app_client.post(
'/api/v1/teams/%d/members/' % team_for_regular_user.id,
data={
'user_id': admin_user.id,
}
)
assert response.status_code == 200
assert response.content_type == 'application/json'
assert set(response.json.keys()) >= {'team', 'user', 'is_leader'}
assert response.json['team']['id'] == team_for_regular_user.id
assert response.json['user']['id'] == admin_user.id
# Cleanup
team_members = models.TeamMember.query.filter_by(team=team_for_regular_user, user=admin_user)
assert team_members.count() == 1
team_members.delete()
db.session.commit()
def test_delete_team_member(
flask_app_client, db, regular_user, readonly_user, team_for_regular_user
):
# pylint: disable=invalid-name,unused-argument
with flask_app_client.login(regular_user, auth_scopes=('teams:write', )):
response = flask_app_client.delete(
'/api/v1/teams/%d/members/%d' % (team_for_regular_user.id, readonly_user.id),
)
assert response.status_code == 200
assert response.content_type == 'application/json'
| mit | 1,529,536,958,977,042,000 | 35.220472 | 100 | 0.61587 | false |
vgrem/Office365-REST-Python-Client | tests/sharepoint/test_site.py | 1 | 1652 | from office365.sharepoint.web_application.webApplication import WebApplication
from tests import test_site_url
from tests.sharepoint.sharepoint_case import SPTestCase
from office365.sharepoint.lists.list_template_type import ListTemplateType
from office365.sharepoint.sites.site import Site
class TestSite(SPTestCase):
target_site = None # type: Site
def test1_if_site_loaded(self):
site = self.client.site.get().execute_query()
self.assertIs(site.is_property_available('Url'), True, "Site resource was not requested")
self.assertIs(site.is_property_available('RootWeb'), False)
self.__class__.target_site = site
def test2_if_site_exists(self):
site_url = self.__class__.target_site.url
result = Site.exists(self.client, site_url).execute_query()
self.assertIsNotNone(result.value)
def test3_get_site_by_id(self):
site_id = self.__class__.target_site.properties['Id']
result = Site.get_url_by_id(self.client, site_id).execute_query()
self.assertIsNotNone(result.value)
def test4_get_site_catalog(self):
catalog = self.client.site.get_catalog(ListTemplateType.AppDataCatalog).get().execute_query()
self.assertIsNotNone(catalog.title)
def test5_get_web_templates(self):
web_templates = self.client.site.get_web_templates().execute_query()
self.assertIsNotNone(web_templates)
def test6_get_web_template_by_name(self):
template_name = "GLOBAL#0"
web_template = self.client.site.get_web_templates().get_by_name(template_name).get().execute_query()
self.assertIsNotNone(web_template)
| mit | 7,089,965,349,920,508,000 | 41.358974 | 108 | 0.70339 | false |
Nic30/hwtLib | hwtLib/amba/datapump/interconnect/rStrictOrder_test.py | 1 | 5032 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from hwt.simulator.simTestCase import SimTestCase
from hwtLib.amba.datapump.interconnect.rStricOrder import RStrictOrderInterconnect
from hwtLib.amba.datapump.sim_ram import AxiDpSimRam
from pyMathBitPrecise.bit_utils import mask
from hwtSimApi.constants import CLK_PERIOD
class RStrictOrderInterconnectTC(SimTestCase):
@classmethod
def setUpClass(cls):
u = cls.u = RStrictOrderInterconnect()
u.ID_WIDTH = 4
cls.DRIVERS_CNT = 3
u.DRIVER_CNT = cls.DRIVERS_CNT
cls.MAX_TRANS_OVERLAP = 4
u.MAX_TRANS_OVERLAP = cls.MAX_TRANS_OVERLAP
u.DATA_WIDTH = 64
cls.compileSim(u)
def test_nop(self):
u = self.u
self.runSim(20 * CLK_PERIOD)
for d in u.drivers:
self.assertEqual(len(d.r._ag.data), 0)
self.assertEqual(len(u.rDatapump.req._ag.data), 0)
def test_passWithouData(self):
u = self.u
for i, driver in enumerate(u.drivers):
driver.req._ag.data.append((i + 1, i + 1, i + 1, 0))
self.runSim((self.DRIVERS_CNT * 2) * CLK_PERIOD)
for d in u.drivers:
self.assertEqual(len(d.r._ag.data), 0)
self.assertEqual(len(u.rDatapump.req._ag.data), self.DRIVERS_CNT)
for i, req in enumerate(u.rDatapump.req._ag.data):
self.assertValSequenceEqual(req,
(i + 1, i + 1, i + 1, 0))
def test_passWithData(self):
u = self.u
for i, driver in enumerate(u.drivers):
_id = i + 1
_len = i + 1
driver.req._ag.data.append((_id, i + 1, _len, 0))
for i2 in range(_len + 1):
d = (_id, i + 1, mask(u.DATA_WIDTH // 8), i2 == _len)
u.rDatapump.r._ag.data.append(d)
self.runSim(20 * CLK_PERIOD)
for i, d in enumerate(u.drivers):
self.assertEqual(len(d.r._ag.data), i + 1 + 1)
self.assertEqual(len(u.rDatapump.req._ag.data), self.DRIVERS_CNT)
for i, req in enumerate(u.rDatapump.req._ag.data):
self.assertValSequenceEqual(req,
(i + 1, i + 1, i + 1, 0))
def test_randomized(self):
u = self.u
m = AxiDpSimRam(u.DATA_WIDTH, u.clk, u.rDatapump)
for d in u.drivers:
self.randomize(d.req)
self.randomize(d.r)
self.randomize(u.rDatapump.req)
self.randomize(u.rDatapump.r)
def prepare(driverIndex, addr, size, valBase=1, _id=1):
driver = u.drivers[driverIndex]
driver.req._ag.data.append((_id, addr, size - 1, 0))
expected = []
_mask = mask(u.DATA_WIDTH // 8)
index = addr // (u.DATA_WIDTH // 8)
for i in range(size):
v = valBase + i
m.data[index + i] = v
d = (_id, v, _mask, int(i == size - 1))
expected.append(d)
return expected
def check(driverIndex, expected):
driverData = u.drivers[driverIndex].r._ag.data
self.assertEqual(len(driverData), len(expected))
for d, e in zip(driverData, expected):
self.assertValSequenceEqual(d, e)
d0 = prepare(0, 0x1000, 3, 99, _id=0)
# + prepare(0, 0x2000, 1, 100, _id=0) + prepare(0, 0x3000, 16, 101)
d1 = prepare(1, 0x4000, 3, 200, _id=1) + prepare(1, 0x5000, 1, 201, _id=1)
# + prepare(1, 0x6000, 16, 202) #+ prepare(1, 0x7000, 16, 203)
self.runSim(100 * CLK_PERIOD)
check(0, d0)
check(1, d1)
def test_randomized2(self):
u = self.u
m = AxiDpSimRam(u.DATA_WIDTH, u.clk, u.rDatapump)
N = 17
for d in u.drivers:
self.randomize(d.req)
self.randomize(d.r)
self.randomize(u.rDatapump.req)
self.randomize(u.rDatapump.r)
_mask = mask(u.DATA_WIDTH // 8)
expected = [[] for _ in u.drivers]
for _id, d in enumerate(u.drivers):
for i in range(N):
size = self._rand.getrandbits(3) + 1
magic = self._rand.getrandbits(16)
values = [i + magic for i in range(size)]
addr = m.calloc(size, u.DATA_WIDTH // 8, initValues=values)
d.req._ag.data.append((_id, addr, size - 1, 0))
for i2, v in enumerate(values):
data = (_id, v, _mask, int(i2 == size - 1))
expected[_id].append(data)
self.runSim(self.DRIVERS_CNT * N * 20 * CLK_PERIOD)
for expect, driver in zip(expected, u.drivers):
self.assertValSequenceEqual(driver.r._ag.data, expect)
if __name__ == "__main__":
suite = unittest.TestSuite()
# suite.addTest(RStrictOrderInterconnectTC('test_passWithouData'))
suite.addTest(unittest.makeSuite(RStrictOrderInterconnectTC))
runner = unittest.TextTestRunner(verbosity=3)
runner.run(suite)
| mit | -4,935,728,629,517,168,000 | 32.105263 | 82 | 0.548092 | false |
spiside/pdb-tutorial | solutions/dicegame/runner.py | 1 | 1693 | from .die import Die
from .utils import i_just_throw_an_exception
class GameRunner:
def __init__(self):
self.dice = Die.create_dice(5)
self.reset()
def reset(self):
self.round = 1
self.wins = 0
        self.loses = 0
        self.consecutive_wins = 0
@property
def answer(self):
total = 0
for die in self.dice:
total += die.value
return total
@classmethod
def run(cls):
count = 0
runner = cls()
while True:
print("Round {}\n".format(runner.round))
for die in runner.dice:
print(die.show())
guess = input("Sigh. What is your guess?: ")
guess = int(guess)
if guess == runner.answer:
print("Congrats, you can add like a 5 year old...")
runner.wins += 1
count += 1
runner.consecutive_wins += 1
else:
print("Sorry that's wrong")
print("The answer is: {}".format(runner.answer))
print("Like seriously, how could you mess that up")
runner.loses += 1
count = 0
print("Wins: {} Loses {}".format(runner.wins, runner.loses))
runner.round += 1
if count == 6:
print("You won... Congrats...")
print("The fact it took you so long is pretty sad")
break
prompt = input("Would you like to play again?[Y/n]: ")
if prompt.lower() == 'y' or prompt == '':
continue
else:
break
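# Example entry point (a minimal sketch; the original tutorial drives this class
# from a separate main script, so this guard is only illustrative):
if __name__ == '__main__':
    GameRunner.run()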
| mit | -5,400,822,887,541,992,000 | 26.216667 | 72 | 0.453042 | false |
joseandro/liToCNF | sValidator.py | 1 | 1230 | # This module validates inequations along with their solutions
# Author: Joseandro Luiz
def isValid(cnf, res):
"""
This function validates a CNF
@rtype: bool
"""
# Turn all variables from CNF positive (we have to compare them later)
andBoolClause = None
for i in cnf :
orBoolClause = None
for j, val in enumerate(i) :
isFound = False
modVal = val
orBool = None
if modVal < 0:
modVal *= -1
try:
if res.index(modVal) >= 0:
isFound = True
except ValueError:
pass
            if isFound:
                # The literal is satisfied when its variable is assigned True and the
                # literal is positive (mirrors the elif/else branches below).
                if i[j] > 0:
orBool = True
else:
orBool = False
elif i[j] > 0:
orBool = False
else:
orBool = True
if orBoolClause == None:
orBoolClause = orBool
else:
orBoolClause = orBoolClause or orBool
if andBoolClause is None:
andBoolClause = orBoolClause
else:
andBoolClause = andBoolClause and orBoolClause
return andBoolClause | gpl-3.0 | -8,536,422,730,506,171,000 | 25.76087 | 74 | 0.481301 | false |
manashmndl/LearningPyQt | pyqt/chap11/contactdlg.py | 1 | 5621 | #!/usr/bin/env python
# Copyright (c) 2008-14 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 2 of the License, or
# version 3 of the License, or (at your option) any later version. It is
# provided for educational purposes and is distributed in the hope that
# it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License for more details.
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future_builtins import *
import sys
from PyQt4.QtCore import (QVariant, Qt)
from PyQt4.QtGui import (QApplication, QComboBox, QDialog,
QDialogButtonBox, QGridLayout, QLabel, QLineEdit, QVBoxLayout)
class ContactDlg(QDialog):
StyleSheet = """
QComboBox { color: darkblue; }
QLineEdit { color: darkgreen; }
QLineEdit[mandatory="true"] {
background-color: rgb(255, 255, 127);
color: darkblue;
}
"""
def __init__(self, parent=None):
super(ContactDlg, self).__init__(parent)
self.create_widgets()
self.layout_widgets()
self.create_connections()
self.lineedits = (self.forenameEdit, self.surnameEdit,
self.companyEdit, self.phoneEdit, self.emailEdit)
for lineEdit in self.lineedits:
lineEdit.setProperty("mandatory", QVariant(True))
lineEdit.textEdited.connect(self.updateUi)
self.categoryComboBox.activated[int].connect(self.updateUi)
self.setStyleSheet(ContactDlg.StyleSheet)
self.setWindowTitle("Add Contact")
# An alternative would be to not create the QLabels but instead use a
# QFormLayout
def create_widgets(self):
self.forenameLabel = QLabel("&Forename:")
self.forenameEdit = QLineEdit()
self.forenameLabel.setBuddy(self.forenameEdit)
self.surnameLabel = QLabel("&Surname:")
self.surnameEdit = QLineEdit()
self.surnameLabel.setBuddy(self.surnameEdit)
self.categoryLabel = QLabel("&Category:")
self.categoryComboBox = QComboBox()
self.categoryLabel.setBuddy(self.categoryComboBox)
self.categoryComboBox.addItems(["Business", "Domestic",
"Personal"])
self.companyLabel = QLabel("C&ompany:")
self.companyEdit = QLineEdit()
self.companyLabel.setBuddy(self.companyEdit)
self.addressLabel = QLabel("A&ddress:")
self.addressEdit = QLineEdit()
self.addressLabel.setBuddy(self.addressEdit)
self.phoneLabel = QLabel("&Phone:")
self.phoneEdit = QLineEdit()
self.phoneLabel.setBuddy(self.phoneEdit)
self.mobileLabel = QLabel("&Mobile:")
self.mobileEdit = QLineEdit()
self.mobileLabel.setBuddy(self.mobileEdit)
self.faxLabel = QLabel("Fa&x:")
self.faxEdit = QLineEdit()
self.faxLabel.setBuddy(self.faxEdit)
self.emailLabel = QLabel("&Email:")
self.emailEdit = QLineEdit()
self.emailLabel.setBuddy(self.emailEdit)
self.buttonBox = QDialogButtonBox(QDialogButtonBox.Ok|
QDialogButtonBox.Cancel)
addButton = self.buttonBox.button(QDialogButtonBox.Ok)
addButton.setText("&Add")
addButton.setEnabled(False)
def layout_widgets(self):
grid = QGridLayout()
grid.addWidget(self.forenameLabel, 0, 0)
grid.addWidget(self.forenameEdit, 0, 1)
grid.addWidget(self.surnameLabel, 0, 2)
grid.addWidget(self.surnameEdit, 0, 3)
grid.addWidget(self.categoryLabel, 1, 0)
grid.addWidget(self.categoryComboBox, 1, 1)
grid.addWidget(self.companyLabel, 1, 2)
grid.addWidget(self.companyEdit, 1, 3)
grid.addWidget(self.addressLabel, 2, 0)
grid.addWidget(self.addressEdit, 2, 1, 1, 3)
grid.addWidget(self.phoneLabel, 3, 0)
grid.addWidget(self.phoneEdit, 3, 1)
grid.addWidget(self.mobileLabel, 3, 2)
grid.addWidget(self.mobileEdit, 3, 3)
grid.addWidget(self.faxLabel, 4, 0)
grid.addWidget(self.faxEdit, 4, 1)
grid.addWidget(self.emailLabel, 4, 2)
grid.addWidget(self.emailEdit, 4, 3)
layout = QVBoxLayout()
layout.addLayout(grid)
layout.addWidget(self.buttonBox)
self.setLayout(layout)
def create_connections(self):
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
def updateUi(self):
mandatory = self.companyEdit.property("mandatory").toBool()
if self.categoryComboBox.currentText() == "Business":
if not mandatory:
self.companyEdit.setProperty("mandatory", QVariant(True))
elif mandatory:
self.companyEdit.setProperty("mandatory", QVariant(False))
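        # Qt does not automatically re-polish widgets when a dynamic property used in
        # a stylesheet selector changes, so the stylesheet is re-applied below to make
        # the [mandatory="true"] rule take effect again.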
if (mandatory !=
self.companyEdit.property("mandatory").toBool()):
self.setStyleSheet(ContactDlg.StyleSheet)
enable = True
for lineEdit in self.lineedits:
if (lineEdit.property("mandatory").toBool() and
lineEdit.text().isEmpty()):
enable = False
break
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(enable)
if __name__ == "__main__":
app = QApplication(sys.argv)
form = ContactDlg()
form.show()
app.exec_()
| mit | 7,333,913,285,468,876,000 | 38.584507 | 74 | 0.654688 | false |
nicolas-shupe/flavorgator | public/map_app/multiply.py | 1 | 1150 | from numpy import *
s = [[2407,1860]]
p =.25
def rekt():
    for n in range(0, len(s)):
        for m in range(len(s[n])):
            s[n][m] = round((s[n][m] * p), 2)
return s
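# Worked example: with s = [[2407, 1860]] and p = .25 as defined above,
# rekt() scales the pair in place and returns [[601.75, 465.0]].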
"""
[[343.35, 137.38, 342.74, 150.59, 222.25, 153.91, 222.25, 141.05],
[402.5, 0.88, 430.24, 1.14, 430.59, 42.61, 406.52, 42.96],
[345.45, 198.89, 406.09, 196.52, 405.91, 209.91, 345.8, 213.59],
[406.09, 196.17, 435.4, 195.82, 436.19, 210.17, 407.14, 209.56],
[436.97, 195.39, 485.45, 194.69, 485.1, 209.21, 438.11, 210.35],
[569.45, 291.38, 592.11, 290.06, 590.62, 395.41, 568.31, 396.11],
[568.84, 397.34, 590.97, 397.95, 592.55, 424.29, 568.31, 424.11],
[138.69, 355.07, 149.89, 355.6, 149.89, 432.07, 138.69, 434.0],
[153.82, 355.86, 164.06, 355.07, 164.06, 377.56, 154.17, 377.04],
[153.39, 378.96, 164.67, 379.49, 165.29, 404.16, 153.82, 403.99],
[152.69, 406.09, 164.32, 405.47, 167.74, 431.55, 154.17, 429.8],
[141.05, 255.5, 140.26, 197.31, 197.84, 197.92, 197.84, 211.84, 159.86, 213.06, 156.89, 255.32],
[253.92, 186.9, 293.82, 186.72,296.36, 208.86, 255.41, 208.25]] """ | mit | 7,777,998,880,448,629,000 | 36.129032 | 104 | 0.531304 | false |
tylerclair/py3canvas | py3canvas/apis/user_observees.py | 1 | 5486 | """UserObservees API Version 1.0.
This API client was generated using a template. Make sure this code is valid before using it.
"""
import logging
from datetime import date, datetime
from .base import BaseCanvasAPI
class UserObserveesAPI(BaseCanvasAPI):
"""UserObservees API Version 1.0."""
def __init__(self, *args, **kwargs):
"""Init method for UserObserveesAPI."""
super(UserObserveesAPI, self).__init__(*args, **kwargs)
self.logger = logging.getLogger("py3canvas.UserObserveesAPI")
def list_observees(self, user_id, include=None):
"""
List observees.
List the users that the given user is observing.
*Note:* all users are allowed to list their own observees. Administrators can list
other users' observees.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - user_id
"""ID"""
path["user_id"] = user_id
# OPTIONAL - include
"""- "avatar_url": Optionally include avatar_url."""
if include is not None:
self._validate_enum(include, ["avatar_url"])
params["include"] = include
self.logger.debug("GET /api/v1/users/{user_id}/observees with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/users/{user_id}/observees".format(**path), data=data, params=params, all_pages=True)
def add_observee_with_credentials(self, user_id, access_token=None, observee_password=None, observee_unique_id=None):
"""
Add an observee with credentials.
Register the given user to observe another user, given the observee's credentials.
*Note:* all users are allowed to add their own observees, given the observee's
credentials or access token are provided. Administrators can add observees given credentials, access token or
the {api:UserObserveesController#update observee's id}.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - user_id
"""ID"""
path["user_id"] = user_id
# OPTIONAL - observee[unique_id]
"""The login id for the user to observe. Required if access_token is omitted."""
if observee_unique_id is not None:
data["observee[unique_id]"] = observee_unique_id
# OPTIONAL - observee[password]
"""The password for the user to observe. Required if access_token is omitted."""
if observee_password is not None:
data["observee[password]"] = observee_password
# OPTIONAL - access_token
"""The access token for the user to observe. Required if <tt>observee[unique_id]</tt> or <tt>observee[password]</tt> are omitted."""
if access_token is not None:
data["access_token"] = access_token
self.logger.debug("POST /api/v1/users/{user_id}/observees with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/users/{user_id}/observees".format(**path), data=data, params=params, single_item=True)
def show_observee(self, user_id, observee_id):
"""
Show an observee.
Gets information about an observed user.
*Note:* all users are allowed to view their own observees.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - user_id
"""ID"""
path["user_id"] = user_id
# REQUIRED - PATH - observee_id
"""ID"""
path["observee_id"] = observee_id
self.logger.debug("GET /api/v1/users/{user_id}/observees/{observee_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/users/{user_id}/observees/{observee_id}".format(**path), data=data, params=params, single_item=True)
def add_observee(self, user_id, observee_id):
"""
Add an observee.
Registers a user as being observed by the given user.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - user_id
"""ID"""
path["user_id"] = user_id
# REQUIRED - PATH - observee_id
"""ID"""
path["observee_id"] = observee_id
self.logger.debug("PUT /api/v1/users/{user_id}/observees/{observee_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/users/{user_id}/observees/{observee_id}".format(**path), data=data, params=params, single_item=True)
def remove_observee(self, user_id, observee_id):
"""
Remove an observee.
Unregisters a user as being observed by the given user.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - user_id
"""ID"""
path["user_id"] = user_id
# REQUIRED - PATH - observee_id
"""ID"""
path["observee_id"] = observee_id
self.logger.debug("DELETE /api/v1/users/{user_id}/observees/{observee_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("DELETE", "/api/v1/users/{user_id}/observees/{observee_id}".format(**path), data=data, params=params, single_item=True)
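# Example usage sketch. Assumption: BaseCanvasAPI's constructor takes the Canvas
# instance URL and an API token (its signature is not shown in this module), e.g.:
#
#   api = UserObserveesAPI("https://canvas.example.edu", "my_api_token")
#   for observee in api.list_observees(user_id=1234, include=["avatar_url"]):
#       print(observee)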
| mit | -4,811,413,284,564,236,000 | 36.834483 | 174 | 0.603172 | false |
caoziyao/weibo | models/messageCopy.py | 1 | 1245 | import hashlib
import os
from . import ModelMixin
from . import db
from utils import timestamp
from utils import log
from utils import sh1hexdigest
class MessageCopy(db.Model, ModelMixin):
__tablename__ = 'messageCopys'
    Cid = db.Column(db.Integer, primary_key=True)  # repost id
    # Ccontent = db.Column(db.String(150))  # repost content
    # Cdatetime = db.Column(db.Integer)  # repost timestamp
    # Foreign keys
    # u_id = db.Column(db.Integer, db.ForeignKey('users.Uid'))  # user id
    m_follow_id = db.Column(db.Integer, db.ForeignKey('messages.Mid'))  # message id
    m_fans_id = db.Column(db.Integer, db.ForeignKey('messages.Mid'))  # message id
    # Define relationships.
    # Automatically associated, so the related data is available without a manual query.
# user = db.relationship('User', backref='messageCopy', foreign_keys='MessageCopy.u_id')
m_follow = db.relationship('Message', backref='messagCopyFollow', foreign_keys='MessageCopy.m_follow_id')
m_fans = db.relationship('Message', backref='messagCopyFans', foreign_keys='MessageCopy.m_fans_id')
def __init__(self):
super(MessageCopy, self).__init__()
# self.Ccontent = form.get('content', '')
# self.Cdatetime = timestamp()
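    # Usage sketch (assumes a Message model keyed by Mid, as the ForeignKey
    # declarations above imply; the names below are illustrative only):
    #
    #   repost = MessageCopy()
    #   repost.m_follow = original_message   # the message being reposted
    #   repost.m_fans = forwarded_message    # the repost written by the fan
    #   db.session.add(repost)
    #   db.session.commit()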
| mit | -8,593,585,503,986,119,000 | 35.03125 | 110 | 0.651344 | false |
ArcherSys/ArcherSys | node_modules/npm/node_modules/node-gyp/gyp/gyptest.py | 1 | 24104 | <<<<<<< HEAD
<<<<<<< HEAD
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__doc__ = """
gyptest.py -- test runner for GYP tests.
"""
import os
import optparse
import shlex
import subprocess
import sys
class CommandRunner:
"""
Executor class for commands, including "commands" implemented by
Python functions.
"""
verbose = True
active = True
def __init__(self, dictionary={}):
self.subst_dictionary(dictionary)
def subst_dictionary(self, dictionary):
self._subst_dictionary = dictionary
def subst(self, string, dictionary=None):
"""
Substitutes (via the format operator) the values in the specified
dictionary into the specified command.
The command can be an (action, string) tuple. In all cases, we
perform substitution on strings and don't worry if something isn't
a string. (It's probably a Python function to be executed.)
"""
if dictionary is None:
dictionary = self._subst_dictionary
if dictionary:
try:
string = string % dictionary
except TypeError:
pass
return string
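  # Example: subst('%(exe)s --version', {'exe': 'gyp'}) returns 'gyp --version',
  # since substitution is done with Python's % string-formatting operator.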
def display(self, command, stdout=None, stderr=None):
if not self.verbose:
return
if type(command) == type(()):
func = command[0]
args = command[1:]
s = '%s(%s)' % (func.__name__, ', '.join(map(repr, args)))
if type(command) == type([]):
# TODO: quote arguments containing spaces
# TODO: handle meta characters?
s = ' '.join(command)
else:
s = self.subst(command)
if not s.endswith('\n'):
s += '\n'
sys.stdout.write(s)
sys.stdout.flush()
def execute(self, command, stdout=None, stderr=None):
"""
Executes a single command.
"""
if not self.active:
return 0
if type(command) == type(''):
command = self.subst(command)
cmdargs = shlex.split(command)
if cmdargs[0] == 'cd':
command = (os.chdir,) + tuple(cmdargs[1:])
if type(command) == type(()):
func = command[0]
args = command[1:]
return func(*args)
else:
if stdout is sys.stdout:
# Same as passing sys.stdout, except python2.4 doesn't fail on it.
subout = None
else:
# Open pipe for anything else so Popen works on python2.4.
subout = subprocess.PIPE
if stderr is sys.stderr:
# Same as passing sys.stderr, except python2.4 doesn't fail on it.
suberr = None
elif stderr is None:
# Merge with stdout if stderr isn't specified.
suberr = subprocess.STDOUT
else:
# Open pipe for anything else so Popen works on python2.4.
suberr = subprocess.PIPE
p = subprocess.Popen(command,
shell=(sys.platform == 'win32'),
stdout=subout,
stderr=suberr)
p.wait()
if stdout is None:
self.stdout = p.stdout.read()
elif stdout is not sys.stdout:
stdout.write(p.stdout.read())
if stderr not in (None, sys.stderr):
stderr.write(p.stderr.read())
return p.returncode
def run(self, command, display=None, stdout=None, stderr=None):
"""
Runs a single command, displaying it first.
"""
if display is None:
display = command
self.display(display)
return self.execute(command, stdout, stderr)
class Unbuffered:
def __init__(self, fp):
self.fp = fp
def write(self, arg):
self.fp.write(arg)
self.fp.flush()
def __getattr__(self, attr):
return getattr(self.fp, attr)
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
def is_test_name(f):
return f.startswith('gyptest') and f.endswith('.py')
def find_all_gyptest_files(directory):
result = []
for root, dirs, files in os.walk(directory):
if '.svn' in dirs:
dirs.remove('.svn')
result.extend([ os.path.join(root, f) for f in files if is_test_name(f) ])
result.sort()
return result
def main(argv=None):
if argv is None:
argv = sys.argv
usage = "gyptest.py [-ahlnq] [-f formats] [test ...]"
parser = optparse.OptionParser(usage=usage)
parser.add_option("-a", "--all", action="store_true",
help="run all tests")
parser.add_option("-C", "--chdir", action="store", default=None,
help="chdir to the specified directory")
parser.add_option("-f", "--format", action="store", default='',
help="run tests with the specified formats")
parser.add_option("-G", '--gyp_option', action="append", default=[],
help="Add -G options to the gyp command line")
parser.add_option("-l", "--list", action="store_true",
help="list available tests and exit")
parser.add_option("-n", "--no-exec", action="store_true",
help="no execute, just print the command line")
parser.add_option("--passed", action="store_true",
help="report passed tests")
parser.add_option("--path", action="append", default=[],
help="additional $PATH directory")
parser.add_option("-q", "--quiet", action="store_true",
help="quiet, don't print test command lines")
opts, args = parser.parse_args(argv[1:])
if opts.chdir:
os.chdir(opts.chdir)
if opts.path:
extra_path = [os.path.abspath(p) for p in opts.path]
extra_path = os.pathsep.join(extra_path)
os.environ['PATH'] = extra_path + os.pathsep + os.environ['PATH']
if not args:
if not opts.all:
sys.stderr.write('Specify -a to get all tests.\n')
return 1
args = ['test']
tests = []
for arg in args:
if os.path.isdir(arg):
tests.extend(find_all_gyptest_files(os.path.normpath(arg)))
else:
if not is_test_name(os.path.basename(arg)):
print >>sys.stderr, arg, 'is not a valid gyp test name.'
sys.exit(1)
tests.append(arg)
if opts.list:
for test in tests:
print test
sys.exit(0)
CommandRunner.verbose = not opts.quiet
CommandRunner.active = not opts.no_exec
cr = CommandRunner()
os.environ['PYTHONPATH'] = os.path.abspath('test/lib')
if not opts.quiet:
sys.stdout.write('PYTHONPATH=%s\n' % os.environ['PYTHONPATH'])
passed = []
failed = []
no_result = []
if opts.format:
format_list = opts.format.split(',')
else:
# TODO: not duplicate this mapping from pylib/gyp/__init__.py
format_list = {
'aix5': ['make'],
'freebsd7': ['make'],
'freebsd8': ['make'],
'openbsd5': ['make'],
'cygwin': ['msvs'],
'win32': ['msvs', 'ninja'],
'linux2': ['make', 'ninja'],
'linux3': ['make', 'ninja'],
'darwin': ['make', 'ninja', 'xcode'],
}[sys.platform]
for format in format_list:
os.environ['TESTGYP_FORMAT'] = format
if not opts.quiet:
sys.stdout.write('TESTGYP_FORMAT=%s\n' % format)
gyp_options = []
for option in opts.gyp_option:
gyp_options += ['-G', option]
if gyp_options and not opts.quiet:
sys.stdout.write('Extra Gyp options: %s\n' % gyp_options)
for test in tests:
status = cr.run([sys.executable, test] + gyp_options,
stdout=sys.stdout,
stderr=sys.stderr)
if status == 2:
no_result.append(test)
elif status:
failed.append(test)
else:
passed.append(test)
if not opts.quiet:
def report(description, tests):
if tests:
if len(tests) == 1:
sys.stdout.write("\n%s the following test:\n" % description)
else:
fmt = "\n%s the following %d tests:\n"
sys.stdout.write(fmt % (description, len(tests)))
sys.stdout.write("\t" + "\n\t".join(tests) + "\n")
if opts.passed:
report("Passed", passed)
report("Failed", failed)
report("No result from", no_result)
if failed:
return 1
else:
return 0
if __name__ == "__main__":
sys.exit(main())
| mit | -4,964,498,452,634,644,000 | 28.111111 | 78 | 0.597826 | false |
google/graphicsfuzz | gfauto/gfauto/run_bin.py | 1 | 2346 | # -*- coding: utf-8 -*-
# Copyright 2019 The GraphicsFuzz Project Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs a binary from the given binary name and settings file."""
import argparse
import subprocess
import sys
from pathlib import Path
from typing import List
from gfauto import binaries_util, settings_util
from gfauto.gflogging import log
def main() -> int:
parser = argparse.ArgumentParser(
description="Runs a binary given the binary name and settings.json file. "
"Use -- to separate args to run_bin and your binary. "
)
parser.add_argument(
"--settings",
help="Path to the settings JSON file for this instance.",
default=str(settings_util.DEFAULT_SETTINGS_FILE_PATH),
)
parser.add_argument(
"binary_name",
help="The name of the binary to run. E.g. spirv-opt, glslangValidator",
type=str,
)
parser.add_argument(
"arguments",
metavar="arguments",
type=str,
nargs="*",
help="The arguments to pass to the binary",
)
parsed_args = parser.parse_args(sys.argv[1:])
# Args.
settings_path: Path = Path(parsed_args.settings)
binary_name: str = parsed_args.binary_name
arguments: List[str] = parsed_args.arguments
try:
settings = settings_util.read_or_create(settings_path)
except settings_util.NoSettingsFile:
log(f"Settings file {str(settings_path)} was created for you; using this.")
settings = settings_util.read_or_create(settings_path)
binary_manager = binaries_util.get_default_binary_manager(settings=settings)
cmd = [str(binary_manager.get_binary_path_by_name(binary_name).path)]
cmd.extend(arguments)
return subprocess.run(cmd, check=False).returncode
if __name__ == "__main__":
sys.exit(main())
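# Example invocation (a sketch; the binary name and arguments are placeholders, and
# everything after "--" is forwarded to that binary):
#
#   python -m gfauto.run_bin --settings settings.json spirv-opt -- --version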
| apache-2.0 | 3,527,463,983,964,285,000 | 29.868421 | 83 | 0.684996 | false |
masayuko/nikola | nikola/post.py | 1 | 47243 | # -*- coding: utf-8 -*-
# Copyright © 2012-2015 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""The Post class."""
from __future__ import unicode_literals, print_function, absolute_import
import io
from collections import defaultdict
import datetime
import hashlib
import json
import os
import re
import string
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin # NOQA
from . import utils
import dateutil.tz
import lxml.html
import html5lib
import natsort
try:
import pyphen
except ImportError:
pyphen = None
from math import ceil
# for tearDown with _reload we cannot use 'from import' to get forLocaleBorg
import nikola.utils
from .utils import (
current_time,
Functionary,
LOGGER,
LocaleBorg,
slugify,
to_datetime,
unicode_str,
demote_headers,
get_translation_candidate,
unslugify,
)
from .rc4 import rc4
__all__ = ('Post',)
TEASER_REGEXP = re.compile('<!--\s*TEASER_END(:(.+))?\s*-->', re.IGNORECASE)
_UPGRADE_METADATA_ADVERTISED = False
class Post(object):
"""Represent a blog post or site page."""
def __init__(
self,
source_path,
config,
destination,
use_in_feeds,
messages,
template_name,
compiler
):
"""Initialize post.
The source path is the user created post file. From it we calculate
the meta file, as well as any translations available, and
the .html fragment file path.
"""
self.config = config
self.compiler = compiler
self.compile_html = self.compiler.compile_html
self.demote_headers = self.compiler.demote_headers and self.config['DEMOTE_HEADERS']
tzinfo = self.config['__tzinfo__']
if self.config['FUTURE_IS_NOW']:
self.current_time = None
else:
self.current_time = current_time(tzinfo)
self.translated_to = set([])
self._prev_post = None
self._next_post = None
self.base_url = self.config['BASE_URL']
self.is_draft = False
self.is_private = False
self.is_mathjax = False
self.strip_indexes = self.config['STRIP_INDEXES']
self.index_file = self.config['INDEX_FILE']
self.pretty_urls = self.config['PRETTY_URLS']
self.source_path = source_path # posts/blah.txt
self.post_name = os.path.splitext(source_path)[0] # posts/blah
# cache[\/]posts[\/]blah.html
self.base_path = os.path.join(self.config['CACHE_FOLDER'], self.post_name + ".html")
# cache/posts/blah.html
self._base_path = self.base_path.replace('\\', '/')
self.metadata_path = self.post_name + ".meta" # posts/blah.meta
self.folder = destination
self.translations = self.config['TRANSLATIONS']
self.default_lang = self.config['DEFAULT_LANG']
self.messages = messages
self.skip_untranslated = not self.config['SHOW_UNTRANSLATED_POSTS']
self._template_name = template_name
self.is_two_file = True
self.newstylemeta = True
self._reading_time = None
self._remaining_reading_time = None
self._paragraph_count = None
self._remaining_paragraph_count = None
self._dependency_file_fragment = defaultdict(list)
self._dependency_file_page = defaultdict(list)
self._dependency_uptodate_fragment = defaultdict(list)
self._dependency_uptodate_page = defaultdict(list)
default_metadata, self.newstylemeta = get_meta(self, self.config['FILE_METADATA_REGEXP'], self.config['UNSLUGIFY_TITLES'])
self.meta = Functionary(lambda: None, self.default_lang)
self.meta[self.default_lang] = default_metadata
# Load internationalized metadata
for lang in self.translations:
if os.path.isfile(get_translation_candidate(self.config, self.source_path, lang)):
self.translated_to.add(lang)
if lang != self.default_lang:
meta = defaultdict(lambda: '')
meta.update(default_metadata)
_meta, _nsm = get_meta(self, self.config['FILE_METADATA_REGEXP'], self.config['UNSLUGIFY_TITLES'], lang)
self.newstylemeta = self.newstylemeta and _nsm
meta.update(_meta)
self.meta[lang] = meta
if not self.is_translation_available(self.default_lang):
# Special case! (Issue #373)
# Fill default_metadata with stuff from the other languages
for lang in sorted(self.translated_to):
default_metadata.update(self.meta[lang])
if 'date' not in default_metadata and not use_in_feeds:
# For stories we don't *really* need a date
if self.config['__invariant__']:
default_metadata['date'] = datetime.datetime(2013, 12, 31, 23, 59, 59, tzinfo=tzinfo)
else:
default_metadata['date'] = datetime.datetime.utcfromtimestamp(
os.stat(self.source_path).st_ctime).replace(tzinfo=dateutil.tz.tzutc()).astimezone(tzinfo)
# If time zone is set, build localized datetime.
self.date = to_datetime(self.meta[self.default_lang]['date'], tzinfo)
if 'updated' not in default_metadata:
default_metadata['updated'] = default_metadata.get('date', None)
self.updated = to_datetime(default_metadata['updated'])
if 'title' not in default_metadata or 'slug' not in default_metadata \
or 'date' not in default_metadata:
raise OSError("You must set a title (found '{0}'), a slug (found "
"'{1}') and a date (found '{2}')! [in file "
"{3}]".format(default_metadata.get('title', None),
default_metadata.get('slug', None),
default_metadata.get('date', None),
source_path))
if 'type' not in default_metadata:
# default value is 'text'
default_metadata['type'] = 'text'
self.publish_later = False if self.current_time is None else self.date >= self.current_time
is_draft = False
is_private = False
self._tags = {}
for lang in self.translated_to:
self._tags[lang] = natsort.natsorted(
list(set([x.strip() for x in self.meta[lang]['tags'].split(',')])),
alg=natsort.ns.F | natsort.ns.IC)
self._tags[lang] = [t for t in self._tags[lang] if t]
if 'draft' in [_.lower() for _ in self._tags[lang]]:
is_draft = True
LOGGER.debug('The post "{0}" is a draft.'.format(self.source_path))
self._tags[lang].remove('draft')
# TODO: remove in v8
if 'retired' in self._tags[lang]:
is_private = True
LOGGER.warning('The "retired" tag in post "{0}" is now deprecated and will be removed in v8. Use "private" instead.'.format(self.source_path))
self._tags[lang].remove('retired')
# end remove in v8
if 'private' in self._tags[lang]:
is_private = True
LOGGER.debug('The post "{0}" is private.'.format(self.source_path))
self._tags[lang].remove('private')
# While draft comes from the tags, it's not really a tag
self.is_draft = is_draft
self.is_private = is_private
self.is_post = use_in_feeds
self.use_in_feeds = use_in_feeds and not is_draft and not is_private \
and not self.publish_later
# If mathjax is a tag, or it's a ipynb post, then enable mathjax rendering support
self.is_mathjax = ('mathjax' in self.tags) or (self.compiler.name == 'ipynb')
# Register potential extra dependencies
self.compiler.register_extra_dependencies(self)
def _get_hyphenate(self):
return bool(self.config['HYPHENATE'] or self.meta('hyphenate'))
hyphenate = property(_get_hyphenate)
def __repr__(self):
"""Provide a representation of the post object."""
# Calculate a hash that represents most data about the post
m = hashlib.md5()
# source_path modification date (to avoid reading it)
m.update(utils.unicode_str(os.stat(self.source_path).st_mtime).encode('utf-8'))
clean_meta = {}
for k, v in self.meta.items():
sub_meta = {}
clean_meta[k] = sub_meta
for kk, vv in v.items():
if vv:
sub_meta[kk] = vv
m.update(utils.unicode_str(json.dumps(clean_meta, cls=utils.CustomEncoder, sort_keys=True)).encode('utf-8'))
return '<Post: {0!r} {1}>'.format(self.source_path, m.hexdigest())
def _has_pretty_url(self, lang):
if self.pretty_urls and \
self.meta[lang].get('pretty_url', '') != 'False' and \
self.meta[lang]['slug'] != 'index':
return True
else:
return False
@property
def alltags(self):
"""Return ALL the tags for this post."""
tags = []
for l in self._tags:
tags.extend(self._tags[l])
return list(set(tags))
def tags_for_language(self, lang):
"""Return tags for a given language."""
if lang in self._tags:
return self._tags[lang]
elif lang not in self.translated_to and self.skip_untranslated:
return []
elif self.default_lang in self._tags:
return self._tags[self.default_lang]
else:
return []
@property
def tags(self):
"""Return tags for the current language."""
lang = nikola.utils.LocaleBorg().current_lang
return self.tags_for_language(lang)
@property
def prev_post(self):
"""Return previous post."""
lang = nikola.utils.LocaleBorg().current_lang
rv = self._prev_post
while self.skip_untranslated:
if rv is None:
break
if rv.is_translation_available(lang):
break
rv = rv._prev_post
return rv
@prev_post.setter # NOQA
def prev_post(self, v):
"""Set previous post."""
self._prev_post = v
@property
def next_post(self):
"""Return next post."""
lang = nikola.utils.LocaleBorg().current_lang
rv = self._next_post
while self.skip_untranslated:
if rv is None:
break
if rv.is_translation_available(lang):
break
rv = rv._next_post
return rv
@next_post.setter # NOQA
def next_post(self, v):
"""Set next post."""
self._next_post = v
@property
def template_name(self):
"""Return template name for this post."""
lang = nikola.utils.LocaleBorg().current_lang
return self.meta[lang]['template'] or self._template_name
def formatted_date(self, date_format, date=None):
"""Return the formatted date as unicode."""
return utils.LocaleBorg().formatted_date(date_format, date if date else self.date)
def formatted_updated(self, date_format):
"""Return the updated date as unicode."""
return self.formatted_date(date_format, self.updated)
def title(self, lang=None):
"""Return localized title.
If lang is not specified, it defaults to the current language from
templates, as set in LocaleBorg.
"""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
return self.meta[lang]['title']
def author(self, lang=None):
"""Return localized author or BLOG_AUTHOR if unspecified.
If lang is not specified, it defaults to the current language from
templates, as set in LocaleBorg.
"""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
if self.meta[lang]['author']:
author = self.meta[lang]['author']
else:
author = self.config['BLOG_AUTHOR'](lang)
return author
def description(self, lang=None):
"""Return localized description."""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
return self.meta[lang]['description']
def add_dependency(self, dependency, add='both', lang=None):
"""Add a file dependency for tasks using that post.
The ``dependency`` should be a string specifying a path, or a callable
which returns such a string or a list of strings.
The ``add`` parameter can be 'both', 'fragment' or 'page', to indicate
that this dependency shall be used
* when rendering the fragment to HTML ('fragment' and 'both'), or
* when creating a page with parts of the ``Post`` embedded, which
includes the HTML resulting from compiling the fragment ('page' or
'both').
If ``lang`` is not specified, this dependency is added for all languages.
"""
if add not in {'fragment', 'page', 'both'}:
raise Exception("Add parameter is '{0}', but must be either 'fragment', 'page', or 'both'.".format(add))
if add == 'fragment' or add == 'both':
self._dependency_file_fragment[lang].append((type(dependency) != str, dependency))
if add == 'page' or add == 'both':
self._dependency_file_page[lang].append((type(dependency) != str, dependency))
def add_dependency_uptodate(self, dependency, is_callable=False, add='both', lang=None):
"""Add a dependency for task's ``uptodate`` for tasks using that post.
This can be for example an ``utils.config_changed`` object, or a list of
such objects.
The ``is_callable`` parameter specifies whether ``dependency`` is a
callable which generates an entry or a list of entries for the ``uptodate``
list, or whether it is an entry which can directly be added (as a single
object or a list of objects).
The ``add`` parameter can be 'both', 'fragment' or 'page', to indicate
that this dependency shall be used
* when rendering the fragment to HTML ('fragment' and 'both'), or
* when creating a page with parts of the ``Post`` embedded, which
includes the HTML resulting from compiling the fragment ('page' or
'both').
If ``lang`` is not specified, this dependency is added for all languages.
Example:
post.add_dependency_uptodate(
utils.config_changed({1: some_data}, 'uniqueid'), False, 'page')
"""
if add == 'fragment' or add == 'both':
self._dependency_uptodate_fragment[lang].append((is_callable, dependency))
if add == 'page' or add == 'both':
self._dependency_uptodate_page[lang].append((is_callable, dependency))
def _get_dependencies(self, deps_list):
deps = []
for dep in deps_list:
if dep[0]:
# callable
result = dep[1]()
else:
# can add directly
result = dep[1]
# if result is a list, add its contents
if type(result) == list:
deps.extend(result)
else:
deps.append(result)
return deps
def deps(self, lang):
"""Return a list of file dependencies to build this post's page."""
deps = []
if self.default_lang in self.translated_to:
deps.append(self.base_path)
deps.append(self.source_path)
if os.path.exists(self.metadata_path):
deps.append(self.metadata_path)
if lang != self.default_lang:
cand_1 = get_translation_candidate(self.config, self.source_path, lang)
cand_2 = get_translation_candidate(self.config, self.base_path, lang)
if os.path.exists(cand_1):
deps.extend([cand_1, cand_2])
cand_3 = get_translation_candidate(self.config, self.metadata_path, lang)
if os.path.exists(cand_3):
deps.append(cand_3)
deps += self._get_dependencies(self._dependency_file_page[lang])
deps += self._get_dependencies(self._dependency_file_page[None])
return sorted(deps)
def deps_uptodate(self, lang):
"""Return a list of uptodate dependencies to build this post's page.
These dependencies should be included in ``uptodate`` for the task
which generates the page.
"""
deps = []
deps += self._get_dependencies(self._dependency_uptodate_page[lang])
deps += self._get_dependencies(self._dependency_uptodate_page[None])
deps.append(utils.config_changed({1: sorted(self.compiler.config_dependencies)}, 'nikola.post.Post.deps_uptodate:compiler:' + self.source_path))
return deps
def compile(self, lang):
"""Generate the cache/ file with the compiled post."""
def wrap_encrypt(path, password):
"""Wrap a post with encryption."""
with io.open(path, 'r+', encoding='utf8') as inf:
data = inf.read() + "<!--tail-->"
data = CRYPT.substitute(data=rc4(password, data))
with io.open(path, 'w+', encoding='utf8') as outf:
outf.write(data)
dest = self.translated_base_path(lang)
if not self.is_translation_available(lang) and not self.config['SHOW_UNTRANSLATED_POSTS']:
return
# Set the language to the right thing
LocaleBorg().set_locale(lang)
self.compile_html(
self.translated_source_path(lang),
dest,
self.is_two_file),
if self.meta('password'):
# TODO: get rid of this feature one day (v8?; warning added in v7.3.0.)
LOGGER.warn("The post {0} is using the `password` attribute, which may stop working in the future.")
LOGGER.warn("Please consider switching to a more secure method of encryption.")
LOGGER.warn("More details: https://github.com/getnikola/nikola/issues/1547")
wrap_encrypt(dest, self.meta('password'))
if self.publish_later:
LOGGER.notice('{0} is scheduled to be published in the future ({1})'.format(
self.source_path, self.date))
def fragment_deps(self, lang):
"""Return a list of uptodate dependencies to build this post's fragment.
These dependencies should be included in ``uptodate`` for the task
which generates the fragment.
"""
deps = []
if self.default_lang in self.translated_to:
deps.append(self.source_path)
if os.path.isfile(self.metadata_path):
deps.append(self.metadata_path)
lang_deps = []
if lang != self.default_lang:
lang_deps = [get_translation_candidate(self.config, d, lang) for d in deps]
deps += lang_deps
deps = [d for d in deps if os.path.exists(d)]
deps += self._get_dependencies(self._dependency_file_fragment[lang])
deps += self._get_dependencies(self._dependency_file_fragment[None])
return sorted(deps)
def fragment_deps_uptodate(self, lang):
"""Return a list of file dependencies to build this post's fragment."""
deps = []
deps += self._get_dependencies(self._dependency_uptodate_fragment[lang])
deps += self._get_dependencies(self._dependency_uptodate_fragment[None])
deps.append(utils.config_changed({1: sorted(self.compiler.config_dependencies)}, 'nikola.post.Post.deps_uptodate:compiler:' + self.source_path))
return deps
def is_translation_available(self, lang):
"""Return True if the translation actually exists."""
return lang in self.translated_to
def translated_source_path(self, lang):
"""Return path to the translation's source file."""
if lang in self.translated_to:
if lang == self.default_lang:
return self.source_path
else:
return get_translation_candidate(self.config, self.source_path, lang)
elif lang != self.default_lang:
return self.source_path
else:
return get_translation_candidate(self.config, self.source_path, sorted(self.translated_to)[0])
def translated_base_path(self, lang):
"""Return path to the translation's base_path file."""
return get_translation_candidate(self.config, self.base_path, lang)
def _translated_file_path(self, lang):
"""Return path to the translation's file, or to the original."""
if lang in self.translated_to:
if lang == self.default_lang:
return self.base_path
else:
return get_translation_candidate(self.config, self.base_path, lang)
elif lang != self.default_lang:
return self.base_path
else:
return get_translation_candidate(self.config, self.base_path, sorted(self.translated_to)[0])
def text(self, lang=None, teaser_only=False, strip_html=False, show_read_more_link=True,
feed_read_more_link=False, feed_links_append_query=None):
"""Read the post file for that language and return its contents.
teaser_only=True breaks at the teaser marker and returns only the teaser.
strip_html=True removes HTML tags
show_read_more_link=False does not add the Read more... link
feed_read_more_link=True uses FEED_READ_MORE_LINK instead of INDEX_READ_MORE_LINK
lang=None uses the last used to set locale
All links in the returned HTML will be relative.
The HTML returned is a bare fragment, not a full document.
"""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
file_name = self._translated_file_path(lang)
# Yes, we compile it and screw it.
# This may be controversial, but the user (or someone) is asking for the post text
# and the post should not just refuse to give it.
if not os.path.isfile(file_name):
self.compile(lang)
with io.open(file_name, "r", encoding="utf8") as post_file:
data = post_file.read().strip()
if self.compiler.extension() == '.php':
return data
try:
document = html5lib.html5parser.parse(data, treebuilder='lxml',
namespaceHTMLElements=False)
document = lxml.html.fragment_fromstring(
lxml.html.tostring(document), "body")
except lxml.etree.ParserError as e:
# if we don't catch this, it breaks later (Issue #374)
if str(e) == "Document is empty":
return ""
# let other errors raise
raise(e)
base_url = self.permalink(lang=lang)
document.make_links_absolute(base_url)
if self.hyphenate:
hyphenate(document, lang)
try:
data = lxml.html.tostring(document.body, encoding='unicode')
except:
data = lxml.html.tostring(document, encoding='unicode')
if teaser_only:
teaser_regexp = self.config.get('TEASER_REGEXP', TEASER_REGEXP)
teaser = teaser_regexp.split(data)[0]
if teaser != data:
if not strip_html and show_read_more_link:
if teaser_regexp.search(data).groups()[-1]:
teaser_text = teaser_regexp.search(data).groups()[-1]
else:
teaser_text = self.messages[lang]["Read more"]
l = self.config['FEED_READ_MORE_LINK'](lang) if feed_read_more_link else self.config['INDEX_READ_MORE_LINK'](lang)
teaser += l.format(
link=self.permalink(lang, query=feed_links_append_query),
read_more=teaser_text,
min_remaining_read=self.messages[lang]["%d min remaining to read"] % (self.remaining_reading_time),
reading_time=self.reading_time,
remaining_reading_time=self.remaining_reading_time,
paragraph_count=self.paragraph_count,
remaining_paragraph_count=self.remaining_paragraph_count)
# This closes all open tags and sanitizes the broken HTML
document = lxml.html.fromstring(teaser)
try:
data = lxml.html.tostring(document.body, encoding='unicode')
except IndexError:
data = lxml.html.tostring(document, encoding='unicode')
if data and strip_html:
try:
# Not all posts have a body. For example, you may have a page statically defined in the template that does not take content as input.
content = lxml.html.fromstring(data)
data = content.text_content().strip() # No whitespace wanted.
except lxml.etree.ParserError:
data = ""
elif data:
if self.demote_headers:
# see above
try:
document = lxml.html.fromstring(data)
demote_headers(document, self.demote_headers)
data = lxml.html.tostring(document.body, encoding='unicode')
except (lxml.etree.ParserError, IndexError):
data = lxml.html.tostring(document, encoding='unicode')
return data
@property
def reading_time(self):
"""Reading time based on length of text."""
if self._reading_time is None:
text = self.text(strip_html=True)
words_per_minute = 220
words = len(text.split())
markup = lxml.html.fromstring(self.text(strip_html=False))
embeddables = [".//img", ".//picture", ".//video", ".//audio", ".//object", ".//iframe"]
media_time = 0
for embedded in embeddables:
media_time += (len(markup.findall(embedded)) * 0.33) # +20 seconds
self._reading_time = int(ceil((words / words_per_minute) + media_time)) or 1
return self._reading_time
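    # Illustrative arithmetic (example numbers, not from the source): a post of
    # 440 words containing two embedded images yields
    # ceil(440 / 220 + 2 * 0.33) = ceil(2.66) = 3 minutes of estimated reading time.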
@property
def remaining_reading_time(self):
"""Remaining reading time based on length of text (does not include teaser)."""
if self._remaining_reading_time is None:
text = self.text(teaser_only=True, strip_html=True)
words_per_minute = 220
words = len(text.split())
self._remaining_reading_time = self.reading_time - int(ceil(words / words_per_minute)) or 1
return self._remaining_reading_time
@property
def paragraph_count(self):
"""Return the paragraph count for this post."""
if self._paragraph_count is None:
# duplicated with Post.text()
lang = nikola.utils.LocaleBorg().current_lang
file_name = self._translated_file_path(lang)
with io.open(file_name, "r", encoding="utf8") as post_file:
data = post_file.read().strip()
try:
document = html5lib.html5parser.parse(
data, treebuilder='lxml', namespaceHTMLElements=False)
document = lxml.html.fragment_fromstring(
lxml.html.tostring(document), "body")
except lxml.etree.ParserError as e:
# if we don't catch this, it breaks later (Issue #374)
if str(e) == "Document is empty":
return ""
# let other errors raise
raise(e)
# output is a float, for no real reason at all
self._paragraph_count = int(document.xpath('count(//p)'))
return self._paragraph_count
@property
def remaining_paragraph_count(self):
"""Return the remaining paragraph count for this post (does not include teaser)."""
if self._remaining_paragraph_count is None:
try:
# Just asking self.text() is easier here.
document = html5lib.html5parser.parse(
self.text(teaser_only=True, show_read_more_link=False),
treebuilder='lxml', namespaceHTMLElements=False)
document = lxml.html.fragment_fromstring(
lxml.html.tostring(document), "body")
except lxml.etree.ParserError as e:
# if we don't catch this, it breaks later (Issue #374)
if str(e) == "Document is empty":
return ""
# let other errors raise
raise(e)
self._remaining_paragraph_count = self.paragraph_count - int(document.xpath('count(//p)'))
return self._remaining_paragraph_count
def source_link(self, lang=None):
"""Return absolute link to the post's source."""
ext = self.source_ext(True)
link = "/" + self.destination_path(lang=lang, extension=ext, sep='/')
link = utils.encodelink(link)
return link
def destination_path(self, lang=None, extension='.html', sep=os.sep):
"""Destination path for this post, relative to output/.
If lang is not specified, it's the current language.
Extension is used in the path if specified.
"""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
if self._has_pretty_url(lang):
path = os.path.join(self.translations[lang],
self.folder, self.meta[lang]['slug'], 'index' + extension)
else:
path = os.path.join(self.translations[lang],
self.folder, self.meta[lang]['slug'] + extension)
if sep != os.sep:
path = path.replace(os.sep, sep)
if path.startswith('./'):
path = path[2:]
return path
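    # Illustrative result (slug and folder are hypothetical): with pretty URLs
    # enabled, a post slugged 'hello' in folder 'posts' maps to
    # 'posts/hello/index.html'; without them it maps to 'posts/hello.html',
    # with a leading translation folder prepended for non-default languages.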
def section_color(self, lang=None):
"""Return the color of the post's section."""
slug = self.section_slug(lang)
if slug in self.config['POSTS_SECTION_COLORS'](lang):
return self.config['POSTS_SECTION_COLORS'](lang)[slug]
base = self.config['THEME_COLOR']
return utils.colorize_str_from_base_color(slug, base)
def section_link(self, lang=None):
"""Return the link to the post's section (deprecated)."""
utils.LOGGER.warning("Post.section_link is deprecated. Please use " +
"site.link('section_index', post.section_slug()) instead.")
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
slug = self.section_slug(lang)
t = os.path.normpath(self.translations[lang])
if t == '.':
t = ''
link = '/' + '/'.join(i for i in (t, slug) if i) + '/'
if not self.pretty_urls:
link = urljoin(link, self.index_file)
link = utils.encodelink(link)
return link
def section_name(self, lang=None):
"""Return the name of the post's section."""
slug = self.section_slug(lang)
if slug in self.config['POSTS_SECTION_NAME'](lang):
name = self.config['POSTS_SECTION_NAME'](lang)[slug]
else:
name = slug.replace('-', ' ').title()
return name
def section_slug(self, lang=None):
"""Return the slug for the post's section."""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
if not self.config['POSTS_SECTION_FROM_META']:
dest = self.destination_path(lang)
if dest[-(1 + len(self.index_file)):] == os.sep + self.index_file:
dest = dest[:-(1 + len(self.index_file))]
dirname = os.path.dirname(dest)
slug = dest.split(os.sep)
if not slug or dirname == '.':
slug = self.messages[lang]["Uncategorized"]
elif lang == slug[0]:
slug = slug[1]
else:
slug = slug[0]
else:
slug = self.meta[lang]['section'].split(',')[0] if 'section' in self.meta[lang] else self.messages[lang]["Uncategorized"]
return utils.slugify(slug)
def permalink(self, lang=None, absolute=False, extension='.html', query=None):
"""Return permalink for a post."""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
# Let compilers override extension (e.g. the php compiler)
if self.compiler.extension() != '.html':
extension = self.compiler.extension()
pieces = self.translations[lang].split(os.sep)
pieces += self.folder.split(os.sep)
if self._has_pretty_url(lang):
pieces += [self.meta[lang]['slug'], 'index' + extension]
else:
pieces += [self.meta[lang]['slug'] + extension]
pieces = [_f for _f in pieces if _f and _f != '.']
link = '/' + '/'.join(pieces)
if absolute:
link = urljoin(self.base_url, link[1:])
index_len = len(self.index_file)
if self.strip_indexes and link[-(1 + index_len):] == '/' + self.index_file:
link = link[:-index_len]
if query:
link = link + "?" + query
link = utils.encodelink(link)
return link
@property
def previewimage(self, lang=None):
"""Return the previewimage path."""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
image_path = self.meta[lang]['previewimage']
if not image_path:
return None
# This is further parsed by the template, because we don’t have access
# to the URL replacer here. (Issue #1473)
return image_path
def source_ext(self, prefix=False):
"""Return the source file extension.
If `prefix` is True, a `.src.` prefix will be added to the resulting extension
if it's equal to the destination extension.
"""
ext = os.path.splitext(self.source_path)[1]
# do not publish PHP sources
if prefix and ext == '.html':
# ext starts with a dot
return '.src' + ext
else:
return ext
# Code that fetches metadata from different places
def re_meta(line, match=None):
"""Find metadata using regular expressions."""
if match:
reStr = re.compile('^\.\. {0}: (.*)'.format(re.escape(match)))
else:
reStr = re.compile('^\.\. (.*?): (.*)')
result = reStr.findall(line.strip())
if match and result:
return (match, result[0])
elif not match and result:
return (result[0][0], result[0][1].strip())
else:
return (None,)
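# Illustrative examples (not from the original source):
#   re_meta('.. title: Hello')                -> ('title', 'Hello')
#   re_meta('.. slug: my-post', match='slug') -> ('slug', 'my-post')
#   re_meta('plain text line')                -> (None,)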
def _get_metadata_from_filename_by_regex(filename, metadata_regexp, unslugify_titles):
"""Try to reed the metadata from the filename based on the given re.
This requires to use symbolic group names in the pattern.
The part to read the metadata from the filename based on a regular
expression is taken from Pelican - pelican/readers.py
"""
match = re.match(metadata_regexp, filename)
meta = {}
if match:
# .items() for py3k compat.
for key, value in match.groupdict().items():
k = key.lower().strip() # metadata must be lowercase
if k == 'title' and unslugify_titles:
meta[k] = unslugify(value, discard_numbers=False)
else:
meta[k] = value
return meta
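# Illustrative use (the regexp below is hypothetical, not a project default):
#   _get_metadata_from_filename_by_regex(
#       '2016-01-01-my-post.rst',
#       r'(?P<date>\d{4}-\d{2}-\d{2})-(?P<title>.*)\.rst',
#       unslugify_titles=True)
# would yield {'date': '2016-01-01'} plus a 'title' key holding the
# unslugified file name (roughly 'My post').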
def get_metadata_from_file(source_path, config=None, lang=None):
"""Extract metadata from the file itself, by parsing contents."""
try:
if lang and config:
source_path = get_translation_candidate(config, source_path, lang)
elif lang:
source_path += '.' + lang
with io.open(source_path, "r", encoding="utf-8-sig") as meta_file:
meta_data = [x.strip() for x in meta_file.readlines()]
return _get_metadata_from_file(meta_data)
except (UnicodeDecodeError, UnicodeEncodeError):
raise ValueError('Error reading {0}: Nikola only supports UTF-8 files'.format(source_path))
except Exception: # The file may not exist, for multilingual sites
return {}
re_md_title = re.compile(r'^{0}([^{0}].*)'.format(re.escape('#')))
# Assuming rst titles are going to be at least 4 chars long,
# otherwise this detects things like ''' which break other markups.
re_rst_title = re.compile(r'^([{0}]{{4,}})'.format(re.escape(
string.punctuation)))
def _get_title_from_contents(meta_data):
"""Extract title from file contents, LAST RESOURCE."""
piece = meta_data[:]
title = None
for i, line in enumerate(piece):
if re_rst_title.findall(line) and i > 0:
title = meta_data[i - 1].strip()
break
if (re_rst_title.findall(line) and i >= 0 and
re_rst_title.findall(meta_data[i + 2])):
title = meta_data[i + 1].strip()
break
if re_md_title.findall(line):
title = re_md_title.findall(line)[0]
break
return title
def _get_metadata_from_file(meta_data):
"""Extract metadata from a post's source file."""
meta = {}
if not meta_data:
return meta
# Skip up to one empty line at the beginning (for txt2tags)
if not meta_data[0]:
meta_data = meta_data[1:]
# First, get metadata from the beginning of the file,
# up to first empty line
for i, line in enumerate(meta_data):
if not line:
break
match = re_meta(line)
if match[0]:
meta[match[0]] = match[1]
# If we have no title, try to get it from document
if 'title' not in meta:
t = _get_title_from_contents(meta_data)
if t is not None:
meta['title'] = t
return meta
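# Illustrative example: if the source file starts with
#   .. title: Hello
#   .. slug: hello
# followed by a blank line, _get_metadata_from_file() returns
# {'title': 'Hello', 'slug': 'hello'}.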
def get_metadata_from_meta_file(path, config=None, lang=None):
"""Take a post path, and gets data from a matching .meta file."""
global _UPGRADE_METADATA_ADVERTISED
meta_path = os.path.splitext(path)[0] + '.meta'
if lang and config:
meta_path = get_translation_candidate(config, meta_path, lang)
elif lang:
meta_path += '.' + lang
if os.path.isfile(meta_path):
with io.open(meta_path, "r", encoding="utf8") as meta_file:
meta_data = meta_file.readlines()
# Detect new-style metadata.
newstyleregexp = re.compile(r'\.\. .*?: .*')
newstylemeta = False
for l in meta_data:
if l.strip():
if re.match(newstyleregexp, l):
newstylemeta = True
if newstylemeta:
# New-style metadata is basically the same as reading metadata from
# a 1-file post.
return get_metadata_from_file(path, config, lang), newstylemeta
else:
if not _UPGRADE_METADATA_ADVERTISED:
LOGGER.warn("Some posts on your site have old-style metadata. You should upgrade them to the new format, with support for extra fields.")
LOGGER.warn("Install the 'upgrade_metadata' plugin (with 'nikola plugin -i upgrade_metadata') and run 'nikola upgrade_metadata'.")
_UPGRADE_METADATA_ADVERTISED = True
while len(meta_data) < 7:
meta_data.append("")
(title, slug, date, tags, link, description, _type) = [
x.strip() for x in meta_data][:7]
meta = {}
if title:
meta['title'] = title
if slug:
meta['slug'] = slug
if date:
meta['date'] = date
if tags:
meta['tags'] = tags
if link:
meta['link'] = link
if description:
meta['description'] = description
if _type:
meta['type'] = _type
return meta, newstylemeta
elif lang:
        # Metadata file doesn't exist, but this is not the default language;
        # if default-language metadata exists, return that.
# This makes the 2-file format detection more reliable (Issue #525)
return get_metadata_from_meta_file(path, config, lang=None)
else:
return {}, True
def get_meta(post, file_metadata_regexp=None, unslugify_titles=False, lang=None):
"""Get post's meta from source.
If ``file_metadata_regexp`` is given it will be tried to read
metadata from the filename.
If ``unslugify_titles`` is True, the extracted title (if any) will be unslugified, as is done in galleries.
If any metadata is then found inside the file the metadata from the
file will override previous findings.
"""
meta = defaultdict(lambda: '')
try:
config = post.config
except AttributeError:
config = None
_, newstylemeta = get_metadata_from_meta_file(post.metadata_path, config, lang)
meta.update(_)
if not meta:
post.is_two_file = False
if file_metadata_regexp is not None:
meta.update(_get_metadata_from_filename_by_regex(post.source_path,
file_metadata_regexp,
unslugify_titles))
compiler_meta = {}
if getattr(post, 'compiler', None):
compiler_meta = post.compiler.read_metadata(post, file_metadata_regexp, unslugify_titles, lang)
meta.update(compiler_meta)
if not post.is_two_file and not compiler_meta:
# Meta file has precedence over file, which can contain garbage.
        # Moreover, we should not talk to the file if we have compiler meta.
meta.update(get_metadata_from_file(post.source_path, config, lang))
if lang is None:
# Only perform these checks for the default language
if 'slug' not in meta:
# If no slug is found in the metadata use the filename
meta['slug'] = slugify(unicode_str(os.path.splitext(
os.path.basename(post.source_path))[0]))
if 'title' not in meta:
# If no title is found, use the filename without extension
meta['title'] = os.path.splitext(
os.path.basename(post.source_path))[0]
return meta, newstylemeta
def hyphenate(dom, _lang):
"""Hyphenate a post."""
# circular import prevention
from .nikola import LEGAL_VALUES
lang = None
if pyphen is not None:
lang = LEGAL_VALUES['PYPHEN_LOCALES'].get(_lang, pyphen.language_fallback(_lang))
else:
utils.req_missing(['pyphen'], 'hyphenate texts', optional=True)
if pyphen is not None and lang is not None:
# If pyphen does exist, we tell the user when configuring the site.
# If it does not support a language, we ignore it quietly.
try:
hyphenator = pyphen.Pyphen(lang=lang)
except KeyError:
LOGGER.error("Cannot find hyphenation dictoniaries for {0} (from {1}).".format(lang, _lang))
LOGGER.error("Pyphen cannot be installed to ~/.local (pip install --user).")
for tag in ('p', 'li', 'span'):
for node in dom.xpath("//%s[not(parent::pre)]" % tag):
skip_node = False
skippable_nodes = ['kbd', 'code', 'samp', 'mark', 'math', 'data', 'ruby', 'svg']
if node.getchildren():
for child in node.getchildren():
if child.tag in skippable_nodes or (child.tag == 'span' and 'math' in child.get('class', [])):
skip_node = True
elif 'math' in node.get('class', []):
skip_node = True
if not skip_node:
insert_hyphens(node, hyphenator)
return dom
def insert_hyphens(node, hyphenator):
"""Insert hyphens into a node."""
textattrs = ('text', 'tail')
if isinstance(node, lxml.etree._Entity):
# HTML entities have no .text
textattrs = ('tail',)
for attr in textattrs:
text = getattr(node, attr)
if not text:
continue
new_data = ' '.join([hyphenator.inserted(w, hyphen='\u00AD')
for w in text.split(' ')])
# Spaces are trimmed, we have to add them manually back
if text[0].isspace():
new_data = ' ' + new_data
if text[-1].isspace():
new_data += ' '
setattr(node, attr, new_data)
for child in node.iterchildren():
insert_hyphens(child, hyphenator)
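# Illustrative effect (exact break points depend on the pyphen dictionary in
# use): hyphenator.inserted('hyphenation', hyphen='\u00AD') returns the word
# with invisible soft hyphens (U+00AD) at syllable boundaries, so browsers can
# break it across lines only when wrapping is actually needed.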
CRYPT = string.Template("""\
<script>
function rc4(key, str) {
var s = [], j = 0, x, res = '';
for (var i = 0; i < 256; i++) {
s[i] = i;
}
for (i = 0; i < 256; i++) {
j = (j + s[i] + key.charCodeAt(i % key.length)) % 256;
x = s[i];
s[i] = s[j];
s[j] = x;
}
i = 0;
j = 0;
for (var y = 0; y < str.length; y++) {
i = (i + 1) % 256;
j = (j + s[i]) % 256;
x = s[i];
s[i] = s[j];
s[j] = x;
res += String.fromCharCode(str.charCodeAt(y) ^ s[(s[i] + s[j]) % 256]);
}
return res;
}
function decrypt() {
key = $$("#key").val();
crypt_div = $$("#encr")
crypted = crypt_div.html();
decrypted = rc4(key, window.atob(crypted));
if (decrypted.substr(decrypted.length - 11) == "<!--tail-->"){
crypt_div.html(decrypted);
$$("#pwform").hide();
crypt_div.show();
} else { alert("Wrong password"); };
}
</script>
<div id="encr" style="display: none;">${data}</div>
<div id="pwform">
<form onsubmit="javascript:decrypt(); return false;" class="form-inline">
<fieldset>
<legend>This post is password-protected.</legend>
<input type="password" id="key" placeholder="Type password here">
<button type="submit" class="btn">Show Content</button>
</fieldset>
</form>
</div>""")
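# Illustrative sketch (not part of the original module): a payload that the
# decrypt() JavaScript in CRYPT can unlock is produced by appending the
# "<!--tail-->" marker, RC4-encrypting with the password, and base64-encoding
# the result. The helper name below is hypothetical; it assumes ASCII/Latin-1
# content so byte values line up with the charCodeAt() units used by the
# JavaScript rc4() above.
def _rc4_encrypt_for_crypt_template(password, html):
    """Hypothetical helper: RC4 + base64 so the CRYPT JavaScript can decrypt it."""
    import base64
    key = bytearray(password.encode('latin-1'))
    data = bytearray((html + '<!--tail-->').encode('latin-1'))
    s = list(range(256))
    j = 0
    for i in range(256):                       # key scheduling (KSA)
        j = (j + s[i] + key[i % len(key)]) % 256
        s[i], s[j] = s[j], s[i]
    i = j = 0
    out = bytearray()
    for byte in data:                          # keystream generation (PRGA)
        i = (i + 1) % 256
        j = (j + s[i]) % 256
        s[i], s[j] = s[j], s[i]
        out.append(byte ^ s[(s[i] + s[j]) % 256])
    # window.atob() in the template reverses this base64 step client-side.
    return base64.b64encode(bytes(out)).decode('ascii')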
| mit | 5,035,294,736,049,951,000 | 38.76431 | 159 | 0.582409 | false |
GETLIMS/LIMS-Backend | lims/inventory/migrations/0001_initial.py | 1 | 6145 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import mptt.fields
import gm2m.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='AmountMeasure',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('name', models.CharField(max_length=100)),
('symbol', models.CharField(max_length=10)),
],
),
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('name', models.CharField(max_length=100)),
('identifier', models.CharField(unique=True, blank=True, null=True, db_index=True, max_length=20)),
('description', models.TextField(blank=True, null=True)),
('in_inventory', models.BooleanField(default=False)),
('amount_available', models.IntegerField(default=0)),
('added_on', models.DateTimeField(auto_now_add=True)),
('last_updated_on', models.DateTimeField(auto_now=True)),
('added_by', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
('amount_measure', models.ForeignKey(to='inventory.AmountMeasure')),
('created_from', models.ManyToManyField(to='inventory.Item', blank=True, related_name='created_from_rel_+')),
],
),
migrations.CreateModel(
name='ItemProperty',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('name', models.CharField(max_length=200)),
('value', models.TextField()),
('item', models.ForeignKey(to='inventory.Item', related_name='properties')),
],
),
migrations.CreateModel(
name='ItemTransfer',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('amount_taken', models.IntegerField(default=0)),
('barcode', models.CharField(blank=True, null=True, max_length=20)),
('coordinates', models.CharField(blank=True, null=True, max_length=2)),
('transfer_complete', models.BooleanField(default=False)),
('amount_measure', models.ForeignKey(to='inventory.AmountMeasure')),
('item', models.ForeignKey(to='inventory.Item')),
],
),
migrations.CreateModel(
name='ItemType',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('name', models.CharField(unique=True, db_index=True, max_length=150)),
('lft', models.PositiveIntegerField(editable=False, db_index=True)),
('rght', models.PositiveIntegerField(editable=False, db_index=True)),
('tree_id', models.PositiveIntegerField(editable=False, db_index=True)),
('level', models.PositiveIntegerField(editable=False, db_index=True)),
('parent', mptt.fields.TreeForeignKey(null=True, related_name='children', to='inventory.ItemType', blank=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Location',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('name', models.CharField(max_length=100)),
('code', models.CharField(unique=True, null=True, max_length=6)),
('lft', models.PositiveIntegerField(editable=False, db_index=True)),
('rght', models.PositiveIntegerField(editable=False, db_index=True)),
('tree_id', models.PositiveIntegerField(editable=False, db_index=True)),
('level', models.PositiveIntegerField(editable=False, db_index=True)),
('parent', mptt.fields.TreeForeignKey(null=True, related_name='children', to='inventory.Location', blank=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Set',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('name', models.CharField(max_length=40)),
('is_public', models.BooleanField(default=False)),
('is_partset', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('name', models.CharField(max_length=30)),
],
),
migrations.AddField(
model_name='item',
name='item_type',
field=mptt.fields.TreeForeignKey(to='inventory.ItemType'),
),
migrations.AddField(
model_name='item',
name='location',
field=mptt.fields.TreeForeignKey(null=True, to='inventory.Location', blank=True),
),
migrations.AddField(
model_name='item',
name='sets',
field=gm2m.fields.GM2MField('inventory.Set', through_fields=('gm2m_src', 'gm2m_tgt', 'gm2m_ct', 'gm2m_pk'), related_name='items'),
),
migrations.AddField(
model_name='item',
name='tags',
field=models.ManyToManyField(to='inventory.Tag', blank=True),
),
]
| mit | 7,413,653,602,382,370,000 | 46.269231 | 142 | 0.559642 | false |
MKaptein/streamingbandit | app/handlers/adminhandlers.py | 1 | 19180 | # -* coding: utf-8 -*-
import tornado.escape
import tornado.ioloop
import tornado.web
import json
import random
import os
from handlers.basehandler import BaseHandler, ExceptionHandler
from core.experiment import Experiment
from db.database import Database
from db.mongolog import MongoLog
from db.users import Users
class GenerateExperiments(BaseHandler):
def get(self):
""" Retrieve a list of experiments running on this server
+--------------------------------------------------------------------+
| Example |
+====================================================================+
| http://example.com/exp |
+--------------------------------------------------------------------+
:requires: A secure cookie obtained by logging in.
:returns: A JSON containing exp_id and name pairs.
:raises 401: If user is not logged in or if there is no secure cookie available.
"""
user = self.get_current_user()
if user:
db = Database()
response = db.get_all_experiments(int(user))
self.write(json.dumps(response))
else:
raise ExceptionHandler(reason = "Could not validate user.", status_code = 401)
def post(self):
""" Create a new experiment
+--------------------------------------------------------------------+
| Example |
+====================================================================+
| http://example.com/exp |
| {"name" : NAME, "get_context" : CODE, "get_action" : CODE, |
| "get_reward" : CODE, "set_reward" : CODE, "advice_id" : True, |
| "hourly_theta" : True, "delta_hours" : DELTA_HOURS, |
| "default_reward" : DEFAULT_REWARD} |
+--------------------------------------------------------------------+
.. note:: The parameters for the POST calls have to be posted in the \
body as a JSON object.
:requires: A secure cookie obtained by logging in.
:param string name: Name of the experiment.
:param string get_context (optional): String of python code for get context code.
:param string get_action (optional): String of python code for get action code.
:param string get_reward (optional): String of python code for get reward code.
:param string set_reward (optional): String of python code for set reward code.
:param bool hourly_theta: Bool indicating whether the state of Theta should be stored hourly.
:param bool advice_id: Bool indicating whether the getadvice and setreward calls should return an advice_id.
:param int delta_hours: If advice_id is True, supply this to give the number of hours that an advice_id should be stored (between 0 and 99999).
:param dict default_reward: If advice_id is True, supply this to give the default reward for advice_id's that are over their delta_hours limit.
:returns: A JSON of the form:
.. code-block:: json
{
"id" : The assigned experiment id,
"name" : The name of the experiment (checked for duplicates),
"error" : (Optional) error message
"key" : The key for the experiment
}
:raises 401: If user is not logged in or if there is no secure cookie available.
"""
user = self.get_current_user()
if user:
data = tornado.escape.json_decode(self.request.body)
exp_obj = {}
exp_obj["user_id"] = int(user)
exp_obj["name"] = data["name"]
if "get_context" in data:
exp_obj["get_context"] = data["get_context"]
else:
exp_obj["get_context"] = ""
if "get_action" in data:
exp_obj["get_action"] = data["get_action"]
else:
exp_obj["get_action"] = ""
if "get_reward" in data:
exp_obj["get_reward"] = data["get_reward"]
else:
exp_obj["get_reward"] = ""
if "set_reward" in data:
exp_obj["set_reward"] = data["set_reward"]
else:
exp_obj["set_reward"] = ""
exp_obj["hourly_theta"] = data["hourly_theta"]
exp_obj["advice_id"] = data["advice_id"]
if exp_obj["advice_id"] in ["true", "True", "y", "yes"]:
exp_obj["advice_id"] = "True"
else:
exp_obj["advice_id"] = "False"
if exp_obj["advice_id"] is "True":
if 0 <= int(data["delta_hours"]) <= 99999:
exp_obj["delta_hours"] = data["delta_hours"]
else:
raise ExceptionHandler(reason = "Supplied number for delta hours must be between 0 and 99999.", status_code = 400)
exp_obj["default_reward"] = data["default_reward"]
exp_obj["key"] = hex(random.getrandbits(42))[2:-1]
db = Database()
insertid = db.insert_experiment(exp_obj)
response = {}
response["name"] = exp_obj["name"]
response["id"] = insertid
response["key"] = exp_obj["key"]
response["error"] = False
self.write(json.dumps(response))
else:
raise ExceptionHandler(reason = "Could not validate user.", status_code = 401)
class UpdateExperiment(BaseHandler):
def get(self, exp_id):
""" Retrieve a specific experiment running on this server
+--------------------------------------------------------------------+
| Example |
+====================================================================+
| http://example.com/exp/EXP_ID |
+--------------------------------------------------------------------+
:requires: A secure cookie obtained by logging in.
:param int exp_id: Experiment ID for the experiment that is to be retrieved.
        :returns: A JSON containing all the info for the experiment.
:raises 401: If the experiment does not belong to this user or the exp_id is wrong.
:raises 401: If user is not logged in or if there is no secure cookie available.
"""
if self.get_current_user():
if self.validate_user_experiment(exp_id):
db = Database()
response = db.get_one_experiment(exp_id)
self.write(json.dumps(response))
else:
raise ExceptionHandler(reason = "Experiment could not be validated.", status_code = 401)
else:
raise ExceptionHandler(reason = "Could not validate user.", status_code = 401)
def delete(self, exp_id):
""" Delete an experiment
+--------------------------------------------------------------------+
| Example |
+====================================================================+
| http://example.com/exp/EXP_ID |
+--------------------------------------------------------------------+
:requires: A secure cookie obtained by logging in.
:param int exp_id: The ID of the experiment to be deleted.
:returns: A JSON showing the deleted experiment.
:raises 401: If the experiment does not belong to this user or the exp_id is wrong.
:raises 401: If user is not logged in or if there is no secure cookie available.
"""
if self.get_current_user():
if self.validate_user_experiment(exp_id):
db = Database()
mongo_db = MongoLog()
response = db.delete_experiment(exp_id)
mongo_db.log_deleted_experiment(response)
self.write(json.dumps(response['exp_id']))
else:
raise ExceptionHandler(reason = "Experiment could not be validated.", status_code = 401)
else:
raise ExceptionHandler(reason = "Could not validate user.", status_code = 401)
def put(self, exp_id):
""" Edit an experiment
+--------------------------------------------------------------------+
| Example |
+====================================================================+
| http://example.com/exp/EXP_ID |
| {"name" : NAME, "getcontext" : CODE, "getaction" : CODE, |
| "getreward" : CODE, "setreward" : CODE, "advice_id" : True, |
| "hourly_theta" : True, "delta_hours" : DELTA_HOURS, |
| "default_reward" : DEFAULT_REWARD} |
+--------------------------------------------------------------------+
.. note:: The parameters for the PUT calls have to be posted in the \
body as a JSON object.
:requires: A secure cookie obtained by logging in.
:param string name: Name of the experiment.
:param string get_context (optional): String of python code for get context code.
:param string get_action (optional): String of python code for get action code.
:param string get_reward (optional): String of python code for get reward code.
:param string set_reward (optional): String of python code for set reward code.
:param bool hourly_theta: Bool indicating whether the state of Theta should be stored hourly.
:param bool advice_id: Bool indicating whether the getAdvice and setReward calls should return an advice_id.
:param int delta_hours: If advice_id is True, supply this to give the number of hours that an advice_id should be stored.
:param dict default_reward: If advice_id is True, supply this to give the default reward for advice_id's that are over their delta_hours limit.
:returns: A JSON indicating success.
:raises 401: If the experiment does not belong to this user or the exp_id is wrong.
:raises 401: If user is not logged in or if there is no secure cookie available.
"""
user = self.get_current_user()
if user:
if self.validate_user_experiment(exp_id):
data = tornado.escape.json_decode(self.request.body)
exp_obj = {}
exp_obj["user_id"] = int(user)
exp_obj["name"] = data["name"]
if "get_context" in data:
exp_obj["get_context"] = data["get_context"]
else:
exp_obj["get_context"] = ""
if "get_action" in data:
exp_obj["get_action"] = data["get_action"]
else:
exp_obj["get_action"] = ""
if "get_reward" in data:
exp_obj["get_reward"] = data["get_reward"]
else:
exp_obj["get_reward"] = ""
if "set_reward" in data:
exp_obj["set_reward"] = data["set_reward"]
else:
exp_obj["set_reward"] = ""
exp_obj["hourly_theta"] = data["hourly_theta"]
exp_obj["advice_id"] = data["advice_id"]
if exp_obj["advice_id"] in ["true", "True", "y", "yes"]:
exp_obj["advice_id"] = "True"
else:
exp_obj["advice_id"] = "False"
if exp_obj["advice_id"] is "True":
if 0 <= int(data["delta_hours"]) <= 99999:
exp_obj["delta_hours"] = data["delta_hours"]
else:
raise ExceptionHandler(reason = "Supplied number for delta hours must be between 0 and 99999.", status_code = 400)
exp_obj["default_reward"] = data["default_reward"]
db = Database()
response = {}
response["id"] = db.edit_experiment(exp_obj, exp_id)
self.write(json.dumps(response))
else:
raise ExceptionHandler(reason = "Experiment could not be validated.", status_code = 401)
else:
raise ExceptionHandler(reason = "Could not validate user.", status_code = 401)
class ListDefaults(BaseHandler):
def get(self):
""" Get the list with default available experiments.
+--------------------------------------------------------------------+
| Example |
+====================================================================+
| http://example.com/exp/defaults |
+--------------------------------------------------------------------+
:requires: A secure cookie obtained by logging in.
:returns: A JSON with the default experiments.
:raises 401: If user is not logged in or if there is no secure cookie available.
"""
if self.get_secure_cookie("user"):
folderdata = sorted([f for f in os.listdir("./defaults") if not f.startswith('.')])
folderdata = [x.replace("_"," ") for x in folderdata]
folders = dict(enumerate(folderdata))
self.write(folders)
else:
raise ExceptionHandler(reason = "Could not validate user.", status_code = 401)
class GetDefault(BaseHandler):
def get(self, default_id):
""" Retrieve properties of a default experiment.
+--------------------------------------------------------------------+
| Example |
+====================================================================+
| http://example.com/exp/defaults/DEFAULT_ID |
+--------------------------------------------------------------------+
:requires: A secure cookie obtained by logging in.
:param int default_id: The ID of the default experiment.
:returns: A JSON containing the experiment properties.
:raises 401: If user is not logged in or if there is no secure cookie available.
"""
if self.get_secure_cookie("user"):
folderdata = sorted([f for f in os.listdir("./defaults") if not f.startswith('.')])
folderdata = dict(enumerate(folderdata))
data={}
data["name"] = folderdata[int(default_id)]
filenames = ["get_context", "get_action", "get_reward", "set_reward"]
for filename in filenames:
if os.path.isfile("./defaults/"+data["name"]+"/"+filename+".py"):
data[filename] = open("./defaults/"+data["name"]+"/"+filename+".py").read()
self.write(data)
else:
raise ExceptionHandler(reason = "Could not validate user.", status_code = 401)
class ResetExperiment(BaseHandler):
def get(self, exp_id):
""" Reset the theta of an experiment.
+--------------------------------------------------------------------+
| Example |
+====================================================================+
| http://example.com/exp/EXP_ID/resetexperiment?key=KEY |
| &theta_key=THETA_KEY&theta_value=THETA_VALUE |
+--------------------------------------------------------------------+
:requires: A secure cookie obtained by logging in.
:param int exp_id: The experiment ID.
:param string key: The key of the experiment.
:param string theta_key (optional): The key for theta used when setting \
theta in the setReward and getAction code.
:param string theta_value (optional): The value for theta used when \
setting theta in the setReward and getAction code.
:raises 401: If the theta_key or theta_value does not exist or is not valid.
:raises 401: If the experiment does not belong to this user or the exp_id is wrong.
:raises 401: If user is not logged in or if there is no secure cookie available.
"""
if self.get_secure_cookie("user"):
if self.validate_user_experiment(exp_id):
key = self.get_argument("key", default = False)
theta_key = self.get_argument("theta_key", default = None)
theta_value = self.get_argument("theta_value", default = None)
__EXP__ = Experiment(exp_id, key)
status = __EXP__.delete_theta(key = theta_key, value = theta_value)
if status >= 1:
self.write(json.dumps({'status':'success'}))
else:
raise ExceptionHandler(reason = "Theta_key or theta_value could not be validated.", status_code = 401)
else:
raise ExceptionHandler(reason = "Experiment could not be validated.", status_code = 401)
else:
raise ExceptionHandler(reason = "Could not validate user.", status_code = 401)
class AddUser(BaseHandler):
def post(self):
""" Add a user to StreamingBandit.
+--------------------------------------------------------------------+
| Example |
+====================================================================+
| http://example.com/user |
| {"username" : USERNAME, "password" : PASSWORD} |
+--------------------------------------------------------------------+
.. note:: The parameters for the POST calls have to be posted in the \
body as a JSON object.
:param string username: The preferred username.
:param string password: The preferred password.
:returns: JSON indicating success.
:raises 400: If user with username already exists.
"""
if self.valid_admin():
data = tornado.escape.json_decode(self.request.body)
users = Users()
username = data["username"]
password = data["password"]
user_id = users.create_user(username, password)
if user_id is False:
raise ExceptionHandler(reason = "User already exists.", status_code = 400)
else:
self.write(json.dumps({'status' : 'success'}))
else:
raise ExceptionHandler(reason = "You are not an admin.", status_code = 401)
| mit | 3,692,329,213,822,720,500 | 49.209424 | 151 | 0.4756 | false |
madhav-datt/spell-check | pdf_text/pdf2txt.py | 1 | 4620 | #
# pdf2txt.py from the FOSS python PDFMiner package
# http://euske.github.io/pdfminer/index.html#pdf2txt
# Extract text from PDF to text files
#
# Has to be run separately with python2.X (not compatible with python3.X)
#
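#
# Example invocation (file names are illustrative):
#   python2 pdf2txt.py -o output.txt -t text input.pdf
#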
import sys
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfdevice import PDFDevice, TagExtractor
from pdfminer.pdfpage import PDFPage
from pdfminer.converter import XMLConverter, HTMLConverter, TextConverter
from pdfminer.cmapdb import CMapDB
from pdfminer.layout import LAParams
from pdfminer.image import ImageWriter
def main(argv):
import getopt
def usage():
print('usage: %s [-d] [-p pagenos] [-m maxpages] [-P password] [-o output]'
' [-C] [-n] [-A] [-V] [-M char_margin] [-L line_margin] [-W word_margin]'
' [-F boxes_flow] [-Y layout_mode] [-O output_dir] [-R rotation]'
' [-t text|html|xml|tag] [-c codec] [-s scale]'
' file ...' % argv[0])
return 100
try:
(opts, args) = getopt.getopt(argv[1:], 'dp:m:P:o:CnAVM:L:W:F:Y:O:R:t:c:s:')
except getopt.GetoptError:
return usage()
if not args:
return usage()
# debug option
debug = 0
# input option
password = ''
pagenos = set()
maxpages = 0
# output option
outfile = None
outtype = None
imagewriter = None
rotation = 0
layoutmode = 'normal'
codec = 'utf-8'
pageno = 1
scale = 1
caching = True
showpageno = True
laparams = LAParams()
for (k, v) in opts:
if k == '-d':
debug += 1
elif k == '-p':
pagenos.update(int(x) - 1 for x in v.split(','))
elif k == '-m':
maxpages = int(v)
elif k == '-P':
password = v
elif k == '-o':
outfile = v
elif k == '-C':
caching = False
elif k == '-n':
laparams = None
elif k == '-A':
laparams.all_texts = True
elif k == '-V':
laparams.detect_vertical = True
elif k == '-M':
laparams.char_margin = float(v)
elif k == '-L':
laparams.line_margin = float(v)
elif k == '-W':
laparams.word_margin = float(v)
elif k == '-F':
laparams.boxes_flow = float(v)
elif k == '-Y':
layoutmode = v
elif k == '-O':
imagewriter = ImageWriter(v)
elif k == '-R':
rotation = int(v)
elif k == '-t':
outtype = v
elif k == '-c':
codec = v
elif k == '-s':
scale = float(v)
#
PDFDocument.debug = debug
PDFParser.debug = debug
CMapDB.debug = debug
PDFResourceManager.debug = debug
PDFPageInterpreter.debug = debug
PDFDevice.debug = debug
#
rsrcmgr = PDFResourceManager(caching=caching)
if not outtype:
outtype = 'text'
if outfile:
if outfile.endswith('.htm') or outfile.endswith('.html'):
outtype = 'html'
elif outfile.endswith('.xml'):
outtype = 'xml'
elif outfile.endswith('.tag'):
outtype = 'tag'
if outfile:
outfp = file(outfile, 'w')
else:
outfp = sys.stdout
if outtype == 'text':
device = TextConverter(rsrcmgr, outfp, codec=codec, laparams=laparams,
imagewriter=imagewriter)
elif outtype == 'xml':
device = XMLConverter(rsrcmgr, outfp, codec=codec, laparams=laparams,
imagewriter=imagewriter)
elif outtype == 'html':
device = HTMLConverter(rsrcmgr, outfp, codec=codec, scale=scale,
layoutmode=layoutmode, laparams=laparams,
imagewriter=imagewriter)
elif outtype == 'tag':
device = TagExtractor(rsrcmgr, outfp, codec=codec)
else:
return usage()
for fname in args:
fp = file(fname, 'rb')
interpreter = PDFPageInterpreter(rsrcmgr, device)
for page in PDFPage.get_pages(fp, pagenos,
maxpages=maxpages, password=password,
caching=caching, check_extractable=True):
page.rotate = (page.rotate + rotation) % 360
interpreter.process_page(page)
fp.close()
device.close()
outfp.close()
return
if __name__ == '__main__':
sys.exit(main(sys.argv))
| gpl-2.0 | 2,343,243,574,910,225,400 | 30.643836 | 87 | 0.540693 | false |
cedadev/ndg_security_common | setup.py | 1 | 2632 | #!/usr/bin/env python
"""Distribution Utilities setup program for NDG Security Package
NERC Data Grid Project
"""
__author__ = "P J Kershaw"
__date__ = "24/04/06"
__copyright__ = "(C) 2009 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "[email protected]"
__revision__ = '$Id$'
# Bootstrap setuptools if necessary.
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
import sys
import os
# Packages needed for NDG Security
# Note commented out ones fail with PyPI - use explicit link instead
_PKG_DEPENDENCIES = [
'ndg-httpsclient',
'ndg_saml',
'ndg_xacml'
]
# Python 2.5 includes ElementTree by default
if sys.version_info[0:2] < (2, 5):
_PKG_DEPENDENCIES += ['ElementTree', 'cElementTree']
THIS_DIR = os.path.dirname(__file__)
try:
LONG_DESCR = open(os.path.join(THIS_DIR, 'README.md')).read()
except IOError:
LONG_DESCR = """\
NDG Security package for components common to client and server side
====================================================================
NDG Security is the security system originally developed for the UK Natural
Environment Research Council funded NERC DataGrid. It's a system to provide
federated access control and identity management and has been applied for use
with the Earth System Grid Federation.
"""
setup(
name = 'ndg_security_common',
version = '2.5.0',
description = 'NERC DataGrid Security package containing common '
'utilities used by both server and client '
'packages',
long_description = LONG_DESCR,
author = 'Philip Kershaw',
author_email = '[email protected]',
maintainer = 'Philip Kershaw',
maintainer_email = '[email protected]',
url = 'https://github.com/cedadev/ndg_security_common',
license = 'BSD - See LICENCE file for details',
install_requires = _PKG_DEPENDENCIES,
extras_require = {
# M2Crypto is required for SSL Client based validation of OpenID
# Providers
'openid_relying_party_provider_validation': ["M2Crypto"],
},
dependency_links = ["http://dist.ceda.ac.uk/pip/"],
packages = find_packages(),
namespace_packages = ['ndg', 'ndg.security'],
entry_points = None,
test_suite = 'ndg.security.common.test',
zip_safe = False
)
| bsd-3-clause | 2,917,347,022,940,199,400 | 34.093333 | 79 | 0.628799 | false |
patrickdw123/ParanoiDF | html.py | 1 | 10764 | # ParanoiDF. A combination of several PDF analysis/manipulation tools to
# produce one of the most technically useful PDF analysis tools.
#
# Idea proposed by Julio Hernandez-Castro, University of Kent, UK.
# By Patrick Wragg
# University of Kent
# 21/07/2014
#
# With thanks to:
# Julio Hernandez-Castro, my supervisor.
# Jose Miguel Esparza for writing PeePDF (the basis of this tool).
# Didier Stevens for his "make-PDF" tools.
# Blake Hartstein for Jsunpack-n.
# Yusuke Shinyama for Pdf2txt.py (PDFMiner)
# Nacho Barrientos Arias for Pdfcrack.
# Kovid Goyal for Calibre (DRM removal).
# Jay Berkenbilt for QPDF.
#
# Copyright (C) 2014-2018 Patrick Wragg
#
# This file is part of ParanoiDF.
#
# ParanoiDF is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ParanoiDF is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ParanoiDF. If not, see <http://www.gnu.org/licenses/>.
#
# This was written by Blake Hartstein for Jsunpack-n.
'''
Jsunpackn - A generic JavaScript Unpacker Network Edition
Copyright (C) 2010 Blake Hartstein
http://jsunpack.jeek.org/
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
'''
import re
import sys
try:
from bs4 import BeautifulSoup
except ImportError:
# BeautifulSoup 4.x not installed trying BeautifulSoup 3.x
try:
from BeautifulSoup import BeautifulSoup
except ImportError:
print ('BeautifulSoup not installed')
exit(-1)
class Parser:
'''
A simple HTML language parser. Uses the 'htmlparse.conf' file to define rules.
    Please read that file for more information on the syntax.
<Parser obj>.storage is a 'special' return field. You should only use it if you
wish to get the result in python instead of via an output string.
'''
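    # Illustrative rule syntax (taken from the sample config in main() below):
    #   !define rawSCRIPT ;%s
    #   !parse script * rawSCRIPT:contents
    #   !filter script <[/]?script[^>]*>|<!--|//-->
    # "!define" names an output template, "!parse tag attribs format:fields"
    # extracts fields from matching tags, and "!filter" strips a regex from
    # the generated output.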
debug = False
def __init__(self, htmlparseconfig):
self.storage = []
self.html_definitions = {}
self.html_filters = {}
self.html_parse_rules = []
try:
htmlrules = htmlparseconfig.splitlines()
except:
htmlrules = []
print 'Problem while parsing HTML parsing rules'
line = 0
for htmlrule in htmlrules:
line += 1
htmlrule = re.sub('\n', '', htmlrule)
if not re.match('^\s*$|^#', htmlrule):
htmlrule = re.sub('[ \t]+', ' ', htmlrule)
field = htmlrule.split(' ')
if htmlrule.startswith('!define'):
if len(field) > 1:
name, value = field[1], ' '.join(field[2:])
self.html_definitions[name] = value
elif htmlrule.startswith('!parse'):
if len(field) == 4:
tag = field[1]
if tag == '*':
tag = True
attrib = {}
invals = field[2].split(',')
for val in invals:
if val == '*' or val.startswith('!'):
pass
else:
attrib[val] = True
hformat, outvals = field[3].split(':')
outvals = outvals.split(',')
self.html_parse_rules.append([tag, attrib, invals,
hformat, outvals])
elif htmlrule.startswith('!filter'):
if len(field) > 2:
tag, value = field[1], ' '.join(field[2:])
self.html_filters[tag] = re.sub('^\s+|\s+$', '', value)
else:
print 'fatal: invalid htmlparse.config line: %d' % line
if self.debug:
print ('done loading htmlparse, (%d parse_rules, %d definitions, '
'%d filters)' % (len(self.html_parse_rules),
len(self.html_definitions),
len(self.html_filters)))
def htmlparse(self, data):
'''
Input: can be html code or raw JavaScript code
Output: an array of [headers, raw JavaScript]
'''
outheader, out = '', ''
data = re.sub('\x00', '', data)
try:
soup = BeautifulSoup(data)
except:
print('Fatal error during HTML parsing')
return '', ''
for tag, attrib, invals, hformat, outvals in self.html_parse_rules:
for htm in soup.findAll(tag, attrib):
now = {}
ignore = False #if a negated match occurs
for val in invals:
if val.startswith('!'):
#negated match
val = val[1:]
try:
now[val] = str(htm[val])
ignore = True
except:
pass #expected behavior
if not ignore:
for val in outvals:
if val == '*':
now['*'] = ''
elif val == 'contents':
try:
now['contents'] = ' '.join(map(str,
htm.contents))
except KeyError:
now['contents'] = ''
except UnicodeEncodeError:
now['contents'] = ' '.join(map(str,
str(htm.contents)
))
elif val == 'name':
try:
now['name'] = htm.name
except KeyError:
now['name'] = ''
else:
try:
now[val] = str(htm[val])
except KeyError:
now[val] = ''
#normalize when assigning to variables
for k in now:
# if this fails, it means that we are trying to get the
# result in python
if hformat in self.html_definitions:
if not hformat.startswith('raw'):
now[k] = re.sub('([^a-zA-Z0-9])',
lambda m: ('\\x%02x'
% ord(m.group(1))),
now[k])
now[k] = "'%s'" % now[k]
# if this fails, it means that we are trying to get the
# result in python
if hformat in self.html_definitions:
myfmt = re.sub('^\s+', '',
self.html_definitions[hformat]
).split('%s')
if len(myfmt) - 1 == len(outvals):
lineout = ''
for i in range(0, len(outvals)):
lineout += myfmt[i]
lineout += now[outvals[i]]
lineout += myfmt[-1] + '\n'
if htm.name in self.html_filters:
lineout = re.sub(self.html_filters[htm.name],
'', lineout)
if '*' in self.html_filters:
lineout = re.sub(self.html_filters['*'], '',
lineout, re.I)
if hformat.startswith('header'):
outheader += lineout
else:
out += lineout
else:
print ('fatal: invalid htmlparse.config hformat, '
'parameter count or definition problem')
else:
for i in range(0, len(outvals)):
self.storage.append([hformat, now[outvals[i]]])
return str(outheader), str(out)
def main():
'''
Testing html Parser with pdf as input
'''
Parser.debug = True
#fin = open('htmlparse.config', 'r')
#htmlparseconfig = fin.read()
#fin.close()
pdfparseconfig = '''
!define rawSCRIPT ;%s
!parse script * rawSCRIPT:contents
!parse imagefield1 * to_python:contents
!filter script <[/]?script[^>]*>|<!--|//-->
!filter * ^javascript:\s*|^return\s+
'''
#xfa:contenttype
hparser = Parser(pdfparseconfig)
#hparser = Parser(htmlparseconfig)
for infile in sys.argv[1:]:
fin = open(infile, 'rb')
data = fin.read()
fin.close()
parsed_header, parsed = hparser.htmlparse(data)
parsed = parsed_header + parsed
if len(parsed) > 0:
fout = open('%s.out' % infile, 'wb')
fout.write(parsed)
fout.close()
print 'Wrote %s.out (%d bytes)' % (infile, len(parsed))
else:
print 'Nothing parsed for %s' % infile
if __name__ == '__main__':
main()
| gpl-3.0 | 8,699,529,066,536,838,000 | 39.164179 | 84 | 0.464604 | false |
ByteInternet/libcloud | libcloud/compute/drivers/openstack.py | 1 | 143549 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OpenStack driver
"""
from libcloud.common.exceptions import BaseHTTPError
from libcloud.utils.iso8601 import parse_date
try:
import simplejson as json
except ImportError:
import json
import warnings
import base64
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import b
from libcloud.utils.py3 import next
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import parse_qs
from libcloud.common.openstack import OpenStackBaseConnection
from libcloud.common.openstack import OpenStackDriverMixin
from libcloud.common.openstack import OpenStackException
from libcloud.common.openstack import OpenStackResponse
from libcloud.utils.networking import is_public_subnet
from libcloud.compute.base import NodeSize, NodeImage, NodeImageMember, \
UuidMixin
from libcloud.compute.base import (NodeDriver, Node, NodeLocation,
StorageVolume, VolumeSnapshot)
from libcloud.compute.base import KeyPair
from libcloud.compute.types import NodeState, StorageVolumeState, Provider, \
VolumeSnapshotState, Type
from libcloud.pricing import get_size_price
from libcloud.utils.xml import findall
from libcloud.utils.py3 import ET
__all__ = [
'OpenStack_1_0_Response',
'OpenStack_1_0_Connection',
'OpenStack_1_0_NodeDriver',
'OpenStack_1_0_SharedIpGroup',
'OpenStack_1_0_NodeIpAddresses',
'OpenStack_1_1_Response',
'OpenStack_1_1_Connection',
'OpenStack_1_1_NodeDriver',
'OpenStack_1_1_FloatingIpPool',
'OpenStack_2_FloatingIpPool',
'OpenStack_1_1_FloatingIpAddress',
'OpenStack_2_PortInterfaceState',
'OpenStack_2_PortInterface',
'OpenStackNodeDriver'
]
ATOM_NAMESPACE = "http://www.w3.org/2005/Atom"
DEFAULT_API_VERSION = '1.1'
PAGINATION_LIMIT = 1000
class OpenStackComputeConnection(OpenStackBaseConnection):
# default config for http://devstack.org/
service_type = 'compute'
service_name = 'nova'
service_region = 'RegionOne'
class OpenStackImageConnection(OpenStackBaseConnection):
service_type = 'image'
service_name = 'glance'
service_region = 'RegionOne'
class OpenStackNetworkConnection(OpenStackBaseConnection):
service_type = 'network'
service_name = 'neutron'
service_region = 'RegionOne'
class OpenStackVolumeV2Connection(OpenStackBaseConnection):
service_type = 'volumev2'
service_name = 'cinderv2'
service_region = 'RegionOne'
class OpenStackNodeDriver(NodeDriver, OpenStackDriverMixin):
"""
Base OpenStack node driver. Should not be used directly.
"""
api_name = 'openstack'
name = 'OpenStack'
website = 'http://openstack.org/'
NODE_STATE_MAP = {
'BUILD': NodeState.PENDING,
'REBUILD': NodeState.PENDING,
'ACTIVE': NodeState.RUNNING,
'SUSPENDED': NodeState.SUSPENDED,
'SHUTOFF': NodeState.STOPPED,
'DELETED': NodeState.TERMINATED,
'QUEUE_RESIZE': NodeState.PENDING,
'PREP_RESIZE': NodeState.PENDING,
'VERIFY_RESIZE': NodeState.RUNNING,
'PASSWORD': NodeState.PENDING,
'RESCUE': NodeState.PENDING,
'REBOOT': NodeState.REBOOTING,
'HARD_REBOOT': NodeState.REBOOTING,
'SHARE_IP': NodeState.PENDING,
'SHARE_IP_NO_CONFIG': NodeState.PENDING,
'DELETE_IP': NodeState.PENDING,
'ERROR': NodeState.ERROR,
'UNKNOWN': NodeState.UNKNOWN
}
# http://developer.openstack.org/api-ref-blockstorage-v2.html#volumes-v2
VOLUME_STATE_MAP = {
'creating': StorageVolumeState.CREATING,
'available': StorageVolumeState.AVAILABLE,
'attaching': StorageVolumeState.ATTACHING,
'in-use': StorageVolumeState.INUSE,
'deleting': StorageVolumeState.DELETING,
'error': StorageVolumeState.ERROR,
'error_deleting': StorageVolumeState.ERROR,
'backing-up': StorageVolumeState.BACKUP,
'restoring-backup': StorageVolumeState.BACKUP,
'error_restoring': StorageVolumeState.ERROR,
'error_extending': StorageVolumeState.ERROR,
}
# http://developer.openstack.org/api-ref-blockstorage-v2.html#ext-backups-v2
SNAPSHOT_STATE_MAP = {
'creating': VolumeSnapshotState.CREATING,
'available': VolumeSnapshotState.AVAILABLE,
'deleting': VolumeSnapshotState.DELETING,
'error': VolumeSnapshotState.ERROR,
'restoring': VolumeSnapshotState.RESTORING,
'error_restoring': VolumeSnapshotState.ERROR
}
def __new__(cls, key, secret=None, secure=True, host=None, port=None,
api_version=DEFAULT_API_VERSION, **kwargs):
if cls is OpenStackNodeDriver:
if api_version == '1.0':
cls = OpenStack_1_0_NodeDriver
elif api_version == '1.1':
cls = OpenStack_1_1_NodeDriver
elif api_version in ['2.0', '2.1', '2.2']:
cls = OpenStack_2_NodeDriver
else:
raise NotImplementedError(
"No OpenStackNodeDriver found for API version %s" %
(api_version))
return super(OpenStackNodeDriver, cls).__new__(cls)
def __init__(self, *args, **kwargs):
OpenStackDriverMixin.__init__(self, **kwargs)
super(OpenStackNodeDriver, self).__init__(*args, **kwargs)
@staticmethod
def _paginated_request(url, obj, connection, params=None):
"""
Perform multiple calls in order to have a full list of elements when
the API responses are paginated.
:param url: API endpoint
:type url: ``str``
:param obj: Result object key
:type obj: ``str``
:param connection: The API connection to use to perform the request
:type connection: ``obj``
:param params: Any request parameters
:type params: ``dict``
:return: ``list`` of API response objects
:rtype: ``list``
"""
params = params or {}
objects = list()
loop_count = 0
while True:
data = connection.request(url, params=params)
values = data.object.get(obj, list())
objects.extend(values)
links = data.object.get('%s_links' % obj, list())
next_links = [n for n in links if n['rel'] == 'next']
if next_links:
next_link = next_links[0]
query = urlparse.urlparse(next_link['href'])
# The query[4] references the query parameters from the url
params.update(parse_qs(query[4]))
else:
break
# Prevent the pagination from looping indefinitely in case
# the API returns a loop for some reason.
loop_count += 1
if loop_count > PAGINATION_LIMIT:
raise OpenStackException(
'Pagination limit reached for %s, the limit is %d. '
'This might indicate that your API is returning a '
'looping next target for pagination!' % (
url, PAGINATION_LIMIT
), None
)
return {obj: objects}
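    # A minimal sketch of the pagination contract this helper expects,
    # assuming a hypothetical ``connection`` object: each page carries the
    # items under ``obj`` and, while more pages exist, a ``<obj>_links``
    # entry whose ``rel == 'next'`` href holds the query string (for
    # example ``?marker=...``) used for the following request.
    #
    # >>> page = {'servers': [{'id': 'a'}, {'id': 'b'}],
    # ...         'servers_links': [{'rel': 'next',
    # ...                            'href': '/servers/detail?marker=b'}]}
    # >>> OpenStackNodeDriver._paginated_request(
    # ...     '/servers/detail', 'servers', connection)  # doctest: +SKIP
    # {'servers': [{'id': 'a'}, {'id': 'b'}, ...]}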
def destroy_node(self, node):
uri = '/servers/%s' % (node.id)
resp = self.connection.request(uri, method='DELETE')
# The OpenStack and Rackspace documentation both say this API will
        # return a 204, but in fact, everyone everywhere agrees it actually
# returns a 202, so we are going to accept either, and someday,
# someone will fix either the implementation or the documentation to
# agree.
return resp.status in (httplib.NO_CONTENT, httplib.ACCEPTED)
def reboot_node(self, node):
# pylint: disable=no-member
return self._reboot_node(node, reboot_type='HARD')
def list_nodes(self, ex_all_tenants=False):
"""
List the nodes in a tenant
:param ex_all_tenants: List nodes for all the tenants. Note: Your user
must have admin privileges for this
functionality to work.
:type ex_all_tenants: ``bool``
"""
params = {}
if ex_all_tenants:
params = {'all_tenants': 1}
# pylint: disable=no-member
return self._to_nodes(
self.connection.request('/servers/detail', params=params).object)
def create_volume(self, size, name, location=None, snapshot=None,
ex_volume_type=None):
"""
Create a new volume.
:param size: Size of volume in gigabytes (required)
:type size: ``int``
:param name: Name of the volume to be created
:type name: ``str``
        :param location: Which data center to create a volume in. If
                         empty, the placement is left up to the provider.
                         (optional)
:type location: :class:`.NodeLocation`
:param snapshot: Snapshot from which to create the new
volume. (optional)
:type snapshot: :class:`.VolumeSnapshot`
:param ex_volume_type: What kind of volume to create.
(optional)
:type ex_volume_type: ``str``
:return: The newly created volume.
:rtype: :class:`StorageVolume`
"""
volume = {
'display_name': name,
'display_description': name,
'size': size,
'metadata': {
'contents': name,
},
}
if ex_volume_type:
volume['volume_type'] = ex_volume_type
if location:
volume['availability_zone'] = location
if snapshot:
volume['snapshot_id'] = snapshot.id
resp = self.connection.request('/os-volumes',
method='POST',
data={'volume': volume})
# pylint: disable=no-member
return self._to_volume(resp.object)
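    # Hedged usage sketch; the size, name and volume type below are arbitrary
    # example values, not defaults of this driver.
    #
    # >>> volume = driver.create_volume(size=10, name='db-data',
    # ...                               ex_volume_type='ssd')
    # >>> volume.state == StorageVolumeState.CREATING  # typically, at first
    # True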
def destroy_volume(self, volume):
return self.connection.request('/os-volumes/%s' % volume.id,
method='DELETE').success()
def attach_volume(self, node, volume, device="auto"):
# when "auto" or None is provided for device, openstack will let
# the guest OS pick the next available device (fi. /dev/vdb)
return self.connection.request(
'/servers/%s/os-volume_attachments' % node.id,
method='POST',
data={
'volumeAttachment': {
'volumeId': volume.id,
'device': device,
}
}).success()
def detach_volume(self, volume, ex_node=None):
# when ex_node is not provided, volume is detached from all nodes
failed_nodes = []
for attachment in volume.extra['attachments']:
if not ex_node or ex_node.id in filter(None, (attachment.get(
'serverId'
), attachment.get('server_id'))):
response = self.connection.request(
'/servers/%s/os-volume_attachments/%s' %
(attachment.get('serverId') or attachment['server_id'],
attachment['id']),
method='DELETE')
if not response.success():
failed_nodes.append(
attachment.get('serverId') or attachment['server_id']
)
if failed_nodes:
raise OpenStackException(
'detach_volume failed for nodes with id: %s' %
', '.join(failed_nodes), 500, self
)
return True
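    # Sketch of the detach semantics described above: when ``ex_node`` is
    # omitted the volume is detached from every attachment it has; passing a
    # node limits the operation to that node. ``node`` and ``volume`` are
    # assumed to already exist.
    #
    # >>> driver.detach_volume(volume)                 # detach everywhere
    # True
    # >>> driver.detach_volume(volume, ex_node=node)   # detach from one node
    # True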
def list_volumes(self):
# pylint: disable=no-member
return self._to_volumes(
self.connection.request('/os-volumes').object)
def ex_get_volume(self, volumeId):
# pylint: disable=no-member
return self._to_volume(
self.connection.request('/os-volumes/%s' % volumeId).object)
def list_images(self, location=None, ex_only_active=True):
"""
Lists all active images
@inherits: :class:`NodeDriver.list_images`
:param ex_only_active: True if list only active (optional)
:type ex_only_active: ``bool``
"""
# pylint: disable=no-member
return self._to_images(
self.connection.request('/images/detail').object, ex_only_active)
def get_image(self, image_id):
"""
Get an image based on an image_id
@inherits: :class:`NodeDriver.get_image`
:param image_id: Image identifier
:type image_id: ``str``
:return: A NodeImage object
:rtype: :class:`NodeImage`
"""
# pylint: disable=no-member
return self._to_image(self.connection.request(
'/images/%s' % (image_id,)).object['image'])
def list_sizes(self, location=None):
# pylint: disable=no-member
return self._to_sizes(
self.connection.request('/flavors/detail').object)
def list_locations(self):
return [NodeLocation(0, '', '', self)]
def _ex_connection_class_kwargs(self):
return self.openstack_connection_kwargs()
def ex_get_node_details(self, node_id):
"""
Lists details of the specified server.
:param node_id: ID of the node which should be used
:type node_id: ``str``
:rtype: :class:`Node`
"""
# @TODO: Remove this if in 0.6
if isinstance(node_id, Node):
node_id = node_id.id
uri = '/servers/%s' % (node_id)
try:
resp = self.connection.request(uri, method='GET')
except BaseHTTPError as e:
if e.code == httplib.NOT_FOUND:
return None
raise
# pylint: disable=no-member
return self._to_node_from_obj(resp.object)
def ex_soft_reboot_node(self, node):
"""
Soft reboots the specified server
:param node: node
:type node: :class:`Node`
:rtype: ``bool``
"""
# pylint: disable=no-member
return self._reboot_node(node, reboot_type='SOFT')
def ex_hard_reboot_node(self, node):
"""
Hard reboots the specified server
:param node: node
:type node: :class:`Node`
:rtype: ``bool``
"""
# pylint: disable=no-member
return self._reboot_node(node, reboot_type='HARD')
class OpenStackNodeSize(NodeSize):
"""
NodeSize class for the OpenStack.org driver.
Following the example of OpenNebula.org driver
and following guidelines:
https://issues.apache.org/jira/browse/LIBCLOUD-119
"""
def __init__(self, id, name, ram, disk, bandwidth, price, driver,
vcpus=None, ephemeral_disk=None, swap=None, extra=None):
super(OpenStackNodeSize, self).__init__(id=id, name=name, ram=ram,
disk=disk,
bandwidth=bandwidth,
price=price, driver=driver)
self.vcpus = vcpus
self.ephemeral_disk = ephemeral_disk
self.swap = swap
self.extra = extra
def __repr__(self):
return (('<OpenStackNodeSize: id=%s, name=%s, ram=%s, disk=%s, '
'bandwidth=%s, price=%s, driver=%s, vcpus=%s, ...>')
% (self.id, self.name, self.ram, self.disk, self.bandwidth,
self.price, self.driver.name, self.vcpus))
class OpenStack_1_0_Response(OpenStackResponse):
def __init__(self, *args, **kwargs):
# done because of a circular reference from
# NodeDriver -> Connection -> Response
self.node_driver = OpenStack_1_0_NodeDriver
super(OpenStack_1_0_Response, self).__init__(*args, **kwargs)
class OpenStack_1_0_Connection(OpenStackComputeConnection):
responseCls = OpenStack_1_0_Response
default_content_type = 'application/xml; charset=UTF-8'
accept_format = 'application/xml'
XML_NAMESPACE = 'http://docs.rackspacecloud.com/servers/api/v1.0'
class OpenStack_1_0_NodeDriver(OpenStackNodeDriver):
"""
OpenStack node driver.
Extra node attributes:
- password: root password, available after create.
- hostId: represents the host your cloud server runs on
- imageId: id of image
- flavorId: id of flavor
"""
connectionCls = OpenStack_1_0_Connection
type = Provider.OPENSTACK
features = {'create_node': ['generates_password']}
def __init__(self, *args, **kwargs):
self._ex_force_api_version = str(kwargs.pop('ex_force_api_version',
None))
self.XML_NAMESPACE = self.connectionCls.XML_NAMESPACE
super(OpenStack_1_0_NodeDriver, self).__init__(*args, **kwargs)
def _to_images(self, object, ex_only_active):
images = []
for image in findall(object, 'image', self.XML_NAMESPACE):
if ex_only_active and image.get('status') != 'ACTIVE':
continue
images.append(self._to_image(image))
return images
def _to_image(self, element):
return NodeImage(id=element.get('id'),
name=element.get('name'),
driver=self.connection.driver,
extra={'updated': element.get('updated'),
'created': element.get('created'),
'status': element.get('status'),
'serverId': element.get('serverId'),
'progress': element.get('progress'),
'minDisk': element.get('minDisk'),
'minRam': element.get('minRam')
}
)
def _change_password_or_name(self, node, name=None, password=None):
uri = '/servers/%s' % (node.id)
if not name:
name = node.name
body = {'xmlns': self.XML_NAMESPACE,
'name': name}
if password is not None:
body['adminPass'] = password
server_elm = ET.Element('server', body)
resp = self.connection.request(
uri, method='PUT', data=ET.tostring(server_elm))
if resp.status == httplib.NO_CONTENT and password is not None:
node.extra['password'] = password
return resp.status == httplib.NO_CONTENT
def create_node(self, **kwargs):
"""
Create a new node
@inherits: :class:`NodeDriver.create_node`
:keyword ex_metadata: Key/Value metadata to associate with a node
:type ex_metadata: ``dict``
:keyword ex_files: File Path => File contents to create on
the node
:type ex_files: ``dict``
:keyword ex_shared_ip_group_id: The server is launched into
that shared IP group
:type ex_shared_ip_group_id: ``str``
"""
name = kwargs['name']
image = kwargs['image']
size = kwargs['size']
attributes = {'xmlns': self.XML_NAMESPACE,
'name': name,
'imageId': str(image.id),
'flavorId': str(size.id)}
if 'ex_shared_ip_group' in kwargs:
            # Deprecated. Be explicit and use ex_shared_ip_group_id instead,
            # since the user needs to pass in the id, not the name.
warnings.warn('ex_shared_ip_group argument is deprecated.'
' Please use ex_shared_ip_group_id')
if 'ex_shared_ip_group_id' in kwargs:
shared_ip_group_id = kwargs['ex_shared_ip_group_id']
attributes['sharedIpGroupId'] = shared_ip_group_id
server_elm = ET.Element('server', attributes)
metadata_elm = self._metadata_to_xml(kwargs.get("ex_metadata", {}))
if metadata_elm:
server_elm.append(metadata_elm)
files_elm = self._files_to_xml(kwargs.get("ex_files", {}))
if files_elm:
server_elm.append(files_elm)
resp = self.connection.request("/servers",
method='POST',
data=ET.tostring(server_elm))
return self._to_node(resp.object)
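    # Hedged usage sketch for the 1.0 API: image and size would normally come
    # from list_images()/list_sizes(); the metadata and file contents below
    # are arbitrary examples.
    #
    # >>> node = driver.create_node(name='web01', image=image, size=size,
    # ...                           ex_metadata={'role': 'web'},
    # ...                           ex_files={'/root/motd': 'hello'})
    # >>> node.extra['password']  # admin password generated by the API
    # '...'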
def ex_set_password(self, node, password):
"""
Sets the Node's root password.
This will reboot the instance to complete the operation.
:class:`Node.extra['password']` will be set to the new value if the
operation was successful.
:param node: node to set password
:type node: :class:`Node`
:param password: new password.
:type password: ``str``
:rtype: ``bool``
"""
return self._change_password_or_name(node, password=password)
def ex_set_server_name(self, node, name):
"""
Sets the Node's name.
This will reboot the instance to complete the operation.
:param node: node to set name
:type node: :class:`Node`
:param name: new name
:type name: ``str``
:rtype: ``bool``
"""
return self._change_password_or_name(node, name=name)
def ex_resize_node(self, node, size):
"""
Change an existing server flavor / scale the server up or down.
:param node: node to resize.
:type node: :class:`Node`
:param size: new size.
:type size: :class:`NodeSize`
:rtype: ``bool``
"""
elm = ET.Element(
'resize',
{'xmlns': self.XML_NAMESPACE,
'flavorId': str(size.id)}
)
resp = self.connection.request("/servers/%s/action" % (node.id),
method='POST',
data=ET.tostring(elm))
return resp.status == httplib.ACCEPTED
def ex_resize(self, node, size):
"""
NOTE: This method is here for backward compatibility reasons.
You should use ``ex_resize_node`` instead.
"""
return self.ex_resize_node(node=node, size=size)
def ex_confirm_resize(self, node):
"""
Confirm a resize request which is currently in progress. If a resize
request is not explicitly confirmed or reverted it's automatically
confirmed after 24 hours.
For more info refer to the API documentation: http://goo.gl/zjFI1
:param node: node for which the resize request will be confirmed.
:type node: :class:`Node`
:rtype: ``bool``
"""
elm = ET.Element(
'confirmResize',
{'xmlns': self.XML_NAMESPACE},
)
resp = self.connection.request("/servers/%s/action" % (node.id),
method='POST',
data=ET.tostring(elm))
return resp.status == httplib.NO_CONTENT
def ex_revert_resize(self, node):
"""
Revert a resize request which is currently in progress.
All resizes are automatically confirmed after 24 hours if they have
not already been confirmed explicitly or reverted.
For more info refer to the API documentation: http://goo.gl/AizBu
:param node: node for which the resize request will be reverted.
:type node: :class:`Node`
:rtype: ``bool``
"""
elm = ET.Element(
'revertResize',
{'xmlns': self.XML_NAMESPACE}
)
resp = self.connection.request("/servers/%s/action" % (node.id),
method='POST',
data=ET.tostring(elm))
return resp.status == httplib.NO_CONTENT
def ex_rebuild(self, node_id, image_id):
"""
Rebuilds the specified server.
:param node_id: ID of the node which should be used
:type node_id: ``str``
:param image_id: ID of the image which should be used
:type image_id: ``str``
:rtype: ``bool``
"""
# @TODO: Remove those ifs in 0.6
if isinstance(node_id, Node):
node_id = node_id.id
if isinstance(image_id, NodeImage):
image_id = image_id.id
elm = ET.Element(
'rebuild',
{'xmlns': self.XML_NAMESPACE,
'imageId': image_id}
)
resp = self.connection.request("/servers/%s/action" % node_id,
method='POST',
data=ET.tostring(elm))
return resp.status == httplib.ACCEPTED
def ex_create_ip_group(self, group_name, node_id=None):
"""
Creates a shared IP group.
:param group_name: group name which should be used
:type group_name: ``str``
:param node_id: ID of the node which should be used
:type node_id: ``str``
:rtype: ``bool``
"""
# @TODO: Remove this if in 0.6
if isinstance(node_id, Node):
node_id = node_id.id
group_elm = ET.Element(
'sharedIpGroup',
{'xmlns': self.XML_NAMESPACE,
'name': group_name}
)
if node_id:
ET.SubElement(
group_elm,
'server',
{'id': node_id}
)
resp = self.connection.request('/shared_ip_groups',
method='POST',
data=ET.tostring(group_elm))
return self._to_shared_ip_group(resp.object)
def ex_list_ip_groups(self, details=False):
"""
Lists IDs and names for shared IP groups.
        If ``details`` is True, full details are returned for each group.
:param details: True if details is required
:type details: ``bool``
:rtype: ``list`` of :class:`OpenStack_1_0_SharedIpGroup`
"""
uri = '/shared_ip_groups/detail' if details else '/shared_ip_groups'
resp = self.connection.request(uri,
method='GET')
groups = findall(resp.object, 'sharedIpGroup',
self.XML_NAMESPACE)
return [self._to_shared_ip_group(el) for el in groups]
def ex_delete_ip_group(self, group_id):
"""
Deletes the specified shared IP group.
:param group_id: group id which should be used
:type group_id: ``str``
:rtype: ``bool``
"""
uri = '/shared_ip_groups/%s' % group_id
resp = self.connection.request(uri, method='DELETE')
return resp.status == httplib.NO_CONTENT
def ex_share_ip(self, group_id, node_id, ip, configure_node=True):
"""
Shares an IP address to the specified server.
:param group_id: group id which should be used
:type group_id: ``str``
:param node_id: ID of the node which should be used
:type node_id: ``str``
:param ip: ip which should be used
:type ip: ``str``
        :param configure_node: If True, the server is configured to use the
                               shared IP address
:type configure_node: ``bool``
:rtype: ``bool``
"""
# @TODO: Remove this if in 0.6
if isinstance(node_id, Node):
node_id = node_id.id
if configure_node:
str_configure = 'true'
else:
str_configure = 'false'
elm = ET.Element(
'shareIp',
{'xmlns': self.XML_NAMESPACE,
'sharedIpGroupId': group_id,
'configureServer': str_configure},
)
uri = '/servers/%s/ips/public/%s' % (node_id, ip)
resp = self.connection.request(uri,
method='PUT',
data=ET.tostring(elm))
return resp.status == httplib.ACCEPTED
def ex_unshare_ip(self, node_id, ip):
"""
Removes a shared IP address from the specified server.
:param node_id: ID of the node which should be used
:type node_id: ``str``
:param ip: ip which should be used
:type ip: ``str``
:rtype: ``bool``
"""
# @TODO: Remove this if in 0.6
if isinstance(node_id, Node):
node_id = node_id.id
uri = '/servers/%s/ips/public/%s' % (node_id, ip)
resp = self.connection.request(uri,
method='DELETE')
return resp.status == httplib.ACCEPTED
def ex_list_ip_addresses(self, node_id):
"""
List all server addresses.
:param node_id: ID of the node which should be used
:type node_id: ``str``
:rtype: :class:`OpenStack_1_0_NodeIpAddresses`
"""
# @TODO: Remove this if in 0.6
if isinstance(node_id, Node):
node_id = node_id.id
uri = '/servers/%s/ips' % node_id
resp = self.connection.request(uri,
method='GET')
return self._to_ip_addresses(resp.object)
def _metadata_to_xml(self, metadata):
if len(metadata) == 0:
return None
metadata_elm = ET.Element('metadata')
for k, v in list(metadata.items()):
meta_elm = ET.SubElement(metadata_elm, 'meta', {'key': str(k)})
meta_elm.text = str(v)
return metadata_elm
def _files_to_xml(self, files):
if len(files) == 0:
return None
personality_elm = ET.Element('personality')
for k, v in list(files.items()):
file_elm = ET.SubElement(personality_elm,
'file',
{'path': str(k)})
file_elm.text = base64.b64encode(b(v))
return personality_elm
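    # Roughly, the XML these two helpers emit for the 1.0 API looks like the
    # snippet below (file contents are base64-encoded); shown only as an
    # illustration of the wire format, with example key/path values.
    #
    # <metadata><meta key="role">web</meta></metadata>
    # <personality><file path="/root/motd">aGVsbG8=</file></personality>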
def _reboot_node(self, node, reboot_type='SOFT'):
resp = self._node_action(node, ['reboot', ('type', reboot_type)])
return resp.status == httplib.ACCEPTED
def _node_action(self, node, body):
if isinstance(body, list):
attr = ' '.join(['%s="%s"' % (item[0], item[1])
for item in body[1:]])
body = '<%s xmlns="%s" %s/>' % (body[0], self.XML_NAMESPACE, attr)
uri = '/servers/%s/action' % (node.id)
resp = self.connection.request(uri, method='POST', data=body)
return resp
def _to_nodes(self, object):
node_elements = findall(object, 'server', self.XML_NAMESPACE)
return [self._to_node(el) for el in node_elements]
def _to_node_from_obj(self, obj):
return self._to_node(findall(obj, 'server', self.XML_NAMESPACE)[0])
def _to_node(self, el):
def get_ips(el):
return [ip.get('addr') for ip in el]
def get_meta_dict(el):
d = {}
for meta in el:
d[meta.get('key')] = meta.text
return d
public_ip = get_ips(findall(el, 'addresses/public/ip',
self.XML_NAMESPACE))
private_ip = get_ips(findall(el, 'addresses/private/ip',
self.XML_NAMESPACE))
metadata = get_meta_dict(findall(el, 'metadata/meta',
self.XML_NAMESPACE))
n = Node(id=el.get('id'),
name=el.get('name'),
state=self.NODE_STATE_MAP.get(
el.get('status'), NodeState.UNKNOWN),
public_ips=public_ip,
private_ips=private_ip,
driver=self.connection.driver,
# pylint: disable=no-member
extra={
'password': el.get('adminPass'),
'hostId': el.get('hostId'),
'imageId': el.get('imageId'),
'flavorId': el.get('flavorId'),
'uri': "https://%s%s/servers/%s" % (
self.connection.host,
self.connection.request_path, el.get('id')),
'service_name': self.connection.get_service_name(),
'metadata': metadata})
return n
def _to_sizes(self, object):
elements = findall(object, 'flavor', self.XML_NAMESPACE)
return [self._to_size(el) for el in elements]
def _to_size(self, el):
vcpus = int(el.get('vcpus')) if el.get('vcpus', None) else None
return OpenStackNodeSize(id=el.get('id'),
name=el.get('name'),
ram=int(el.get('ram')),
disk=int(el.get('disk')),
                                  # bandwidth is not exposed by this API,
                                  # hence None below
vcpus=vcpus,
bandwidth=None,
extra=el.get('extra_specs'),
                                  # looked up from libcloud's bundled
                                  # pricing data
price=self._get_size_price(el.get('id')),
driver=self.connection.driver)
def ex_limits(self):
"""
        Extra call to get the account's limits, such as
        rate limits (for example, the number of POST requests allowed
        per day) and absolute limits, such as the total amount of RAM
        available to servers.
:return: dict with keys 'rate' and 'absolute'
:rtype: ``dict``
"""
def _to_rate(el):
rate = {}
for item in list(el.items()):
rate[item[0]] = item[1]
return rate
def _to_absolute(el):
return {el.get('name'): el.get('value')}
limits = self.connection.request("/limits").object
rate = [_to_rate(el) for el in findall(limits, 'rate/limit',
self.XML_NAMESPACE)]
absolute = {}
for item in findall(limits, 'absolute/limit',
self.XML_NAMESPACE):
absolute.update(_to_absolute(item))
return {"rate": rate, "absolute": absolute}
def create_image(self, node, name, description=None, reboot=True):
"""Create an image for node.
@inherits: :class:`NodeDriver.create_image`
:param node: node to use as a base for image
:type node: :class:`Node`
:param name: name for new image
:type name: ``str``
:rtype: :class:`NodeImage`
"""
image_elm = ET.Element(
'image',
{'xmlns': self.XML_NAMESPACE,
'name': name,
'serverId': node.id}
)
return self._to_image(
self.connection.request("/images", method="POST",
data=ET.tostring(image_elm)).object)
def delete_image(self, image):
"""Delete an image for node.
@inherits: :class:`NodeDriver.delete_image`
:param image: the image to be deleted
:type image: :class:`NodeImage`
:rtype: ``bool``
"""
uri = '/images/%s' % image.id
resp = self.connection.request(uri, method='DELETE')
return resp.status == httplib.NO_CONTENT
def _to_shared_ip_group(self, el):
servers_el = findall(el, 'servers', self.XML_NAMESPACE)
if servers_el:
servers = [s.get('id')
for s in findall(servers_el[0], 'server',
self.XML_NAMESPACE)]
else:
servers = None
return OpenStack_1_0_SharedIpGroup(id=el.get('id'),
name=el.get('name'),
servers=servers)
def _to_ip_addresses(self, el):
public_ips = [ip.get('addr') for ip in findall(
findall(el, 'public', self.XML_NAMESPACE)[0],
'ip', self.XML_NAMESPACE)]
private_ips = [ip.get('addr') for ip in findall(
findall(el, 'private', self.XML_NAMESPACE)[0],
'ip', self.XML_NAMESPACE)]
return OpenStack_1_0_NodeIpAddresses(public_ips, private_ips)
def _get_size_price(self, size_id):
try:
return get_size_price(driver_type='compute',
driver_name=self.api_name,
size_id=size_id)
except KeyError:
return 0.0
class OpenStack_1_0_SharedIpGroup(object):
"""
Shared IP group info.
"""
def __init__(self, id, name, servers=None):
self.id = str(id)
self.name = name
self.servers = servers
class OpenStack_1_0_NodeIpAddresses(object):
"""
List of public and private IP addresses of a Node.
"""
def __init__(self, public_addresses, private_addresses):
self.public_addresses = public_addresses
self.private_addresses = private_addresses
class OpenStack_1_1_Response(OpenStackResponse):
def __init__(self, *args, **kwargs):
# done because of a circular reference from
# NodeDriver -> Connection -> Response
self.node_driver = OpenStack_1_1_NodeDriver
super(OpenStack_1_1_Response, self).__init__(*args, **kwargs)
class OpenStackNetwork(object):
"""
A Virtual Network.
"""
def __init__(self, id, name, cidr, driver, extra=None):
self.id = str(id)
self.name = name
self.cidr = cidr
self.driver = driver
self.extra = extra or {}
def __repr__(self):
return '<OpenStackNetwork id="%s" name="%s" cidr="%s">' % (self.id,
self.name,
self.cidr,)
class OpenStackSecurityGroup(object):
"""
A Security Group.
"""
def __init__(self, id, tenant_id, name, description, driver, rules=None,
extra=None):
"""
Constructor.
:keyword id: Group id.
:type id: ``str``
:keyword tenant_id: Owner of the security group.
:type tenant_id: ``str``
:keyword name: Human-readable name for the security group. Might
not be unique.
:type name: ``str``
:keyword description: Human-readable description of a security
group.
:type description: ``str``
:keyword rules: Rules associated with this group.
:type rules: ``list`` of
:class:`OpenStackSecurityGroupRule`
:keyword extra: Extra attributes associated with this group.
:type extra: ``dict``
"""
self.id = id
self.tenant_id = tenant_id
self.name = name
self.description = description
self.driver = driver
self.rules = rules or []
self.extra = extra or {}
def __repr__(self):
        return ('<OpenStackSecurityGroup id=%s tenant_id=%s name=%s '
                'description=%s>' % (self.id, self.tenant_id, self.name,
                                     self.description))
class OpenStackSecurityGroupRule(object):
"""
A Rule of a Security Group.
"""
def __init__(self, id, parent_group_id, ip_protocol, from_port, to_port,
driver, ip_range=None, group=None, tenant_id=None,
direction=None, extra=None):
"""
Constructor.
:keyword id: Rule id.
:type id: ``str``
:keyword parent_group_id: ID of the parent security group.
:type parent_group_id: ``str``
:keyword ip_protocol: IP Protocol (icmp, tcp, udp, etc).
:type ip_protocol: ``str``
:keyword from_port: Port at start of range.
:type from_port: ``int``
:keyword to_port: Port at end of range.
:type to_port: ``int``
:keyword ip_range: CIDR for address range.
:type ip_range: ``str``
:keyword group: Name of a source security group to apply to rule.
:type group: ``str``
:keyword tenant_id: Owner of the security group.
:type tenant_id: ``str``
:keyword direction: Security group Direction (ingress or egress).
:type direction: ``str``
:keyword extra: Extra attributes associated with this rule.
:type extra: ``dict``
"""
self.id = id
self.parent_group_id = parent_group_id
self.ip_protocol = ip_protocol
self.from_port = from_port
self.to_port = to_port
self.driver = driver
self.ip_range = ''
self.group = {}
self.direction = 'ingress'
if group is None:
self.ip_range = ip_range
else:
self.group = {'name': group, 'tenant_id': tenant_id}
# by default in old versions only ingress was used
if direction is not None:
if direction in ['ingress', 'egress']:
self.direction = direction
else:
raise OpenStackException("Security group direction incorrect "
"value: ingress or egress.", 500,
driver)
self.tenant_id = tenant_id
self.extra = extra or {}
def __repr__(self):
        return ('<OpenStackSecurityGroupRule id=%s parent_group_id=%s '
                'ip_protocol=%s from_port=%s to_port=%s>' % (
                    self.id, self.parent_group_id, self.ip_protocol,
                    self.from_port, self.to_port))
class OpenStackKeyPair(object):
"""
A KeyPair.
"""
def __init__(self, name, fingerprint, public_key, driver, private_key=None,
extra=None):
"""
Constructor.
:keyword name: Name of the KeyPair.
:type name: ``str``
:keyword fingerprint: Fingerprint of the KeyPair
:type fingerprint: ``str``
:keyword public_key: Public key in OpenSSH format.
:type public_key: ``str``
:keyword private_key: Private key in PEM format.
:type private_key: ``str``
:keyword extra: Extra attributes associated with this KeyPair.
:type extra: ``dict``
"""
self.name = name
self.fingerprint = fingerprint
self.public_key = public_key
self.private_key = private_key
self.driver = driver
self.extra = extra or {}
def __repr__(self):
return ('<OpenStackKeyPair name=%s fingerprint=%s public_key=%s ...>'
% (self.name, self.fingerprint, self.public_key))
class OpenStack_1_1_Connection(OpenStackComputeConnection):
responseCls = OpenStack_1_1_Response
accept_format = 'application/json'
default_content_type = 'application/json; charset=UTF-8'
def encode_data(self, data):
return json.dumps(data)
class OpenStack_1_1_NodeDriver(OpenStackNodeDriver):
"""
OpenStack node driver.
"""
connectionCls = OpenStack_1_1_Connection
type = Provider.OPENSTACK
features = {"create_node": ["generates_password"]}
_networks_url_prefix = '/os-networks'
def __init__(self, *args, **kwargs):
self._ex_force_api_version = str(kwargs.pop('ex_force_api_version',
None))
super(OpenStack_1_1_NodeDriver, self).__init__(*args, **kwargs)
def create_node(self, **kwargs):
"""Create a new node
@inherits: :class:`NodeDriver.create_node`
:keyword ex_keyname: The name of the key pair
:type ex_keyname: ``str``
:keyword ex_userdata: String containing user data
see
https://help.ubuntu.com/community/CloudInit
:type ex_userdata: ``str``
:keyword ex_config_drive: Enable config drive
see
http://docs.openstack.org/grizzly/openstack-compute/admin/content/config-drive.html
:type ex_config_drive: ``bool``
:keyword ex_security_groups: List of security groups to assign to
the node
:type ex_security_groups: ``list`` of
:class:`OpenStackSecurityGroup`
:keyword ex_metadata: Key/Value metadata to associate with a node
:type ex_metadata: ``dict``
:keyword ex_files: File Path => File contents to create on
the node
:type ex_files: ``dict``
:keyword networks: The server is launched into a set of Networks.
:type networks: ``list`` of :class:`OpenStackNetwork`
:keyword ex_disk_config: Name of the disk configuration.
Can be either ``AUTO`` or ``MANUAL``.
:type ex_disk_config: ``str``
:keyword ex_config_drive: If True enables metadata injection in a
server through a configuration drive.
:type ex_config_drive: ``bool``
:keyword ex_admin_pass: The root password for the node
:type ex_admin_pass: ``str``
:keyword ex_availability_zone: Nova availability zone for the node
:type ex_availability_zone: ``str``
"""
server_params = self._create_args_to_params(None, **kwargs)
resp = self.connection.request("/servers",
method='POST',
data={'server': server_params})
create_response = resp.object['server']
server_resp = self.connection.request(
'/servers/%s' % create_response['id'])
server_object = server_resp.object['server']
# adminPass is not always present
# http://docs.openstack.org/essex/openstack-compute/admin/
# content/configuring-compute-API.html#d6e1833
server_object['adminPass'] = create_response.get('adminPass', None)
return self._to_node(server_object)
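    # Hedged usage sketch for the 1.1+ API; the image/size/network/security
    # group objects would come from the corresponding list_* / ex_list_*
    # calls, and the key name and user data below are only examples.
    #
    # >>> node = driver.create_node(name='app01', image=image, size=size,
    # ...                           ex_keyname='deploy-key',
    # ...                           ex_userdata='#cloud-config\n',
    # ...                           networks=[network],
    # ...                           ex_security_groups=[secgroup])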
def _to_images(self, obj, ex_only_active):
images = []
for image in obj['images']:
if ex_only_active and image.get('status') != 'ACTIVE':
continue
images.append(self._to_image(image))
return images
def _to_image(self, api_image):
server = api_image.get('server', {})
updated = api_image.get('updated_at') or api_image['updated']
created = api_image.get('created_at') or api_image['created']
min_ram = api_image.get('min_ram')
if min_ram is None:
min_ram = api_image.get('minRam')
min_disk = api_image.get('min_disk')
if min_disk is None:
min_disk = api_image.get('minDisk')
return NodeImage(
id=api_image['id'],
name=api_image['name'],
driver=self,
extra=dict(
visibility=api_image.get('visibility'),
updated=updated,
created=created,
status=api_image['status'],
progress=api_image.get('progress'),
metadata=api_image.get('metadata'),
os_type=api_image.get('os_type'),
serverId=server.get('id'),
minDisk=min_disk,
minRam=min_ram,
)
)
def _to_image_member(self, api_image_member):
created = api_image_member['created_at']
updated = api_image_member.get('updated_at')
return NodeImageMember(
id=api_image_member['member_id'],
image_id=api_image_member['image_id'],
state=api_image_member['status'],
created=created,
driver=self,
extra=dict(
schema=api_image_member.get('schema'),
updated=updated,
)
)
def _to_nodes(self, obj):
servers = obj['servers']
return [self._to_node(server) for server in servers]
def _to_volumes(self, obj):
volumes = obj['volumes']
return [self._to_volume(volume) for volume in volumes]
def _to_snapshots(self, obj):
snapshots = obj['snapshots']
return [self._to_snapshot(snapshot) for snapshot in snapshots]
def _to_sizes(self, obj):
flavors = obj['flavors']
return [self._to_size(flavor) for flavor in flavors]
def _create_args_to_params(self, node, **kwargs):
server_params = {
'name': kwargs.get('name'),
'metadata': kwargs.get('ex_metadata', {}),
'personality': self._files_to_personality(kwargs.get("ex_files",
{}))
}
if 'ex_availability_zone' in kwargs:
server_params['availability_zone'] = kwargs['ex_availability_zone']
if 'ex_keyname' in kwargs:
server_params['key_name'] = kwargs['ex_keyname']
if 'ex_userdata' in kwargs:
server_params['user_data'] = base64.b64encode(
b(kwargs['ex_userdata'])).decode('ascii')
        if 'ex_disk_config' in kwargs:
            server_params['OS-DCF:diskConfig'] = kwargs['ex_disk_config']
        if 'ex_config_drive' in kwargs:
            server_params['config_drive'] = str(kwargs['ex_config_drive'])
if 'ex_admin_pass' in kwargs:
server_params['adminPass'] = kwargs['ex_admin_pass']
if 'networks' in kwargs:
networks = kwargs['networks']
networks = [{'uuid': network.id} for network in networks]
server_params['networks'] = networks
if 'ex_security_groups' in kwargs:
server_params['security_groups'] = []
for security_group in kwargs['ex_security_groups']:
name = security_group.name
server_params['security_groups'].append({'name': name})
if 'ex_blockdevicemappings' in kwargs:
server_params['block_device_mapping_v2'] = \
kwargs['ex_blockdevicemappings']
if 'name' in kwargs:
server_params['name'] = kwargs.get('name')
else:
server_params['name'] = node.name
if 'image' in kwargs:
server_params['imageRef'] = kwargs.get('image').id
else:
server_params['imageRef'] = node.extra.get(
'imageId', ''
) if node else ''
if 'size' in kwargs:
server_params['flavorRef'] = kwargs.get('size').id
else:
server_params['flavorRef'] = node.extra.get('flavorId')
return server_params
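    # For a call like the create_node sketch above, the request body built
    # from these params is roughly the following (values illustrative only):
    #
    # {'server': {'name': 'app01', 'imageRef': '<image id>',
    #             'flavorRef': '<flavor id>', 'key_name': 'deploy-key',
    #             'user_data': '<base64 of ex_userdata>',
    #             'networks': [{'uuid': '<network id>'}],
    #             'security_groups': [{'name': '<group name>'}],
    #             'metadata': {}, 'personality': []}}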
def _files_to_personality(self, files):
rv = []
for k, v in list(files.items()):
rv.append({'path': k, 'contents': base64.b64encode(b(v))})
return rv
def _reboot_node(self, node, reboot_type='SOFT'):
resp = self._node_action(node, 'reboot', type=reboot_type)
return resp.status == httplib.ACCEPTED
def ex_set_password(self, node, password):
"""
Changes the administrator password for a specified server.
:param node: Node to rebuild.
:type node: :class:`Node`
:param password: The administrator password.
:type password: ``str``
:rtype: ``bool``
"""
resp = self._node_action(node, 'changePassword', adminPass=password)
node.extra['password'] = password
return resp.status == httplib.ACCEPTED
def ex_rebuild(self, node, image, **kwargs):
"""
Rebuild a Node.
:param node: Node to rebuild.
:type node: :class:`Node`
:param image: New image to use.
:type image: :class:`NodeImage`
:keyword ex_metadata: Key/Value metadata to associate with a node
:type ex_metadata: ``dict``
:keyword ex_files: File Path => File contents to create on
the node
:type ex_files: ``dict``
:keyword ex_keyname: Name of existing public key to inject into
instance
:type ex_keyname: ``str``
:keyword ex_userdata: String containing user data
see
https://help.ubuntu.com/community/CloudInit
:type ex_userdata: ``str``
:keyword ex_security_groups: List of security groups to assign to
the node
:type ex_security_groups: ``list`` of
:class:`OpenStackSecurityGroup`
:keyword ex_disk_config: Name of the disk configuration.
Can be either ``AUTO`` or ``MANUAL``.
:type ex_disk_config: ``str``
:keyword ex_config_drive: If True enables metadata injection in a
server through a configuration drive.
:type ex_config_drive: ``bool``
:rtype: ``bool``
"""
server_params = self._create_args_to_params(node, image=image,
**kwargs)
resp = self._node_action(node, 'rebuild', **server_params)
return resp.status == httplib.ACCEPTED
def ex_resize(self, node, size):
"""
Change a node size.
:param node: Node to resize.
:type node: :class:`Node`
        :param size: New size to use.
        :type size: :class:`NodeSize`
:rtype: ``bool``
"""
server_params = {'flavorRef': size.id}
resp = self._node_action(node, 'resize', **server_params)
return resp.status == httplib.ACCEPTED
def ex_confirm_resize(self, node):
"""
Confirms a pending resize action.
:param node: Node to resize.
:type node: :class:`Node`
:rtype: ``bool``
"""
resp = self._node_action(node, 'confirmResize')
return resp.status == httplib.NO_CONTENT
def ex_revert_resize(self, node):
"""
Cancels and reverts a pending resize action.
:param node: Node to resize.
:type node: :class:`Node`
:rtype: ``bool``
"""
resp = self._node_action(node, 'revertResize')
return resp.status == httplib.ACCEPTED
def create_image(self, node, name, metadata=None):
"""
Creates a new image.
:param node: Node
:type node: :class:`Node`
:param name: The name for the new image.
:type name: ``str``
:param metadata: Key and value pairs for metadata.
:type metadata: ``dict``
:rtype: :class:`NodeImage`
"""
optional_params = {}
if metadata:
optional_params['metadata'] = metadata
resp = self._node_action(node, 'createImage', name=name,
**optional_params)
image_id = self._extract_image_id_from_url(resp.headers['location'])
return self.get_image(image_id=image_id)
def ex_set_server_name(self, node, name):
"""
Sets the Node's name.
:param node: Node
:type node: :class:`Node`
:param name: The name of the server.
:type name: ``str``
:rtype: :class:`Node`
"""
return self._update_node(node, name=name)
def ex_get_metadata(self, node):
"""
Get a Node's metadata.
:param node: Node
:type node: :class:`Node`
:return: Key/Value metadata associated with node.
:rtype: ``dict``
"""
return self.connection.request(
'/servers/%s/metadata' % (node.id,),
method='GET',).object['metadata']
def ex_set_metadata(self, node, metadata):
"""
Sets the Node's metadata.
:param node: Node
:type node: :class:`Node`
:param metadata: Key/Value metadata to associate with a node
:type metadata: ``dict``
:rtype: ``dict``
"""
return self.connection.request(
'/servers/%s/metadata' % (node.id,), method='PUT',
data={'metadata': metadata}
).object['metadata']
def ex_update_node(self, node, **node_updates):
"""
Update the Node's editable attributes. The OpenStack API currently
supports editing name and IPv4/IPv6 access addresses.
The driver currently only supports updating the node name.
:param node: Node
:type node: :class:`Node`
:keyword name: New name for the server
:type name: ``str``
:rtype: :class:`Node`
"""
potential_data = self._create_args_to_params(node, **node_updates)
updates = {'name': potential_data['name']}
return self._update_node(node, **updates)
def _to_networks(self, obj):
networks = obj['networks']
return [self._to_network(network) for network in networks]
def _to_network(self, obj):
return OpenStackNetwork(id=obj['id'],
name=obj['label'],
cidr=obj.get('cidr', None),
driver=self)
def ex_list_networks(self):
"""
Get a list of Networks that are available.
:rtype: ``list`` of :class:`OpenStackNetwork`
"""
response = self.connection.request(self._networks_url_prefix).object
return self._to_networks(response)
def ex_get_network(self, network_id):
"""
Retrieve the Network with the given ID
        :param network_id: ID of the network
        :type network_id: ``str``
        :rtype: :class:`OpenStackNetwork`
"""
request_url = "{networks_url_prefix}/{network_id}".format(
networks_url_prefix=self._networks_url_prefix,
network_id=network_id
)
response = self.connection.request(request_url).object
return self._to_network(response['network'])
def ex_create_network(self, name, cidr):
"""
Create a new Network
:param name: Name of network which should be used
:type name: ``str``
:param cidr: cidr of network which should be used
:type cidr: ``str``
:rtype: :class:`OpenStackNetwork`
"""
data = {'network': {'cidr': cidr, 'label': name}}
response = self.connection.request(self._networks_url_prefix,
method='POST', data=data).object
return self._to_network(response['network'])
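    # Usage sketch, assuming the nova-network style /os-networks extension is
    # enabled on the target cloud; the name and CIDR are arbitrary examples.
    #
    # >>> net = driver.ex_create_network(name='backend', cidr='10.0.1.0/24')
    # >>> [n.name for n in driver.ex_list_networks()]  # doctest: +SKIP
    # ['backend', ...]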
def ex_delete_network(self, network):
"""
Delete a Network
:param network: Network which should be used
:type network: :class:`OpenStackNetwork`
:rtype: ``bool``
"""
resp = self.connection.request('%s/%s' % (self._networks_url_prefix,
network.id),
method='DELETE')
return resp.status in (httplib.NO_CONTENT, httplib.ACCEPTED)
def ex_get_console_output(self, node, length=None):
"""
Get console output
:param node: node
:type node: :class:`Node`
:param length: Optional number of lines to fetch from the
console log
:type length: ``int``
:return: Dictionary with the output
:rtype: ``dict``
"""
data = {
"os-getConsoleOutput": {
"length": length
}
}
resp = self.connection.request('/servers/%s/action' % node.id,
method='POST', data=data).object
return resp
def ex_list_snapshots(self):
return self._to_snapshots(
self.connection.request('/os-snapshots').object)
def ex_get_snapshot(self, snapshotId):
return self._to_snapshot(
self.connection.request('/os-snapshots/%s' % snapshotId).object)
def list_volume_snapshots(self, volume):
return [snapshot for snapshot in self.ex_list_snapshots()
if snapshot.extra['volume_id'] == volume.id]
def create_volume_snapshot(self, volume, name=None, ex_description=None,
ex_force=True):
"""
Create snapshot from volume
:param volume: Instance of `StorageVolume`
:type volume: `StorageVolume`
:param name: Name of snapshot (optional)
:type name: `str` | `NoneType`
:param ex_description: Description of the snapshot (optional)
:type ex_description: `str` | `NoneType`
        :param ex_force: Specifies whether to allow snapshotting a volume
                         that is not in the `available` state, for example
                         `in-use`. Defaults to True. (optional)
:type ex_force: `bool`
:rtype: :class:`VolumeSnapshot`
"""
data = {'snapshot': {'volume_id': volume.id, 'force': ex_force}}
if name is not None:
data['snapshot']['display_name'] = name
if ex_description is not None:
data['snapshot']['display_description'] = ex_description
return self._to_snapshot(self.connection.request('/os-snapshots',
method='POST',
data=data).object)
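    # Usage sketch: snapshot an attached volume in place (ex_force defaults
    # to True, so an 'in-use' volume is accepted); name and description are
    # illustrative values.
    #
    # >>> snap = driver.create_volume_snapshot(volume, name='nightly',
    # ...                                      ex_description='pre-upgrade')
    # >>> snap.state == VolumeSnapshotState.CREATING  # typically, at first
    # True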
def destroy_volume_snapshot(self, snapshot):
resp = self.connection.request('/os-snapshots/%s' % snapshot.id,
method='DELETE')
return resp.status == httplib.NO_CONTENT
def ex_create_snapshot(self, volume, name, description=None, force=False):
"""
Create a snapshot based off of a volume.
:param volume: volume
:type volume: :class:`StorageVolume`
:keyword name: New name for the volume snapshot
:type name: ``str``
:keyword description: Description of the snapshot (optional)
:type description: ``str``
:keyword force: Whether to force creation (optional)
:type force: ``bool``
:rtype: :class:`VolumeSnapshot`
"""
warnings.warn('This method has been deprecated in favor of the '
'create_volume_snapshot method')
return self.create_volume_snapshot(volume, name,
ex_description=description,
ex_force=force)
def ex_delete_snapshot(self, snapshot):
"""
Delete a VolumeSnapshot
:param snapshot: snapshot
:type snapshot: :class:`VolumeSnapshot`
:rtype: ``bool``
"""
warnings.warn('This method has been deprecated in favor of the '
'destroy_volume_snapshot method')
return self.destroy_volume_snapshot(snapshot)
def _to_security_group_rules(self, obj):
return [self._to_security_group_rule(security_group_rule) for
security_group_rule in obj]
def _to_security_group_rule(self, obj):
ip_range = group = tenant_id = None
if obj['group'] == {}:
ip_range = obj['ip_range'].get('cidr', None)
else:
group = obj['group'].get('name', None)
tenant_id = obj['group'].get('tenant_id', None)
return OpenStackSecurityGroupRule(
id=obj['id'], parent_group_id=obj['parent_group_id'],
ip_protocol=obj['ip_protocol'], from_port=obj['from_port'],
to_port=obj['to_port'], driver=self, ip_range=ip_range,
group=group, tenant_id=tenant_id)
def _to_security_groups(self, obj):
security_groups = obj['security_groups']
return [self._to_security_group(security_group) for security_group in
security_groups]
def _to_security_group(self, obj):
rules = self._to_security_group_rules(obj.get('security_group_rules',
obj.get('rules', [])))
return OpenStackSecurityGroup(id=obj['id'],
tenant_id=obj['tenant_id'],
name=obj['name'],
description=obj.get('description', ''),
rules=rules,
driver=self)
def ex_list_security_groups(self):
"""
Get a list of Security Groups that are available.
:rtype: ``list`` of :class:`OpenStackSecurityGroup`
"""
return self._to_security_groups(
self.connection.request('/os-security-groups').object)
def ex_get_node_security_groups(self, node):
"""
Get Security Groups of the specified server.
:rtype: ``list`` of :class:`OpenStackSecurityGroup`
"""
return self._to_security_groups(
self.connection.request('/servers/%s/os-security-groups' %
(node.id)).object)
def ex_create_security_group(self, name, description):
"""
Create a new Security Group
:param name: Name of the new Security Group
:type name: ``str``
:param description: Description of the new Security Group
:type description: ``str``
:rtype: :class:`OpenStackSecurityGroup`
"""
return self._to_security_group(self.connection.request(
'/os-security-groups', method='POST',
data={'security_group': {'name': name, 'description': description}}
).object['security_group'])
def ex_delete_security_group(self, security_group):
"""
Delete a Security Group.
:param security_group: Security Group should be deleted
:type security_group: :class:`OpenStackSecurityGroup`
:rtype: ``bool``
"""
resp = self.connection.request('/os-security-groups/%s' %
(security_group.id),
method='DELETE')
return resp.status in (httplib.NO_CONTENT, httplib.ACCEPTED)
def ex_create_security_group_rule(self, security_group, ip_protocol,
from_port, to_port, cidr=None,
source_security_group=None):
"""
Create a new Rule in a Security Group
:param security_group: Security Group in which to add the rule
:type security_group: :class:`OpenStackSecurityGroup`
:param ip_protocol: Protocol to which this rule applies
Examples: tcp, udp, ...
:type ip_protocol: ``str``
:param from_port: First port of the port range
:type from_port: ``int``
:param to_port: Last port of the port range
:type to_port: ``int``
:param cidr: CIDR notation of the source IP range for this rule
:type cidr: ``str``
:param source_security_group: Existing Security Group to use as the
source (instead of CIDR)
        :type source_security_group: :class:`OpenStackSecurityGroup`
:rtype: :class:`OpenStackSecurityGroupRule`
"""
source_security_group_id = None
        if isinstance(source_security_group, OpenStackSecurityGroup):
source_security_group_id = source_security_group.id
return self._to_security_group_rule(self.connection.request(
'/os-security-group-rules', method='POST',
data={'security_group_rule': {
'ip_protocol': ip_protocol,
'from_port': from_port,
'to_port': to_port,
'cidr': cidr,
'group_id': source_security_group_id,
'parent_group_id': security_group.id}}
).object['security_group_rule'])
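    # Usage sketch: open TCP port 22 from a CIDR on an existing group; the
    # ``secgroup`` object would come from ex_create_security_group() or
    # ex_list_security_groups(), and the CIDR is an example value.
    #
    # >>> rule = driver.ex_create_security_group_rule(
    # ...     secgroup, ip_protocol='tcp', from_port=22, to_port=22,
    # ...     cidr='203.0.113.0/24')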
def ex_delete_security_group_rule(self, rule):
"""
Delete a Rule from a Security Group.
:param rule: Rule should be deleted
:type rule: :class:`OpenStackSecurityGroupRule`
:rtype: ``bool``
"""
resp = self.connection.request('/os-security-group-rules/%s' %
(rule.id), method='DELETE')
return resp.status == httplib.NO_CONTENT
def _to_key_pairs(self, obj):
key_pairs = obj['keypairs']
key_pairs = [self._to_key_pair(key_pair['keypair']) for key_pair in
key_pairs]
return key_pairs
def _to_key_pair(self, obj):
key_pair = KeyPair(name=obj['name'],
fingerprint=obj['fingerprint'],
public_key=obj['public_key'],
private_key=obj.get('private_key', None),
driver=self)
return key_pair
def list_key_pairs(self):
response = self.connection.request('/os-keypairs')
key_pairs = self._to_key_pairs(response.object)
return key_pairs
def get_key_pair(self, name):
self.connection.set_context({'key_pair_name': name})
response = self.connection.request('/os-keypairs/%s' % (name))
key_pair = self._to_key_pair(response.object['keypair'])
return key_pair
def create_key_pair(self, name):
data = {'keypair': {'name': name}}
response = self.connection.request('/os-keypairs', method='POST',
data=data)
key_pair = self._to_key_pair(response.object['keypair'])
return key_pair
def import_key_pair_from_string(self, name, key_material):
data = {'keypair': {'name': name, 'public_key': key_material}}
response = self.connection.request('/os-keypairs', method='POST',
data=data)
key_pair = self._to_key_pair(response.object['keypair'])
return key_pair
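    # Key pair usage sketch; the key name is arbitrary and the public key
    # string is a truncated placeholder.
    #
    # >>> kp = driver.import_key_pair_from_string(
    # ...     'deploy-key', 'ssh-rsa AAAA... user@host')
    # >>> driver.get_key_pair('deploy-key').fingerprint  # doctest: +SKIP
    # 'aa:bb:...'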
def delete_key_pair(self, key_pair):
"""
Delete a KeyPair.
        :param key_pair: KeyPair to delete
        :type key_pair: :class:`KeyPair`
:rtype: ``bool``
"""
response = self.connection.request('/os-keypairs/%s' % (key_pair.name),
method='DELETE')
return response.status == httplib.ACCEPTED
def ex_list_keypairs(self):
"""
Get a list of KeyPairs that are available.
:rtype: ``list`` of :class:`OpenStackKeyPair`
"""
warnings.warn('This method has been deprecated in favor of '
'list_key_pairs method')
return self.list_key_pairs()
def ex_create_keypair(self, name):
"""
Create a new KeyPair
:param name: Name of the new KeyPair
:type name: ``str``
:rtype: :class:`OpenStackKeyPair`
"""
warnings.warn('This method has been deprecated in favor of '
'create_key_pair method')
return self.create_key_pair(name=name)
def ex_import_keypair(self, name, keyfile):
"""
Import a KeyPair from a file
:param name: Name of the new KeyPair
:type name: ``str``
:param keyfile: Path to the public key file (in OpenSSH format)
:type keyfile: ``str``
:rtype: :class:`OpenStackKeyPair`
"""
warnings.warn('This method has been deprecated in favor of '
'import_key_pair_from_file method')
return self.import_key_pair_from_file(name=name, key_file_path=keyfile)
def ex_import_keypair_from_string(self, name, key_material):
"""
Import a KeyPair from a string
:param name: Name of the new KeyPair
:type name: ``str``
:param key_material: Public key (in OpenSSH format)
:type key_material: ``str``
:rtype: :class:`OpenStackKeyPair`
"""
warnings.warn('This method has been deprecated in favor of '
'import_key_pair_from_string method')
return self.import_key_pair_from_string(name=name,
key_material=key_material)
def ex_delete_keypair(self, keypair):
"""
Delete a KeyPair.
:param keypair: KeyPair to delete
:type keypair: :class:`OpenStackKeyPair`
:rtype: ``bool``
"""
warnings.warn('This method has been deprecated in favor of '
'delete_key_pair method')
return self.delete_key_pair(key_pair=keypair)
def ex_get_size(self, size_id):
"""
Get a NodeSize
:param size_id: ID of the size which should be used
:type size_id: ``str``
:rtype: :class:`NodeSize`
"""
return self._to_size(self.connection.request(
            '/flavors/%s' % (size_id,)).object['flavor'])
def get_image(self, image_id):
"""
Get a NodeImage
@inherits: :class:`NodeDriver.get_image`
:param image_id: ID of the image which should be used
:type image_id: ``str``
:rtype: :class:`NodeImage`
"""
return self._to_image(self.connection.request(
'/images/%s' % (image_id,)).object['image'])
def delete_image(self, image):
"""
Delete a NodeImage
@inherits: :class:`NodeDriver.delete_image`
        :param image: image which should be deleted
:type image: :class:`NodeImage`
:rtype: ``bool``
"""
resp = self.connection.request('/images/%s' % (image.id,),
method='DELETE')
return resp.status == httplib.NO_CONTENT
def _node_action(self, node, action, **params):
params = params or None
return self.connection.request('/servers/%s/action' % (node.id,),
method='POST', data={action: params})
def _update_node(self, node, **node_updates):
"""
Updates the editable attributes of a server, which currently include
its name and IPv4/IPv6 access addresses.
"""
return self._to_node(
self.connection.request(
'/servers/%s' % (node.id,), method='PUT',
data={'server': node_updates}
).object['server']
)
def _to_node_from_obj(self, obj):
return self._to_node(obj['server'])
def _to_node(self, api_node):
public_networks_labels = ['public', 'internet']
public_ips, private_ips = [], []
for label, values in api_node['addresses'].items():
for value in values:
ip = value['addr']
is_public_ip = False
try:
is_public_ip = is_public_subnet(ip)
except Exception:
                    # is_public_subnet() only handles IPv4, so for IPv6 fall
                    # back to the hints below. OpenStack Icehouse sets
                    # 'OS-EXT-IPS:type' to 'floating' for public and 'fixed'
                    # for private addresses.
explicit_ip_type = value.get('OS-EXT-IPS:type', None)
if label in public_networks_labels:
is_public_ip = True
elif explicit_ip_type == 'floating':
is_public_ip = True
elif explicit_ip_type == 'fixed':
is_public_ip = False
if is_public_ip:
public_ips.append(ip)
else:
private_ips.append(ip)
# Sometimes 'image' attribute is not present if the node is in an error
# state
image = api_node.get('image', None)
image_id = image.get('id', None) if image else None
config_drive = api_node.get("config_drive", False)
volumes_attached = api_node.get('os-extended-volumes:volumes_attached')
created = parse_date(api_node["created"])
return Node(
id=api_node['id'],
name=api_node['name'],
state=self.NODE_STATE_MAP.get(api_node['status'],
NodeState.UNKNOWN),
public_ips=public_ips,
private_ips=private_ips,
created_at=created,
driver=self,
extra=dict(
addresses=api_node['addresses'],
hostId=api_node['hostId'],
access_ip=api_node.get('accessIPv4'),
access_ipv6=api_node.get('accessIPv6', None),
                # The docs say "tenantId", but the API actually returns
                # "tenant_id", so handle both.
tenantId=api_node.get('tenant_id') or api_node['tenantId'],
userId=api_node.get('user_id', None),
imageId=image_id,
flavorId=api_node['flavor']['id'],
uri=next(link['href'] for link in api_node['links'] if
link['rel'] == 'self'),
# pylint: disable=no-member
service_name=self.connection.get_service_name(),
metadata=api_node['metadata'],
password=api_node.get('adminPass', None),
created=api_node['created'],
updated=api_node['updated'],
key_name=api_node.get('key_name', None),
disk_config=api_node.get('OS-DCF:diskConfig', None),
config_drive=config_drive,
availability_zone=api_node.get('OS-EXT-AZ:availability_zone'),
volumes_attached=volumes_attached,
task_state=api_node.get("OS-EXT-STS:task_state", None),
vm_state=api_node.get("OS-EXT-STS:vm_state", None),
power_state=api_node.get("OS-EXT-STS:power_state", None),
progress=api_node.get("progress", None),
fault=api_node.get('fault')
),
)
def _to_volume(self, api_node):
if 'volume' in api_node:
api_node = api_node['volume']
state = self.VOLUME_STATE_MAP.get(api_node['status'],
StorageVolumeState.UNKNOWN)
return StorageVolume(
id=api_node['id'],
name=api_node.get('displayName', api_node.get('name')),
size=api_node['size'],
state=state,
driver=self,
extra={
'description': api_node.get('displayDescription',
api_node.get('description')),
'attachments': [att for att in api_node['attachments'] if att],
# TODO: remove in 1.18.0
'state': api_node.get('status', None),
'snapshot_id': api_node.get('snapshot_id',
api_node.get('snapshotId')),
'location': api_node.get('availability_zone',
api_node.get('availabilityZone')),
'volume_type': api_node.get('volume_type',
api_node.get('volumeType')),
'metadata': api_node.get('metadata', None),
'created_at': api_node.get('created_at',
api_node.get('createdAt'))
}
)
def _to_snapshot(self, data):
if 'snapshot' in data:
data = data['snapshot']
volume_id = data.get('volume_id', data.get('volumeId', None))
display_name = data.get('name',
data.get('display_name',
data.get('displayName', None)))
created_at = data.get('created_at', data.get('createdAt', None))
description = data.get('description',
data.get('display_description',
data.get('displayDescription', None)))
status = data.get('status', None)
extra = {'volume_id': volume_id,
'name': display_name,
'created': created_at,
'description': description,
'status': status}
state = self.SNAPSHOT_STATE_MAP.get(
status,
VolumeSnapshotState.UNKNOWN
)
try:
created_dt = parse_date(created_at)
except ValueError:
created_dt = None
snapshot = VolumeSnapshot(id=data['id'], driver=self,
size=data['size'], extra=extra,
created=created_dt, state=state,
name=display_name)
return snapshot
def _to_size(self, api_flavor, price=None, bandwidth=None):
        # Provider-specific subclasses that can obtain better values for
        # price/bandwidth may pass them in when calling super().
if not price:
price = self._get_size_price(str(api_flavor['id']))
extra = api_flavor.get('OS-FLV-WITH-EXT-SPECS:extra_specs', {})
return OpenStackNodeSize(
id=api_flavor['id'],
name=api_flavor['name'],
ram=api_flavor['ram'],
disk=api_flavor['disk'],
vcpus=api_flavor['vcpus'],
ephemeral_disk=api_flavor.get('OS-FLV-EXT-DATA:ephemeral', None),
swap=api_flavor['swap'],
extra=extra,
bandwidth=bandwidth,
price=price,
driver=self,
)
def _get_size_price(self, size_id):
try:
return get_size_price(
driver_type='compute',
driver_name=self.api_name,
size_id=size_id,
)
except KeyError:
return(0.0)
def _extract_image_id_from_url(self, location_header):
path = urlparse.urlparse(location_header).path
image_id = path.split('/')[-1]
return image_id
def ex_rescue(self, node, password=None):
# Requires Rescue Mode extension
"""
Rescue a node
:param node: node
:type node: :class:`Node`
:param password: password
:type password: ``str``
:rtype: :class:`Node`
"""
if password:
resp = self._node_action(node, 'rescue', adminPass=password)
else:
resp = self._node_action(node, 'rescue')
password = json.loads(resp.body)['adminPass']
node.extra['password'] = password
return node
def ex_unrescue(self, node):
"""
Unrescue a node
:param node: node
:type node: :class:`Node`
:rtype: ``bool``
"""
resp = self._node_action(node, 'unrescue')
return resp.status == httplib.ACCEPTED
def _to_floating_ip_pools(self, obj):
pool_elements = obj['floating_ip_pools']
return [self._to_floating_ip_pool(pool) for pool in pool_elements]
def _to_floating_ip_pool(self, obj):
return OpenStack_1_1_FloatingIpPool(obj['name'], self.connection)
def ex_list_floating_ip_pools(self):
"""
List available floating IP pools
:rtype: ``list`` of :class:`OpenStack_1_1_FloatingIpPool`
"""
return self._to_floating_ip_pools(
self.connection.request('/os-floating-ip-pools').object)
def _to_floating_ips(self, obj):
ip_elements = obj['floating_ips']
return [self._to_floating_ip(ip) for ip in ip_elements]
def _to_floating_ip(self, obj):
return OpenStack_1_1_FloatingIpAddress(id=obj['id'],
ip_address=obj['ip'],
pool=None,
node_id=obj['instance_id'],
driver=self)
def ex_list_floating_ips(self):
"""
List floating IPs
:rtype: ``list`` of :class:`OpenStack_1_1_FloatingIpAddress`
"""
return self._to_floating_ips(
self.connection.request('/os-floating-ips').object)
def ex_get_floating_ip(self, ip):
"""
Get specified floating IP
:param ip: floating IP to get
:type ip: ``str``
:rtype: :class:`OpenStack_1_1_FloatingIpAddress`
"""
floating_ips = self.ex_list_floating_ips()
ip_obj, = [x for x in floating_ips if x.ip_address == ip]
return ip_obj
def ex_create_floating_ip(self, ip_pool=None):
"""
Create new floating IP. The ip_pool attribute is optional only if your
infrastructure has only one IP pool available.
:param ip_pool: name of the floating IP pool
:type ip_pool: ``str``
:rtype: :class:`OpenStack_1_1_FloatingIpAddress`
"""
data = {'pool': ip_pool} if ip_pool is not None else {}
resp = self.connection.request('/os-floating-ips',
method='POST',
data=data)
data = resp.object['floating_ip']
id = data['id']
ip_address = data['ip']
return OpenStack_1_1_FloatingIpAddress(id=id,
ip_address=ip_address,
pool=None,
node_id=None,
driver=self)
def ex_delete_floating_ip(self, ip):
"""
Delete specified floating IP
:param ip: floating IP to remove
:type ip: :class:`OpenStack_1_1_FloatingIpAddress`
:rtype: ``bool``
"""
resp = self.connection.request('/os-floating-ips/%s' % ip.id,
method='DELETE')
return resp.status in (httplib.NO_CONTENT, httplib.ACCEPTED)
def ex_attach_floating_ip_to_node(self, node, ip):
"""
Attach the floating IP to the node
:param node: node
:type node: :class:`Node`
:param ip: floating IP to attach
:type ip: ``str`` or :class:`OpenStack_1_1_FloatingIpAddress`
:rtype: ``bool``
"""
address = ip.ip_address if hasattr(ip, 'ip_address') else ip
data = {
'addFloatingIp': {'address': address}
}
resp = self.connection.request('/servers/%s/action' % node.id,
method='POST', data=data)
return resp.status == httplib.ACCEPTED
def ex_detach_floating_ip_from_node(self, node, ip):
"""
Detach the floating IP from the node
:param node: node
:type node: :class:`Node`
:param ip: floating IP to remove
:type ip: ``str`` or :class:`OpenStack_1_1_FloatingIpAddress`
:rtype: ``bool``
"""
address = ip.ip_address if hasattr(ip, 'ip_address') else ip
data = {
'removeFloatingIp': {'address': address}
}
resp = self.connection.request('/servers/%s/action' % node.id,
method='POST', data=data)
return resp.status == httplib.ACCEPTED
def ex_get_metadata_for_node(self, node):
"""
Return the metadata associated with the node.
:param node: Node instance
:type node: :class:`Node`
:return: A dictionary or other mapping of strings to strings,
associating tag names with tag values.
        :rtype: ``dict``
"""
return node.extra['metadata']
def ex_pause_node(self, node):
return self._post_simple_node_action(node, 'pause')
def ex_unpause_node(self, node):
return self._post_simple_node_action(node, 'unpause')
def ex_stop_node(self, node):
return self._post_simple_node_action(node, 'os-stop')
def ex_start_node(self, node):
return self._post_simple_node_action(node, 'os-start')
def ex_suspend_node(self, node):
return self._post_simple_node_action(node, 'suspend')
def ex_resume_node(self, node):
return self._post_simple_node_action(node, 'resume')
def _post_simple_node_action(self, node, action):
""" Post a simple, data-less action to the OS node action endpoint
:param `Node` node:
:param str action: the action to call
:return `bool`: a boolean that indicates success
"""
uri = '/servers/{node_id}/action'.format(node_id=node.id)
resp = self.connection.request(uri, method='POST', data={action: None})
return resp.status == httplib.ACCEPTED
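    # Example use of the simple node actions above (illustrative sketch; the
    # driver and node objects are assumed to come from list_nodes()):
    #   driver.ex_stop_node(node)    # POSTs {'os-stop': None} to /servers/<id>/action
    #   driver.ex_start_node(node)   # POSTs {'os-start': None}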
class OpenStack_2_Connection(OpenStackComputeConnection):
responseCls = OpenStack_1_1_Response
accept_format = 'application/json'
default_content_type = 'application/json; charset=UTF-8'
def encode_data(self, data):
return json.dumps(data)
class OpenStack_2_ImageConnection(OpenStackImageConnection):
responseCls = OpenStack_1_1_Response
accept_format = 'application/json'
default_content_type = 'application/json; charset=UTF-8'
def encode_data(self, data):
return json.dumps(data)
class OpenStack_2_NetworkConnection(OpenStackNetworkConnection):
responseCls = OpenStack_1_1_Response
accept_format = 'application/json'
default_content_type = 'application/json; charset=UTF-8'
def encode_data(self, data):
return json.dumps(data)
class OpenStack_2_VolumeV2Connection(OpenStackVolumeV2Connection):
responseCls = OpenStack_1_1_Response
accept_format = 'application/json'
default_content_type = 'application/json; charset=UTF-8'
def encode_data(self, data):
return json.dumps(data)
class OpenStack_2_PortInterfaceState(Type):
"""
Standard states of OpenStack_2_PortInterfaceState
"""
BUILD = 'build'
ACTIVE = 'active'
DOWN = 'down'
UNKNOWN = 'unknown'
class OpenStack_2_NodeDriver(OpenStack_1_1_NodeDriver):
"""
OpenStack node driver.
"""
connectionCls = OpenStack_2_Connection
# Previously all image functionality was available through the
# compute API. This deprecated proxied API does not offer all
# functionality that the Glance Image service API offers.
# See https://developer.openstack.org/api-ref/compute/
#
# > These APIs are proxy calls to the Image service. Nova has deprecated
# > all the proxy APIs and users should use the native APIs instead. These
# > will fail with a 404 starting from microversion 2.36. See: Relevant
# > Image APIs.
#
# For example, managing image visibility and sharing machine
# images across tenants can not be done using the proxied image API in the
# compute endpoint, but it can be done with the Glance Image API.
# See https://developer.openstack.org/api-ref/
# image/v2/index.html#list-image-members
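    # A minimal illustrative sketch of the Glance-only workflow (the driver,
    # image_id and member_project_id names below are assumed, not defined in
    # this module):
    #   driver.ex_update_image(image_id, [
    #       {'op': 'replace', 'path': '/visibility', 'value': 'shared'}])
    #   driver.ex_create_image_member(image_id, member_project_id)
    # Both calls go through the Glance v2 connection rather than the
    # deprecated compute proxy API.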
image_connectionCls = OpenStack_2_ImageConnection
image_connection = None
# Similarly not all node-related operations are exposed through the
# compute API
# See https://developer.openstack.org/api-ref/compute/
# For example, creating a new node in an OpenStack that is configured to
# create a new port for every new instance will make it so that if that
# port is detached it disappears. But if the port is manually created
# beforehand using the neutron network API and node is booted with that
# port pre-specified, then detaching that port later will result in that
# becoming a re-attachable resource much like a floating ip. So because
# even though this is the compute driver, we do connect to the networking
# API here because some operations relevant for compute can only be
# accessed from there.
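    # Illustrative sketch of the resulting workflow (variable names assumed):
    # a port created up front via Neutron can be attached to and detached from
    # a server without being destroyed.
    #   port = driver.ex_create_port(network, name='reusable-port')
    #   driver.ex_attach_port_interface(node, port)
    #   driver.ex_detach_port_interface(node, port)   # port remains reusable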
network_connectionCls = OpenStack_2_NetworkConnection
network_connection = None
    # Similarly, volume operations are exposed through the block-storage
    # API of the cinder service:
# https://developer.openstack.org/api-ref/block-storage/
volumev2_connectionCls = OpenStack_2_VolumeV2Connection
volumev2_connection = None
type = Provider.OPENSTACK
features = {"create_node": ["generates_password"]}
_networks_url_prefix = '/v2.0/networks'
_subnets_url_prefix = '/v2.0/subnets'
PORT_INTERFACE_MAP = {
'BUILD': OpenStack_2_PortInterfaceState.BUILD,
'ACTIVE': OpenStack_2_PortInterfaceState.ACTIVE,
'DOWN': OpenStack_2_PortInterfaceState.DOWN,
'UNKNOWN': OpenStack_2_PortInterfaceState.UNKNOWN
}
def __init__(self, *args, **kwargs):
original_connectionCls = self.connectionCls
self._ex_force_api_version = str(kwargs.pop('ex_force_api_version',
None))
if 'ex_force_auth_version' not in kwargs:
kwargs['ex_force_auth_version'] = '3.x_password'
original_ex_force_base_url = kwargs.get('ex_force_base_url')
# We run the init once to get the Glance V2 API connection
# and put that on the object under self.image_connection.
if original_ex_force_base_url or kwargs.get('ex_force_image_url'):
kwargs['ex_force_base_url'] = \
str(kwargs.pop('ex_force_image_url',
original_ex_force_base_url))
self.connectionCls = self.image_connectionCls
super(OpenStack_2_NodeDriver, self).__init__(*args, **kwargs)
self.image_connection = self.connection
# We run the init once to get the Cinder V2 API connection
# and put that on the object under self.volumev2_connection.
if original_ex_force_base_url or kwargs.get('ex_force_volume_url'):
kwargs['ex_force_base_url'] = \
str(kwargs.pop('ex_force_volume_url',
original_ex_force_base_url))
self.connectionCls = self.volumev2_connectionCls
super(OpenStack_2_NodeDriver, self).__init__(*args, **kwargs)
self.volumev2_connection = self.connection
# We run the init once to get the Neutron V2 API connection
# and put that on the object under self.network_connection.
if original_ex_force_base_url or kwargs.get('ex_force_network_url'):
kwargs['ex_force_base_url'] = \
str(kwargs.pop('ex_force_network_url',
original_ex_force_base_url))
self.connectionCls = self.network_connectionCls
super(OpenStack_2_NodeDriver, self).__init__(*args, **kwargs)
self.network_connection = self.connection
# We run the init once again to get the compute API connection
# and that's put under self.connection as normal.
self._ex_force_base_url = original_ex_force_base_url
if original_ex_force_base_url:
kwargs['ex_force_base_url'] = self._ex_force_base_url
self.connectionCls = original_connectionCls
super(OpenStack_2_NodeDriver, self).__init__(*args, **kwargs)
def _to_port(self, element):
created = element.get('created_at')
updated = element.get('updated_at')
return OpenStack_2_PortInterface(
id=element['id'],
state=self.PORT_INTERFACE_MAP.get(
element.get('status'), OpenStack_2_PortInterfaceState.UNKNOWN
),
created=created,
driver=self,
extra=dict(
admin_state_up=element['admin_state_up'],
allowed_address_pairs=element['allowed_address_pairs'],
binding_vnic_type=element['binding:vnic_type'],
device_id=element['device_id'],
description=element['description'],
device_owner=element['device_owner'],
fixed_ips=element['fixed_ips'],
mac_address=element['mac_address'],
name=element['name'],
network_id=element['network_id'],
project_id=element.get('project_id', None),
port_security_enabled=element.get('port_security_enabled',
None),
revision_number=element.get('revision_number', None),
security_groups=element['security_groups'],
tags=element.get('tags', None),
tenant_id=element['tenant_id'],
updated=updated,
)
)
def list_nodes(self, ex_all_tenants=False):
"""
List the nodes in a tenant
:param ex_all_tenants: List nodes for all the tenants. Note: Your user
must have admin privileges for this
functionality to work.
:type ex_all_tenants: ``bool``
"""
params = {}
if ex_all_tenants:
params = {'all_tenants': 1}
return self._to_nodes(self._paginated_request(
'/servers/detail', 'servers', self.connection, params=params))
def get_image(self, image_id):
"""
Get a NodeImage using the V2 Glance API
@inherits: :class:`OpenStack_1_1_NodeDriver.get_image`
:param image_id: ID of the image which should be used
:type image_id: ``str``
:rtype: :class:`NodeImage`
"""
return self._to_image(self.image_connection.request(
'/v2/images/%s' % (image_id,)).object)
def list_images(self, location=None, ex_only_active=True):
"""
Lists all active images using the V2 Glance API
@inherits: :class:`NodeDriver.list_images`
:param location: Which data center to list the images in. If
empty, undefined behavior will be selected.
(optional)
:type location: :class:`.NodeLocation`
:param ex_only_active: True if list only active (optional)
:type ex_only_active: ``bool``
"""
if location is not None:
raise NotImplementedError(
"location in list_images is not implemented "
"in the OpenStack_2_NodeDriver")
if not ex_only_active:
raise NotImplementedError(
"ex_only_active in list_images is not implemented "
"in the OpenStack_2_NodeDriver")
response = self.image_connection.request('/v2/images')
images = []
for image in response.object['images']:
images.append(self._to_image(image))
return images
def ex_update_image(self, image_id, data):
"""
Patch a NodeImage. Can be used to set visibility
:param image_id: ID of the image which should be used
:type image_id: ``str``
:param data: The data to PATCH, either a dict or a list
for example: [
{'op': 'replace', 'path': '/visibility', 'value': 'shared'}
]
:type data: ``dict|list``
:rtype: :class:`NodeImage`
"""
response = self.image_connection.request(
'/v2/images/%s' % (image_id,),
headers={'Content-type': 'application/'
'openstack-images-'
'v2.1-json-patch'},
method='PATCH', data=data
)
return self._to_image(response.object)
def ex_list_image_members(self, image_id):
"""
List all members of an image. See
https://developer.openstack.org/api-ref/image/v2/index.html#sharing
:param image_id: ID of the image of which the members should
be listed
:type image_id: ``str``
:rtype: ``list`` of :class:`NodeImageMember`
"""
response = self.image_connection.request(
'/v2/images/%s/members' % (image_id,)
)
image_members = []
for image_member in response.object['members']:
image_members.append(self._to_image_member(image_member))
return image_members
def ex_create_image_member(self, image_id, member_id):
"""
Give a project access to an image.
The image should have visibility status 'shared'.
Note that this is not an idempotent operation. If this action is
attempted using a tenant that is already in the image members
group the API will throw a Conflict (409).
See the 'create-image-member' section on
https://developer.openstack.org/api-ref/image/v2/index.html
        :param image_id: The ID of the image to share with the specified
                         tenant
        :type image_id: ``str``
        :param member_id: The ID of the project / tenant (the image member).
                          Note that this is the Keystone project ID and not
                          the project name, so something like
                          e2151b1fe02d4a8a2d1f5fc331522c0a
        :type member_id: ``str``
        :rtype: :class:`NodeImageMember`
"""
data = {'member': member_id}
response = self.image_connection.request(
'/v2/images/%s/members' % image_id,
method='POST', data=data
)
return self._to_image_member(response.object)
def ex_get_image_member(self, image_id, member_id):
"""
Get a member of an image by id
:param image_id: ID of the image of which the member should
be listed
:type image_id: ``str``
:param member_id: ID of the member to list
        :type member_id: ``str``
        :rtype: :class:`NodeImageMember`
"""
response = self.image_connection.request(
'/v2/images/%s/members/%s' % (image_id, member_id)
)
return self._to_image_member(response.object)
def ex_accept_image_member(self, image_id, member_id):
"""
Accept a pending image as a member.
This call is idempotent unlike ex_create_image_member,
you can accept the same image many times.
:param image_id: ID of the image to accept
:type image_id: ``str``
        :param member_id: ID of the project / tenant accepting the image
        :type member_id: ``str``
        :rtype: :class:`NodeImageMember`
"""
data = {'status': 'accepted'}
response = self.image_connection.request(
'/v2/images/%s/members/%s' % (image_id, member_id),
method='PUT', data=data
)
return self._to_image_member(response.object)
def _to_networks(self, obj):
networks = obj['networks']
return [self._to_network(network) for network in networks]
def _to_network(self, obj):
extra = {}
if obj.get('router:external', None):
extra['router:external'] = obj.get('router:external')
if obj.get('subnets', None):
extra['subnets'] = obj.get('subnets')
return OpenStackNetwork(id=obj['id'],
name=obj['name'],
cidr=None,
driver=self,
extra=extra)
def ex_list_networks(self):
"""
Get a list of Networks that are available.
:rtype: ``list`` of :class:`OpenStackNetwork`
"""
response = self.network_connection.request(
self._networks_url_prefix).object
return self._to_networks(response)
def ex_get_network(self, network_id):
"""
Retrieve the Network with the given ID
        :param network_id: ID of the network
        :type network_id: ``str``
        :rtype: :class:`OpenStackNetwork`
"""
request_url = "{networks_url_prefix}/{network_id}".format(
networks_url_prefix=self._networks_url_prefix,
network_id=network_id
)
response = self.network_connection.request(request_url).object
return self._to_network(response['network'])
def ex_create_network(self, name, **kwargs):
"""
Create a new Network
:param name: Name of network which should be used
:type name: ``str``
:rtype: :class:`OpenStackNetwork`
"""
data = {'network': {'name': name}}
# Add optional values
for key, value in kwargs.items():
data['network'][key] = value
response = self.network_connection.request(self._networks_url_prefix,
method='POST',
data=data).object
return self._to_network(response['network'])
def ex_delete_network(self, network):
"""
Delete a Network
:param network: Network which should be used
:type network: :class:`OpenStackNetwork`
:rtype: ``bool``
"""
resp = self.network_connection.request(
'%s/%s' % (self._networks_url_prefix,
network.id), method='DELETE')
return resp.status in (httplib.NO_CONTENT, httplib.ACCEPTED)
def _to_subnets(self, obj):
subnets = obj['subnets']
return [self._to_subnet(subnet) for subnet in subnets]
def _to_subnet(self, obj):
extra = {}
if obj.get('router:external', None):
extra['router:external'] = obj.get('router:external')
if obj.get('subnets', None):
extra['subnets'] = obj.get('subnets')
return OpenStack_2_SubNet(id=obj['id'],
name=obj['name'],
cidr=obj['cidr'],
network_id=obj['network_id'],
driver=self,
extra=extra)
def ex_list_subnets(self):
"""
Get a list of Subnet that are available.
:rtype: ``list`` of :class:`OpenStack_2_SubNet`
"""
response = self.network_connection.request(
self._subnets_url_prefix).object
return self._to_subnets(response)
def ex_create_subnet(self, name, network, cidr, ip_version=4,
description='', dns_nameservers=None,
host_routes=None):
"""
Create a new Subnet
:param name: Name of subnet which should be used
:type name: ``str``
:param network: Parent network of the subnet
:type network: ``OpenStackNetwork``
:param cidr: cidr of network which should be used
:type cidr: ``str``
:param ip_version: ip_version of subnet which should be used
:type ip_version: ``int``
:param description: Description for the resource.
:type description: ``str``
:param dns_nameservers: List of dns name servers.
:type dns_nameservers: ``list`` of ``str``
:param host_routes: Additional routes for the subnet.
:type host_routes: ``list`` of ``str``
:rtype: :class:`OpenStack_2_SubNet`
"""
data = {
'subnet':
{
'cidr': cidr,
'network_id': network.id,
'ip_version': ip_version,
'name': name or '',
'description': description or '',
'dns_nameservers': dns_nameservers or [],
'host_routes': host_routes or []
}
}
response = self.network_connection.request(
self._subnets_url_prefix, method='POST', data=data).object
return self._to_subnet(response['subnet'])
def ex_delete_subnet(self, subnet):
"""
Delete a Subnet
:param subnet: Subnet which should be deleted
:type subnet: :class:`OpenStack_2_SubNet`
:rtype: ``bool``
"""
resp = self.network_connection.request('%s/%s' % (
self._subnets_url_prefix, subnet.id), method='DELETE')
return resp.status in (httplib.NO_CONTENT, httplib.ACCEPTED)
def ex_update_subnet(self, subnet, name=None, description=None,
dns_nameservers=None, host_routes=None):
"""
Update data of an existing SubNet
:param subnet: Subnet which should be updated
:type subnet: :class:`OpenStack_2_SubNet`
:param name: Name of subnet which should be used
:type name: ``str``
:param description: Description for the resource.
:type description: ``str``
:param dns_nameservers: List of dns name servers.
:type dns_nameservers: ``list`` of ``str``
:param host_routes: Additional routes for the subnet.
:type host_routes: ``list`` of ``str``
:rtype: :class:`OpenStack_2_SubNet`
"""
data = {'subnet': {}}
if name is not None:
data['subnet']['name'] = name
if description is not None:
data['subnet']['description'] = description
if dns_nameservers is not None:
data['subnet']['dns_nameservers'] = dns_nameservers
if host_routes is not None:
data['subnet']['host_routes'] = host_routes
response = self.network_connection.request(
"%s/%s" % (self._subnets_url_prefix, subnet.id),
method='PUT', data=data).object
return self._to_subnet(response['subnet'])
def ex_list_ports(self):
"""
List all OpenStack_2_PortInterfaces
https://developer.openstack.org/api-ref/network/v2/#list-ports
:rtype: ``list`` of :class:`OpenStack_2_PortInterface`
"""
response = self._paginated_request(
'/v2.0/ports', 'ports', self.network_connection)
return [self._to_port(port) for port in response['ports']]
def ex_delete_port(self, port):
"""
Delete an OpenStack_2_PortInterface
https://developer.openstack.org/api-ref/network/v2/#delete-port
:param port: port interface to remove
:type port: :class:`OpenStack_2_PortInterface`
:rtype: ``bool``
"""
response = self.network_connection.request(
'/v2.0/ports/%s' % port.id, method='DELETE'
)
return response.success()
def ex_detach_port_interface(self, node, port):
"""
Detaches an OpenStack_2_PortInterface interface from a Node.
:param node: node
:type node: :class:`Node`
:param port: port interface to detach
:type port: :class:`OpenStack_2_PortInterface`
:rtype: ``bool``
"""
return self.connection.request(
'/servers/%s/os-interface/%s' % (node.id, port.id),
method='DELETE'
).success()
def ex_attach_port_interface(self, node, port):
"""
Attaches an OpenStack_2_PortInterface to a Node.
:param node: node
:type node: :class:`Node`
:param port: port interface to attach
:type port: :class:`OpenStack_2_PortInterface`
:rtype: ``bool``
"""
data = {
'interfaceAttachment': {
'port_id': port.id
}
}
return self.connection.request(
'/servers/{}/os-interface'.format(node.id),
method='POST', data=data
).success()
def ex_create_port(self, network, description=None,
admin_state_up=True, name=None):
"""
Creates a new OpenStack_2_PortInterface
:param network: ID of the network where the newly created
port should be attached to
:type network: :class:`OpenStackNetwork`
:param description: Description of the port
:type description: str
:param admin_state_up: The administrative state of the
resource, which is up or down
:type admin_state_up: bool
:param name: Human-readable name of the resource
:type name: str
:rtype: :class:`OpenStack_2_PortInterface`
"""
data = {
'port':
{
'description': description or '',
'admin_state_up': admin_state_up,
'name': name or '',
'network_id': network.id,
}
}
response = self.network_connection.request(
'/v2.0/ports', method='POST', data=data
)
return self._to_port(response.object['port'])
def ex_get_port(self, port_interface_id):
"""
Retrieve the OpenStack_2_PortInterface with the given ID
:param port_interface_id: ID of the requested port
:type port_interface_id: str
:return: :class:`OpenStack_2_PortInterface`
"""
response = self.network_connection.request(
'/v2.0/ports/{}'.format(port_interface_id), method='GET'
)
return self._to_port(response.object['port'])
def ex_update_port(self, port, description=None,
admin_state_up=None, name=None,
port_security_enabled=None,
qos_policy_id=None, security_groups=None):
"""
Update a OpenStack_2_PortInterface
:param port: port interface to update
:type port: :class:`OpenStack_2_PortInterface`
:param description: Description of the port
:type description: ``str``
:param admin_state_up: The administrative state of the
resource, which is up or down
:type admin_state_up: ``bool``
:param name: Human-readable name of the resource
:type name: ``str``
:param port_security_enabled: The port security status
:type port_security_enabled: ``bool``
:param qos_policy_id: QoS policy associated with the port
:type qos_policy_id: ``str``
:param security_groups: The IDs of security groups applied
:type security_groups: ``list`` of ``str``
:rtype: :class:`OpenStack_2_PortInterface`
"""
data = {'port': {}}
if description is not None:
data['port']['description'] = description
if admin_state_up is not None:
data['port']['admin_state_up'] = admin_state_up
if name is not None:
data['port']['name'] = name
if port_security_enabled is not None:
data['port']['port_security_enabled'] = port_security_enabled
if qos_policy_id is not None:
data['port']['qos_policy_id'] = qos_policy_id
if security_groups is not None:
data['port']['security_groups'] = security_groups
response = self.network_connection.request(
'/v2.0/ports/{}'.format(port.id), method='PUT', data=data
)
return self._to_port(response.object['port'])
def list_volumes(self):
"""
Get a list of Volumes that are available.
:rtype: ``list`` of :class:`StorageVolume`
"""
return self._to_volumes(self._paginated_request(
'/volumes/detail', 'volumes', self.volumev2_connection))
def ex_get_volume(self, volumeId):
"""
Retrieve the StorageVolume with the given ID
:param volumeId: ID of the volume
:type volumeId: ``string``
:return: :class:`StorageVolume`
"""
return self._to_volume(
self.volumev2_connection.request('/volumes/%s' % volumeId).object)
def create_volume(self, size, name, location=None, snapshot=None,
ex_volume_type=None, ex_image_ref=None):
"""
Create a new volume.
:param size: Size of volume in gigabytes (required)
:type size: ``int``
:param name: Name of the volume to be created
:type name: ``str``
:param location: Which data center to create a volume in. If
empty, undefined behavior will be selected.
(optional)
:type location: :class:`.NodeLocation`
:param snapshot: Snapshot from which to create the new
volume. (optional)
:type snapshot: :class:`.VolumeSnapshot`
:param ex_volume_type: What kind of volume to create.
(optional)
:type ex_volume_type: ``str``
:param ex_image_ref: The image to create the volume from
when creating a bootable volume (optional)
:type ex_image_ref: ``str``
:return: The newly created volume.
:rtype: :class:`StorageVolume`
"""
volume = {
'name': name,
'description': name,
'size': size,
'metadata': {
'contents': name,
},
}
if ex_volume_type:
volume['volume_type'] = ex_volume_type
if ex_image_ref:
volume['imageRef'] = ex_image_ref
if location:
volume['availability_zone'] = location
if snapshot:
volume['snapshot_id'] = snapshot.id
resp = self.volumev2_connection.request('/volumes',
method='POST',
data={'volume': volume})
return self._to_volume(resp.object)
def destroy_volume(self, volume):
"""
Delete a Volume.
:param volume: Volume to be deleted
:type volume: :class:`StorageVolume`
:rtype: ``bool``
"""
return self.volumev2_connection.request('/volumes/%s' % volume.id,
method='DELETE').success()
def ex_list_snapshots(self):
"""
Get a list of Snapshot that are available.
:rtype: ``list`` of :class:`VolumeSnapshot`
"""
return self._to_snapshots(self._paginated_request(
'/snapshots/detail', 'snapshots', self.volumev2_connection))
def create_volume_snapshot(self, volume, name=None, ex_description=None,
ex_force=True):
"""
Create snapshot from volume
:param volume: Instance of `StorageVolume`
:type volume: `StorageVolume`
:param name: Name of snapshot (optional)
:type name: `str` | `NoneType`
:param ex_description: Description of the snapshot (optional)
:type ex_description: `str` | `NoneType`
:param ex_force: Specifies if we create a snapshot that is not in
state `available`. For example `in-use`. Defaults
to True. (optional)
:type ex_force: `bool`
:rtype: :class:`VolumeSnapshot`
"""
data = {'snapshot': {'volume_id': volume.id, 'force': ex_force}}
if name is not None:
data['snapshot']['name'] = name
if ex_description is not None:
data['snapshot']['description'] = ex_description
return self._to_snapshot(
self.volumev2_connection.request('/snapshots', method='POST',
data=data).object)
def destroy_volume_snapshot(self, snapshot):
"""
Delete a Volume Snapshot.
:param snapshot: Snapshot to be deleted
:type snapshot: :class:`VolumeSnapshot`
:rtype: ``bool``
"""
resp = self.volumev2_connection.request('/snapshots/%s' % snapshot.id,
method='DELETE')
return resp.status in (httplib.NO_CONTENT, httplib.ACCEPTED)
def ex_list_security_groups(self):
"""
Get a list of Security Groups that are available.
:rtype: ``list`` of :class:`OpenStackSecurityGroup`
"""
return self._to_security_groups(
self.network_connection.request('/v2.0/security-groups').object)
def ex_create_security_group(self, name, description):
"""
Create a new Security Group
:param name: Name of the new Security Group
:type name: ``str``
:param description: Description of the new Security Group
:type description: ``str``
:rtype: :class:`OpenStackSecurityGroup`
"""
        return self._to_security_group(self.network_connection.request(
'/v2.0/security-groups', method='POST',
data={'security_group': {'name': name, 'description': description}}
).object['security_group'])
def ex_delete_security_group(self, security_group):
"""
Delete a Security Group.
:param security_group: Security Group should be deleted
:type security_group: :class:`OpenStackSecurityGroup`
:rtype: ``bool``
"""
resp = self.network_connection.request('/v2.0/security-groups/%s' %
(security_group.id),
method='DELETE')
return resp.status == httplib.NO_CONTENT
def _to_security_group_rule(self, obj):
ip_range = group = tenant_id = parent_id = None
protocol = from_port = to_port = direction = None
if 'parent_group_id' in obj:
if obj['group'] == {}:
ip_range = obj['ip_range'].get('cidr', None)
else:
group = obj['group'].get('name', None)
tenant_id = obj['group'].get('tenant_id', None)
parent_id = obj['parent_group_id']
from_port = obj['from_port']
to_port = obj['to_port']
protocol = obj['ip_protocol']
else:
ip_range = obj.get('remote_ip_prefix', None)
group = obj.get('remote_group_id', None)
tenant_id = obj.get('tenant_id', None)
parent_id = obj['security_group_id']
from_port = obj['port_range_min']
to_port = obj['port_range_max']
            protocol = obj['protocol']
            direction = obj.get('direction', None)
return OpenStackSecurityGroupRule(
id=obj['id'], parent_group_id=parent_id,
ip_protocol=protocol, from_port=from_port,
to_port=to_port, driver=self, ip_range=ip_range,
group=group, tenant_id=tenant_id, direction=direction)
def ex_create_security_group_rule(self, security_group, ip_protocol,
from_port, to_port, cidr=None,
source_security_group=None):
"""
Create a new Rule in a Security Group
:param security_group: Security Group in which to add the rule
:type security_group: :class:`OpenStackSecurityGroup`
:param ip_protocol: Protocol to which this rule applies
Examples: tcp, udp, ...
:type ip_protocol: ``str``
:param from_port: First port of the port range
:type from_port: ``int``
:param to_port: Last port of the port range
:type to_port: ``int``
:param cidr: CIDR notation of the source IP range for this rule
:type cidr: ``str``
:param source_security_group: Existing Security Group to use as the
source (instead of CIDR)
        :type source_security_group: :class:`OpenStackSecurityGroup`
:rtype: :class:`OpenStackSecurityGroupRule`
"""
source_security_group_id = None
if type(source_security_group) == OpenStackSecurityGroup:
source_security_group_id = source_security_group.id
return self._to_security_group_rule(self.network_connection.request(
'/v2.0/security-group-rules', method='POST',
data={'security_group_rule': {
'direction': 'ingress',
'protocol': ip_protocol,
'port_range_min': from_port,
'port_range_max': to_port,
'remote_ip_prefix': cidr,
'remote_group_id': source_security_group_id,
'security_group_id': security_group.id}}
).object['security_group_rule'])
def ex_delete_security_group_rule(self, rule):
"""
Delete a Rule from a Security Group.
:param rule: Rule should be deleted
:type rule: :class:`OpenStackSecurityGroupRule`
:rtype: ``bool``
"""
resp = self.network_connection.request(
'/v2.0/security-group-rules/%s' % (rule.id), method='DELETE')
return resp.status == httplib.NO_CONTENT
def ex_remove_security_group_from_node(self, security_group, node):
"""
Remove a Security Group from a node.
:param security_group: Security Group to remove from node.
:type security_group: :class:`OpenStackSecurityGroup`
:param node: Node to remove the Security Group.
:type node: :class:`Node`
:rtype: ``bool``
"""
server_params = {'name': security_group.name}
resp = self._node_action(node, 'removeSecurityGroup', **server_params)
return resp.status == httplib.ACCEPTED
def _to_floating_ip_pool(self, obj):
return OpenStack_2_FloatingIpPool(obj['id'], obj['name'],
self.network_connection)
def _to_floating_ip_pools(self, obj):
pool_elements = obj['networks']
return [self._to_floating_ip_pool(pool) for pool in pool_elements]
def ex_list_floating_ip_pools(self):
"""
List available floating IP pools
:rtype: ``list`` of :class:`OpenStack_2_FloatingIpPool`
"""
return self._to_floating_ip_pools(
self.network_connection.request('/v2.0/networks?router:external'
'=True&fields=id&fields='
'name').object)
def _to_routers(self, obj):
routers = obj['routers']
return [self._to_router(router) for router in routers]
def _to_router(self, obj):
extra = {}
extra['external_gateway_info'] = obj['external_gateway_info']
extra['routes'] = obj['routes']
return OpenStack_2_Router(id=obj['id'],
name=obj['name'],
status=obj['status'],
driver=self,
extra=extra)
def ex_list_routers(self):
"""
Get a list of Routers that are available.
:rtype: ``list`` of :class:`OpenStack_2_Router`
"""
response = self.network_connection.request(
'/v2.0/routers').object
return self._to_routers(response)
def ex_create_router(self, name, description='', admin_state_up=True,
external_gateway_info=None):
"""
Create a new Router
:param name: Name of router which should be used
:type name: ``str``
:param description: Description of the port
:type description: ``str``
:param admin_state_up: The administrative state of the
resource, which is up or down
:type admin_state_up: ``bool``
:param external_gateway_info: The external gateway information
:type external_gateway_info: ``dict``
:rtype: :class:`OpenStack_2_Router`
"""
data = {
'router':
{
'name': name or '',
'description': description or '',
'admin_state_up': admin_state_up,
}
}
if external_gateway_info:
data['router']['external_gateway_info'] = external_gateway_info
response = self.network_connection.request(
'/v2.0/routers', method='POST', data=data).object
return self._to_router(response['router'])
def ex_delete_router(self, router):
"""
Delete a Router
:param router: Router which should be deleted
:type router: :class:`OpenStack_2_Router`
:rtype: ``bool``
"""
resp = self.network_connection.request('%s/%s' % (
'/v2.0/routers', router.id), method='DELETE')
return resp.status in (httplib.NO_CONTENT, httplib.ACCEPTED)
def _manage_router_interface(self, router, op, subnet=None, port=None):
"""
Add/Remove interface to router
:param router: Router to add/remove the interface
:type router: :class:`OpenStack_2_Router`
:param op: Operation to perform: 'add' or 'remove'
:type op: ``str``
:param subnet: Subnet object to be added to the router
:type subnet: :class:`OpenStack_2_SubNet`
:param port: Port object to be added to the router
:type port: :class:`OpenStack_2_PortInterface`
:rtype: ``bool``
"""
data = {}
if subnet:
data['subnet_id'] = subnet.id
elif port:
data['port_id'] = port.id
else:
raise OpenStackException("Error in router interface: "
"port or subnet are None.", 500,
self)
resp = self.network_connection.request('%s/%s/%s_router_interface' % (
'/v2.0/routers', router.id, op), method='PUT', data=data)
return resp.status == httplib.OK
def ex_add_router_port(self, router, port):
"""
Add port to a router
:param router: Router to add the port
:type router: :class:`OpenStack_2_Router`
:param port: Port object to be added to the router
:type port: :class:`OpenStack_2_PortInterface`
:rtype: ``bool``
"""
return self._manage_router_interface(router, 'add', port=port)
def ex_del_router_port(self, router, port):
"""
Remove port from a router
:param router: Router to remove the port
:type router: :class:`OpenStack_2_Router`
:param port: Port object to be added to the router
:type port: :class:`OpenStack_2_PortInterface`
:rtype: ``bool``
"""
return self._manage_router_interface(router, 'remove', port=port)
def ex_add_router_subnet(self, router, subnet):
"""
Add subnet to a router
:param router: Router to add the subnet
:type router: :class:`OpenStack_2_Router`
:param subnet: Subnet object to be added to the router
:type subnet: :class:`OpenStack_2_SubNet`
:rtype: ``bool``
"""
return self._manage_router_interface(router, 'add', subnet=subnet)
def ex_del_router_subnet(self, router, subnet):
"""
Remove subnet to a router
:param router: Router to remove the subnet
:type router: :class:`OpenStack_2_Router`
:param subnet: Subnet object to be added to the router
:type subnet: :class:`OpenStack_2_SubNet`
:rtype: ``bool``
"""
return self._manage_router_interface(router, 'remove', subnet=subnet)
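    # Illustrative router wiring sketch (the subnet object is assumed to come
    # from ex_list_subnets() or ex_create_subnet()):
    #   router = driver.ex_create_router('gw-router')
    #   driver.ex_add_router_subnet(router, subnet)   # PUT .../add_router_interface
    #   driver.ex_del_router_subnet(router, subnet)   # PUT .../remove_router_interface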
class OpenStack_1_1_FloatingIpPool(object):
"""
Floating IP Pool info.
"""
def __init__(self, name, connection):
self.name = name
self.connection = connection
def list_floating_ips(self):
"""
List floating IPs in the pool
:rtype: ``list`` of :class:`OpenStack_1_1_FloatingIpAddress`
"""
return self._to_floating_ips(
self.connection.request('/os-floating-ips').object)
def _to_floating_ips(self, obj):
ip_elements = obj['floating_ips']
return [self._to_floating_ip(ip) for ip in ip_elements]
def _to_floating_ip(self, obj):
return OpenStack_1_1_FloatingIpAddress(id=obj['id'],
ip_address=obj['ip'],
pool=self,
node_id=obj['instance_id'],
driver=self.connection.driver)
def get_floating_ip(self, ip):
"""
Get specified floating IP from the pool
:param ip: floating IP to get
:type ip: ``str``
:rtype: :class:`OpenStack_1_1_FloatingIpAddress`
"""
ip_obj, = [x for x in self.list_floating_ips() if x.ip_address == ip]
return ip_obj
def create_floating_ip(self):
"""
Create new floating IP in the pool
:rtype: :class:`OpenStack_1_1_FloatingIpAddress`
"""
resp = self.connection.request('/os-floating-ips',
method='POST',
data={'pool': self.name})
data = resp.object['floating_ip']
id = data['id']
ip_address = data['ip']
return OpenStack_1_1_FloatingIpAddress(id=id,
ip_address=ip_address,
pool=self,
node_id=None,
driver=self.connection.driver)
def delete_floating_ip(self, ip):
"""
Delete specified floating IP from the pool
:param ip: floating IP to remove
:type ip: :class:`OpenStack_1_1_FloatingIpAddress`
:rtype: ``bool``
"""
resp = self.connection.request('/os-floating-ips/%s' % ip.id,
method='DELETE')
return resp.status in (httplib.NO_CONTENT, httplib.ACCEPTED)
def __repr__(self):
return ('<OpenStack_1_1_FloatingIpPool: name=%s>' % self.name)
class OpenStack_1_1_FloatingIpAddress(object):
"""
Floating IP info.
"""
def __init__(self, id, ip_address, pool, node_id=None, driver=None):
self.id = str(id)
self.ip_address = ip_address
self.pool = pool
self.node_id = node_id
self.driver = driver
def delete(self):
"""
Delete this floating IP
:rtype: ``bool``
"""
if self.pool is not None:
return self.pool.delete_floating_ip(self)
elif self.driver is not None:
return self.driver.ex_delete_floating_ip(self)
def __repr__(self):
return ('<OpenStack_1_1_FloatingIpAddress: id=%s, ip_addr=%s,'
' pool=%s, driver=%s>'
% (self.id, self.ip_address, self.pool, self.driver))
class OpenStack_2_FloatingIpPool(object):
"""
Floating IP Pool info.
"""
def __init__(self, id, name, connection):
self.id = id
self.name = name
self.connection = connection
def _to_floating_ips(self, obj):
ip_elements = obj['floatingips']
return [self._to_floating_ip(ip) for ip in ip_elements]
def _to_floating_ip(self, obj):
instance_id = None
        # In Neutron versions prior to 13.0.0, port_details does not exist
if 'port_details' not in obj and 'port_id' in obj and obj['port_id']:
port = self.connection.driver.ex_get_port(obj['port_id'])
if port:
obj['port_details'] = {"device_id": port.extra["device_id"],
"device_owner":
port.extra["device_owner"],
"mac_address":
port.extra["mac_address"]}
if 'port_details' in obj and obj['port_details']:
if obj['port_details']['device_owner'] in ['compute:nova',
'compute:None']:
instance_id = obj['port_details']['device_id']
ip_address = obj['floating_ip_address']
return OpenStack_1_1_FloatingIpAddress(id=obj['id'],
ip_address=ip_address,
pool=self,
node_id=instance_id,
driver=self.connection.driver)
def list_floating_ips(self):
"""
List floating IPs in the pool
:rtype: ``list`` of :class:`OpenStack_1_1_FloatingIpAddress`
"""
return self._to_floating_ips(
self.connection.request('/v2.0/floatingips').object)
def get_floating_ip(self, ip):
"""
Get specified floating IP from the pool
:param ip: floating IP to get
:type ip: ``str``
:rtype: :class:`OpenStack_1_1_FloatingIpAddress`
"""
floating_ips = self._to_floating_ips(
self.connection.request('/v2.0/floatingips?floating_ip_address'
'=%s' % ip).object)
return floating_ips[0]
def create_floating_ip(self):
"""
Create new floating IP in the pool
:rtype: :class:`OpenStack_1_1_FloatingIpAddress`
"""
resp = self.connection.request('/v2.0/floatingips',
method='POST',
data={'floatingip':
{'floating_network_id': self.id}}
)
data = resp.object['floatingip']
id = data['id']
ip_address = data['floating_ip_address']
return OpenStack_1_1_FloatingIpAddress(id=id,
ip_address=ip_address,
pool=self,
node_id=None,
driver=self.connection.driver)
def delete_floating_ip(self, ip):
"""
Delete specified floating IP from the pool
:param ip: floating IP to remove
:type ip: :class:`OpenStack_1_1_FloatingIpAddress`
:rtype: ``bool``
"""
resp = self.connection.request('/v2.0/floatingips/%s' % ip.id,
method='DELETE')
return resp.status in (httplib.NO_CONTENT, httplib.ACCEPTED)
def __repr__(self):
return ('<OpenStack_2_FloatingIpPool: name=%s>' % self.name)
class OpenStack_2_SubNet(object):
"""
A Virtual SubNet.
"""
def __init__(self, id, name, cidr, network_id, driver, extra=None):
self.id = str(id)
self.name = name
self.cidr = cidr
self.network_id = network_id
self.driver = driver
self.extra = extra or {}
def __repr__(self):
return '<OpenStack_2_SubNet id="%s" name="%s" cidr="%s">' % (self.id,
self.name,
self.cidr)
class OpenStack_2_Router(object):
"""
A Virtual Router.
"""
def __init__(self, id, name, status, driver, extra=None):
self.id = str(id)
self.name = name
self.status = status
self.driver = driver
self.extra = extra or {}
def __repr__(self):
return '<OpenStack_2_Router id="%s" name="%s">' % (self.id,
self.name)
class OpenStack_2_PortInterface(UuidMixin):
"""
Port Interface info. Similar in functionality to a floating IP (can be
attached / detached from a compute instance) but implementation-wise a
bit different.
> A port is a connection point for attaching a single device, such as the
> NIC of a server, to a network. The port also describes the associated
> network configuration, such as the MAC and IP addresses to be used on
> that port.
https://docs.openstack.org/python-openstackclient/pike/cli/command-objects/port.html
Also see:
https://developer.openstack.org/api-ref/compute/#port-interfaces-servers-os-interface
"""
def __init__(self, id, state, driver, created=None, extra=None):
"""
:param id: Port Interface ID.
:type id: ``str``
:param state: State of the OpenStack_2_PortInterface.
:type state: :class:`.OpenStack_2_PortInterfaceState`
:param created: A datetime object that represents when the
port interface was created
:type created: ``datetime.datetime``
:param extra: Optional provided specific attributes associated with
this image.
:type extra: ``dict``
"""
self.id = str(id)
self.state = state
self.driver = driver
self.created = created
self.extra = extra or {}
UuidMixin.__init__(self)
def delete(self):
"""
Delete this Port Interface
:rtype: ``bool``
"""
return self.driver.ex_delete_port(self)
def __repr__(self):
return (('<OpenStack_2_PortInterface: id=%s, state=%s, '
'driver=%s ...>')
% (self.id, self.state, self.driver.name))
| apache-2.0 | -4,344,738,230,865,552,400 | 34.097555 | 120 | 0.538004 | false |
drtuxwang/system-config | bin/tocapital.py | 1 | 2244 | #!/usr/bin/env python3
"""
Print arguments with first letter in upper case (camel case).
"""
import argparse
import glob
import os
import signal
import sys
from typing import List
class Options:
"""
Options class
"""
def __init__(self) -> None:
self._args: argparse.Namespace = None
self.parse(sys.argv)
def get_words(self) -> List[str]:
"""
Return list of words.
"""
return self._args.words
def _parse_args(self, args: List[str]) -> None:
parser = argparse.ArgumentParser(
            description='Print arguments with first letter in upper case.',
)
parser.add_argument('words', nargs='+', metavar='word', help='A word.')
self._args = parser.parse_args(args)
def parse(self, args: List[str]) -> None:
"""
Parse arguments
"""
self._parse_args(args[1:])
class Main:
"""
Main class
"""
def __init__(self) -> None:
try:
self.config()
sys.exit(self.run())
except (EOFError, KeyboardInterrupt):
sys.exit(114)
except SystemExit as exception:
sys.exit(exception)
sys.exit(0)
@staticmethod
def config() -> None:
"""
Configure program
"""
if hasattr(signal, 'SIGPIPE'):
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
if os.name == 'nt':
argv = []
for arg in sys.argv:
files = glob.glob(arg) # Fixes Windows globbing bug
if files:
argv.extend(files)
else:
argv.append(arg)
sys.argv = argv
@staticmethod
def run() -> int:
"""
Start program
"""
options = Options()
words = options.get_words()
cwords = []
for word in words:
cparts = []
for part in word.split('-'):
cparts.append(part[:1].upper() + part[1:].lower())
cwords.append('-'.join(cparts))
print(" ".join(cwords))
return 0
if __name__ == '__main__':
if '--pydoc' in sys.argv:
help(__name__)
else:
Main()
| gpl-2.0 | -7,888,770,518,914,146,000 | 21.44 | 79 | 0.499554 | false |
staugur/SwarmOps | src/config.py | 1 | 3033 | # -*- coding: utf-8 -*-
"""
SwarmOps.config
~~~~~~~~~~~~~~
    The program configuration file. Configuration items prefer the value of the corresponding system environment variable when it is set.
:copyright: (c) 2018 by staugur.
:license: MIT, see LICENSE for more details.
"""
from os import getenv
GLOBAL = {
"ProcessName": "SwarmOps",
    # Custom process name.
"Host": getenv("swarmops_host", "0.0.0.0"),
    # Listen address
"Port": getenv("swarmops_port", 10130),
    # Listen port
"LogLevel": getenv("swarmops_loglevel", "DEBUG"),
    # Application log level; one of DEBUG, INFO, WARNING, ERROR, CRITICAL.
}
SSO = {
"app_name": getenv("swarmops_sso_app_name", GLOBAL["ProcessName"]),
    # Application name registered in Passport application management
"app_id": getenv("swarmops_sso_app_id", "app_id"),
    # The `app_id` returned when the application was registered in Passport application management
"app_secret": getenv("swarmops_sso_app_secret", "app_secret"),
    # The `app_secret` returned when the application was registered in Passport application management
"sso_server": getenv("swarmops_sso_server", "YourPassportFQDN"),
    # Fully qualified domain name root of the Passport deployment, e.g. the author's `https://passport.saintic.com`
"sso_allow": getenv("swarmops_sso_allow"),
    # List of uids allowed to log in; format: uid1,uid2,...,uidn
"sso_deny": getenv("swarmops_sso_deny")
    # List of uids denied login; same format as above
}
# System configuration
SYSTEM = {
"HMAC_SHA256_KEY": getenv("swarmops_hmac_sha256_key", "273d32c8d797fa715190c7408ad73811"),
# hmac sha256 key
"AES_CBC_KEY": getenv("swarmops_aes_cbc_key", "YRRGBRYQqrV1gv5A"),
    # Encryption key used by the utils.aes_cbc.CBC class
"JWT_SECRET_KEY": getenv("swarmops_jwt_secret_key", "WBlE7_#qDf2vRb@vM!Zw#lqrg@rdd3A6"),
    # Encryption key used by the utils.jwt.JWTUtil class
}
# Storage configuration section
STORAGE={
"SwarmStorageMode": getenv("swarmops_swarmstoragemode", "local"),
    # How Swarm cluster information is stored; options are `local` (local file storage) and `redis`.
    # With local storage, data is serialized into the logs/SwarmKey and ActiveKey files;
    # with redis storage, multi-point deployment is possible and data is serialized into redis.
"Connection": getenv("swarmops_StorageConnection", "redis://ip:port:password"),
    # This option is meaningful only when SwarmStorageMode is not local.
    # It sets the connection info of the storage backend, e.g. redis; if redis has no password, leave the :password part empty.
"SwarmKey": getenv("swarmops_StorageSwarmKey", "SwarmOps_All"),
    # Key index under which the storage backend keeps all Swarm data
"ActiveKey": getenv("swarmops_StorageActiveKey", "SwarmOps_Active"),
    # Key index under which the storage backend keeps the active cluster data
}
# Private registry configuration section
REGISTRY={
"RegistryAddr": getenv("swarmops_RegistryAddr", "https://registry.saintic.com"),
    # Private registry address, e.g. https://docker.io, http://ip:port
"RegistryVersion": getenv("swarmops_RegistryVersion", 1),
    # Private registry version, 1 or 2
"RegistryAuthentication": getenv("swarmops_RegistryAuthentication", None)
    # Authentication, currently unavailable
}
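# Every option above is read from an environment variable first, so a
# deployment can be configured without editing this file. Illustrative shell
# sketch (values are examples only):
#   export swarmops_host=127.0.0.1
#   export swarmops_port=10130
#   export swarmops_swarmstoragemode=redis
#   export swarmops_StorageConnection=redis://127.0.0.1:6379: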
| bsd-3-clause | -8,325,207,699,234,633,000 | 25.178947 | 114 | 0.673904 | false |
richardingham/octopus | octopus/notifier/sms.py | 1 | 1629 | # Twisted Imports
from twisted.internet import reactor, protocol, defer
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
from twisted.internet.ssl import ClientContextFactory
from twisted.python import log
# System Imports
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
# Sibling Imports
import util as notifier_util
class WebClientContextFactory(ClientContextFactory):
def getContext(self, hostname, port):
return ClientContextFactory.getContext(self)
class _Receiver (protocol.Protocol):
def __init__ (self, d):
self.buf = ''
self.d = d
def dataReceived (self, data):
self.buf += data
def connectionLost (self, reason):
# TODO: test if reason is twisted.web.client.ResponseDone, if not, do an errback
self.d.callback(self.buf)
class ClockworkSMS (object):
def __init__ (self, api_key):
contextFactory = WebClientContextFactory()
self.agent = Agent(reactor, contextFactory)
self._api_key = api_key
def notify (self, destination, message):
destinations = destination.split(",")
if len(destinations) > 50:
log.msg("Max 50 SMS recipients allowed")
params = {
"key": self._api_key,
"to": destination,
"content": message.encode("utf_8", "replace")
}
uri = "https://api.clockworksms.com/http/send.aspx?{:s}"
d = self.agent.request(
"GET",
uri.format(urlencode(params)),
Headers({
'User-Agent': ['octopus'],
}),
None
)
def handle_response (response):
d = defer.Deferred()
response.deliverBody(_Receiver(d))
return d
d.addCallback(handle_response)
return d
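# Illustrative usage sketch (the API key and phone number are placeholders):
#   notifier = ClockworkSMS(api_key="your-clockwork-key")
#   d = notifier.notify("4474xxxxxxxx", "Experiment finished")
#   d.addCallback(log.msg)   # logs the raw response body from api.clockworksms.com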
| mit | -5,075,994,737,181,868,000 | 21.943662 | 82 | 0.711479 | false |
OpenClovis/SAFplus-Availability-Scalability-Platform | doc/doclayout/jsclassindexpage.py | 1 | 1381 | """<module>
This module generates an html page that lists all classes
"""
import pdb
from PyHtmlGen.gen import *
from PyHtmlGen.document import *
from PyHtmlGen.htmldoc import *
from PyHtmlGen.bar import *
# from layoutstyle import *
from PyHtmlGen.layouttable import *
from PyHtmlGen.table import *
from PyHtmlGen.imagehtml import *
from PyHtmlGen.menu import *
from PyHtmlGen.layouthtml import *
from PyHtmlGen.form import *
from PyHtmlGen.attribute import *
from PyHtmlGen.json import *
from PyHtmlGen.cssclass import *
from common import *
from htmlcommon import *
from jscommon import *
from constants import *
def genClasses(cllist):
header = ["Class","Section","File"]
body = []
for obj in cllist:
body.append([obj2tlink(obj,PageLocCenter),parenttLink(obj,TagSection,PageLocCenter),parenttLink(obj,TagFile,PageLocCenter)])
grid = GridFromList(header, body )
#grid.RowBackground(Color(250,250,100),[Color(200,200,200),Color(240,240,240)])
grid.RowAttrs({"class":"classIndexHeaderRow"},[{"class":"classIndexRowA"},{"class":"classIndexRowB"}])
return grid
def generate(objs,cfg,args,tagDict):
objs.sort(key=lambda x: x.name)
mv = genClasses(objs)
hdr = VSplit([resize(2,"Class Directory")])
ctr = HSplit([BR,mv])
fname = "Class.html"
page = [hdr,ctr]
WriteFile(FilePrefix + fname,page,HtmlFragment())
return (fname,page)
#</module>
| gpl-2.0 | 4,678,383,709,413,481,000 | 26.078431 | 128 | 0.737871 | false |
jcsp/manila | manila_tempest_tests/plugin.py | 1 | 1495 | # Copyright 2015 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from tempest import config
from tempest.test_discover import plugins
from manila_tempest_tests import config as config_share
class ManilaTempestPlugin(plugins.TempestPlugin):
def load_tests(self):
base_path = os.path.split(os.path.dirname(
os.path.abspath(__file__)))[0]
test_dir = "manila_tempest_tests/tests"
full_test_dir = os.path.join(base_path, test_dir)
return full_test_dir, base_path
def register_opts(self, conf):
config.register_opt_group(
conf, config_share.service_available_group,
config_share.ServiceAvailableGroup)
config.register_opt_group(conf, config_share.share_group,
config_share.ShareGroup)
def get_opt_lists(self):
return [(config_share.share_group.name, config_share.ShareGroup)]
| apache-2.0 | -4,885,370,306,299,647,000 | 35.463415 | 78 | 0.688294 | false |
jj4jj/sdv | ws_server.py | 1 | 2339 | #-*-coding:utf8-*-
from threading import Thread
import tornado.ioloop
import tornado.web
import tornado.websocket
import json
import log
import config
try:
    # py3
from urllib.parse import urlparse
except ImportError:
    # py2
from urlparse import urlparse
class WebSocketMsgHandler():
def __init__(self):
self.client = None
pass
def on_client_open(self, client):
self.client = client
log.debug('open')
    def on_client_message(self, msg):
        # msg arrives as a dict parsed from the client's JSON payload
        log.debug('msg:' + json.dumps(msg))
        self.reply(msg.get('type', 'echo'), msg.get('data'))
def on_client_close(self):
log.debug('close')
self.client = None
def reply(self, mtype, data):
jmsg = json.dumps({'type':mtype, 'data':data})
log.debug('reply msg:'+jmsg)
self.client.write_message(jmsg)
class WebSocketServer(Thread):
def __init__(self, uri, dispatcher, host=None, port=8888):
Thread.__init__(self)
#############################################
self.uri = uri
self.dispatcher = dispatcher
self.port = port
self.host = host
class _WebSocketServerHandlerProxy(tornado.websocket.WebSocketHandler):
hb_msg = json.dumps({u'type': u'pong', u'data': u'-*-heart-beat-*-'})
def open(self):
dispatcher.on_client_open(self)
def on_message(self, message):
objmsg = json.loads(message)
if objmsg['type'] == 'ping':
self.write_message(self.hb_msg)
else:
dispatcher.on_client_message(objmsg)
def on_close(self):
dispatcher.on_client_close()
def check_origin(self, origin):
return True
self.app = tornado.web.Application([(config.WSS_PREFIX_RGX, _WebSocketServerHandlerProxy)])
self.app.listen(address=host, port=port)
self.io = tornado.ioloop.IOLoop.current()
def stop(self):
self.io.stop()
pass
def run(self):
self.io.start()
if __name__ == "__main__":
ws = WebSocketServer('', WebSocketMsgHandler())
ws.setDaemon(True)
import signal
    def stop_ws(signum=None, frame=None):
ws.stop()
signal.signal(signal.SIGINT, stop_ws)
import sys
signal.signal(signal.SIGTERM, sys.exit)
ws.start()
ws.join()
| mit | 4,949,130,193,569,616,000 | 24.703297 | 99 | 0.568619 | false |
dzolnierz/mysql-utilities | mysql/utilities/__init__.py | 1 | 2324 | #
# Copyright (c) 2010, 2015, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""mysql.utilities"""
# Major, Minor, Patch, Status
VERSION = (1, 5, 6, 'GA', 0)
# Future versions will have to include only the X, Y (no Z).
VERSION_STRING = "%s.%s.%s" % VERSION[0:3]
COPYRIGHT = "2010, 2015 Oracle and/or its affiliates. All rights reserved."
COPYRIGHT_FULL = "Copyright (c) " + COPYRIGHT + """
This is a release of dual licensed MySQL Utilities. For the avoidance of
doubt, this particular copy of the software is released
under the version 2 of the GNU General Public License.
MySQL Utilities is brought to you by Oracle.
"""
LICENSE = "GPLv2"
VERSION_FRM = ("MySQL Utilities {program} version %s \n"
"License type: %s" % (VERSION_STRING, LICENSE))
LICENSE_FRM = (VERSION_FRM + "\n" + COPYRIGHT_FULL)
PYTHON_MIN_VERSION = (2, 6, 0)
PYTHON_MAX_VERSION = (3, 0, 0)
CONNECTOR_MIN_VERSION = (1, 2, 1)
# This dictionary has to be updated whenever a utility is added.
# the format to use is:
# '<utility_name>': (<PYTHON_MIN_VERSION>, <PYTHON_MAX_VERSION>)
AVAILABLE_UTILITIES = {
'mysqlauditadmin': (),
'mysqlauditgrep': (),
'mysqldbcompare': (),
'mysqldbcopy': (),
'mysqldbexport': (),
'mysqldbimport': (),
'mysqldiff': (),
'mysqldiskusage': (),
'mysqlfailover': (),
'mysqlfrm': (),
'mysqlindexcheck': (),
'mysqlmetagrep': (),
'mysqlprocgrep': (),
'mysqlreplicate': (),
'mysqlrpladmin': (),
'mysqlrplcheck': (),
'mysqlrplms': (),
'mysqlrplshow': (),
'mysqlrplsync': (),
'mysqlserverclone': (),
'mysqlserverinfo': (),
'mysqluc': (),
'mysqluserclone': (),
}
| gpl-2.0 | 2,112,526,006,098,551,300 | 31.277778 | 78 | 0.667384 | false |
delete/spymanager | tests/test_subscriptions.py | 1 | 1195 | import sys
sys.path.insert(0, '../spymanager')
sys.path.insert(0, '../')
from tests import create_database_collection
from src.subscriptions import SubscriptionsManager
# Database settings
DATABASE_NAME = 'spies_database'
COLLECTION_NAME = 'subscriptions'
subscriptions_collection = create_database_collection(DATABASE_NAME, COLLECTION_NAME)
subscriptions_manager = SubscriptionsManager(subscriptions_collection)
# User to test
USERNAME = 'pinheirofellipe'
# Clear before tests
subscriptions_manager.remove(USERNAME)
subscriptions_manager.add(USERNAME)
all_subscritions = subscriptions_manager.all()
assert len(all_subscritions) == 1
user = subscriptions_manager.get(USERNAME)
assert user.username == USERNAME
assert user.exists() is True
subscribers = [
{
"spy": "spy1",
"group": "g1",
"chat_id": 123456
}, {
"spy": "spy2",
"group": "g1",
"chat_id": 654321
}
]
user.add_subscribers(subscribers)
assert len(user.subscribers) == 2
subscriber_to_remove = {
"spy": "spy1",
"group": "g1",
"chat_id": 123456
}
user.remove_subscriber(subscriber_to_remove)
assert len(user.subscribers) == 1
print('Well done!')
| mit | -3,622,936,483,652,796,400 | 18.916667 | 85 | 0.697071 | false |
syagev/kaggle_dsb | luna16/src/deep/unet/unet.py | 1 | 7592 | import theano
import theano.tensor as T
import lasagne
from lasagne.layers import InputLayer, Conv2DLayer, MaxPool2DLayer, batch_norm, DropoutLayer, GaussianNoiseLayer
from lasagne.init import HeNormal
from lasagne import nonlinearities
from lasagne.layers import ConcatLayer, Upscale2DLayer
from lasagne.regularization import l2, regularize_network_params
import logging
from params import params as P
import numpy as np
def output_size_for_input(in_size, depth):
in_size -= 4
for _ in range(depth-1):
in_size = in_size//2
in_size -= 4
for _ in range(depth-1):
in_size = in_size*2
in_size -= 4
return in_size
NET_DEPTH = P.DEPTH #Default 5
INPUT_SIZE = P.INPUT_SIZE #Default 512
OUTPUT_SIZE = output_size_for_input(INPUT_SIZE, NET_DEPTH)
def filter_for_depth(depth):
return 2**(P.BRANCHING_FACTOR+depth)
def define_network(input_var):
batch_size = None
net = {}
net['input'] = InputLayer(shape=(batch_size,P.CHANNELS,P.INPUT_SIZE,P.INPUT_SIZE), input_var=input_var)
nonlinearity = nonlinearities.leaky_rectify
if P.GAUSSIAN_NOISE > 0:
net['input'] = GaussianNoiseLayer(net['input'], sigma=P.GAUSSIAN_NOISE)
def contraction(depth, deepest):
n_filters = filter_for_depth(depth)
incoming = net['input'] if depth == 0 else net['pool{}'.format(depth-1)]
net['conv{}_1'.format(depth)] = Conv2DLayer(incoming,
num_filters=n_filters, filter_size=3, pad='valid',
W=HeNormal(gain='relu'),
nonlinearity=nonlinearity)
net['conv{}_2'.format(depth)] = Conv2DLayer(net['conv{}_1'.format(depth)],
num_filters=n_filters, filter_size=3, pad='valid',
W=HeNormal(gain='relu'),
nonlinearity=nonlinearity)
if P.BATCH_NORMALIZATION:
net['conv{}_2'.format(depth)] = batch_norm(net['conv{}_2'.format(depth)], alpha=P.BATCH_NORMALIZATION_ALPHA)
if not deepest:
net['pool{}'.format(depth)] = MaxPool2DLayer(net['conv{}_2'.format(depth)], pool_size=2, stride=2)
def expansion(depth, deepest):
n_filters = filter_for_depth(depth)
incoming = net['conv{}_2'.format(depth+1)] if deepest else net['_conv{}_2'.format(depth+1)]
upscaling = Upscale2DLayer(incoming,4)
net['upconv{}'.format(depth)] = Conv2DLayer(upscaling,
num_filters=n_filters, filter_size=2, stride=2,
W=HeNormal(gain='relu'),
nonlinearity=nonlinearity)
if P.SPATIAL_DROPOUT > 0:
bridge_from = DropoutLayer(net['conv{}_2'.format(depth)], P.SPATIAL_DROPOUT)
else:
bridge_from = net['conv{}_2'.format(depth)]
net['bridge{}'.format(depth)] = ConcatLayer([
net['upconv{}'.format(depth)],
bridge_from],
axis=1, cropping=[None, None, 'center', 'center'])
net['_conv{}_1'.format(depth)] = Conv2DLayer(net['bridge{}'.format(depth)],
num_filters=n_filters, filter_size=3, pad='valid',
W=HeNormal(gain='relu'),
nonlinearity=nonlinearity)
#if P.BATCH_NORMALIZATION:
# net['_conv{}_1'.format(depth)] = batch_norm(net['_conv{}_1'.format(depth)])
if P.DROPOUT > 0:
net['_conv{}_1'.format(depth)] = DropoutLayer(net['_conv{}_1'.format(depth)], P.DROPOUT)
net['_conv{}_2'.format(depth)] = Conv2DLayer(net['_conv{}_1'.format(depth)],
num_filters=n_filters, filter_size=3, pad='valid',
W=HeNormal(gain='relu'),
nonlinearity=nonlinearity)
for d in range(NET_DEPTH):
#There is no pooling at the last layer
deepest = d == NET_DEPTH-1
contraction(d, deepest)
for d in reversed(range(NET_DEPTH-1)):
deepest = d == NET_DEPTH-2
expansion(d, deepest)
# Output layer
net['out'] = Conv2DLayer(net['_conv0_2'], num_filters=P.N_CLASSES, filter_size=(1,1), pad='valid',
nonlinearity=None)
#import network_repr
#print network_repr.get_network_str(net['out'])
logging.info('Network output shape '+ str(lasagne.layers.get_output_shape(net['out'])))
return net
def score_metrics(out, target_var, weight_map, l2_loss=0):
_EPSILON=1e-8
out_flat = out.dimshuffle(1,0,2,3).flatten(ndim=2).dimshuffle(1,0)
target_flat = target_var.dimshuffle(1,0,2,3).flatten(ndim=1)
weight_flat = weight_map.dimshuffle(1,0,2,3).flatten(ndim=1)
prediction = lasagne.nonlinearities.softmax(out_flat)
prediction_binary = T.argmax(prediction, axis=1)
dice_score = (T.sum(T.eq(2, prediction_binary+target_flat))*2.0 /
(T.sum(prediction_binary) + T.sum(target_flat)))
loss = lasagne.objectives.categorical_crossentropy(T.clip(prediction,_EPSILON,1-_EPSILON), target_flat)
loss = loss * weight_flat
loss = loss.mean()
loss += l2_loss
accuracy = T.mean(T.eq(prediction_binary, target_flat),
dtype=theano.config.floatX)
return loss, accuracy, dice_score, target_flat, prediction, prediction_binary
def define_updates(network, input_var, target_var, weight_var):
params = lasagne.layers.get_all_params(network, trainable=True)
out = lasagne.layers.get_output(network)
test_out = lasagne.layers.get_output(network, deterministic=True)
l2_loss = P.L2_LAMBDA * regularize_network_params(network, l2)
train_metrics = score_metrics(out, target_var, weight_var, l2_loss)
loss, acc, dice_score, target_prediction, prediction, prediction_binary = train_metrics
val_metrics = score_metrics(test_out, target_var, weight_var, l2_loss)
    # Unpack the deterministic (validation) metrics rather than reusing train_metrics.
    t_loss, t_acc, t_dice_score, t_target_prediction, t_prediction, t_prediction_binary = val_metrics
l_r = theano.shared(np.array(P.LEARNING_RATE, dtype=theano.config.floatX))
if P.OPTIMIZATION == 'nesterov':
updates = lasagne.updates.nesterov_momentum(
loss, params, learning_rate=l_r, momentum=P.MOMENTUM)
if P.OPTIMIZATION == 'adam':
updates = lasagne.updates.adam(
loss, params, learning_rate=l_r)
logging.info("Defining train function")
train_fn = theano.function([input_var, target_var, weight_var],[
loss, l2_loss, acc, dice_score, target_prediction, prediction, prediction_binary],
updates=updates)
logging.info("Defining validation function")
val_fn = theano.function([input_var, target_var, weight_var], [
t_loss, l2_loss, t_acc, t_dice_score, t_target_prediction, t_prediction, t_prediction_binary])
return train_fn, val_fn, l_r
def define_predict(network, input_var):
params = lasagne.layers.get_all_params(network, trainable=True)
out = lasagne.layers.get_output(network, deterministic=True)
out_flat = out.dimshuffle(1,0,2,3).flatten(ndim=2).dimshuffle(1,0)
prediction = lasagne.nonlinearities.softmax(out_flat)
print "Defining predict"
predict_fn = theano.function([input_var],[prediction])
return predict_fn
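
# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition, not part of the original project):
# it only illustrates how define_network / define_updates / define_predict fit
# together. The symbolic variable names and the itensor4 target type are
# assumptions based on how score_metrics indexes its inputs.
def example_build_functions():
    input_var = T.tensor4('inputs')      # (batch, channels, height, width)
    target_var = T.itensor4('targets')   # integer class label per pixel
    weight_var = T.tensor4('weights')    # per-pixel loss weights

    net = define_network(input_var)
    train_fn, val_fn, learning_rate = define_updates(net['out'], input_var,
                                                     target_var, weight_var)
    predict_fn = define_predict(net['out'], input_var)
    return train_fn, val_fn, predict_fn, learning_rate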
| apache-2.0 | 8,048,763,956,391,622,000 | 40.037838 | 126 | 0.597208 | false |
CaliopeProject/CaliopeServer | src/cid/modules/siim2_forms/models/editable_models.py | 1 | 1346 | # -*- encoding: utf-8 -*-
"""
@authors: Andrés Felipe Calderón [email protected]
Sebastián Ortiz V. [email protected]
@license: GNU AFFERO GENERAL PUBLIC LICENSE
SIIM2 Models are the data definition of SIIM2 Information System
Copyright (C) 2013 Infometrika Ltda.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
#system, and standard library
#neomodel primitives
from neomodel.properties import (Property,
DateTimeProperty,
FloatProperty,
IntegerProperty,
StringProperty)
from cid.core.forms.models import SIIMForm
class ProjectForm(SIIMForm):
pass | agpl-3.0 | 160,416,028,745,061,100 | 36.333333 | 79 | 0.685034 | false |
kbsezginel/tee_mof | thermof/initialize/job.py | 1 | 2422 | # Date: September 2017
# Author: Kutay B. Sezginel
"""
Initializing job submission files for computing cluster
"""
import os
from thermof.sample import slurm_file, slurm_scratch_file, pbs_file
from . import read_lines, write_lines
def job_submission_file(simdir, parameters, verbose=True):
""" Generate job submission file from given parameters """
jobpar = parameters.job
file_name = os.path.join(simdir, '%s.%s' % (jobpar['prefix'], jobpar['name']))
print('III. Writing %s job submission file -> %s' % (jobpar['scheduler'], file_name)) if verbose else None
if jobpar['scheduler'] == 'slurm':
write_slurm_file(file_name, jobpar, sample=slurm_file)
elif jobpar['scheduler'] == 'slurm-scratch':
write_slurm_file(file_name, jobpar, sample=slurm_scratch_file)
elif jobpar['scheduler'] == 'pbs':
write_pbs_file(file_name, jobpar, sample=pbs_file)
else:
print('Select job scheduler: slurm / slurm-scratch / pbs')
def write_slurm_file(file_name, jobpar, sample):
""" Write slurm job submission file """
job_lines = read_lines(sample)
job_lines[2] = '#SBATCH --job-name=%s\n' % jobpar['name']
job_lines[3] = '#SBATCH --output=%s.out\n' % jobpar['name']
job_lines[4] = '#SBATCH --nodes=%i\n' % jobpar['nodes']
job_lines[5] = '#SBATCH --ntasks-per-node=%i\n' % jobpar['ppn']
job_lines[6] = '#SBATCH --time=%s\n' % jobpar['walltime']
job_lines[7] = '#SBATCH --cluster=%s\n' % jobpar['cluster']
if jobpar['scheduler'] == 'slurm':
job_lines[18] = 'srun --mpi=pmi2 lmp_mpi -in %s > %s\n' % (jobpar['input'], jobpar['output'])
elif jobpar['scheduler'] == 'slurm-scratch':
job_lines[24] = 'zfs=%s\n' % jobpar['zfsdir']
job_lines[38] = 'lmpdir=%s\n' % (jobpar['lmpdir'])
job_lines[39] = 'srun --mpi=pmi2 $lmpdir -in %s > %s\n' % (jobpar['input'], jobpar['output'])
write_lines(file_name, job_lines)
def write_pbs_file(file_name, jobpar, sample):
""" Write PBS job submission file """
job_lines = read_lines(sample)
job_lines[3] = '#PBS -N %s\n' % jobpar['name']
job_lines[4] = '#PBS -q %s\n' % jobpar['queue']
job_lines[5] = '#PBS -l nodes=%i:ppn=%i\n' % (jobpar['nodes'], jobpar['ppn'])
job_lines[6] = '#PBS -l walltime=%s\n' % jobpar['walltime']
job_lines[15] = 'prun lammps < %s > %s' % (jobpar['input'], jobpar['output'])
write_lines(file_name, job_lines)
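
# Hedged illustration (editor's addition): the writers above read their values
# from a job-parameter dictionary. The keys below mirror the lookups in
# job_submission_file / write_slurm_file / write_pbs_file; the concrete values
# are assumptions, not thermof defaults.
EXAMPLE_SLURM_JOB_PARAMETERS = {
    'scheduler': 'slurm', 'prefix': 'job', 'name': 'thermal-flux',
    'nodes': 2, 'ppn': 24, 'walltime': '24:00:00', 'cluster': 'mpi',
    'input': 'in.thermof', 'output': 'lammps_out.txt',
}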
| mit | 3,736,780,771,206,809,600 | 45.576923 | 110 | 0.618084 | false |
mcroydon/django-tumbleweed | tumbleweed/views.py | 1 | 11972 | from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.core.paginator import Paginator
from django.http import Http404, HttpResponseServerError
from haystack.query import SearchQuerySet
from django.conf import settings
import datetime, time
RESULTS_PER_PAGE = getattr(settings, 'TUMBLEWEED_RESULTS_PER_PAGE', 20)
def tumble(request, date_field='pub_date', template_name='tumbleweed/tumble.html', searchqueryset=None,
paginate_by=RESULTS_PER_PAGE, context_class=RequestContext, extra_context={}):
"""
A tumblelog view that harnesses the denormalized data in a haystack index.
Optional parameters:
date_field
The name of the field in your `haystack`_ index that you would like to order
your tumbles by. Default: ``pub_date``.
template_name
The name of the template to render. Default: :template:`tumbleweed/tumble.html`.
searchqueryset
You may pass in your own SearchQuerySet_ if you would like to further restrict
what items show up in the tumble view. This is useful for filtering only live
objects or only objects whose publication date has passed. Default: ``None``.
paginate_by
The number of objects to include in each page of the tumble. Default:
``TUMBLEWEED_RESULTS_PER_PAGE`` in your settings file, or 20.
context_class
Pass in your own `context class`_. Default: Django's ``RequestContext``.
extra_context
A dictionary containing extra variables to be included in the context, similar
to ``extra_context`` included in Django's generic views.
Template context:
page
The current page of haystack results.
paginator
The Paginator_ for access to information about the paginated list
for creating next/previous links, showing the total number of
tumbled items, etc.
.. _haystack: http://haystacksearch.org/
.. _SearchQuerySet: http://haystacksearch.org/docs/searchqueryset_api.html
.. _context class: http://docs.djangoproject.com/en/dev/ref/templates/api/#id1
.. _Paginator: http://docs.djangoproject.com/en/dev/topics/pagination/
"""
if not searchqueryset:
searchqueryset = SearchQuerySet().all()
things = searchqueryset.order_by('-%s' % date_field)
paginator = Paginator(things, paginate_by)
try:
page = paginator.page(int(request.GET.get('page', 1)))
except ValueError:
raise Http404
context_dict = {
'page': page,
'paginator': paginator,
}
context_dict.update(extra_context)
return render_to_response(template_name, context_dict, context_instance=context_class(request))
def archive_year(request, year, searchqueryset=None, date_field='pub_date', template_name='tumbleweed/tumble_archive_year.html', **kwargs):
"""
    A paginated list of tumbled items for a given year.
Required parameters:
year
The year to tumble, usually passed in as part of the URL.
Optional parameters:
date_field
The name of the field in your `haystack`_ index that you would like to order
your tumbles by. Default: ``pub_date``.
template_name
            The name of the template to render. Default: :template:`tumbleweed/tumble_archive_year.html`.
searchqueryset
You may pass in your own SearchQuerySet_ if you would like to further restrict
what items show up in the tumble view. This is useful for filtering only live
objects or only objects whose publication date has passed. Default: ``None``.
paginate_by
The number of objects to include in each page of the tumble. Default:
``TUMBLEWEED_RESULTS_PER_PAGE`` in your settings file, or 20.
context_class
Pass in your own `context class`_. Default: Django's ``RequestContext``.
extra_context
A dictionary containing extra variables to be included in the context, similar
to ``extra_context`` included in Django's generic views.
Template context:
page
The current page of haystack results.
paginator
The Paginator_ for access to information about the paginated list
for creating next/previous links, showing the total number of
tumbled items, etc.
.. _haystack: http://haystacksearch.org/
.. _SearchQuerySet: http://haystacksearch.org/docs/searchqueryset_api.html
.. _context class: http://docs.djangoproject.com/en/dev/ref/templates/api/#id1
.. _Paginator: http://docs.djangoproject.com/en/dev/topics/pagination/
"""
if not searchqueryset:
searchqueryset = SearchQuerySet().all()
try:
year = int(year)
except ValueError:
return HttpResponseServerError(u'An integer is required for year.')
# TODO: Less ugly, please.
lookup_kwargs = {
'%s__gte' % date_field: datetime.datetime(year, 1, 1),
'%s__lte' % date_field: datetime.datetime(year, 12, 31, 23, 59, 59)
}
return tumble(request, searchqueryset=searchqueryset.filter(**lookup_kwargs), template_name=template_name, **kwargs)
def archive_month(request, year, month, searchqueryset=None, date_field='pub_date', month_format='%b',
template_name='tumbleweed/tumble_archive_month.html', **kwargs):
"""
    A paginated list of tumbled items for a given month.
Required parameters:
year
The year to tumble, usually passed in as part of the URL.
month
The month to tumble, usually passed in as part of the URL.
Optional parameters:
month_format
The `date formatting`_ code used to interpret the month passed in as a string.
Default: ``%b``.
date_field
The name of the field in your `haystack`_ index that you would like to order
your tumbles by. Default: ``pub_date``.
template_name
            The name of the template to render. Default: :template:`tumbleweed/tumble_archive_month.html`.
searchqueryset
You may pass in your own SearchQuerySet_ if you would like to further restrict
what items show up in the tumble view. This is useful for filtering only live
objects or only objects whose publication date has passed. Default: ``None``.
paginate_by
The number of objects to include in each page of the tumble. Default:
``TUMBLEWEED_RESULTS_PER_PAGE`` in your settings file, or 20.
context_class
Pass in your own `context class`_. Default: Django's ``RequestContext``.
extra_context
A dictionary containing extra variables to be included in the context, similar
to ``extra_context`` included in Django's generic views.
Template context:
page
The current page of haystack results.
paginator
The Paginator_ for access to information about the paginated list
for creating next/previous links, showing the total number of
tumbled items, etc.
.. _date formatting: http://docs.djangoproject.com/en/dev/ref/templates/builtins/#now
.. _haystack: http://haystacksearch.org/
.. _SearchQuerySet: http://haystacksearch.org/docs/searchqueryset_api.html
.. _context class: http://docs.djangoproject.com/en/dev/ref/templates/api/#id1
.. _Paginator: http://docs.djangoproject.com/en/dev/topics/pagination/
"""
if not searchqueryset:
searchqueryset = SearchQuerySet().all()
# TODO: day list?
# This logic courtesy of Django's date-based generic views
try:
tt = time.strptime("%s-%s" % (year, month), '%s-%s' % ('%Y', month_format))
date = datetime.date(*tt[:3])
except ValueError:
raise Http404
now = datetime.datetime.now()
# Calculate first and last day of month, for use in a date-range lookup.
first_day = date.replace(day=1)
if first_day.month == 12:
last_day = first_day.replace(year=first_day.year + 1, month=1)
else:
last_day = first_day.replace(month=first_day.month + 1)
lookup_kwargs = {
'%s__gte' % date_field: first_day,
'%s__lt' % date_field: last_day,
}
return tumble(request, searchqueryset=searchqueryset.filter(**lookup_kwargs), template_name=template_name, **kwargs)
def archive_day(request, year, month, day, searchqueryset=None, date_field='pub_date', month_format='%b', day_format='%d',
template_name='tumbleweed/tumble_archive_day.html', **kwargs):
"""
    A paginated list of tumbled items for a given day.
Required parameters:
year
The year to tumble, usually passed in as part of the URL.
month
The month to tumble, usually passed in as part of the URL.
day
            The day to tumble, usually passed in as part of the URL.
Optional parameters:
month_format
The `date formatting`_ code used to interpret the month passed in as a string.
Default: ``%b``.
day_format
            The `date formatting`_ code used to interpret the day passed in as a string.
Default: ``%d``.
date_field
The name of the field in your `haystack`_ index that you would like to order
your tumbles by. Default: ``pub_date``.
template_name
            The name of the template to render. Default: :template:`tumbleweed/tumble_archive_day.html`.
searchqueryset
You may pass in your own SearchQuerySet_ if you would like to further restrict
what items show up in the tumble view. This is useful for filtering only live
objects or only objects whose publication date has passed. Default: ``None``.
paginate_by
The number of objects to include in each page of the tumble. Default:
``TUMBLEWEED_RESULTS_PER_PAGE`` in your settings file, or 20.
context_class
Pass in your own `context class`_. Default: Django's ``RequestContext``.
extra_context
A dictionary containing extra variables to be included in the context, similar
to ``extra_context`` included in Django's generic views.
Template context:
page
The current page of haystack results.
paginator
The Paginator_ for access to information about the paginated list
for creating next/previous links, showing the total number of
tumbled items, etc.
.. _date formatting: http://docs.djangoproject.com/en/dev/ref/templates/builtins/#now
.. _haystack: http://haystacksearch.org/
.. _SearchQuerySet: http://haystacksearch.org/docs/searchqueryset_api.html
.. _context class: http://docs.djangoproject.com/en/dev/ref/templates/api/#id1
.. _Paginator: http://docs.djangoproject.com/en/dev/topics/pagination/
"""
if not searchqueryset:
searchqueryset = SearchQuerySet().all()
# More logic courtesy of Django
try:
tt = time.strptime('%s-%s-%s' % (year, month, day),
'%s-%s-%s' % ('%Y', month_format, day_format))
date = datetime.date(*tt[:3])
except ValueError:
raise Http404
lookup_kwargs = {
'%s__gte' % date_field: datetime.datetime.combine(date, datetime.time.min),
'%s__lte' % date_field: datetime.datetime.combine(date, datetime.time.max)
}
return tumble(request, searchqueryset=searchqueryset.filter(**lookup_kwargs), template_name=template_name, **kwargs)
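
# Hedged usage sketch (editor's addition, not part of the original app): these
# views are plain functions, so they are typically wired up through a URLconf.
# The URL names, the month regex and the SearchQuerySet filter below are
# illustrative assumptions.
#
#     from django.conf.urls.defaults import patterns, url
#     from haystack.query import SearchQuerySet
#
#     sqs = SearchQuerySet().all()
#     urlpatterns = patterns('tumbleweed.views',
#         url(r'^$', 'tumble', {'searchqueryset': sqs}, name='tumbleweed_tumble'),
#         url(r'^(?P<year>\d{4})/$', 'archive_year', name='tumbleweed_year'),
#         url(r'^(?P<year>\d{4})/(?P<month>\w{3})/$', 'archive_month', name='tumbleweed_month'),
#         url(r'^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{1,2})/$', 'archive_day', name='tumbleweed_day'),
#     )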
| bsd-3-clause | 96,717,738,742,192,700 | 39.040134 | 139 | 0.635483 | false |
Southpaw-TACTIC/TACTIC | src/pyasm/prod/web/prod_wdg.py | 1 | 7380 | ###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['IntrospectWdg','IntrospectSelectWdg', 'ProdIconButtonWdg',
'ProdIconSubmitWdg', 'SnapshotInfoWdg', 'SnapshotLabelWdg',
'AssetLibrarySelectionWdg', 'SObjectSelectionWdg']
from pyasm.web import *
from pyasm.widget import *
from pyasm.search import Search, SObject
from pyasm.prod.biz import *
from pyasm.common import Container
from pyasm.prod.load import ProdLoaderContext
class ProdIconButtonWdg(IconButtonWdg):
def __init__(self, name=None, icon=None, long=True, icon_pos="left"):
super(ProdIconButtonWdg,self).__init__(name, icon, long, icon_pos)
self.add_style("line-height: 14px")
self.add_style("font-size: 0.8em")
self.add_style("padding: 3px 10px 3px 10px")
class ProdIconSubmitWdg(IconSubmitWdg):
def __init__(self, name=None, icon=None, long=True, icon_pos="left"):
super(ProdIconSubmitWdg,self).__init__(name, icon, long, icon_pos)
self.add_style("line-height: 14px")
self.add_style("font-size: 0.8em")
self.add_style("padding: 3px 10px 3px 10px")
class IntrospectWdg(ProdIconSubmitWdg):
'''a widget that does introspection to analyze/update what
assets(versions) are loaded in the session of the app'''
def __init__(self):
super(IntrospectWdg, self).__init__("Introspect", long=True)
self.add_style("height: 14px")
self.add_style("font-size: 0.8em")
#self.add_style("padding: 3px 10px 2px 10px")
self.add_behavior({'type': "click", 'cbjs_action': "introspect(bvr)"})
class IntrospectSelectWdg(ProdIconSubmitWdg):
'''a widget that does selected introspection to analyze/update
what assets(versions) are loaded in the session of the app'''
def __init__(self):
super(IntrospectSelectWdg, self).__init__("Introspect Select", long=True)
self.add_style("height: 14px")
self.add_style("font-size: 0.8em")
self.add_event("onclick", "introspect_select()")
class SnapshotInfoWdg(BaseTableElementWdg):
'''a widget that extracts the info of the xml snippet of a snapshot'''
def preprocess(self):
search_type_list = SObject.get_values(self.sobjects, 'search_type', unique=True)
search_id_dict = {}
self.ref_sobject_cache = {}
# initialize the search_id_dict
for type in search_type_list:
search_id_dict[type] = []
# cache it first
for sobject in self.sobjects:
search_type = sobject.get_value('search_type')
search_id_list = search_id_dict.get(search_type)
search_id_list.append(sobject.get_value('search_id'))
from pyasm.search import SearchException
for key, value in search_id_dict.items():
try:
ref_sobjects = Search.get_by_id(key, value)
sobj_dict = SObject.get_dict(ref_sobjects)
except SearchException as e:
print("WARNING: search_type [%s] with id [%s] does not exist" % (key, value))
print(str(e))
sobj_dict = {}
# store a dict of dict with the search_type as key
self.ref_sobject_cache[key] = sobj_dict
def get_display(self):
search_type = self.get_current_sobject().get_value('search_type')
search_id = self.get_current_sobject().get_value('search_id')
sobject = None
if self.ref_sobject_cache:
sobj_dict = self.ref_sobject_cache.get(search_type)
if sobj_dict:
sobject = sobj_dict.get(str(search_id))
else:
sobject = Search.get_by_id(search_type, search_id)
if sobject:
if isinstance(sobject, ShotInstance):
code = "%s-%s" %(sobject.get_value('shot_code'), sobject.get_code())
elif sobject.has_value('name'):
code = "%s-%s" %(sobject.get_value('name'), sobject.get_code())
else:
code = sobject.get_code()
else:
code = "n/a"
return code
class SnapshotLabelWdg(BaseTableElementWdg):
def get_snapshot(self, mode):
''' get the snapshot depending on the mode i.e. input, output'''
dict = self.get_current_aux_data()
output_snapshots = input_snapshots = None
if dict and '%s_snapshots'%mode in dict:
if mode == 'output':
output_snapshots = dict.get('%s_snapshots' %mode)
else:
input_snapshots = dict.get('%s_snapshots' %mode)
else:
sobject = self.get_current_sobject()
context = self.get_context()
loader = ProdLoaderContext()
output_snapshots = loader.get_output_snapshots(sobject, context)
input_snapshots = loader.get_input_snapshots(sobject, context)
# this is for sharing with AssetLoaderWdg
# should only be called once per sobject
self.append_aux_data({'output_snapshots': output_snapshots, \
'input_snapshots': input_snapshots})
if mode == 'output':
return output_snapshots
else:
return input_snapshots
def get_context(self):
context_select = Container.get('context_filter')
context = 'publish'
if context_select:
context = context_select.get_value()
if context == "":
values = context_select.get_option('values')
context = values[len(values)-1]
return context
def get_display(self):
snapshot = self.get_snapshot('output')
label = None
if snapshot:
label = snapshot.get_label()
widget = Widget()
if label:
widget.add(IconWdg(label, eval('IconWdg.%s' %label.upper())))
else:
widget.add('')
return widget
class AssetLibrarySelectionWdg(SelectWdg):
def get_display(self):
search = Search('prod/asset_library')
self.set_search_for_options(search, 'code', 'title')
self.set_option('web_state', 'true')
self.add_empty_option()
select = super(AssetLibrarySelectionWdg, self).get_display()
span = SpanWdg(select)
insert_wdg = IframeInsertLinkWdg(search.get_search_type())
insert_wdg.set_refresh_mode("page")
span.add(insert_wdg)
return span
class SObjectSelectionWdg(SelectWdg):
def get_display(self):
search_type = self.get_option('search_type')
if not search_type:
return
search = Search(search_type)
self.set_search_for_options(search, 'code', 'code')
self.set_option('web_state', 'true')
self.add_empty_option()
select = super(SObjectSelectionWdg, self).get_display()
span = SpanWdg(select)
insert_wdg = IframeInsertLinkWdg(search.get_search_type())
insert_wdg.set_refresh_mode("page")
span.add(insert_wdg)
return span
| epl-1.0 | -2,703,030,322,963,593,000 | 34.652174 | 93 | 0.595799 | false |
k0001/mediasancion | extras/importers/mscrap_import.py | 1 | 18027 | # coding: utf8
# MediaSanción, aplicación web para acceder a los datos públicos de la
# actividad legislativa en Argentina.
# Copyright (C) 2010,2011,2012 Renzo Carbonara <renzo @carbonara .com .ar>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# It's recommended that you import items in the following order:
# 1. LegisladorItem
# 2. ProyectoItem
# 3. FirmaProyecto|TramiteProyectoItem|DictamenProyectoItem
# This program is ugly as shit.
import json
import logging
import optparse
import os
import random
import re
import signal
import sys
import time
import isodate
from datetime import datetime
from pprint import pprint
from django.db.models import Q
from django.db import transaction
from mediasancion.core.models import Partido, Distrito, Bloque, Persona
from mediasancion.congreso.models import (Proyecto, FirmaProyecto, Legislador,
Comision, DictamenProyecto, TramiteProyecto)
logging.basicConfig(level=logging.WARNING)
log = logging.getLogger(os.path.basename(__file__))
AUDIT_ORIGIN = u'mscrap_import:%s' % datetime.utcnow().isoformat()
def store_legislador_item(x):
try:
distrito = Distrito.objects.get(nombre=x['distrito_nombre'])
except Distrito.DoesNotExist:
distrito = Distrito(nombre=x['distrito_nombre'], origin=AUDIT_ORIGIN)
distrito.resource_source = x['resource_source']
distrito.resource_url = x['resource_url']
distrito.save()
if x.get('bloque_nombre'):
try:
bloque = Bloque.objects.get(nombre=x['bloque_nombre'])
except Bloque.DoesNotExist:
bloque = Bloque(nombre=x['bloque_nombre'], origin=AUDIT_ORIGIN)
bloque.resource_source = x['resource_source']
bloque.resource_url = x['resource_url']
bloque.save()
else:
bloque = None
if x.get('partido_nombre'):
try:
partido = Partido.objects.get(nombre=x['partido_nombre'])
except Partido.DoesNotExist:
partido = Partido(nombre=x['partido_nombre'], origin=AUDIT_ORIGIN)
partido.resource_source = x['resource_source']
partido.resource_url = x['resource_url']
partido.save()
else:
partido = None
persona_created = True
try:
persona = Persona.objects.get(nombre=x['nombre'],
apellido=x['apellido'],
legislador__camara=x['camara'],
legislador__bloque=bloque,
legislador__distrito=distrito,
legislador__inicio=isodate.parse_date(x['mandato_inicio']),
legislador__fin=isodate.parse_date(x['mandato_fin']))
persona_created = False
except Persona.DoesNotExist:
try:
persona = Persona.objects.get(nombre=x['nombre'], apellido=x['apellido'])
persona_created = False
except Persona.MultipleObjectsReturned:
log.error((u"This is an expected error! Aparently you have more than one Persona named: "
u"%(apellido)s, %(nombre)s. You'll have to fix this by hand. Set var 'persona' "
u"to the desired Persona instance and continue (c)") % x)
import ipdb; ipdb.set_trace()
except Persona.DoesNotExist:
persona = Persona(nombre=x['nombre'], apellido=x['apellido'], origin=AUDIT_ORIGIN)
try:
assert isinstance(persona, Persona)
except (NameError, AssertionError):
raise RuntimeError(u"Missing Persona, sorry, need to abort.")
persona.email = x.get('email') or None # the 'or None' thing is cuz we don't want empty strings.
persona.telefono = x.get('telefono') or None
persona.foto = x.get('foto_url') or None # <--- makes no sense, but we don't care right now.
persona.save()
if persona_created:
persona.resource_source = x['resource_source']
persona.resource_url = x['resource_url']
persona.resource_id = x['resource_id']
persona.save()
log.debug(u'Created %s Persona' % persona.uuid)
else:
log.debug(u'Updated %s Persona' % persona.uuid)
try:
legislador = Legislador.objects.get(persona=persona,
camara=x['camara'],
bloque=bloque,
distrito=distrito,
inicio=isodate.parse_date(x['mandato_inicio']),
fin=isodate.parse_date(x['mandato_fin']))
log.debug(u'Updated %s Legislador' % legislador.uuid)
except Legislador.DoesNotExist:
legislador = Legislador(persona=persona,
camara=x['camara'],
bloque=bloque,
partido=partido,
distrito=distrito,
inicio=isodate.parse_date(x['mandato_inicio']),
fin=isodate.parse_date(x['mandato_fin']))
legislador.resource_source = x['resource_source']
legislador.resource_url = x['resource_url']
legislador.resource_id = x['resource_id']
legislador.origin = AUDIT_ORIGIN
legislador.save()
log.debug(u'Created %s Legislador' % legislador.uuid)
return True
def store_proyecto_item(x):
try:
p = Proyecto.objects.get(camara_origen_expediente=x['camara_origen_expediente'],
camara_origen=x['camara_origen'])
proyecto_created = False
except Proyecto.DoesNotExist:
p = Proyecto(camara_origen_expediente=x['camara_origen_expediente'],
camara_origen=x['camara_origen'],
origin=AUDIT_ORIGIN)
proyecto_created = True
p.resource_source = x['resource_source']
p.resource_url = x['resource_url']
p.resource_id = x['resource_id']
p.origen = x['origen']
p.camara_revisora = x['camara_revisora'] if 'camara_revisora' in x else None
p.camara_revisora_expediente = x.get('camara_revisora_expediente') or ''
p.reproduccion_expediente = x.get('reproduccion_expediente') or ''
p.ley_numero = x.get('ley_numero')
p.tipo = x['tipo']
p.mensaje = x.get('mensaje_codigo') or ''
p.publicacion_en = x.get('publicacion_en') or ''
p.publicacion_fecha = isodate.parse_date(x['publicacion_fecha'])
p.texto_completo_url = x.get('texto_completo_url', '')
p.texto_mediasancion_senadores_url = x.get('texto_mediasancion_senadores_url', '')
p.texto_mediasancion_diputados_url = x.get('texto_mediasancion_diputados_url', '')
p.sumario = x['sumario']
p.save()
cd = x.get('comisiones_diputados', ())
for s in cd:
s = s.capitalize()
try:
c = Comision.objects.get(camara='D', nombre__iexact=s)
except Comision.DoesNotExist:
c = Comision(camara='D', nombre=s, origin=AUDIT_ORIGIN)
c.resource_source = x['resource_source']
c.resource_url = x['resource_url']
c.save()
if not c in p.comisiones.all():
p.comisiones.add(c)
for s in x.get('comisiones_senadores', ()):
s = s.capitalize()
try:
c = Comision.objects.get(camara='S', nombre__iexact=s)
except Comision.DoesNotExist:
c = Comision(camara='S', nombre=s, origin=AUDIT_ORIGIN)
c.resource_source = x['resource_source']
c.resource_url = x['resource_url']
c.save()
if not c in p.comisiones.all():
p.comisiones.add(c)
if proyecto_created:
log.debug(u'Created %s Proyecto' % p.uuid)
return True
else:
log.debug(u'Updated %s Proyecto' % p.uuid)
return True
def store_firmaproyecto_item(x):
try:
proyecto = Proyecto.objects.get(camara_origen_expediente=x['proyecto_camara_origen_expediente'],
camara_origen=x['proyecto_camara_origen'])
except Proyecto.DoesNotExist:
return False
if x.get('firmante_bloque'):
try:
bloque = Bloque.objects.get(nombre=x['firmante_bloque'])
except Bloque.DoesNotExist:
bloque = Bloque(nombre=x['firmante_bloque'], origin=AUDIT_ORIGIN)
bloque.resource_source = x['resource_source']
bloque.resource_url = x['resource_url']
bloque.save()
else:
bloque = None
if x.get('firmante_distrito'):
try:
distrito = Distrito.objects.get(nombre=x['firmante_distrito'])
except Distrito.DoesNotExist:
distrito = Distrito(nombre=x['firmante_distrito'], origin=AUDIT_ORIGIN)
distrito.resource_source = x['resource_source']
distrito.resource_url = x['resource_url']
distrito.save()
else:
distrito = None
poder =x['firmante_poder']
firmante_special = x.get('firmante_special') or u''
if not firmante_special:
firmante_apellido = x.get('firmante_apellido') or u''
firmante_nombre = x.get('firmante_nombre') or u''
try:
persona = Persona.objects.get(apellido=firmante_apellido,
nombre=firmante_nombre)
except Persona.DoesNotExist:
persona = Persona(apellido=firmante_apellido,
nombre=firmante_nombre,
origin=AUDIT_ORIGIN)
persona.resource_source = x['resource_source']
persona.resource_url = x['resource_url']
persona.save()
try:
legislador = Legislador.objects.get(persona=persona,
bloque=bloque,
distrito=distrito)
except Legislador.DoesNotExist:
# if legislador created, inicio and fin will be missing. Whatever.
legislador = Legislador(persona=persona,
bloque=bloque,
distrito=distrito,
camara='?',
origin=AUDIT_ORIGIN)
legislador.resource_source = x['resource_source']
legislador.resource_url = x['resource_url']
legislador.save()
else:
persona = legislador = None
try:
fp = FirmaProyecto.objects.get(proyecto=proyecto,
legislador=legislador,
poder=poder,
poder_who=firmante_special,
tipo_firma=x['tipo_firma'])
log.debug(u'Updated %s FirmaProyecto' % fp.uuid)
except FirmaProyecto.DoesNotExist:
fp = FirmaProyecto(proyecto=proyecto,
legislador=legislador,
poder=poder,
poder_who=firmante_special,
tipo_firma=x['tipo_firma'],
origin=AUDIT_ORIGIN)
fp.resource_source = x['resource_source']
fp.resource_url = x['resource_url']
fp.resource_id = x.get('resource_id')
fp.save()
log.debug(u'Created %s FirmaProyecto' % fp.uuid)
return True
def store_dictamenproyecto_item(x):
try:
proyecto = Proyecto.objects.get(camara_origen_expediente=x['proyecto_camara_origen_expediente'],
camara_origen=x['proyecto_camara_origen'])
except Proyecto.DoesNotExist:
return False
x_fecha = isodate.parse_date(x['fecha']) if 'fecha' in x else None
try:
dp = DictamenProyecto.objects.get(proyecto=proyecto,
camara=x['camara'],
index=int(x['index']))
except DictamenProyecto.DoesNotExist:
dp = DictamenProyecto(proyecto=proyecto,
camara=x['camara'],
index=int(x['index']),
fecha=x_fecha,
orden_del_dia=(x.get('orden_del_dia') or u''),
descripcion=(x.get('descripcion') or u''),
resultado=(x.get('resultado') or u''),
origin=AUDIT_ORIGIN)
dp.resource_source = x['resource_source']
dp.resource_url = x['resource_url']
dp.resource_id = x.get('resource_id')
dp.save()
log.debug(u'Created %s DictamenProyecto' % dp.uuid)
else:
dp_changed = False
if dp.resultado and x.get('resultado') and dp.resultado != x.get('resultado'):
dp.resultado = x.get('resultado', u'')
dp_changed = True
if dp.descripcion and x.get('descripcion') and dp.descripcion != x.get('descripcion'):
dp.descripcion = x.get('descripcion', u'')
dp_changed = True
if dp_changed:
dp.save()
log.debug(u'Updated %s DictamenProyecto' % dp.uuid)
return True
def store_tramiteproyecto_item(x):
try:
proyecto = Proyecto.objects.get(camara_origen_expediente=x['proyecto_camara_origen_expediente'],
camara_origen=x['proyecto_camara_origen'])
except Proyecto.DoesNotExist:
return False
x_fecha = isodate.parse_date(x['fecha']) if 'fecha' in x else None
try:
tp = TramiteProyecto.objects.get(proyecto=proyecto,
camara=x['camara'],
index=int(x['index']))
except TramiteProyecto.DoesNotExist:
tp = TramiteProyecto(proyecto=proyecto,
camara=x['camara'],
index=int(x['index']),
fecha=x_fecha,
descripcion=(x.get('descripcion') or u''),
resultado=(x.get('resultado') or u''),
origin=AUDIT_ORIGIN)
tp.resource_source = x['resource_source']
tp.resource_url = x['resource_url']
tp.resource_id = x.get('resource_id')
tp.save()
log.debug(u'Created %s TramiteProyecto' % tp.uuid)
else:
tp_changed = False
if tp.resultado and x.get('resultado') and tp.resultado != x.get('resultado'):
tp.resultado = x.get('resultado', u'')
tp_changed = True
if tp.descripcion and x.get('descripcion') and tp.descripcion != x.get('descripcion'):
tp.descripcion = x.get('descripcion', u'')
tp_changed = True
if tp_changed:
tp.save()
log.debug(u'Updated %s TramiteProyecto' % tp.uuid)
return True
@transaction.commit_manually
def store_item(t, x):
ts = { 'LegisladorItem': store_legislador_item,
'ProyectoItem': store_proyecto_item,
'FirmaProyectoItem': store_firmaproyecto_item,
'DictamenProyectoItem': store_dictamenproyecto_item,
'TramiteProyectoItem': store_tramiteproyecto_item }
try:
_store = ts[t]
except KeyError:
log.warning(u"Skiping %s" % t)
return
try:
s = _store(x)
except:
transaction.rollback()
raise
if s:
transaction.commit()
return True
else:
log.debug(u"Couldn't store %s" % t)
transaction.rollback()
return False
def store_raw(line):
t, x = json.loads(line)
return store_item(t, x)
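
# Hedged example (editor's addition): each input file is expected to hold one
# JSON document per line, a two-element array [item_type, item_fields] as
# consumed by store_raw above. Field names follow the lookups in the
# store_*_item functions; the concrete values are made up.
#
#   ["ProyectoItem", {"resource_source": "hcdn", "resource_url": "http://...",
#                     "resource_id": "123", "camara_origen": "D",
#                     "camara_origen_expediente": "0001-D-2011", "origen": "D",
#                     "tipo": "LEY", "publicacion_fecha": "2011-03-01",
#                     "sumario": "..."}]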
def main_store(lines):
log.info('Storing...')
for line in lines:
if not store_raw(line):
return
def _sighandle_pdb(sig, frame):
import pdb
pdb.Pdb().set_trace(frame)
signal.signal(signal.SIGUSR1, _sighandle_pdb)
def parse_args():
parser = optparse.OptionParser(usage=u"usage: %prog [options] FILE [FILE..]")
parser.add_option('-v', '--verbose',
action='store_true', dest='verbose',
help=u"verbose output")
parser.add_option('--debug',
action='store_true', dest='debug',
help=u"debug output")
parser.add_option('--wtf',
action='store_true', dest='wtf',
help=u"enable WTF post-mortem debugger")
opts, args = parser.parse_args()
if not args:
parser.print_help()
sys.exit(1)
return opts, args
if __name__ == '__main__':
opts, args = parse_args()
if opts.debug:
log.setLevel(logging.DEBUG)
elif opts.verbose:
log.setLevel(logging.INFO)
log.info('PID: %d' % os.getpid())
log.info('SIGUSR1: Start debugger.')
log.info('SIGUSR2: Print status.')
if opts.wtf:
log.info(u"WTF Post-mortem debugger enabled")
try:
for fname in args:
with open(fname, 'rb') as f: # we use ascii-only input (JSON)
log.info(u"Opening %s..." % fname)
main_store(f)
except Exception:
log.error(u"Something bad happened!!! Nothing will saved.")
if opts.wtf:
from wtf import WTF
WTF()
else:
raise
| agpl-3.0 | 3,245,439,087,975,844,000 | 35.265594 | 104 | 0.563249 | false |
flisboac/uksat | wafbuild.py | 1 | 3974 | #!/usr/bin/env python
#from waflib.Configure import conf
#from waflib.TaskGen import feature
def options(ctx):
ctx.add_option('-B', '--build', action='store', default="release",
help='Specifies which build to run.')
ctx.add_option('--list-builds', action='store_true',
help='Lists all available builds and their targets (NOT IMPLEMENTED YET).')
target = _get_all_all_target(ctx)
tools = _get_tools(ctx, {'all': target})
for tool in tools:
ctx.load(tool['tool'], **tool)
def configure(ctx):
targets = _get_build_targets(ctx, include_all = False)
tools = _get_tools(ctx, targets)
programs = _get_programs(ctx, targets)
for tool in tools:
ctx.load(tool['tool'])
for program in programs:
ctx.find_program(**program)
ctx.env.build = ctx.options.build
def build(ctx):
targets = _get_build_targets(ctx)
for targetname in targets:
ctx(**targets[targetname])
def _get_list(ctx, targets, key, defaultkey):
values = {}
for targetname in targets:
target = targets[targetname]
valuelist = target.get(key, [])
if type(valuelist) is list or type(valuelist) is tuple:
for value in valuelist:
if type(value) is dict:
values[value[defaultkey]] = value
#values.append(value)
else:
values[value] = {defaultkey: value}
#values.append({defaultkey: value})
else:
values[valuelist] = {defaultkey: valuelist}
#values.append({defaultkey: valuelist})
return list(values.values())
def _get_tools(ctx, targets):
return _get_list(ctx, targets, 'load', defaultkey = 'tool')
def _get_programs(ctx, targets):
return _get_list(ctx, targets, 'find_program', defaultkey = 'filename')
def _get_all_all_target(ctx):
targets = _get_build_targets(ctx, 'all', include_all = True)
all_target = targets['all'] or {}
return all_target
def _get_build_targets(ctx, buildname = None, include_all = False):
from waflib import Context
if not buildname:
try:
buildname = ctx.env.build
if not buildname: buildname = ctx.options.build
except:
buildname = ctx.options.build
try:
builds = Context.g_module.BUILDS
except:
builds = {}
try:
allbuilddata = builds['all']
except:
allbuilddata = {}
# It's mandatory to have the build declared.
try:
targetbuilddata = builds[buildname]
except:
raise Exception("Build '" + buildname + "' is not declared.")
targetnames = set()
targets = {}
for targetname in allbuilddata: targetnames.add(targetname)
for targetname in targetbuilddata: targetnames.add(targetname)
for targetname in targetnames:
if include_all or targetname != 'all':
targets[targetname] = _get_build_target(ctx, targetname, buildname)
return targets
def _get_build_target(ctx, targetname, buildname = None):
from copy import copy
from waflib import Context
if not buildname:
try:
buildname = ctx.env.build
if not buildname: buildname = ctx.options.build
except:
buildname = ctx.options.build
try:
builds = Context.g_module.BUILDS
except:
raise Exception("BUILDS dictionary is not declared.")
try:
allbuilddata = builds['all']
except:
allbuilddata = {}
try:
allalldata = allbuilddata['all']
except:
allalldata = {}
try:
alldata = allbuilddata[targetname]
except:
alldata = {}
# It's mandatory to have the build declared.
try:
targetbuilddata = builds[buildname]
except:
targetbuilddata = {}
try:
targetalldata = targetbuilddata['all']
except:
targetalldata = {}
try:
targetdata = targetbuilddata[targetname]
except:
targetdata = {}
#if not allbuilddata and not targetbuilddata:
# raise Exception("Build '" + buildname + "' is not declared.")
data = copy(allalldata)
for key in alldata: data[key] = alldata[key]
for key in targetalldata: data[key] = targetalldata[key]
for key in targetdata: data[key] = targetdata[key]
if not data:
raise Exception("No target '" + targetname + "' for build '" + buildname + "'.")
else:
if 'target' not in data:
data['target'] = targetname
return data
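
# Hedged example (editor's addition): the helpers above expect the wscript to
# define a BUILDS dictionary indexed first by build name ('all' holds shared
# settings) and then by target name ('all' holds per-build defaults). A minimal
# layout that satisfies those lookups might be:
#
#   BUILDS = {
#       'all': {
#           'all':   {'load': 'compiler_cxx', 'features': 'cxx cxxprogram'},
#           'uksat': {'source': ['src/main.cpp'], 'target': 'uksat'},
#       },
#       'release': {'all': {'cxxflags': ['-O2']}},
#       'debug':   {'all': {'cxxflags': ['-g']}},
#   }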
| mit | 3,694,323,395,176,270,000 | 23.231707 | 82 | 0.694766 | false |
joshuamorton/calc_three_proj | plot.py | 1 | 1969 | from matplotlib import pyplot as plt
import numpy as np
import iterative
import pascal
import power
plt.style.use('ggplot')
qr = []
lu = []
for i in range(2, 13):
q = pascal.solve_qr_b(pascal.pascal_matrix(i), pascal.harmonic_vector(i))
l = pascal.solve_lu_b(pascal.pascal_matrix(i), pascal.harmonic_vector(i))
qr.append(q)
lu.append(l)
plt.subplot(1, 1, 1)
x = range(2, 13)
y = [i[1] for i in qr]
z = [i[2] for i in qr]
plt.plot(x, y, color='blue') # error from householder
plt.plot(x, z, color='green') # solution error of qr
plt.yscale('log')
plt.savefig('./qr_err.png')
y = [i[1] for i in lu]
z = [i[2] for i in lu]
plt.clf()
plt.plot(x, y, color='blue')
plt.plot(x, z, color='green')
plt.yscale('log')
plt.savefig('./lu_err.png')
plt.clf()
jacobi, gs = iterative.generate_data()
j_vals = [i[1] for i in jacobi]
g_vals = [i[1] for i in gs]
jacobi_approx = sum(j_vals) / len(j_vals) # 2c
gs_approx = sum(g_vals) / len(g_vals)
print("Averages, jacobi then gauss-seidel, then iterations")
print(jacobi_approx)
print(gs_approx)
print(float(sum(j[2] for j in jacobi))/sum(g[2] for g in gs))
exact = np.array([9.0/190, 28.0/475, 33.0/475]).reshape(3,1)
errs_jacobi = [pascal.norm_inf(j-exact) for j in j_vals]
errs_gs = [pascal.norm_inf(g-exact) for g in g_vals]
plt.plot([j[2] for j in jacobi], errs_jacobi, 'ko', [g[2] for g in gs], errs_gs, 'bo')
plt.savefig('./iterative_err.png')
plt.clf()
powers = power.generate_data()
ds = [p[0] for p in powers if p[0] is not None]
ts = [p[1] for p in powers if p[1] is not None]
tis = [p[2] for p in powers if p[2] is not None]
maxs = [p[3] for p in powers if p[3] is not None]
mins = [p[4] for p in powers if p[4] is not None]
big = max(maxs)
small = max(mins)
maxs = [float(m)/big for m in maxs]
mins = [float(m)/small for m in mins]
plt.scatter(ds, ts, c=maxs)
plt.savefig('./power_mat.png')
plt.clf()
plt.scatter([1.0/d for d in ds], tis, c=mins)
plt.savefig('./power_inv.png')
plt.clf() | mit | -2,766,495,094,339,962,000 | 25.621622 | 90 | 0.648045 | false |
berkmancenter/mediacloud | apps/extract-and-vector/tests/python/extract_and_vector/dbi/stories/stories/test_add_story.py | 1 | 1727 | from mediawords.dbi.stories.stories import add_story
from .setup_test_stories import TestStories
class TestAddStory(TestStories):
def test_add_story(self):
"""Test add_story()."""
media_id = self.test_medium['media_id']
feeds_id = self.test_feed['feeds_id']
# Basic story
story = {
'media_id': media_id,
'url': 'http://add.story/',
'guid': 'http://add.story/',
'title': 'test add story',
'description': 'test add story',
'publish_date': '2016-10-15 08:00:00',
'collect_date': '2016-10-15 10:00:00',
'full_text_rss': True,
}
added_story = add_story(db=self.db, story=story, feeds_id=feeds_id)
assert added_story
assert 'stories_id' in added_story
assert story['url'] == added_story['url']
assert added_story['full_text_rss'] is True
feeds_stories_tag_mapping = self.db.select(
table='feeds_stories_map',
what_to_select='*',
condition_hash={
'stories_id': added_story['stories_id'],
'feeds_id': feeds_id,
}
).hashes()
assert len(feeds_stories_tag_mapping) == 1
story_urls = self.db.query(
"select * from story_urls where stories_id = %(a)s",
{'a': added_story['stories_id']}).hashes()
assert len(story_urls) == 1
assert story_urls[0]['url'] == added_story['url']
# Try adding a duplicate story
dup_story = add_story(db=self.db, story=story, feeds_id=feeds_id)
assert dup_story is not None
assert dup_story['stories_id'] == added_story['stories_id']
| agpl-3.0 | -8,725,383,054,378,140,000 | 34.244898 | 75 | 0.544876 | false |
RobinQuetin/CAIRIS-web | cairis/cairis/DocumentReferenceParameters.py | 1 | 1320 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from ObjectCreationParameters import ObjectCreationParameters
class DocumentReferenceParameters(ObjectCreationParameters):
def __init__(self,refName,docName,cName,docExc):
ObjectCreationParameters.__init__(self)
self.theName = refName
self.theDocName = docName
self.theContributor = cName
self.theExcerpt = docExc
def name(self): return self.theName
def document(self): return self.theDocName
def contributor(self): return self.theContributor
def description(self): return self.theExcerpt
| apache-2.0 | -8,654,469,905,426,422,000 | 40.25 | 63 | 0.768182 | false |
fonttools/skia-pathops | ci/download_libskia.py | 1 | 2713 | import argparse
import glob
import logging
import platform
import os
import shutil
import struct
import tempfile
__requires__ = ["github_release"]
import github_release
GITHUB_REPO = "fonttools/skia-builder"
ASSET_TEMPLATE = "libskia-{plat}-{arch}.zip"
DOWNLOAD_DIR = os.path.join("build", "download")
CPU_ARCH = "x64" if struct.calcsize("P") * 8 == 64 else "x86"
PLATFORM_TAGS = {"Linux": "linux", "Darwin": "mac", "Windows": "win"}
logger = logging.getLogger()
def get_latest_release(repo):
releases = github_release.get_releases(repo)
if not releases:
raise ValueError("no releases found for {!r}".format(repo))
return releases[0]
def download_unpack_assets(repo, tag, asset_name, dest_dir):
dest_dir = os.path.abspath(dest_dir)
os.makedirs(dest_dir, exist_ok=True)
with tempfile.TemporaryDirectory() as tmpdir:
curdir = os.getcwd()
os.chdir(tmpdir)
try:
downloaded = github_release.gh_asset_download(repo, tag, asset_name)
except:
raise
else:
if not downloaded:
raise ValueError(
"no assets found for {0!r} with name {1!r}".format(tag, asset_name)
)
for archive in glob.glob(asset_name):
shutil.unpack_archive(archive, dest_dir)
finally:
os.chdir(curdir)
if __name__ == "__main__":
logging.basicConfig(level="INFO")
parser = argparse.ArgumentParser()
parser.add_argument(
"-p",
"--platform",
default=PLATFORM_TAGS.get(platform.system(), "win"),
choices=["win", "mac", "linux"],
help="The desired platform (default: %(default)s)",
)
parser.add_argument(
"-a",
"--cpu-arch",
default=CPU_ARCH,
help="The desired CPU architecture (default: %(default)s)",
choices=["x86", "x64"],
)
parser.add_argument(
"-d",
"--download-dir",
default=DOWNLOAD_DIR,
help="directory where to download libskia (default: %(default)s)",
)
parser.add_argument(
"-t", "--tag-name", default=None, help="release tag name (default: latest)"
)
args = parser.parse_args()
tag_name = args.tag_name
if tag_name is None:
latest_release = get_latest_release(GITHUB_REPO)
tag_name = latest_release["tag_name"]
asset_name = ASSET_TEMPLATE.format(plat=args.platform, arch=args.cpu_arch)
logger.info(
"Downloading '%s' from '%s' at tag '%s' to %s",
asset_name,
GITHUB_REPO,
tag_name,
args.download_dir,
)
download_unpack_assets(GITHUB_REPO, tag_name, asset_name, args.download_dir)
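
# Hedged example invocation (editor's addition); when --tag-name is omitted the
# script falls back to the latest skia-builder release:
#
#   python ci/download_libskia.py --platform linux --cpu-arch x64 \
#       --download-dir build/download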
| bsd-3-clause | 2,709,180,981,875,565,000 | 27.260417 | 87 | 0.596756 | false |
reinbach/django-machina | tests/integration/templatetags/test_forum_tags.py | 1 | 4090 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from django.contrib.sessions.middleware import SessionMiddleware
from django.template import Context
from django.template.base import Template
from django.template.loader import render_to_string
from django.test.client import RequestFactory
from machina.apps.forum_permission.middleware import ForumPermissionMiddleware
from machina.core.db.models import get_model
from machina.core.loading import get_class
from machina.test.factories import PostFactory
from machina.test.factories import UserFactory
from machina.test.factories import create_category_forum
from machina.test.factories import create_forum
from machina.test.factories import create_topic
Forum = get_model('forum', 'Forum')
Post = get_model('forum_conversation', 'Post')
Topic = get_model('forum_conversation', 'Topic')
ForumVisibilityContentTree = get_class('forum.visibility', 'ForumVisibilityContentTree')
PermissionHandler = get_class('forum_permission.handler', 'PermissionHandler')
assign_perm = get_class('forum_permission.shortcuts', 'assign_perm')
@pytest.mark.django_db
class TestForumLastPostTag(object):
@pytest.fixture(autouse=True)
def setup(self):
self.loadstatement = '{% load forum_tags %}'
self.user = UserFactory.create()
# Permission handler
self.perm_handler = PermissionHandler()
# Set up a top-level category
self.top_level_cat = create_category_forum()
# Set up some forums
self.forum_1 = create_forum(parent=self.top_level_cat)
self.forum_2 = create_forum(parent=self.top_level_cat)
# Set up some topics and posts
self.forum_1_topic = create_topic(forum=self.forum_1, poster=self.user)
self.forum_2_topic = create_topic(forum=self.forum_2, poster=self.user)
self.post_1 = PostFactory.create(topic=self.forum_1_topic, poster=self.user)
self.post_2 = PostFactory.create(topic=self.forum_2_topic, poster=self.user)
# Assign some permissions
assign_perm('can_read_forum', self.user, self.top_level_cat)
assign_perm('can_read_forum', self.user, self.forum_1)
def test_can_provide_the_last_post_of_a_forum(self):
# Setup
t = Template(self.loadstatement + '{% get_forum_last_post forum user as var %}')
c = Context({'forum': self.forum_1, 'user': self.user})
# Run
rendered = t.render(c)
# Check
assert rendered == ''
assert c['var'] == self.post_1
@pytest.mark.django_db
class TestForumListTag(object):
@pytest.fixture(autouse=True)
def setup(self):
self.loadstatement = '{% load forum_tags %}'
self.request_factory = RequestFactory()
self.user = UserFactory.create()
# Set up a top-level category
self.top_level_cat = create_category_forum()
# Set up some forums
self.forum_1 = create_forum(parent=self.top_level_cat)
self.forum_2 = create_forum(parent=self.top_level_cat)
def test_can_render_a_list_of_forums_according_to_their_minimum_tree_level(self):
# Setup
forums = Forum.objects.all()
request = self.request_factory.get('/')
middleware = SessionMiddleware()
middleware.process_request(request)
request.session.save()
request.user = self.user
ForumPermissionMiddleware().process_request(request)
t = Template(self.loadstatement + '{% forum_list forums %}')
c = Context({'forums': ForumVisibilityContentTree.from_forums(forums), 'request': request})
expected_out = render_to_string(
'machina/forum/forum_list.html',
{
'forum_contents': ForumVisibilityContentTree.from_forums(forums),
'user': self.user,
'root_level': 0,
'root_level_middle': 1,
'root_level_sub': 2,
}
)
# Run
rendered = t.render(c)
# Check
assert rendered != ''
assert rendered == expected_out
| bsd-3-clause | -8,260,004,356,734,359,000 | 36.181818 | 99 | 0.662592 | false |
biothings/biothings_explorer | biothings_explorer/smartapi_kg/__init__.py | 1 | 1114 | import sys
from .dataload import load_specs
from .smartapi_parser import SmartAPIParser
from .filter import filterOps
import traceback
class MetaKG:
def __init__(self):
self.ops = []
self.parser = SmartAPIParser()
def populateOpsFromSpecs(self, specs, verbose=False):
"""Populate meta-kg operations based on SmartAPI specifications"""
self.ops = []
for spec in specs:
try:
self.parser.load_spec(spec)
self.ops += self.parser.fetch_endpoint_info()
except:
if verbose:
print("Unexpected error:", sys.exc_info()[0])
print(
"failed to load the following spec {}".format(spec.get("info"))
)
def constructMetaKG(self, source="remote", tag="translator"):
"""Construct API Meta Knowledge Graph based on SmartAPI Specifications."""
specs = load_specs(source=source, tag=tag)
self.populateOpsFromSpecs(specs)
def filter(self, criteria):
return filterOps(self.ops, criteria)
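
# Hedged usage sketch (editor's addition): build the meta knowledge graph from
# the remote SmartAPI registry and query its operations. The criteria keys
# passed to filter() are illustrative assumptions about what filterOps accepts.
def example_usage():
    kg = MetaKG()
    kg.constructMetaKG(source="remote", tag="translator")
    return kg.filter({"input_type": "Gene", "output_type": "ChemicalSubstance"})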
| apache-2.0 | 8,065,000,017,719,369,000 | 31.764706 | 87 | 0.590664 | false |
dbcli/vcli | vcli/packages/sqlcompletion.py | 1 | 13865 | from __future__ import print_function
import sys
import sqlparse
from sqlparse.sql import Comparison, Identifier, Where
from .parseutils import last_word, extract_tables, find_prev_keyword
from .vspecial import parse_special_command
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str
else:
string_types = basestring
def suggest_type(full_text, text_before_cursor):
"""Takes the full_text that is typed so far and also the text before the
cursor to suggest completion type and scope.
Returns a tuple with a type of entity ('table', 'column' etc) and a scope.
A scope for a column category will be a list of tables.
"""
word_before_cursor = last_word(text_before_cursor,
include='many_punctuations')
identifier = None
# If we've partially typed a word then word_before_cursor won't be an empty
# string. In that case we want to remove the partially typed string before
# sending it to the sqlparser. Otherwise the last token will always be the
# partially typed string which renders the smart completion useless because
# it will always return the list of keywords as completion.
if word_before_cursor:
if word_before_cursor[-1] == '(' or word_before_cursor[0] == '\\':
parsed = sqlparse.parse(text_before_cursor)
else:
parsed = sqlparse.parse(
text_before_cursor[:-len(word_before_cursor)])
# word_before_cursor may include a schema qualification, like
# "schema_name.partial_name" or "schema_name.", so parse it
# separately
p = sqlparse.parse(word_before_cursor)[0]
if p.tokens and isinstance(p.tokens[0], Identifier):
identifier = p.tokens[0]
else:
parsed = sqlparse.parse(text_before_cursor)
if len(parsed) > 1:
# Multiple statements being edited -- isolate the current one by
# cumulatively summing statement lengths to find the one that bounds the
# current position
current_pos = len(text_before_cursor)
stmt_start, stmt_end = 0, 0
for statement in parsed:
stmt_len = len(statement.to_unicode())
stmt_start, stmt_end = stmt_end, stmt_end + stmt_len
if stmt_end >= current_pos:
text_before_cursor = full_text[stmt_start:current_pos]
full_text = full_text[stmt_start:]
break
elif parsed:
# A single statement
statement = parsed[0]
else:
# The empty string
statement = None
# Check for special commands and handle those separately
if statement:
# Be careful here because trivial whitespace is parsed as a statement,
# but the statement won't have a first token
tok1 = statement.token_first()
if tok1 and tok1.value == '\\':
return suggest_special(text_before_cursor)
last_token = statement and statement.token_prev(len(statement.tokens)) or ''
return suggest_based_on_last_token(last_token, text_before_cursor,
full_text, identifier)
def suggest_special(text):
text = text.lstrip()
cmd, _, arg = parse_special_command(text)
if cmd == text:
# Trying to complete the special command itself
return [{'type': 'special'}]
if cmd in ('\\c', '\\connect'):
return [{'type': 'database'}]
if cmd == '\\dn':
return [{'type': 'schema'}]
if arg:
# Try to distinguish "\d name" from "\d schema.name"
# Note that this will fail to obtain a schema name if wildcards are
# used, e.g. "\d schema???.name"
parsed = sqlparse.parse(arg)[0].tokens[0]
try:
schema = parsed.get_parent_name()
except AttributeError:
schema = None
else:
schema = None
if cmd[1:] == 'd':
        # \d can describe tables or views
if schema:
return [{'type': 'table', 'schema': schema},
{'type': 'view', 'schema': schema}]
else:
return [{'type': 'schema'},
{'type': 'table', 'schema': []},
{'type': 'view', 'schema': []}]
elif cmd[1:] in ('dt', 'dv', 'df', 'dT'):
rel_type = {'dt': 'table',
'dv': 'view',
'df': 'function',
'dT': 'datatype',
}[cmd[1:]]
if schema:
return [{'type': rel_type, 'schema': schema}]
else:
return [{'type': 'schema'},
{'type': rel_type, 'schema': []}]
if cmd in ['\\n', '\\ns', '\\nd']:
return [{'type': 'namedquery'}]
return [{'type': 'keyword'}, {'type': 'special'}]
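

# Illustrative results for suggest_special (a sketch, not executed here):
#   suggest_special('\\dt public.foo') -> [{'type': 'table', 'schema': 'public'}]
#   suggest_special('\\c mydb')        -> [{'type': 'database'}]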
def suggest_based_on_last_token(token, text_before_cursor, full_text, identifier):
if isinstance(token, string_types):
token_v = token.lower()
elif isinstance(token, Comparison):
# If 'token' is a Comparison type such as
# 'select * FROM abc a JOIN def d ON a.id = d.'. Then calling
# token.value on the comparison type will only return the lhs of the
# comparison. In this case a.id. So we need to do token.tokens to get
# both sides of the comparison and pick the last token out of that
# list.
token_v = token.tokens[-1].value.lower()
elif isinstance(token, Where):
# sqlparse groups all tokens from the where clause into a single token
# list. This means that token.value may be something like
# 'where foo > 5 and '. We need to look "inside" token.tokens to handle
# suggestions in complicated where clauses correctly
prev_keyword, text_before_cursor = find_prev_keyword(text_before_cursor)
return suggest_based_on_last_token(prev_keyword, text_before_cursor,
full_text, identifier)
elif isinstance(token, Identifier):
# If the previous token is an identifier, we can suggest datatypes if
# we're in a parenthesized column/field list, e.g.:
# CREATE TABLE foo (Identifier <CURSOR>
# CREATE FUNCTION foo (Identifier <CURSOR>
# If we're not in a parenthesized list, the most likely scenario is the
# user is about to specify an alias, e.g.:
# SELECT Identifier <CURSOR>
# SELECT foo FROM Identifier <CURSOR>
prev_keyword, _ = find_prev_keyword(text_before_cursor)
if prev_keyword and prev_keyword.value == '(':
# Suggest datatypes
return suggest_based_on_last_token('type', text_before_cursor,
full_text, identifier)
else:
return [{'type': 'keyword'}]
else:
token_v = token.value.lower()
if not token:
return [{'type': 'keyword'}, {'type': 'special'}]
elif token_v.endswith('('):
p = sqlparse.parse(text_before_cursor)[0]
if p.tokens and isinstance(p.tokens[-1], Where):
# Four possibilities:
# 1 - Parenthesized clause like "WHERE foo AND ("
# Suggest columns/functions
# 2 - Function call like "WHERE foo("
# Suggest columns/functions
# 3 - Subquery expression like "WHERE EXISTS ("
# Suggest keywords, in order to do a subquery
# 4 - Subquery OR array comparison like "WHERE foo = ANY("
# Suggest columns/functions AND keywords. (If we wanted to be
# really fancy, we could suggest only array-typed columns)
column_suggestions = suggest_based_on_last_token('where',
text_before_cursor, full_text, identifier)
# Check for a subquery expression (cases 3 & 4)
where = p.tokens[-1]
prev_tok = where.token_prev(len(where.tokens) - 1)
if isinstance(prev_tok, Comparison):
# e.g. "SELECT foo FROM bar WHERE foo = ANY("
prev_tok = prev_tok.tokens[-1]
prev_tok = prev_tok.value.lower()
if prev_tok == 'exists':
return [{'type': 'keyword'}]
elif prev_tok in ('any', 'some', 'all'):
return column_suggestions + [{'type': 'keyword'}]
elif prev_tok == 'in':
# Technically, we should suggest columns AND keywords, as
# per case 4. However, IN is different from ANY, SOME, ALL
# in that it can accept a *list* of columns, or a subquery.
                # But suggesting keywords for "SELECT * FROM foo WHERE bar IN
# (baz, qux, " would be overwhelming. So we special case 'IN'
# to not suggest keywords.
return column_suggestions
else:
return column_suggestions
# Get the token before the parens
prev_tok = p.token_prev(len(p.tokens) - 1)
if prev_tok and prev_tok.value and prev_tok.value.lower() == 'using':
# tbl1 INNER JOIN tbl2 USING (col1, col2)
tables = extract_tables(full_text)
# suggest columns that are present in more than one table
return [{'type': 'column', 'tables': tables, 'drop_unique': True}]
elif p.token_first().value.lower() == 'select':
            # If the lparen is preceded by a space, chances are we're about to
# do a sub-select.
if last_word(text_before_cursor,
'all_punctuations').startswith('('):
return [{'type': 'keyword'}]
# We're probably in a function argument list
return [{'type': 'column', 'tables': extract_tables(full_text)}]
elif token_v in ('set', 'by', 'distinct'):
return [{'type': 'column', 'tables': extract_tables(full_text)}]
elif token_v in ('select', 'where', 'having'):
# Check for a table alias or schema qualification
parent = (identifier and identifier.get_parent_name()) or []
if parent:
tables = extract_tables(full_text)
tables = [t for t in tables if identifies(parent, *t)]
return [{'type': 'column', 'tables': tables},
{'type': 'table', 'schema': parent},
{'type': 'view', 'schema': parent},
{'type': 'function', 'schema': parent}]
else:
return [{'type': 'column', 'tables': extract_tables(full_text)},
{'type': 'function', 'schema': []}]
elif (token_v.endswith('join') and token.is_keyword) or (token_v in
('copy', 'from', 'update', 'into', 'describe', 'truncate')):
schema = (identifier and identifier.get_parent_name()) or []
# Suggest tables from either the currently-selected schema or the
# public schema if no schema has been specified
suggest = [{'type': 'table', 'schema': schema}]
if not schema:
# Suggest schemas
suggest.insert(0, {'type': 'schema'})
# Only tables can be TRUNCATED, otherwise suggest views
if token_v != 'truncate':
suggest.append({'type': 'view', 'schema': schema})
return suggest
elif token_v in ('table', 'view', 'function'):
# E.g. 'DROP FUNCTION <funcname>', 'ALTER TABLE <tablname>'
rel_type = token_v
schema = (identifier and identifier.get_parent_name()) or []
if schema:
return [{'type': rel_type, 'schema': schema}]
else:
return [{'type': 'schema'}, {'type': rel_type, 'schema': []}]
elif token_v == 'on':
tables = extract_tables(full_text) # [(schema, table, alias), ...]
parent = (identifier and identifier.get_parent_name()) or []
if parent:
# "ON parent.<suggestion>"
# parent can be either a schema name or table alias
tables = [t for t in tables if identifies(parent, *t)]
return [{'type': 'column', 'tables': tables},
{'type': 'table', 'schema': parent},
{'type': 'view', 'schema': parent},
{'type': 'function', 'schema': parent}]
else:
# ON <suggestion>
# Use table alias if there is one, otherwise the table name
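            # e.g. "SELECT * FROM abc a JOIN def d ON " yields aliases ['a', 'd']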
aliases = [t[2] or t[1] for t in tables]
return [{'type': 'alias', 'aliases': aliases}]
elif token_v in ('c', 'use', 'database', 'template'):
# "\c <db", "use <db>", "DROP DATABASE <db>",
# "CREATE DATABASE <newdb> WITH TEMPLATE <db>"
return [{'type': 'database'}]
elif token_v == 'schema':
# DROP SCHEMA schema_name
return [{'type': 'schema'}]
elif token_v.endswith(',') or token_v == '=':
prev_keyword, text_before_cursor = find_prev_keyword(text_before_cursor)
if prev_keyword:
return suggest_based_on_last_token(
prev_keyword, text_before_cursor, full_text, identifier)
else:
return []
elif token_v in ('type', '::'):
# ALTER TABLE foo SET DATA TYPE bar
# SELECT foo::bar
# Note that tables are a form of composite type in postgresql, so
# they're suggested here as well
schema = (identifier and identifier.get_parent_name()) or []
suggestions = [{'type': 'datatype', 'schema': schema},
{'type': 'table', 'schema': schema}]
if not schema:
suggestions.append({'type': 'schema'})
return suggestions
else:
return [{'type': 'keyword'}]
def identifies(id, schema, table, alias):
return id == alias or id == table or (
schema and (id == schema + '.' + table))
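

# e.g. identifies('a', None, 'abc', 'a') -> True (matches the alias)
#      identifies('public.tbl', 'public', 'tbl', None) -> True (schema-qualified)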
| bsd-3-clause | -1,566,311,390,984,765,200 | 41.015152 | 82 | 0.560765 | false |
mezuro/kalibro_client_py | tests/processor/test_kalibro_module.py | 1 | 1104 | from unittest import TestCase
from nose.tools import assert_equal, assert_true
from tests.factories import KalibroModuleFactory
from tests.helpers import not_raises
class TestKalibroModule(TestCase):
def setUp(self):
self.subject = KalibroModuleFactory.build()
def test_properties_getters(self):
assert_true(hasattr(self.subject, 'name'))
long_name = "test.name"
self.subject.long_name = long_name
assert_equal(self.subject.name, long_name.split("."))
@not_raises((AttributeError, ValueError))
def test_properties_setters(self):
long_name = "test.name"
self.subject.name = long_name
assert_equal(self.subject.long_name, long_name)
name = ["test", "name"]
self.subject.name = name
assert_equal(self.subject.long_name, ".".join(name))
def test_short_name(self):
name = ["test", "name"]
self.subject.name = name
assert_equal(self.subject.short_name, name[-1])
def test_granularity(self):
assert_equal(self.subject.granularity, self.subject.granularity)
| lgpl-3.0 | -215,531,830,724,538,370 | 29.666667 | 72 | 0.663043 | false |