repo_name stringlengths 6-61 | path stringlengths 4-230 | copies stringlengths 1-3 | size stringlengths 4-6 | text stringlengths 1.01k-850k | license stringclasses 15 values | hash int64 -9,220,477,234,079,998,000 to 9,219,060,020B | line_mean float64 11.6-96.6 | line_max int64 32-939 | alpha_frac float64 0.26-0.9 | autogenerated bool 1 class | ratio float64 1.62-6.1 | config_test bool 2 classes | has_no_keywords bool 2 classes | few_assignments bool 1 class |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
KenjiTakahashi/gayeogi | gayeogi/main.py | 1 | 18805 | # -*- coding: utf-8 -*-
# This is a part of gayeogi @ http://github.com/KenjiTakahashi/gayeogi/
# Karol "Kenji Takahashi" Woźniak © 2010 - 2012
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
from PyQt4 import QtGui
from PyQt4.QtCore import Qt, QSettings, QLocale, QTranslator
from PyQt4.QtCore import pyqtSignal, QModelIndex
from gayeogi.db.local import DB
from gayeogi.db.distributor import Distributor
from gayeogi.interfaces.settings import Settings
from gayeogi.utils import Filter
import gayeogi.plugins
__version__ = '0.6.3'
locale = QLocale.system().name()
if sys.platform == 'win32':
from PyQt4.QtGui import QDesktopServices
service = QDesktopServices()
dbPath = os.path.join(
unicode(service.storageLocation(9)), u'gayeogi', u'db'
)
lnPath = u''
else: # Most POSIX systems, there may be more elifs in future.
dbPath = os.path.expanduser(u'~/.config/gayeogi/db')
lnPath = os.path.dirname(__file__)
class ADRItemDelegate(QtGui.QStyledItemDelegate):
buttonClicked = pyqtSignal(QModelIndex)
def __init__(self, parent=None):
super(ADRItemDelegate, self).__init__(parent)
self.palette = QtGui.QPalette()
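# Mouse coordinates and the geometry of the last-painted flag badge; updated from the
# view's mouse events and used by paint() for hover/click handling.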
self.buttoned = False
self.mx = 0
self.my = 0
self.ry = 0
self.rry = -1
self.rx = 0
self.ht = 0
def paint(self, painter, option, index):
super(ADRItemDelegate, self).paint(painter, option, QModelIndex())
painter.save()
painter.setRenderHint(QtGui.QPainter.Antialiasing)
painter.setPen(Qt.NoPen)
painter.setBrush(self.palette.mid())
ry = option.rect.y()
rx = option.rect.x()
width = option.rect.width()
self.ht = option.rect.height()
self.ry = ry
self.rx = rx
metrics = option.fontMetrics
lineHeight = metrics.lineSpacing()
linePos = ry + (self.ht - lineHeight) / 2
painter.drawRoundedRect(
rx + 1, linePos,
36, lineHeight,
20, 60, Qt.RelativeSize
)
painter.setPen(QtGui.QPen())
x = rx + 8 + metrics.width(u'a')
if index.data(234).toBool():
painter.drawText(x, linePos + lineHeight - 3, u'd')
x += metrics.width(u'd')
if index.data(345).toBool():
painter.drawText(x, linePos + lineHeight - 3, u'r')
if self.buttonOver(rx, ry):
if self.buttoned:
if self.my >= ry + 1 and self.my <= ry + self.ht - 6:
self.rry = ry
self.buttonClicked.emit(index)
self.buttoned = False
elif ry != self.rry:
painter.setPen(QtGui.QPen(self.palette.brightText(), 0))
self.rry = -1
painter.drawText(rx + 8, linePos + lineHeight - 3, u'a')
elif index.data(123).toBool():
painter.drawText(rx + 8, linePos + lineHeight - 3, u'a')
elif index.data(123).toBool():
painter.drawText(rx + 8, linePos - lineHeight - 3, u'a')
painter.restore()
pSize = self.ht / 2 + option.font.pointSize() / 2
if pSize % 2 == 0:
pSize += 1
pSize -= 1
painter.save()
if option.state & QtGui.QStyle.State_Selected:
if option.state & QtGui.QStyle.State_HasFocus:
painter.setPen(QtGui.QPen(self.palette.highlightedText(), 0))
else:
painter.setPen(QtGui.QPen(self.palette.brightText(), 0))
painter.drawText(
rx + 39, ry + pSize, index.data(Qt.DisplayRole).toString()
)
painter.restore()
pixmap = index.data(666).toPyObject()
if pixmap:
painter.drawPixmap(rx + width - 80, ry, pixmap)
def buttonOver(self, x, y):
return (
self.mx >= x + 1 and self.mx <= x + 36 and
self.my >= y + 1 and self.my <= y + self.ht
)
class TableView(QtGui.QTableView):
"""Docstring for TableView """
def __init__(self, state, parent=None):
"""@todo: to be defined
:parent: @todo
"""
super(TableView, self).__init__(parent)
self.setSelectionMode(self.ExtendedSelection)
self.setSelectionBehavior(self.SelectRows)
self.setEditTriggers(self.NoEditTriggers)
self.setShowGrid(False)
self.setCornerButtonEnabled(False)
self.setWordWrap(False)
vheader = self.verticalHeader()
vheader.setHidden(True)
hheader = self.horizontalHeader()
hheader.setStretchLastSection(True)
hheader.setDefaultAlignment(Qt.AlignLeft)
hheader.setHighlightSections(False)
hheader.setMovable(True)
hheader.setContextMenuPolicy(Qt.CustomContextMenu)
hheader.customContextMenuRequested.connect(self.showHeaderContextMenu)
# This restores state over and over for every column added.
# FIXME: Restore state once (somehow).
#hheader.sectionCountChanged.connect(
#lambda: self.horizontalHeader().restoreState(state)
#)
def showHeaderContextMenu(self):
"""@todo: Docstring for showHeaderContextMenu """
menu = QtGui.QMenu()
model = self.model()
for i in xrange(model.columnCount()):
action = menu.addAction(
model.headerData(i, Qt.Horizontal, Qt.DisplayRole).toString()
)
action.setProperty(u'column', i)
action.setCheckable(True)
if not self.isColumnHidden(i):
action.setChecked(True)
menu.triggered.connect(self.showHideColumn)
menu.exec_(QtGui.QCursor.pos())
def showHideColumn(self, action):
"""@todo: Docstring for showHideColumn
:action: @todo
"""
column = action.property(u'column').toInt()[0]
self.setColumnHidden(column, not self.isColumnHidden(column))
class ADRTableView(TableView):
def __init__(self, state, parent=None):
super(ADRTableView, self).__init__(state, parent)
self.setMouseTracking(True)
self.delegate = ADRItemDelegate()
self.delegate.buttonClicked.connect(self.callback)
self.setItemDelegateForColumn(1, self.delegate)
def buttoned(self, mx, rx):
return mx >= rx + 1 and mx <= rx + 36
def callback(self, index):
self.model().setData(index, not index.data(123).toBool(), 123)
def mouseMoveEvent(self, event):
if event.y() == 0 or self.delegate.rry + self.delegate.ht < event.y():
self.delegate.rry = -1
self.delegate.mx = event.x()
self.delegate.my = event.y()
self.viewport().update()
def mouseReleaseEvent(self, event):
if not self.buttoned(event.x(), self.delegate.rx):
super(ADRTableView, self).mouseReleaseEvent(event)
else:
self.delegate.buttoned = True
self.delegate.my = event.y()
self.viewport().update()
def mousePressEvent(self, event):
if not self.buttoned(event.x(), self.delegate.rx):
super(ADRTableView, self).mousePressEvent(event)
def mouseDoubleClickEvent(self, event):
if not self.buttoned(event.x(), self.delegate.rx):
super(ADRTableView, self).mouseDoubleClickEvent(event)
class View(QtGui.QWidget):
def __init__(self, model, view, pixmap, parent=None):
"""@todo: Docstring for __init__
:model: @todo
:view: @todo
:pixmap: @todo
:parent: @todo
"""
super(View, self).__init__(parent)
self.model = Filter(model)
self.filter = QtGui.QLineEdit()
self.filter.textEdited.connect(self.model.setFilter)
self.filter.setStatusTip(self.trUtf8((
u"Pattern: <pair>|<pair>, "
u"where <pair> is <column_name>:<searching_phrase> or (not) "
u"(a or d or r). Case insensitive, regexp allowed."
)))
self.view = view
vheader = self.view.verticalHeader()
if pixmap:
vheader.setDefaultSectionSize(80)
else:
vheader.setDefaultSectionSize(
self.view.fontMetrics().lineSpacing()
)
self.view.setModel(self.model)
layout = QtGui.QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(self.filter)
layout.addWidget(self.view)
self.setLayout(layout)
self.view.setSortingEnabled(True)
class Main(QtGui.QMainWindow):
__settings = QSettings(u'gayeogi', u'gayeogi')
__dbsettings = QSettings(u'gayeogi', u'Databases')
def __init__(self):
super(Main, self).__init__()
if not os.path.exists(dbPath):
os.mkdir(dbPath)
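# Legacy single-file database (db.pkl) lives one level above the new db directory.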
if os.path.exists(os.path.join(dbPath[:-3], u'db.pkl')):
pass # TODO: convert old db to new
else:
dialog = Settings()
dialog.exec_()
self.db = DB(dbPath)
self.db.finished.connect(self.enableButtons)
self.db.artistsStatisticsChanged.connect(self.updateArtistsStatistics)
self.db.albumsStatisticsChanged.connect(self.updateAlbumsStatistics)
from interfaces.main import Ui_main
self.ui = Ui_main()
widget = QtGui.QWidget()
self.ui.setupUi(widget)
self.ui.artists = View(self.db.artists, TableView(
self.__settings.value(u'artistsView').toByteArray()
), Main.__dbsettings.value(
u'image/artist/enabled', 2
).toBool(), self.ui.splitter)
delegate = ADRItemDelegate()
self.ui.artists.view.setItemDelegateForColumn(0, delegate)
self.ui.albums = View(self.db.albums, ADRTableView(
self.__settings.value(u'albumsView').toByteArray()
), Main.__dbsettings.value(
u'image/album/enabled', 2
).toBool(), self.ui.splitter)
self.ui.tracks = View(self.db.tracks, TableView(
self.__settings.value(u'tracksView').toByteArray()
), None, self.ui.splitter)
self.ui.tracks.view.setAlternatingRowColors(True)
self.ui.artists.view.selectionModel().selectionChanged.connect(
self.ui.albums.model.setSelection
)
self.ui.albums.view.selectionModel().selectionChanged.connect(
self.ui.tracks.model.setSelection
)
self.ui.plugins = {}
self.ui.splitter.restoreState(
self.__settings.value(u'splitters').toByteArray()
)
self.setCentralWidget(widget)
self.rt = Distributor(self.db.iterator())
self.rt.stepped.connect(self.statusBar().showMessage)
self.rt.finished.connect(self.enableButtons)
self.ui.local.clicked.connect(self.disableButtons)
self.ui.local.clicked.connect(self.db.start)
self.ui.remote.clicked.connect(self.disableButtons)
self.ui.remote.clicked.connect(self.rt.start)
self.ui.close.clicked.connect(self.close)
self.ui.save.clicked.connect(self.save)
self.ui.settings.clicked.connect(self.showSettings)
self.statusBar()
self.setWindowTitle(u'gayeogi ' + __version__)
self.translators = list()
self.loadPluginsTranslators()
self.loadPlugins()
def disableButtons(self):
"""Disable some buttons one mustn't use during the update."""
self.ui.local.setDisabled(True)
self.ui.remote.setDisabled(True)
self.ui.save.setDisabled(True)
self.ui.settings.setDisabled(True)
def enableButtons(self):
"""Enable buttons disabled by Main.disableButtons.
Also shows the "Done" message.
"""
self.ui.local.setEnabled(True)
self.ui.remote.setEnabled(True)
self.ui.save.setEnabled(True)
self.ui.settings.setEnabled(True)
self.statusBar().showMessage(self.trUtf8('Done'))
def loadPluginsTranslators(self):
reload(gayeogi.plugins)
app = QtGui.QApplication.instance()
for plugin in gayeogi.plugins.__all__:
translator = QTranslator()
if translator.load(plugin + u'_' + locale,
os.path.join(lnPath, u'plugins', u'langs')):
self.translators.append(translator)
app.installTranslator(translator)
def removePluginsTranslators(self):
app = QtGui.QApplication.instance()
for translator in self.translators:
app.removeTranslator(translator)
def loadPlugins(self):
def depends(plugin):
for p in gayeogi.plugins.__all__:
class_ = getattr(gayeogi.plugins, p).Main
if plugin in class_.depends and class_.loaded:
return True
return False
for plugin in gayeogi.plugins.__all__:
class_ = getattr(gayeogi.plugins, plugin).Main
__settings_ = QSettings(u'gayeogi', class_.name)
option = __settings_.value(u'enabled', 0).toInt()[0]
if option and not class_.loaded:
class__ = class_(self.ui, self.db.artists, self.appendPlugin,
self.removePlugin)
class__.load()
self.ui.plugins[plugin] = class__
elif not option and class_.loaded:
self.ui.plugins[plugin].unload()
for d in self.ui.plugins[plugin].depends:
if not self.ui.plugins[d].loaded \
and d in self.ui.plugins.keys():
del self.ui.plugins[d]
if not depends(plugin):
del self.ui.plugins[plugin]
def appendPlugin(self, parent, child, position):
parent = getattr(self.ui, parent)
if position == 'start':
position = 0
elif position == 'end':
position = len(parent.parent().children()) - 7
if isinstance(parent, QtGui.QLayout):
widget = parent.itemAt(position)
if not widget:
parent.insertWidget(position, child)
else:
if isinstance(widget, QtGui.QTabWidget):
widget.addTab(child, child.name)
else:
try:
widget.name
except AttributeError:
parent.insertWidget(position, child)
else:
widget = parent.takeAt(position).widget()
tab = QtGui.QTabWidget()
tab.setTabPosition(tab.South)
tab.addTab(widget, widget.name)
tab.addTab(child, child.name)
parent.insertWidget(position, tab)
def removePlugin(self, parent, child, position):
parent = getattr(self.ui, parent)
if position == 'start':
position = 0
elif position == 'end':
position = len(parent.parent().children()) - 8
if isinstance(parent, QtGui.QLayout):
widget = parent.itemAt(position).widget()
try:
if widget.name == child.name:
parent.takeAt(position).widget().deleteLater()
except AttributeError:
for i in range(widget.count()):
if widget.widget(i).name == child.name:
widget.removeTab(i)
if widget.count() == 1:
tmp = widget.widget(0)
parent.takeAt(position).widget().deleteLater()
parent.insertWidget(position, tmp)
parent.itemAt(position).widget().show()
def showSettings(self):
u"""Show settings dialog and then update accordingly."""
def __save():
self.removePluginsTranslators()
self.loadPluginsTranslators()
self.loadPlugins()
dialog = Settings()
dialog.ok.clicked.connect(__save)
dialog.exec_()
def save(self):
u"""Save database to file."""
self.db.save()
self.statusBar().showMessage(self.trUtf8('Saved'))
def updateArtistsStatistics(self, a, d, r):
"""Updates global artists' statistics.
:a: A statistics.
:d: D statistics.
:r: R statistics.
"""
self.ui.artistsGreen.setText(unicode(a))
self.ui.artistsYellow.setText(unicode(d))
self.ui.artistsRed.setText(unicode(r))
def updateAlbumsStatistics(self, a, d, r):
"""Updated global albums' statistics.
@note: Attributes as in Main.updateArtistsStatistics.
"""
self.ui.albumsGreen.setText(unicode(a))
self.ui.albumsYellow.setText(unicode(d))
self.ui.albumsRed.setText(unicode(r))
def closeEvent(self, event):
def unload():
for plugin in self.ui.plugins.values():
plugin.unload()
self.__settings.setValue(
u'splitters', self.ui.splitter.saveState()
)
self.__settings.setValue(u'artistsView',
self.ui.artists.view.horizontalHeader().saveState()
)
self.__settings.setValue(u'albumsView',
self.ui.albums.view.horizontalHeader().saveState()
)
self.__settings.setValue(u'tracksView',
self.ui.tracks.view.horizontalHeader().saveState()
)
if self.db.modified:
from interfaces.confirmation import ConfirmationDialog
dialog = ConfirmationDialog()
dialog.buttons.accepted.connect(self.save)
dialog.buttons.accepted.connect(unload)
dialog.buttons.rejected.connect(event.ignore)
dialog.buttons.helpRequested.connect(unload)
dialog.exec_()
else:
unload()
def run():
app = QtGui.QApplication(sys.argv)
app.setApplicationName(u'gayeogi')
translator = QTranslator()
if translator.load(u'main_' + locale, os.path.join(lnPath, u'langs')):
app.installTranslator(translator)
main = Main()
main.show()
sys.exit(app.exec_())
| gpl-3.0 | 8,449,969,891,091,645,000 | 37.21748 | 78 | 0.5898 | false | 3.901037 | false | false | false |
dereknewman/cancer_detection | cube_writer.py | 1 | 5978 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 10 22:22:17 2017
@author: derek
"""
import numpy as np
import tensorflow as tf
def get_image_binary(filename):
image_cube = np.load(filename)
image_cube = np.asarray(image_cube,np.int16)
shape = np.array(image_cube.shape, np.int32)
return shape.tobytes(), image_cube.tobytes() #convert image to raw data bytes in the array
def write_to_tfrecord(label, shape, binary_image, tfrecord_file):
writer = tf.python_io.TFRecordWriter(tfrecord_file)
example = tf.train.Example(features=tf.train.Features(feature={
'label': tf.train.Feature(bytes_list=tf.train.BytesList(value=[label])),
'shape': tf.train.Feature(bytes_list=tf.train.BytesList(value=[shape])),
'image': tf.train.Feature(bytes_list=tf.train.BytesList(value=[binary_image]))
}))
writer.write(example.SerializeToString())
writer.close()
def read_from_tfrecord(filename):
# Queue the TFRecord file and read one serialized example at a time
# (TFRecordReader needs a filename queue, not a plain string).
tfrecord_file_queue = tf.train.string_input_producer([filename], name='queue')
reader = tf.TFRecordReader()
key, tfrecord_serialized = reader.read(tfrecord_file_queue)
# Parse the serialized example back into its raw byte-string features.
tfrecord_features = tf.parse_single_example(tfrecord_serialized, features={
'label' : tf.FixedLenFeature([], tf.string),
'shape' : tf.FixedLenFeature([], tf.string),
'image' : tf.FixedLenFeature([], tf.string),
}, name='features')
image = tf.decode_raw(tfrecord_features['image'],tf.int16)
shape = tf.decode_raw(tfrecord_features['shape'],tf.int32)
label = tf.decode_raw(tfrecord_features['label'],tf.int16)
image_cube = tf.reshape(image, shape)
return label, shape, image_cube
#$$#$#
def patient_to_tfrecord(patient_id, image_array, patient_df):
patient_id = "1.4.5.6.123551485448654"
tfrecord_file = patient_id + ".tfrecord"
writer = tf.python_io.TFRecordWriter(tfrecord_file)
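# NOTE: the hard-coded patient_id above and the random cubes/labels below are placeholders;
# the patient_id, image_array and patient_df arguments are not used yet.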
for i in range(1000):
image_cube = np.random.randint(-1000,1000,[32,32,32],dtype=np.int16)
image_label = np.random.randint(0,5,3,dtype=np.int16)
image_cube = np.asarray(image_cube,np.int16) #ensure data is in int16
binary_cube = image_cube.tobytes()
image_label = np.array(image_label,np.int16) #ensure data is in int16
binary_label = image_label.tobytes()
shape = np.array(image_cube.shape, np.int32) #ensure data is in int16
binary_shape = shape.tobytes()
example = tf.train.Example(features=tf.train.Features(feature={
'label': tf.train.Feature(bytes_list=tf.train.BytesList(value=[binary_label])),
'shape': tf.train.Feature(bytes_list=tf.train.BytesList(value=[binary_shape])),
'image': tf.train.Feature(bytes_list=tf.train.BytesList(value=[binary_cube]))
}))
writer.write(example.SerializeToString())
writer.close()
patient_id = "1.4.5.6.123551485448654"
tfrecord_file = patient_id + ".tfrecord"
writer = tf.python_io.TFRecordWriter(tfrecord_file)
for i in range(1000):
image_cube = np.random.randint(-1000,1000,[32,32,32],dtype=np.int16)
image_label = np.random.randint(0,5,3,dtype=np.int16)
image_cube = np.asarray(image_cube,np.int16) #ensure data is in int16
binary_cube = image_cube.tobytes()
image_label = np.array(image_label,np.int16) #ensure data is in int16
binary_label = image_label.tobytes()
shape = np.array(image_cube.shape, np.int32) #ensure data is in int16
binary_shape = shape.tobytes()
example = tf.train.Example(features=tf.train.Features(feature={
'label': tf.train.Feature(bytes_list=tf.train.BytesList(value=[binary_label])),
'shape': tf.train.Feature(bytes_list=tf.train.BytesList(value=[binary_shape])),
'image': tf.train.Feature(bytes_list=tf.train.BytesList(value=[binary_cube]))
}))
writer.write(example.SerializeToString())
writer.close()
#$#$#$#
filenames = ["cubes1.tfrecord"]
tfrecord_file_queue = tf.train.string_input_producer(filenames, name='queue')
reader = tf.TFRecordReader()
key, tfrecord_serialized = reader.read(tfrecord_file_queue)
# Convert from a string to a vector of uint8 that is record_bytes long.
tfrecord_features = tf.parse_single_example(tfrecord_serialized,features={
'label' : tf.FixedLenFeature([], tf.string),
'shape' : tf.FixedLenFeature([], tf.string),
'image' : tf.FixedLenFeature([], tf.string),
}, name='features')
image = tf.decode_raw(tfrecord_features['image'],tf.int16)
shape = tf.decode_raw(tfrecord_features['shape'],tf.int32)
label = tf.decode_raw(tfrecord_features['label'],tf.int16)
image_cube = tf.reshape(image, shape)
##########################################################################
#filenames = ["cubes1.tfrecord", "cubes2.tfrecord"]
#dataset = tf.contrib.data.TFRecordDataset(filenames)
# Transforms a scalar string `example_proto` into a pair of a scalar string and
# a scalar integer, representing an image and its label, respectively.
def _parse_function(example_proto):
features = {"image": tf.FixedLenFeature((), tf.string, default_value=""),
"label": tf.FixedLenFeature((), tf.int32, default_value=0)}
parsed_features = tf.parse_single_example(example_proto, features)
return parsed_features["image"], parsed_features["label"]
# Creates a dataset that reads all of the examples from two files, and extracts
# the image and label features.
filenames = ["/var/data/file1.tfrecord", "/var/data/file2.tfrecord"]
dataset = tf.contrib.data.TFRecordDataset(filenames)
dataset = dataset.map(_parse_function)
print(sess.run(label))
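# Note (illustrative sketch, not from the original file): evaluating `label` above
# requires an active session with the queue runners started, e.g.
#   with tf.Session() as sess:
#       coord = tf.train.Coordinator()
#       threads = tf.train.start_queue_runners(sess=sess, coord=coord)
#       print(sess.run(label))
#       coord.request_stop()
#       coord.join(threads)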
| mit | -6,577,436,787,872,455,000 | 39.120805 | 95 | 0.659752 | false | 3.423826 | false | false | false |
migueldiascosta/pymatgen-db | matgendb/vv/validate.py | 1 | 25272 | """
Collection validator
"""
__author__ = "Dan Gunter"
__copyright__ = "Copyright 2012-2013, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Dan Gunter"
__email__ = "[email protected]"
__status__ = "Development"
__date__ = "1/31/13"
import pymongo
import random
import re
import sys
import collections
import copy
from .util import DoesLogging, total_size
#from .mquery import *
from smoqe.query import *
import six
class DBError(Exception):
pass
class ValidatorSyntaxError(Exception):
"Syntax error in configuration of Validator"
def __init__(self, target, desc):
msg = 'Invalid syntax: {} -> "{}"'.format(desc, target)
Exception.__init__(self, msg)
class PythonMethod(object):
"""Encapsulate an external Python method that will be run on our target
MongoDB collection to perform arbitrary types of validation.
"""
_PATTERN = re.compile(r'\s*(@\w+)(\s+\w+)*')
CANNOT_COMBINE_ERR = ('Call to a Python method cannot be combined '
'with any other constraints')
BAD_CONSTRAINT_ERR = 'Invalid constraint (must be: @<method> [<param> ..])'
@classmethod
def constraint_is_method(cls, text):
"""Check from the text of the constraint whether it is
a Python method, as opposed to a 'normal' constraint.
:return: True if it is, False if not
"""
m = cls._PATTERN.match(text)
return m is not None
def __init__(self, text):
"""Create new instance from a raw constraint string.
:raises: ValidatorSyntaxerror
"""
if not self._PATTERN.match(text):
raise ValidatorSyntaxError(text, self.BAD_CONSTRAINT_ERR)
tokens = re.split('@?\s+', text)
if len(tokens) < 1:
raise ValidatorSyntaxError(text, self.BAD_CONSTRAINT_ERR)
self.method = tokens[0]
self.params = tokens[1:]
def mongo_get(rec, key, default=None):
"""
Get value from dict using MongoDB dot-separated path semantics.
For example:
>>> assert mongo_get({'a': {'b': 1}, 'x': 2}, 'a.b') == 1
>>> assert mongo_get({'a': {'b': 1}, 'x': 2}, 'x') == 2
>>> assert mongo_get({'a': {'b': 1}, 'x': 2}, 'a.b.c') is None
:param rec: mongodb document
:param key: path to mongo value
:param default: default to return if not found
:return: value, potentially nested, or default if not found
:raise: ValueError, if record is not a dict.
"""
if not rec:
return default
if not isinstance(rec, collections.Mapping):
raise ValueError('input record must act like a dict')
if not '.' in key:
return rec.get(key, default)
for key_part in key.split('.'):
if not isinstance(rec, collections.Mapping):
return default
if not key_part in rec:
return default
rec = rec[key_part]
return rec
class Projection(object):
"""Fields on which to project the query results.
"""
def __init__(self):
self._fields = {}
self._slices = {}
def add(self, field, op=None, val=None):
"""Update report fields to include new one, if it doesn't already.
:param field: The field to include
:type field: Field
:param op: Operation
:type op: ConstraintOperator
:return: None
"""
if field.has_subfield():
self._fields[field.full_name] = 1
else:
self._fields[field.name] = 1
if op and op.is_size() and not op.is_variable():
# get minimal part of array with slicing,
# but cannot use slice with variables
self._slices[field.name] = val + 1
if op and op.is_variable():
# add the variable too
self._fields[val] = 1
def to_mongo(self):
"""Translate projection to MongoDB query form.
:return: Dictionary to put into a MongoDB JSON query
:rtype: dict
"""
d = copy.copy(self._fields)
for k, v in six.iteritems(self._slices):
d[k] = {'$slice': v}
return d
class ConstraintViolation(object):
"""A single constraint violation, with no metadata.
"""
def __init__(self, constraint, value, expected):
"""Create new constraint violation
:param constraint: The constraint that was violated
:type constraint: Constraint
"""
self._constraint = constraint
self._got = value
self._expected = expected
@property
def field(self):
return self._constraint.field.name
@property
def op(self):
#return str(self._constraint.op)
return self._constraint.op.display_op
@property
def got_value(self):
return self._got
@property
def expected_value(self):
return self._expected
@expected_value.setter
def expected_value(self, value):
self._expected = value
class NullConstraintViolation(ConstraintViolation):
"""Empty constraint violation, for when there are no constraints.
"""
def __init__(self):
ConstraintViolation.__init__(self, Constraint('NA', '=', 'NA'), 'NA', 'NA')
class ConstraintViolationGroup(object):
"""A group of constraint violations with metadata.
"""
def __init__(self):
"""Create an empty object.
"""
self._viol = []
# These are read/write
self.subject = ''
self.condition = None
def add_violations(self, violations, record=None):
"""Add constraint violations and associated record.
:param violations: List of violations
:type violations: list(ConstraintViolation)
:param record: Associated record
:type record: dict
:rtype: None
"""
rec = {} if record is None else record
for v in violations:
self._viol.append((v, rec))
def __iter__(self):
return iter(self._viol)
def __len__(self):
return len(self._viol)
class ProgressMeter(object):
"""Simple progress tracker
"""
def __init__(self, num, fmt):
self._n = num
self._subject = '?'
self._fmt = fmt
self._count = 0
self._total = 0
@property
def count(self):
return self._total
def set_subject(self, subj):
self._subject = subj
def update(self, *args):
self._count += 1
self._total += 1
if self._n == 0 or self._count < self._n:
return
sys.stderr.write(self._fmt.format(*args, subject=self._subject, count=self.count))
sys.stderr.write('\n')
sys.stderr.flush()
self._count = 0
class ConstraintSpec(DoesLogging):
"""Specification of a set of constraints for a collection.
"""
FILTER_SECT = 'filter'
CONSTRAINT_SECT = 'constraints'
SAMPLE_SECT = 'sample'
def __init__(self, spec):
"""Create specification from a configuration.
:param spec: Configuration for a single collection
:type spec: dict
:raise: ValueError if specification is wrong
"""
DoesLogging.__init__(self, name='mg.ConstraintSpec')
self._sections, _slist = {}, []
for item in spec:
self._log.debug("build constraint from: {}".format(item))
if isinstance(item, dict):
self._add_complex_section(item)
else:
self._add_simple_section(item)
def __iter__(self):
"""Return a list of all the sections.
:rtype: list(ConstraintSpecSection)
"""
sect = []
# simple 1-level flatten operation
for values in six.itervalues(self._sections):
for v in values:
sect.append(v)
return iter(sect)
def _add_complex_section(self, item):
"""Add a section that has a filter and set of constraints
:raise: ValueError if filter or constraints is missing
"""
# extract filter and constraints
try:
fltr = item[self.FILTER_SECT]
except KeyError:
raise ValueError("configuration requires '{}'".format(self.FILTER_SECT))
sample = item.get(self.SAMPLE_SECT, None)
constraints = item.get(self.CONSTRAINT_SECT, None)
section = ConstraintSpecSection(fltr, constraints, sample)
key = section.get_key()
if key in self._sections:
self._sections[key].append(section)
else:
self._sections[key] = [section]
def _add_simple_section(self, item):
self._sections[None] = [ConstraintSpecSection(None, item, None)]
class ConstraintSpecSection(object):
def __init__(self, fltr, constraints, sample):
self._filter, self._constraints, self._sampler = fltr, constraints, sample
# make condition(s) into a tuple
if isinstance(fltr, basestring):
self._key = (fltr,)
elif fltr is None:
self._key = None
else:
self._key = tuple(fltr)
# parse sample keywords into class, if present
if sample:
self._sampler = Sampler(**sample)
def get_key(self):
return self._key
@property
def sampler(self):
return self._sampler
@property
def filters(self):
return self._filter
@property
def constraints(self):
return self._constraints
class Validator(DoesLogging):
"""Validate a collection.
"""
class SectionParts:
"""Encapsulate the tuple of information for each section of filters, constraints,
etc. within a collection.
"""
def __init__(self, cond, body, sampler, report_fields):
"""Create new initialized set of parts.
:param cond: Condition to filter records
:type cond: MongoQuery
:param body: Main set of constraints
:type body: MongoQuery
:param sampler: Sampling class if any
:type sampler: Sampler
:param report_fields: Fields to report on
:type report_fields: list
"""
self.cond, self.body, self.sampler, self.report_fields = \
cond, body, sampler, report_fields
def __init__(self, max_violations=50, max_dberrors=10, aliases=None, add_exists=False):
DoesLogging.__init__(self, name='mg.validator')
self.set_progress(0)
self._aliases = aliases if aliases else {}
self._max_viol = max_violations
if self._max_viol > 0:
self._find_kw = {'limit': self._max_viol}
else:
self._find_kw = {}
self._max_dberr = max_dberrors
self._base_report_fields = {'_id': 1, 'task_id': 1}
self._add_exists = add_exists
def set_aliases(self, a):
"""Set aliases.
"""
self._aliases = a
def set_progress(self, num):
"""Report progress every `num` bad records.
:param num: Report interval
:type num: int
:return: None
"""
report_str = 'Progress for {subject}: {count:d} invalid, {:d} db errors, {:d} bytes'
self._progress = ProgressMeter(num, report_str)
def num_violations(self):
if self._progress is None:
return 0
return self._progress._count
def validate(self, coll, constraint_spec, subject='collection'):
"""Validation of a collection.
This is a generator that yields ConstraintViolationGroups.
:param coll: Mongo collection
:type coll: pymongo.Collection
:param constraint_spec: Constraint specification
:type constraint_spec: ConstraintSpec
:param subject: Name of the thing being validated
:type subject: str
:return: Sets of constraint violation, one for each constraint_section
:rtype: ConstraintViolationGroup
:raises: ValidatorSyntaxError
"""
self._spec = constraint_spec
self._progress.set_subject(subject)
self._build(constraint_spec)
for sect_parts in self._sections:
cvg = self._validate_section(subject, coll, sect_parts)
if cvg is not None:
yield cvg
def _validate_section(self, subject, coll, parts):
"""Validate one section of a spec.
:param subject: Name of subject
:type subject: str
:param coll: The collection to validate
:type coll: pymongo.Collection
:param parts: Section parts
:type parts: Validator.SectionParts
:return: Group of constraint violations, if any, otherwise None
:rtype: ConstraintViolationGroup or None
"""
cvgroup = ConstraintViolationGroup()
cvgroup.subject = subject
# If the constraint is an 'import' of code, treat it differently here
if self._is_python(parts):
num_found = self._run_python(cvgroup, coll, parts)
return None if num_found == 0 else cvgroup
query = parts.cond.to_mongo(disjunction=False)
query.update(parts.body.to_mongo())
cvgroup.condition = parts.cond.to_mongo(disjunction=False)
self._log.debug('Query spec: {}'.format(query))
self._log.debug('Query fields: {}'.format(parts.report_fields))
# Find records that violate 1 or more constraints
cursor = coll.find(query, fields=parts.report_fields, **self._find_kw)
if parts.sampler is not None:
cursor = parts.sampler.sample(cursor)
nbytes, num_dberr, num_rec = 0, 0, 0
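# Walk the cursor manually so database errors can be counted and tolerated up to _max_dberr.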
while 1:
try:
record = six.advance_iterator(cursor)
nbytes += total_size(record)
num_rec += 1
except StopIteration:
self._log.info("collection {}: {:d} records, {:d} bytes, {:d} db-errors"
.format(subject, num_rec, nbytes, num_dberr))
break
except pymongo.errors.PyMongoError as err:
num_dberr += 1
if num_dberr > self._max_dberr > 0:
raise DBError("Too many errors")
self._log.warn("DB.{:d}: {}".format(num_dberr, err))
continue
# report progress
if self._progress:
self._progress.update(num_dberr, nbytes)
# get reasons for badness
violations = self._get_violations(parts.body, record)
cvgroup.add_violations(violations, record)
return None if nbytes == 0 else cvgroup
def _get_violations(self, query, record):
"""Reverse-engineer the query to figure out why a record was selected.
:param query: MongoDB query
:type query: MongQuery
:param record: Record in question
:type record: dict
:return: Reasons why bad
:rtype: list(ConstraintViolation)
"""
# special case, when no constraints are given
if len(query.all_clauses) == 0:
return [NullConstraintViolation()]
# normal case, check all the constraints
reasons = []
for clause in query.all_clauses:
var_name = None
key = clause.constraint.field.name
op = clause.constraint.op
fval = mongo_get(record, key)
if fval is None:
expected = clause.constraint.value
reasons.append(ConstraintViolation(clause.constraint, 'missing', expected))
continue
if op.is_variable():
# retrieve value for variable
var_name = clause.constraint.value
value = mongo_get(record, var_name, default=None)
if value is None:
reasons.append(ConstraintViolation(clause.constraint, 'missing', var_name))
continue
clause.constraint.value = value # swap out value, temporarily
# take length for size
if op.is_size():
if isinstance(fval, six.string_types) or not hasattr(fval, '__len__'):
reasons.append(ConstraintViolation(clause.constraint, type(fval), 'sequence'))
if op.is_variable():
clause.constraint.value = var_name # put original value back
continue
fval = len(fval)
ok, expected = clause.constraint.passes(fval)
if not ok:
reasons.append(ConstraintViolation(clause.constraint, fval, expected))
if op.is_variable():
clause.constraint.value = var_name # put original value back
return reasons
def _build(self, constraint_spec):
"""Generate queries to execute.
Sets instance variables so that Mongo query strings, etc. can now
be extracted from the object.
:param constraint_spec: Constraint specification
:type constraint_spec: ConstraintSpec
"""
self._sections = []
# For each condition in the spec
for sval in constraint_spec:
rpt_fld = self._base_report_fields.copy()
#print("@@ CONDS = {}".format(sval.filters))
#print("@@ MAIN = {}".format(sval.constraints))
# Constraints
# If the constraint is an external call to Python code
if self._is_python(sval.constraints):
query, proj = self._process_python(sval.constraints)
rpt_fld.update(proj.to_mongo())
# All other constraints, e.g. 'foo > 12'
else:
query = MongoQuery()
if sval.constraints is not None:
groups = self._process_constraint_expressions(sval.constraints)
projection = Projection()
for cg in six.itervalues(groups):
for c in cg:
projection.add(c.field, c.op, c.value)
query.add_clause(MongoClause(c))
if self._add_exists:
for c in cg.existence_constraints:
query.add_clause(MongoClause(c, exists_main=True))
rpt_fld.update(projection.to_mongo())
# Filters
cond_query = MongoQuery()
if sval.filters is not None:
cond_groups = self._process_constraint_expressions(sval.filters, rev=False)
for cg in six.itervalues(cond_groups):
for c in cg:
cond_query.add_clause(MongoClause(c, rev=False))
# Done. Add a new 'SectionPart' for the filter and constraint
result = self.SectionParts(cond_query, query, sval.sampler, rpt_fld)
self._sections.append(result)
def _process_constraint_expressions(self, expr_list, conflict_check=True, rev=True):
"""Create and return constraints from expressions in expr_list.
:param expr_list: The expressions
:conflict_check: If True, check for conflicting expressions within each field
:return: Constraints grouped by field (the key is the field name)
:rtype: dict
"""
# process expressions, grouping by field
groups = {}
for expr in expr_list:
field, raw_op, val = parse_expr(expr)
op = ConstraintOperator(raw_op)
if field not in groups:
groups[field] = ConstraintGroup(Field(field, self._aliases))
groups[field].add_constraint(op, val)
# add existence constraints
for cgroup in six.itervalues(groups):
cgroup.add_existence(rev)
# optionally check for conflicts
if conflict_check:
# check for conflicts in each group
for field_name, group in six.iteritems(groups):
conflicts = group.get_conflicts()
if conflicts:
raise ValueError('Conflicts for field {}: {}'.format(field_name, conflicts))
return groups
def _is_python(self, constraint_list):
"""Check whether constraint is an import of Python code.
:param constraint_list: List of raw constraints from YAML file
:type constraint_list: list(str)
:return: True if this refers to an import of code, False otherwise
:raises: ValidatorSyntaxError
"""
if len(constraint_list) == 1 and \
PythonMethod.constraint_is_method(constraint_list[0]):
return True
if len(constraint_list) > 1 and \
any(filter(PythonMethod.constraint_is_method, constraint_list)):
condensed_list = '/'.join(constraint_list)
err = PythonMethod.CANNOT_COMBINE_ERR
raise ValidatorSyntaxError(condensed_list, err)
return False
def _process_python(self, expr_list):
"""Create a wrapper for a call to some external Python code.
:param expr_list: The expressions
:return: Tuple of (query, field-projection)
:rtype: (PythonMethod, Projection)
"""
return None, None
def set_aliases(self, new_value):
"Set aliases and wrap errors in ValueError"
try:
self.aliases = new_value
except Exception as err:
raise ValueError("invalid value: {}".format(err))
class Sampler(DoesLogging):
"""Randomly sample a proportion of the full collection.
"""
# Random uniform distribution
DIST_RUNIF = 1
# Default distribution
DEFAULT_DIST = DIST_RUNIF
# Names of distributions
DIST_CODES = {'uniform': DIST_RUNIF}
def __init__(self, min_items=0, max_items=1e9, p=1.0, distrib=DEFAULT_DIST, **kw):
"""Create new parameterized sampler.
:param min_items: Minimum number of items in the sample
:param max_items: Maximum number of items in the sample
:param p: Probability of selecting an item
:param distrib: Probability distribution code, one of DIST_<name> in this class
:type distrib: str or int
:raise: ValueError, if `distrib` is an unknown code or string
"""
DoesLogging.__init__(self, 'mg.sampler')
# Sanity checks
if min_items < 0:
raise ValueError('min_items cannot be negative ({:d})'.format(min_items))
if (max_items != 0) and (max_items < min_items):
raise ValueError('max_items must be zero or >= min_items ({:d} < {:d})'.format(max_items, min_items))
if not (0.0 <= p <= 1.0):
raise ValueError('probability, p, must be between 0 and 1 ({:f})'.format(p))
self.min_items = min_items
self.max_items = max_items
self.p = p
self._empty = True
# Distribution
if not isinstance(distrib, int):
distrib = self.DIST_CODES.get(str(distrib), None)
if distrib == self.DIST_RUNIF:
self._keep = self._keep_runif
else:
raise ValueError("unrecognized distribution: {}".format(distrib))
@property
def is_empty(self):
return self._empty
def _keep_runif(self):
return self.p >= random.uniform(0, 1)
def sample(self, cursor):
"""Extract records randomly from the database.
Continue until the target proportion of the items have been
extracted, or until `min_items` if this is larger.
If `max_items` is non-negative, do not extract more than these.
This function is a generator, yielding items incrementally.
:param cursor: Cursor to sample
:type cursor: pymongo.cursor.Cursor
:return: yields each item
:rtype: dict
:raise: ValueError, if max_items is valid and less than `min_items`
or if target collection is empty
"""
count = cursor.count()
# special case: empty collection
if count == 0:
self._empty = True
raise ValueError("Empty collection")
# special case: entire collection
if self.p >= 1 and self.max_items <= 0:
for item in cursor:
yield item
return
# calculate target number of items to select
if self.max_items <= 0:
n_target = max(self.min_items, self.p * count)
else:
if self.p <= 0:
n_target = max(self.min_items, self.max_items)
else:
n_target = max(self.min_items, min(self.max_items, self.p * count))
if n_target == 0:
raise ValueError("No items requested")
# select first `n_target` items that pop up with
# probability self.p
# This is actually biased to items at the beginning
# of the file if n_target is smaller than (p * count),
n = 0
while n < n_target:
try:
item = six.advance_iterator(cursor)
except StopIteration:
# need to keep looping through data until
# we get all our items!
cursor.rewind()
item = six.advance_iterator(cursor)
if self._keep():
yield item
n += 1
| mit | 5,023,407,409,135,420,000 | 33.809917 | 113 | 0.578585 | false | 4.292849 | false | false | false |
vfiebig/rpaas | rpaas/ssl_plugins/le_authenticator.py | 1 | 2219 | """RPAAS plugin."""
import logging
import pipes
import time
import zope.interface
from acme import challenges
from letsencrypt import interfaces
from letsencrypt.plugins import common
import rpaas
logger = logging.getLogger(__name__)
class RpaasLeAuthenticator(common.Plugin):
"""RPAAS Authenticator.
This plugin creates an authenticator for Tsuru RPAAS.
"""
zope.interface.implements(interfaces.IAuthenticator)
zope.interface.classProvides(interfaces.IPluginFactory)
hidden = True
description = "Configure RPAAS HTTP server"
CMD_TEMPLATE = """\
location /{achall.URI_ROOT_PATH}/{encoded_token} {{
default_type text/plain;
echo -n '{validation}';
}}
"""
"""Command template."""
def __init__(self, hosts, *args, **kwargs):
super(RpaasLeAuthenticator, self).__init__(*args, **kwargs)
self._root = './le'
self._httpd = None
self.hosts = hosts
def get_chall_pref(self, domain):
return [challenges.HTTP01]
def perform(self, achalls): # pylint: disable=missing-docstring
responses = []
for achall in achalls:
responses.append(self._perform_single(achall))
return responses
def _perform_single(self, achall):
response, validation = achall.response_and_validation()
self._notify_and_wait(self.CMD_TEMPLATE.format(
achall=achall, validation=pipes.quote(validation),
encoded_token=achall.chall.encode("token")))
if response.simple_verify(
achall.chall, achall.domain,
achall.account_key.public_key(), self.config.http01_port):
return response
else:
logger.error(
"Self-verify of challenge failed, authorization abandoned.")
return None
def _notify_and_wait(self, message): # pylint: disable=no-self-use
nginx_manager = rpaas.get_manager().nginx_manager
for host in self.hosts:
nginx_manager.acme_conf(host, message)
time.sleep(6)
# TODO: update rpaas nginx
# sys.stdout.write(message)
# raw_input("Press ENTER to continue")
def cleanup(self, achalls):
pass
| bsd-3-clause | 441,820,940,751,737,340 | 27.088608 | 76 | 0.638125 | false | 3.819277 | false | false | false |
lixiangning888/whole_project | modules/processing/procmemory.py | 1 | 4103 | # Copyright (C) 2010-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
import struct
PAGE_NOACCESS = 0x00000001
PAGE_READONLY = 0x00000002
PAGE_READWRITE = 0x00000004
PAGE_WRITECOPY = 0x00000008
PAGE_EXECUTE = 0x00000010
PAGE_EXECUTE_READ = 0x00000020
PAGE_EXECUTE_READWRITE = 0x00000040
PAGE_EXECUTE_WRITECOPY = 0x00000080
PAGE_GUARD = 0x00000100
PAGE_NOCACHE = 0x00000200
PAGE_WRITECOMBINE = 0x00000400
protmap = {
PAGE_NOACCESS : "NOACCESS",
PAGE_READONLY : "R",
PAGE_READWRITE : "RW",
PAGE_WRITECOPY : "RWC",
PAGE_EXECUTE : "X",
PAGE_EXECUTE_READ : "RX",
PAGE_EXECUTE_READWRITE : "RWX",
PAGE_EXECUTE_WRITECOPY : "RWXC",
}
from lib.cuckoo.common.abstracts import Processing
from lib.cuckoo.common.objects import File
from lib.cuckoo.common.constants import CUCKOO_ROOT
class ProcessMemory(Processing):
"""Analyze process memory dumps."""
order = 10
def prot_to_str(self, prot):
if prot & PAGE_GUARD:
return "G"
prot &= 0xff
return protmap[prot]
def coalesce_chunks(self, chunklist):
low = chunklist[0]["start"]
high = chunklist[-1]["end"]
prot = chunklist[0]["prot"]
PE = chunklist[0]["PE"]
for chunk in chunklist:
if chunk["prot"] != prot:
prot = "Mixed"
return { "start" : low, "end" : high, "size" : "0x%x" % (int(high, 16) - int(low, 16)), "prot" : prot, "PE" : PE, "chunks" : chunklist }
def parse_dump(self, dmp_path):
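# A dump is a sequence of [24-byte header][raw chunk data] records; the header packs the
# base address (Q) plus size, state, type and protection (4 x I).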
f = open(dmp_path, "rb")
address_space = []
curchunk = []
lastend = 0
while True:
data = f.read(24)
if data == '':
break
alloc = dict()
addr,size,mem_state,mem_type,mem_prot = struct.unpack("QIIII", data)
offset = f.tell()
if addr != lastend and len(curchunk):
address_space.append(self.coalesce_chunks(curchunk))
curchunk = []
lastend = addr + size
alloc["start"] = "0x%.08x" % addr
alloc["end"] = "0x%.08x" % (addr + size)
alloc["size"] = "0x%x" % size
alloc["prot"] = self.prot_to_str(mem_prot)
alloc["state"] = mem_state
alloc["type"] = mem_type
alloc["offset"] = offset
alloc["PE"] = False
if f.read(2) == "MZ":
alloc["PE"] = True
f.seek(size-2, 1)
curchunk.append(alloc)
if len(curchunk):
address_space.append(self.coalesce_chunks(curchunk))
return address_space
def run(self):
"""Run analysis.
@return: structured results.
"""
self.key = "procmemory"
results = []
if os.path.exists(self.pmemory_path):
for dmp in os.listdir(self.pmemory_path):
dmp_path = os.path.join(self.pmemory_path, dmp)
dmp_file = File(dmp_path)
process_name = ""
process_path = ""
process_id = int(os.path.splitext(os.path.basename(dmp_path))[0])
if "behavior" in self.results and "processes" in self.results["behavior"]:
for process in self.results["behavior"]["processes"]:
if process_id == process["process_id"]:
process_name = process["process_name"]
process_path = process["module_path"]
proc = dict(
file=dmp_path,
pid=process_id,
name=process_name,
path=process_path,
yara=dmp_file.get_yara(os.path.join(CUCKOO_ROOT, "data", "yara", "index_memory.yar")),
address_space=self.parse_dump(dmp_path)
)
results.append(proc)
return results
| lgpl-3.0 | 8,511,867,462,212,490,000 | 33.771186 | 144 | 0.528638 | false | 3.595968 | false | false | false |
inf0-warri0r/soccer_playing_ai | soccer_game.py | 1 | 9422 | """
Author : tharindra galahena (inf0_warri0r)
Project: soccer playing ai agents using finite state machines
Blog : http://www.inf0warri0r.blogspot.com
Date : 21/07/2013
License:
Copyright 2013 Tharindra Galahena
This is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version. This is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
* You should have received a copy of the GNU General Public License along with
this. If not, see http://www.gnu.org/licenses/.
"""
from Tkinter import *
import player
import random
import ball
players1 = list()
for i in range(0, 5):
p = player.player(i, 0, 600, 150)
players1.append(p)
players2 = list()
for i in range(0, 5):
p = player.player(i, 1, 0, 150)
players2.append(p)
b = ball.ball(300, 150, 300, 150, 300, 150)
def find_nearest_playerest_to_ball_1(bx, by):
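# Returns (distance, index) of the team-1 player closest to the ball at (bx, by).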
ls = list()
for i in range(0, 5):
d = (players1[i].pos_current_x - bx) ** 2.0
d = d + (players1[i].pos_current_y - by) ** 2.0
d = d ** 0.5
ls.append((d, i))
ls = sorted(ls)
return ls[0]
def find_nearest_playerest_to_ball_2(bx, by):
ls = list()
for i in range(0, 5):
d = (players2[i].pos_current_x - bx) ** 2.0
d = d + (players2[i].pos_current_y - by) ** 2.0
d = d ** 0.5
ls.append((d, i))
ls = sorted(ls)
return ls[0]
def find_nearest_player_1(px, py):
ls = list()
for i in range(0, 5):
d = (players1[i].pos_current_x - px) ** 2.0
d = d + (players1[i].pos_current_y - py) ** 2.0
d = d ** 0.5
ls.append((d, 0, i))
ls = sorted(ls)
return ls[0]
def find_nearest_player_2(px, py):
ls = list()
for i in range(0, 5):
d = (players2[i].pos_current_x - px) ** 2.0
d = d + (players2[i].pos_current_y - py) ** 2.0
d = d ** 0.5
ls.append((d, 1, i))
ls = sorted(ls)
return ls[0]
def find_safest_player_1(n):
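# Pick a team-1 team-mate of player n who is farther from the nearest opponent than n is
# (and vertically well separated), preferring the one closest in x to the goal; returns n if none qualifies.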
mn = 100000
ind = n
dd = find_nearest_player_2(players1[n].pos_current_x,
players1[n].pos_current_y)[0]
for i in range(0, 5):
if i == n:
continue
d = find_nearest_player_2(players1[i].pos_current_x,
players1[i].pos_current_y)
yy = (players1[i].pos_current_y - players1[n].pos_current_y) ** 2.0
if d[0] > dd and yy > 400:
dst = (players1[i].pos_current_x - players1[i].pos_goal_x) ** 2.0
if mn > dst:
mn = dst
ind = i
return ind
def find_safest_player_2(n):
mn = 100000
ind = n
dd = find_nearest_player_1(players2[n].pos_current_x,
players2[n].pos_current_y)[0]
for i in range(0, 5):
if i == n:
continue
d = find_nearest_player_1(players2[i].pos_current_x,
players2[i].pos_current_y)
if d[0] > dd:
dst = (players2[i].pos_current_x - players2[i].pos_goal_x) ** 2.0
if mn > dst:
mn = dst
ind = i
return ind
def find_friend(t, n):
ls = list()
if t == 0:
for i in range(0, 5):
if i == n:
continue
d1 = (players1[i].pos_current_x - players1[n].pos_current_x) ** 2.0
d2 = (players1[i].pos_current_y - players1[n].pos_current_y) ** 2.0
d = (d1 + d2) ** 0.5
ls.append((d, 0, i))
else:
for i in range(0, 5):
if i == n:
continue
d1 = (players2[i].pos_current_x - players2[n].pos_current_x) ** 2.0
d2 = (players2[i].pos_current_y - players2[n].pos_current_y) ** 2.0
d = (d1 + d2) ** 0.5
ls.append((d, 1, i))
ls = sorted(ls)
return ls
root = Tk()
root.title("soccer - inf0_warri0r")
chart_1 = Canvas(root,
width=600,
height=400,
background="black")
chart_1.grid(row=0, column=0)
red = 0
blue = 0
flage = True
while 1:
chart_1.create_rectangle(0, 0, 600, 300, fill='#1c0',
outline='yellow', width=3)
chart_1.create_oval(240, 90, 360, 210, fill='#1c0',
outline='yellow', width=3)
chart_1.create_line(300, 0, 300, 300, fill='yellow', width=3)
for i in range(0, 5):
chart_1.create_oval(players1[i].pos_current_x - 6,
players1[i].pos_current_y - 6,
players1[i].pos_current_x + 6,
players1[i].pos_current_y + 6,
fill='red')
chart_1.create_text(players1[i].pos_current_x + 7,
players1[i].pos_current_y + 7,
text=str(players1[i].index + 1),
fill='white')
for i in range(0, 5):
chart_1.create_oval(players2[i].pos_current_x - 6,
players2[i].pos_current_y - 6,
players2[i].pos_current_x + 6,
players2[i].pos_current_y + 6,
fill='blue')
chart_1.create_text(players2[i].pos_current_x + 7,
players2[i].pos_current_y - 7,
text=str(players2[i].index + 1),
fill='white')
chart_1.create_oval(b.pos_current_x - 5, b.pos_current_y - 5,
b.pos_current_x + 5, b.pos_current_y + 5,
fill='yellow')
txt = 'score : red = ' + str(red) + ' blue = ' + str(blue)
chart_1.create_text(300, 350, text=txt, fill='white')
if flage:
chart_1.update()
chart_1.after(600)
chart_1.delete(ALL)
bls1 = find_nearest_playerest_to_ball_1(b.pos_current_x, b.pos_current_y)
bls2 = find_nearest_playerest_to_ball_2(b.pos_current_x, b.pos_current_y)
rd = random.randrange(0, 100)
if rd < 50:
for i in range(0, 5):
players1[i].change_state(b, bls1[1])
ind = find_safest_player_1(i)
px = -1
py = -1
if ind != i:
px = players1[ind].pos_current_x
py = players1[ind].pos_current_y
xd = find_nearest_player_2(players1[i].pos_current_x,
players1[i].pos_current_y)
gole = find_nearest_player_2(players1[i].pos_goal_x,
players1[i].pos_goal_y)
b = players1[i].move(b, 0, px, py, xd, gole[0],
players1, players2)
for i in range(0, 5):
players2[i].change_state(b, bls2[1])
ind = find_safest_player_2(i)
px = -1
py = -1
if ind != i:
px = players2[ind].pos_current_x
py = players2[ind].pos_current_y
xd = find_nearest_player_1(players2[i].pos_current_x,
players2[i].pos_current_y)
gole = find_nearest_player_1(players2[i].pos_goal_x,
players2[i].pos_goal_y)
b = players2[i].move(b, 1, px, py, xd, gole[0],
players1, players2)
else:
for i in range(0, 5):
players2[i].change_state(b, bls2[1])
ind = find_safest_player_2(i)
px = -1
py = -1
if ind != i:
px = players2[ind].pos_current_x
py = players2[ind].pos_current_y
xd = find_nearest_player_1(players2[i].pos_current_x,
players2[i].pos_current_y)
gole = find_nearest_player_1(players2[i].pos_goal_x,
players2[i].pos_goal_y)
b = players2[i].move(b, 1, px, py, xd, gole[0],
players1, players2)
for i in range(0, 5):
players1[i].change_state(b, bls1[1])
ind = find_safest_player_1(i)
px = -1
py = -1
if ind != i:
px = players1[ind].pos_current_x
py = players1[ind].pos_current_y
xd = find_nearest_player_2(players1[i].pos_current_x,
players1[i].pos_current_y)
gole = find_nearest_player_2(players1[i].pos_goal_x,
players1[i].pos_goal_y)
b = players1[i].move(b, 0, px, py, xd, gole[0],
players1, players2)
b.state_change()
b.move()
if not flage:
chart_1.update()
chart_1.after(100)
chart_1.delete(ALL)
else:
flage = False
if b.pos_current_x >= 590 or b.pos_current_x <= 10:
if b.pos_current_x <= 10:
blue = blue + 1
else:
red = red + 1
for i in range(0, 5):
players1[i].reset()
players2[i].reset()
b.reset()
flage = True
root.mainloop()
| gpl-3.0 | -226,032,285,302,610,240 | 31.944056 | 79 | 0.486945 | false | 3.208035 | false | false | false |
jerome-nexedi/dream | dream/plugins/UpdateWIP.py | 1 | 5830 | from copy import copy
from dream.plugins import plugin
import datetime
# XXX HARDCODED
MACHINE_TYPE_SET = set(["Dream.MachineJobShop", "Dream.MouldAssembly"])
class UpdateWIP(plugin.InputPreparationPlugin):
""" Input preparation
reads the data from the external database and updates the WIP
"""
def getWIPIds(self):
"""returns the ids of the parts that are in the WIP dictionary"""
wipIDs = []
for key in self.data["input"]["BOM"].get("WIP", {}).keys():
wipIDs.append(key)
return wipIDs
def preprocess(self, data):
""" updates the Work in Process according to what is provided by the BOM, i.e. if a design just exited the last step of it's sequence
"""
self.data = copy(data)
orders = self.data["input"]["BOM"]["productionOrders"]
nodes = self.data["graph"]["node"]
wip = self.data["input"]["BOM"].get("WIP", {})
""" get the tasks that are in the WIP, and place those that are not in the WIP in the corresponding stations. Consider the parts that have concluded their routes, or the components that are not created yet.
All the components defined by the corresponding orders should be examined
"""
wipToBeRemoved = []
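# components whose routes are already concluded; they get popped from the WIP at the end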
# # check all the orders
for order in orders:
orderComponents = order.get("componentsList", [])
designComplete = False # flag to inform if the design is concluded
completedComponents = [] # list to hold the componentIDs that are concluded
# # find all the components
for component in orderComponents:
componentID = component["id"]
route = component["route"]
# # figure out if they are defined in the WIP
if componentID in self.getWIPIds():
work = wip[componentID]
# # extract WIP information
workStation = work["station"]
remainingProcessingTime = float(work.get("remainingProcessingTime",0))
task_id = work["task_id"]
assert len(route)>0, "the OrderComponent must have a route defined with length more than 0"
assert task_id, "there must be a task_id defined for the OrderComponent in the WIP"
# # get the step identified by task_id, hold the step_index to see if the entity's route is concluded
for step_index, step in enumerate(route):
if step["task_id"] == task_id:
last_step = step
break
# # check if the entity has left the station
if remainingProcessingTime:
currentStation = workStation
current_step = last_step
# the entity is in a buffer if the step_index is no larger than the length of the route
elif len(route)-1>=step_index:
current_step = route[step_index+1]
currentStation = current_step["stationIdsList"][0]
# the entity has concluded it's route; it should be removed from the WIP
else:
wipToBeRemoved.append(componentID)
# # check if this part is a design and update the flag
if any(station.startswith("OD") for station in route[-1]["stationIdsList"]):
designComplete = True
# # add the part to the completedComponents list if it is not mould or design
if not any(station.startswith("OD") for station in route[-1]["stationIdsList"]) and\
not any(station.startswith("E") for station in route[-1]["stationIdsList"]):
completedComponents.append(componentID)
# if the entity is still in the system then update the WIP info
if not componentID in wipToBeRemoved:
wip[componentID]["station"] = currentStation
wip[componentID]["sequence"] = current_step["sequence"]
wip[componentID]["task_id"] = current_step["task_id"]
if remainingProcessingTime:
wip[componentID]["remainingProcessingTime"] = {"Fixed": {"mean": remainingProcessingTime}}
# if the entity is not recognized within the current WIP then check if it should be created
# first the flag designComplete and the completedComponents list must be updated
for component in orderComponents:
componentID = component["id"]
route = component["route"]
if not componentID in self.getWIPIds():
insertWIPitem = False
# # if the design is complete
if designComplete:
# # if the component is not a mould then put in the first step of its route
if not any(station.startswith("E") for station in route[-1]["stationIdsList"]):
insertWIPitem = True
# # if the design is not complete
else:
# # if the component is design then put it at the start of its route
if any(station.startswith("OD") for station in route[-1]["stationIdsList"]):
insertWIPitem = True
# # if the completed components include all the components (exclude mould and design)
if len(completedComponents) == len(orderComponents)-2:
                            # # if the component is a mould then put it in the first step of its route
if any(station.startswith("E") for station in route[-1]["stationIdsList"]):
insertWIPitem = True
if insertWIPitem:
if not wip.get(componentID, {}):
wip[componentID] = {}
wip[componentID]["station"] = route[0]["stationIdsList"][0]
wip[componentID]["sequence"] = route[0]["sequence"]
wip[componentID]["task_id"] = route[0]["task_id"]
# remove the idle entities
for entityID in wipToBeRemoved:
            assert wip.pop(entityID, None), "while trying to remove WIP that has concluded its route, nothing is removed"
return data
if __name__ == '__main__':
pass | gpl-3.0 | -1,498,681,226,819,017,000 | 48.837607 | 210 | 0.630703 | false | 4.289919 | false | false | false |
whitepyro/debian_server_setup | sickbeard/providers/thepiratebay.py | 1 | 15022 | # Author: Mr_Orange <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import time
import re
import urllib, urllib2, urlparse
import sys
import os
import datetime
import sickbeard
import generic
from sickbeard.common import Quality, cpu_presets
from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
from sickbeard import db
from sickbeard import classes
from sickbeard import logger
from sickbeard import tvcache
from sickbeard import helpers
from sickbeard import clients
from sickbeard.show_name_helpers import allPossibleShowNames, sanitizeSceneName
from sickbeard.common import Overview
from sickbeard.exceptions import ex
from sickbeard import encodingKludge as ek
from lib import requests
from lib.requests import exceptions
from lib.unidecode import unidecode
class ThePirateBayProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "ThePirateBay")
self.supportsBacklog = True
self.enabled = False
self.ratio = None
self.confirmed = False
self.minseed = None
self.minleech = None
self.cache = ThePirateBayCache(self)
self.proxy = ThePirateBayWebproxy()
self.url = 'http://pirateproxy.net/'
self.searchurl = self.url + 'search/%s/0/7/200' # order by seed
self.re_title_url = '/torrent/(?P<id>\d+)/(?P<title>.*?)//1".+?(?P<url>magnet.*?)//1".+?(?P<seeders>\d+)</td>.+?(?P<leechers>\d+)</td>'
def isEnabled(self):
return self.enabled
def imageName(self):
return 'thepiratebay.png'
def getQuality(self, item, anime=False):
quality = Quality.sceneQuality(item[0], anime)
return quality
def _reverseQuality(self, quality):
quality_string = ''
if quality == Quality.SDTV:
quality_string = 'HDTV x264'
if quality == Quality.SDDVD:
quality_string = 'DVDRIP'
elif quality == Quality.HDTV:
quality_string = '720p HDTV x264'
elif quality == Quality.FULLHDTV:
quality_string = '1080p HDTV x264'
elif quality == Quality.RAWHDTV:
quality_string = '1080i HDTV mpeg2'
elif quality == Quality.HDWEBDL:
quality_string = '720p WEB-DL h264'
elif quality == Quality.FULLHDWEBDL:
quality_string = '1080p WEB-DL h264'
elif quality == Quality.HDBLURAY:
quality_string = '720p Bluray x264'
elif quality == Quality.FULLHDBLURAY:
quality_string = '1080p Bluray x264'
return quality_string
def _find_season_quality(self, title, torrent_id, ep_number):
""" Return the modified title of a Season Torrent with the quality found inspecting torrent file list """
        mediaExtensions = ['avi', 'mkv', 'wmv', 'divx',
                           'vob', 'dvr-ms', 'wtv', 'ts',
                           'ogv', 'rar', 'zip', 'mp4']
quality = Quality.UNKNOWN
fileName = None
fileURL = self.proxy._buildURL(self.url + 'ajax_details_filelist.php?id=' + str(torrent_id))
if self.proxy and self.proxy.isEnabled():
self.headers.update({'referer': self.proxy.getProxyURL()})
data = self.getURL(fileURL)
if not data:
return None
filesList = re.findall('<td.+>(.*?)</td>', data)
if not filesList:
logger.log(u"Unable to get the torrent file list for " + title, logger.ERROR)
videoFiles = filter(lambda x: x.rpartition(".")[2].lower() in mediaExtensions, filesList)
#Filtering SingleEpisode/MultiSeason Torrent
if len(videoFiles) < ep_number or len(videoFiles) > float(ep_number * 1.1):
logger.log(
u"Result " + title + " have " + str(ep_number) + " episode and episodes retrived in torrent are " + str(
len(videoFiles)), logger.DEBUG)
logger.log(u"Result " + title + " Seem to be a Single Episode or MultiSeason torrent, skipping result...",
logger.DEBUG)
return None
if Quality.sceneQuality(title) != Quality.UNKNOWN:
return title
for fileName in videoFiles:
quality = Quality.sceneQuality(os.path.basename(fileName))
if quality != Quality.UNKNOWN: break
if fileName is not None and quality == Quality.UNKNOWN:
quality = Quality.assumeQuality(os.path.basename(fileName))
if quality == Quality.UNKNOWN:
logger.log(u"Unable to obtain a Season Quality for " + title, logger.DEBUG)
return None
try:
myParser = NameParser(showObj=self.show)
parse_result = myParser.parse(fileName)
except (InvalidNameException, InvalidShowException):
return None
logger.log(u"Season quality for " + title + " is " + Quality.qualityStrings[quality], logger.DEBUG)
if parse_result.series_name and parse_result.season_number:
title = parse_result.series_name + ' S%02d' % int(parse_result.season_number) + ' ' + self._reverseQuality(
quality)
return title
def _get_season_search_strings(self, ep_obj):
search_string = {'Season': []}
for show_name in set(allPossibleShowNames(self.show)):
if ep_obj.show.air_by_date or ep_obj.show.sports:
ep_string = show_name + ' ' + str(ep_obj.airdate).split('-')[0]
search_string['Season'].append(ep_string)
ep_string = show_name + ' Season ' + str(ep_obj.airdate).split('-')[0]
search_string['Season'].append(ep_string)
elif ep_obj.show.anime:
ep_string = show_name + ' ' + "%02d" % ep_obj.scene_absolute_number
search_string['Season'].append(ep_string)
else:
ep_string = show_name + ' S%02d' % int(ep_obj.scene_season)
search_string['Season'].append(ep_string)
ep_string = show_name + ' Season ' + str(ep_obj.scene_season) + ' -Ep*'
search_string['Season'].append(ep_string)
search_string['Season'].append(ep_string)
return [search_string]
def _get_episode_search_strings(self, ep_obj, add_string=''):
search_string = {'Episode': []}
if self.show.air_by_date:
for show_name in set(allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
str(ep_obj.airdate).replace('-', ' ')
search_string['Episode'].append(ep_string)
elif self.show.sports:
for show_name in set(allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
str(ep_obj.airdate).replace('-', '|') + '|' + \
ep_obj.airdate.strftime('%b')
search_string['Episode'].append(ep_string)
elif self.show.anime:
for show_name in set(allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
"%02i" % int(ep_obj.scene_absolute_number)
search_string['Episode'].append(ep_string)
else:
for show_name in set(allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
'episodenumber': ep_obj.scene_episode} + '|' + \
sickbeard.config.naming_ep_type[0] % {'seasonnumber': ep_obj.scene_season,
'episodenumber': ep_obj.scene_episode} + ' %s' % add_string
search_string['Episode'].append(re.sub('\s+', ' ', ep_string))
return [search_string]
def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
if self.proxy and self.proxy.isEnabled():
self.headers.update({'referer': self.proxy.getProxyURL()})
for mode in search_params.keys():
for search_string in search_params[mode]:
if mode != 'RSS':
searchURL = self.proxy._buildURL(self.searchurl % (urllib.quote(unidecode(search_string))))
else:
searchURL = self.proxy._buildURL(self.url + 'tv/latest/')
logger.log(u"Search string: " + searchURL, logger.DEBUG)
data = self.getURL(searchURL)
if not data:
continue
re_title_url = self.proxy._buildRE(self.re_title_url)
#Extracting torrent information from data returned by searchURL
match = re.compile(re_title_url, re.DOTALL).finditer(urllib.unquote(data))
for torrent in match:
                    # Do not know why, but SickBeard skips releases with '_' in the name
                    title = torrent.group('title').replace('_', '.')
url = torrent.group('url')
id = int(torrent.group('id'))
seeders = int(torrent.group('seeders'))
leechers = int(torrent.group('leechers'))
#Filter unseeded torrent
if mode != 'RSS' and (seeders < self.minseed or leechers < self.minleech):
continue
#Accept Torrent only from Good People for every Episode Search
if self.confirmed and re.search('(VIP|Trusted|Helper|Moderator)', torrent.group(0)) is None:
logger.log(u"ThePirateBay Provider found result " + torrent.group(
'title') + " but that doesn't seem like a trusted result so I'm ignoring it", logger.DEBUG)
continue
#Check number video files = episode in season and find the real Quality for full season torrent analyzing files in torrent
if mode == 'Season' and search_mode == 'sponly':
ep_number = int(epcount / len(set(allPossibleShowNames(self.show))))
title = self._find_season_quality(title, id, ep_number)
if not title or not url:
continue
item = title, url, id, seeders, leechers
items[mode].append(item)
#For each search mode sort all the items by seeders
items[mode].sort(key=lambda tup: tup[3], reverse=True)
results += items[mode]
return results
def _get_title_and_url(self, item):
title, url, id, seeders, leechers = item
if title:
title = u'' + title.replace(' ', '.')
if url:
            url = url.replace('&amp;', '&')
return (title, url)
def findPropers(self, search_date=datetime.datetime.today()):
results = []
myDB = db.DBConnection()
sqlResults = myDB.select(
'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
' WHERE e.airdate >= ' + str(search_date.toordinal()) +
' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
)
if not sqlResults:
return []
for sqlshow in sqlResults:
self.show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
if self.show:
curEp = self.show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
searchString = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')
for item in self._doSearch(searchString[0]):
title, url = self._get_title_and_url(item)
results.append(classes.Proper(title, url, datetime.datetime.today(), self.show))
return results
def seedRatio(self):
return self.ratio
class ThePirateBayCache(tvcache.TVCache):
def __init__(self, provider):
tvcache.TVCache.__init__(self, provider)
        # only poll ThePirateBay every 20 minutes max
self.minTime = 20
def _getRSSData(self):
search_params = {'RSS': ['rss']}
return self.provider._doSearch(search_params)
class ThePirateBayWebproxy:
def __init__(self):
self.Type = 'GlypeProxy'
self.param = 'browse.php?u='
self.option = '&b=32'
self.enabled = False
self.url = None
self.urls = {
'Getprivate.eu (NL)': 'http://getprivate.eu/',
'15bb51.info (US)': 'http://15bb51.info/',
'Hideme.nl (NL)': 'http://hideme.nl/',
'Proxite.eu (DE)': 'http://proxite.eu/',
'Webproxy.cz (CZ)': 'http://webproxy.cz/',
'2me2u (CZ)': 'http://2me2u.me/',
'Interproxy.net (EU)': 'http://interproxy.net/',
'Unblockersurf.info (DK)': 'http://unblockersurf.info/',
'Hiload.org (NL)': 'http://hiload.org/',
}
def isEnabled(self):
""" Return True if we Choose to call TPB via Proxy """
return self.enabled
def getProxyURL(self):
""" Return the Proxy URL Choosen via Provider Setting """
return str(self.url)
def _buildURL(self, url):
""" Return the Proxyfied URL of the page """
if self.isEnabled():
url = self.getProxyURL() + self.param + url + self.option
return url
def _buildRE(self, regx):
""" Return the Proxyfied RE string """
if self.isEnabled():
            regx = re.sub('//1', self.option, regx).replace('&', '&amp;')
else:
regx = re.sub('//1', '', regx)
return regx
provider = ThePirateBayProvider()
| gpl-3.0 | 5,021,381,175,579,323,000 | 37.419437 | 143 | 0.569498 | false | 3.957323 | false | false | false |
edx/edx-enterprise | integrated_channels/degreed/exporters/learner_data.py | 1 | 3284 | # -*- coding: utf-8 -*-
"""
Learner data exporter for Enterprise Integrated Channel Degreed.
"""
from datetime import datetime
from logging import getLogger
from django.apps import apps
from integrated_channels.catalog_service_utils import get_course_id_for_enrollment
from integrated_channels.integrated_channel.exporters.learner_data import LearnerExporter
from integrated_channels.utils import generate_formatted_log
LOGGER = getLogger(__name__)
class DegreedLearnerExporter(LearnerExporter):
"""
Class to provide a Degreed learner data transmission audit prepared for serialization.
"""
def get_learner_data_records(
self,
enterprise_enrollment,
completed_date=None,
is_passing=False,
**kwargs
): # pylint: disable=arguments-differ,unused-argument
"""
Return a DegreedLearnerDataTransmissionAudit with the given enrollment and course completion data.
If completed_date is None, then course completion has not been met.
If no remote ID can be found, return None.
"""
# Degreed expects completion dates of the form 'yyyy-mm-dd'.
completed_timestamp = completed_date.strftime("%F") if isinstance(completed_date, datetime) else None
if enterprise_enrollment.enterprise_customer_user.get_remote_id() is not None:
DegreedLearnerDataTransmissionAudit = apps.get_model( # pylint: disable=invalid-name
'degreed',
'DegreedLearnerDataTransmissionAudit'
)
# We return two records here, one with the course key and one with the course run id, to account for
# uncertainty about the type of content (course vs. course run) that was sent to the integrated channel.
return [
DegreedLearnerDataTransmissionAudit(
enterprise_course_enrollment_id=enterprise_enrollment.id,
degreed_user_email=enterprise_enrollment.enterprise_customer_user.user_email,
course_id=get_course_id_for_enrollment(enterprise_enrollment),
course_completed=completed_date is not None and is_passing,
completed_timestamp=completed_timestamp,
),
DegreedLearnerDataTransmissionAudit(
enterprise_course_enrollment_id=enterprise_enrollment.id,
degreed_user_email=enterprise_enrollment.enterprise_customer_user.user_email,
course_id=enterprise_enrollment.course_id,
course_completed=completed_date is not None and is_passing,
completed_timestamp=completed_timestamp,
)
]
LOGGER.info(generate_formatted_log(
'degreed',
enterprise_enrollment.enterprise_customer_user.enterprise_customer.uuid,
enterprise_enrollment.enterprise_customer_user.user_id,
None,
('get_learner_data_records finished. No learner data was sent for this LMS User Id because '
'Degreed User ID not found for [{name}]'.format(
name=enterprise_enrollment.enterprise_customer_user.enterprise_customer.name
))))
return None
| agpl-3.0 | -1,071,006,453,224,120,000 | 45.253521 | 116 | 0.654385 | false | 4.605891 | false | false | false |
jaakkojulin/potku | Widgets/MatplotlibImportTimingWidget.py | 1 | 6288 | # coding=utf-8
'''
Created on 6.6.2013
Updated on 29.8.2013
Potku is a graphical user interface for analyzation and
visualization of measurement data collected from a ToF-ERD
telescope. For physics calculations Potku uses external
analyzation components.
Copyright (C) Timo Konu
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program (file named 'LICENCE').
'''
__author__ = "Timo Konu"
__versio__ = "1.0"
from PyQt4 import QtGui
from Widgets.MatplotlibWidget import MatplotlibWidget
class MatplotlibImportTimingWidget(MatplotlibWidget):
def __init__(self, parent, output_file, icon_manager, timing):
'''Inits import timings widget
Args:
parent: An ImportTimingGraphDialog class object.
output_file: A string representing file to be graphed.
icon_manager: An IconManager class object.
timing: A tuple representing low & high timing limits.
'''
super().__init__(parent)
super().fork_toolbar_buttons()
self.canvas.manager.set_title("Import coincidence timing")
self.icon_manager = icon_manager
# TODO: Multiple timings ?
timing_key = list(timing.keys())[0]
self.__limit_low, self.__limit_high = timing[timing_key]
self.__title = self.main_frame.windowTitle()
self.__fork_toolbar_buttons()
self.canvas.mpl_connect('button_press_event', self.on_click)
self.main_frame.setWindowTitle("{0} - Timing: ADC {3} ({1},{2})".format(
self.__title,
self.__limit_low,
self.__limit_high,
timing_key))
self.__limit_prev = 0
self.data = []
with open(output_file) as fp:
for line in fp:
if not line: # Can still result in empty lines at the end, skip.
continue
split = line.strip().split("\t")
time_diff = int(split[3])
# if time_diff < 0:
# time_diff *= -1
self.data.append(time_diff)
self.data = sorted(self.data)
self.on_draw()
def on_draw(self):
'''Draws the timings graph
'''
self.axes.clear()
self.axes.hist(self.data, 200, facecolor='green', histtype='stepfilled')
self.axes.set_yscale('log', nonposy='clip')
self.axes.set_xlabel("Timedifference (µs?)")
self.axes.set_ylabel("Count (?)")
if self.__limit_low:
self.axes.axvline(self.__limit_low, linestyle="--")
if self.__limit_high:
self.axes.axvline(self.__limit_high, linestyle="--")
self.remove_axes_ticks()
self.canvas.draw_idle()
def on_click(self, event):
'''Handles clicks on the graph.
Args:
event: A click event on the graph
'''
if event.button == 1 and self.limButton.isChecked():
value = int(event.xdata)
if value == self.__limit_high or value == self.__limit_low:
return
if self.__limit_prev:
self.__limit_high = value
self.__limit_prev = 0
else:
self.__limit_low = value
self.__limit_prev = 1
# Check these values are correctly ordered
if self.__limit_high < self.__limit_low:
self.__limit_low, self.__limit_high = \
self.__limit_high, self.__limit_low
# Set values to parent dialog (main_frame = ImportTimingGraphDialog)
self.main_frame.timing_low.setValue(self.__limit_low)
self.main_frame.timing_high.setValue(self.__limit_high)
self.main_frame.setWindowTitle("{0} - Timing: ({1},{2})".format(
self.__title,
self.__limit_low,
self.__limit_high))
self.on_draw()
def __fork_toolbar_buttons(self):
'''Custom toolbar buttons be here.
'''
self.__tool_label = self.mpl_toolbar.children()[24]
self.__button_drag = self.mpl_toolbar.children()[12]
self.__button_zoom = self.mpl_toolbar.children()[14]
self.__button_drag.clicked.connect(self.__uncheck_custom_buttons)
self.__button_zoom.clicked.connect(self.__uncheck_custom_buttons)
self.limButton = QtGui.QToolButton(self)
self.limButton.clicked.connect(self.__limit_button_click)
self.limButton.setCheckable(True)
self.limButton.setToolTip("Change timing's low and high limits for more accurate coincidence reading.")
self.icon_manager.set_icon(self.limButton, "amarok_edit.svg")
self.mpl_toolbar.addWidget(self.limButton)
def __limit_button_click(self):
'''Click event on limit button.
'''
if self.limButton.isChecked():
self.__uncheck_built_in_buttons()
self.__tool_label.setText("timing limit tool")
self.mpl_toolbar.mode = "timing limit tool"
else:
self.__tool_label.setText("")
self.mpl_toolbar.mode = ""
def __uncheck_custom_buttons(self):
self.limButton.setChecked(False)
def __uncheck_built_in_buttons(self):
self.__button_drag.setChecked(False)
self.__button_zoom.setChecked(False)
self.__tool_label.setText("")
self.mpl_toolbar.mode = ""
| gpl-2.0 | 6,802,219,075,643,110,000 | 37.808642 | 111 | 0.561794 | false | 4.250845 | false | false | false |
yugangw-msft/azure-cli | src/azure-cli/azure/cli/command_modules/keyvault/security_domain/security_domain.py | 3 | 1632 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
class Key: # pylint: disable=too-few-public-methods
def __init__(self, enc_key=None, x5t_256=None):
self.enc_key = enc_key
self.x5t_256 = x5t_256
def to_json(self):
return {
'enc_key': self.enc_key if self.enc_key else '',
'x5t_256': self.x5t_256 if self.x5t_256 else ''
}
class EncData: # pylint: disable=too-few-public-methods
def __init__(self):
self.data = []
self.kdf = None
def to_json(self):
return {
'data': [x.to_json() for x in self.data],
'kdf': self.kdf if self.kdf else ''
}
class Datum: # pylint: disable=too-few-public-methods
def __init__(self, compact_jwe=None, tag=None):
self.compact_jwe = compact_jwe
self.tag = tag
def to_json(self):
return {
'compact_jwe': self.compact_jwe if self.compact_jwe else '',
'tag': self.tag if self.tag else ''
}
class SecurityDomainRestoreData: # pylint: disable=too-few-public-methods
def __init__(self):
self.enc_data = EncData()
self.wrapped_key = Key()
def to_json(self):
return {
'EncData': self.enc_data.to_json(),
'WrappedKey': self.wrapped_key.to_json()
}
| mit | 1,255,597,909,136,702,700 | 30.384615 | 94 | 0.505515 | false | 3.659193 | false | false | false |
zyga/schnibble | schnibble/arch/msp430/registers.py | 1 | 1087 | """MSP430 registers."""
class R0(object):
"""
Register 0, program counter.
Points to the next instruction to be executed.
Bit 0 is always unset.
"""
index = 0
class R1(object):
"""Register 1, stack pointer."""
index = 1
class R2(object):
"""Register 2, status."""
index = 2
class R3(object):
"""Register 3, constant generator."""
index = 3
class R4(object):
"""Register 4."""
index = 4
class R5(object):
"""Register 5."""
index = 5
class R6(object):
"""Register 6."""
index = 6
class R7(object):
"""Register 7."""
index = 7
class R8(object):
"""Register 8."""
index = 8
class R9(object):
"""Register 9."""
index = 9
class R10(object):
"""Register 10."""
index = 10
class R11(object):
"""Register 10."""
index = 11
class R12(object):
"""Register 12."""
index = 12
class R13(object):
"""Register 13."""
index = 13
class R14(object):
"""Register 14."""
index = 14
class R15(object):
"""Register 15."""
index = 15
| gpl-3.0 | -8,235,729,149,018,171,000 | 11.639535 | 50 | 0.538178 | false | 3.506452 | false | false | false |
us-ignite/us_ignite | us_ignite/actionclusters/forms.py | 1 | 4590 | from urlparse import urlparse, parse_qs
from django import forms
from django.contrib.auth.models import User
from django.core.validators import validate_email
from django.forms.models import inlineformset_factory
from django.utils import html
from us_ignite.actionclusters.models import (
ActionCluster,
ActionClusterURL,
ActionClusterMedia,
ActionClusterMembership,
)
from us_ignite.common import output
def _get_status_choices():
"""Returns a list of valid user status for the ``ActionCluster``"""
available_status = [
ActionCluster.PUBLISHED,
ActionCluster.DRAFT,
]
is_valid_status = lambda x: x[0] in available_status
return filter(is_valid_status, ActionCluster.STATUS_CHOICES)
class ActionClusterForm(forms.ModelForm):
"""Model form for the ``ActionCluster`` with whitelisted fields."""
status = forms.ChoiceField(
choices=_get_status_choices(), initial=ActionCluster.DRAFT)
summary = forms.CharField(
max_length=140, widget=forms.Textarea,
help_text='Tweet-length pitch / summary of project.')
class Meta:
model = ActionCluster
fields = ('name', 'summary', 'impact_statement',
'image', 'domain', 'features', 'stage', 'needs_partner',
'assistance', 'team_name', 'team_description',
'awards', 'acknowledgments', 'tags', 'status',)
widgets = {
'features': forms.CheckboxSelectMultiple(),
}
def _strip_tags(self, field):
if field in self.cleaned_data:
return html.strip_tags(self.cleaned_data[field])
def clean_team_description(self):
return self._strip_tags('team_description')
def clean_tags(self):
if 'tags' in self.cleaned_data:
return output.prepare_tags(self.cleaned_data['tags'])
ActionClusterLinkFormSet = inlineformset_factory(
ActionCluster, ActionClusterURL, max_num=3, extra=3, can_delete=False)
def is_embedable_url(url):
domain_list = ['www.youtube.com']
url_parsed = urlparse(url)
if url_parsed.netloc.lower() in domain_list:
query = parse_qs(url_parsed.query)
return True if query.get('v') else False
return False
class ActionClusterMediaForm(forms.ModelForm):
def clean_url(self):
url = self.cleaned_data.get('url')
if url:
if is_embedable_url(url):
return url
raise forms.ValidationError('Not valid URL.')
return ''
def clean(self):
cleaned_data = self.cleaned_data
if cleaned_data.get('url') or cleaned_data.get('image'):
return self.cleaned_data
raise forms.ValidationError('An image or a URL is required.')
class Meta:
fields = ('name', 'image', 'url')
model = ActionClusterMedia
ActionClusterMediaFormSet = inlineformset_factory(
ActionCluster, ActionClusterMedia, max_num=10, extra=1,
can_delete=False, form=ActionClusterMediaForm)
def validate_member(email):
"""Validates the user has a valid email and it is registered."""
try:
validate_email(email)
except forms.ValidationError:
raise forms.ValidationError(
'``%s`` is an invalid email address.' % email)
try:
return User.objects.get(email=email)
except User.DoesNotExist:
raise forms.ValidationError(
'User with ``%s`` email is not registered.' % email)
class MembershipForm(forms.Form):
"""Form to validate the collaborators."""
collaborators = forms.CharField(
label=u'Team Members',
widget=forms.Textarea, help_text=u'Add registered users as '
'collaborators for this app. One email per line.', required=False)
def clean_collaborators(self):
"""Validates the payload is a list of registered usernames."""
collaborators_raw = self.cleaned_data.get('collaborators')
member_list = []
if collaborators_raw:
collaborator_list = [c for c in collaborators_raw.splitlines() if c]
for collaborator in collaborator_list:
collaborator = collaborator.strip()
member = validate_member(collaborator)
member_list.append(member)
return member_list
class ActionClusterMembershipForm(forms.ModelForm):
class Meta:
model = ActionClusterMembership
fields = ('can_edit', )
ActionClusterMembershipFormSet = inlineformset_factory(
ActionCluster, ActionClusterMembership, extra=0, max_num=0,
form=ActionClusterMembershipForm)
| bsd-3-clause | -1,189,987,755,096,135,400 | 31.785714 | 80 | 0.657516 | false | 4.051192 | false | false | false |
veteman/thepython2blob | polynom.py | 1 | 12360 | #Copyright 2015 B. Johan G. Svensson
#Licensed under the terms of the MIT license (see LICENSE).
'''
#Generic methods and wrappers
removeunused() Polynom only
copy()
degree()
iszero()
neg() Polynom only
#getcoeff/setcoeff - normally only for internal use
getcoeff(n)
setcoeff(n, val)
#Four basic arithmetic operations:
add(other)
sub(other)
mul(other)
div(other) - returns (quotient, remainder)
#Special functions - external use only:
divprintstep(other) Polybinary only
divprintstep2(other) Polybinary only
getfibonaccileftshftpoly() Polybinary only
getfibonaccirightshftpoly() Polybinary only
getgaloisleftshftpoly() Polybinary only
getgaloisrightshftpoly() Polybinary only
#Display function used by __repren__(self)
repren()
'''
class Polynom(object):
#YOU are responsible that the coefficients work!
def __init__(self, inpt):
self.coeff = list(inpt)
self.removeunused()
########
#Generic methods and wrappers
def removeunused(self):
for i in xrange(len(self.coeff)-1,-1,-1):
if self.coeff[i] == 0:
self.coeff.pop()
else:
break
def copy(self):
return Polynom(self.coeff[:])
def degree(self):
self.removeunused()
deg = len(self.coeff) - 1
if deg >= 0:
return deg
else:
# Wikipedia says -Inf is 'convenient' http://en.wikipedia.org/wiki/Degree_of_a_polynomial#Degree_of_the_zero_polynomial
return -float('infinity')
def iszero(self):
for coeff in self.coeff:
if coeff != 0:
return False
return True
def neg(self):
return Polynom([0]).sub(self)
########
    #getcoeff/setcoeff - normally only used by add, sub, mul, div
def getcoeff(self,n):
try:
return self.coeff[n]
except IndexError:
return 0
def setcoeff(self,n, val):
try:
self.coeff[n] = val
except IndexError:
if val != 0:
self.coeff.extend([0] * (1 + n - len(self.coeff)))
self.coeff[n] = val
########
#Four basic arithmetic operations:
def add(self, other):
if isinstance(other, (int, long, float)):
other = Polynom([other])
maxdeg = max(self.degree(), other.degree())
newpoly = []
for i in xrange(maxdeg + 1):
newpoly.append(self.getcoeff(i)+other.getcoeff(i))
return Polynom(newpoly)
def sub(self, other):
if isinstance(other, (int, long, float)):
other = Polynom([other])
maxdeg = max(self.degree(), other.degree())
newpoly = []
for i in xrange(maxdeg + 1):
newpoly.append(self.getcoeff(i)-other.getcoeff(i))
return Polynom(newpoly)
def mul(self, other):
if isinstance(other, (int, long, float)):
other = Polynom([other])
newdegree = self.degree() + other.degree()
newpoly = [0]*(newdegree + 1)
for i in xrange(self.degree() + 1):
for j in xrange(other.degree() + 1):
newpoly[i+j] += self.getcoeff(i)*other.getcoeff(j)
return Polynom(newpoly)
def div(self, other):
if other.iszero():
raise ZeroDivisionError
q = Polynom([0])
r = self.copy()
d = other.copy()
while not(r.iszero()) and r.degree() >= d.degree():
deg = r.degree() - d.degree()
coef = r.getcoeff(r.degree())/d.getcoeff(d.degree())
tlst = [0] * (1 + deg)
tlst[-1] = coef
t = Polynom(tlst)
q += t
r -= t.mul(d)
return (q, r)
########
#Special functions - external use only:
def divprintstep(self, other):
if other.iszero():
raise ZeroDivisionError
q = Polynom([0])
r = self.copy()
d = other.copy()
print 'Printing long division:'
print '=',r
while not(r.iszero()) and r.degree() >= d.degree():
deg = r.degree() - d.degree()
coef = r.getcoeff(r.degree())/d.getcoeff(d.degree())
tlst = [0] * (1 + deg)
tlst[-1] = coef
t = Polynom(tlst)
q += t
r -= t.mul(d)
print '',t.mul(d), ' = ', t, ' * (', d, ')'
print '--------'
print '=',r
return (q, r)
########
#Display function used by __repr__(self)
def repren(self):
strng = ''
coefflst = []
for i in xrange(len(self.coeff)-1,-1,-1):
#
if self.coeff[i] == 0:
continue
#
if i == len(self.coeff) - 1 and self.coeff[i] >= 0:
strsign = ''
elif self.coeff[i] < 0:
strsign = '-'
else:
strsign = '+'
#
if i == 0 or abs(self.coeff[i]) != 1:
strcoeff = str(abs(self.coeff[i]))
else:
strcoeff = ''
#
if i == 0:
strx = ''
elif i == 1:
strx = 'x'
else:
strx = 'x**' + str(i)
if len(strng) > 0:
strng += ' '
if len(strsign) > 0:
strng += strsign + ' '
if len(strcoeff) > 0 and len(strx) > 0:
strng += strcoeff + ' * ' + strx
else:
strng += strcoeff + strx
if len(strng) == 0:
strng = '0'
return strng
################
####Operator methods
#Basic customization
def __repr__(self):
return self.repren()
## def __str__(self):
## return ''
## def __unicode__(self):
## return u''
## def __lt__(self, other):
## return NotImplemented
## def __le__(self, other):
## return NotImplemented
## def __eq__(self, other):
## return NotImplemented
## def __ne__(self, other):
## return NotImplemented
## def __gt__(self, other):
## return NotImplemented
## def __ge__(self, other):
## return NotImplemented
## def __hash__(self):
## return None
## def __nonzero__(self):
## return True
#Customizing attribute access
## def __getattr__(self, name):
## return NotImplemented
## def __setattr__(self, name, value):
## return NotImplemented
## def __delattr__(self, name):
## return NotImplemented
## def __getattribute__(self, name):
## return NotImplemented
## def __get__(self, instance, owner):
## return NotImplemented
## def __set__(self, instance, value):
## return NotImplemented
## def __delete__(self, instance):
## return NotImplemented
#Emulating callable objects
## def __call__(self, *args):
## pass
#Emulating container types
## def __len__(self):
## pass
## def __getitem__(self, key):
## pass
## def __setitem__(self, key, value):
## pass
## def __delitem__(self, key):
## pass
## def __iter__(self):
## pass
## def __reversed__(self):
## pass
## def __contains__(self, item):
## pass
#Additional methods for emulation of sequence types
#DEPRECATED since version 2.0 - __getitem__, __setitem__ and __delitem__ above
## def __getslice__(self, i, j):
## pass
## def __setslice__(self, i, j, sequence):
## pass
## def __delslice__(self, i, j):
## pass
#Emulating numeric types
def __add__(self, other):
return self.add(other)
def __sub__(self, other):
return self.sub(other)
def __mul__(self, other):
return self.mul(other)
def __floordiv__(self, other):
return self.div(other)[0]
def __mod__(self, other):
return self.div(other)[1]
def __divmod__(self, other):
return self.div(other)
## def __pow__(self, other, modulo = None):
## return NotImplemented
## def __lshift__(self, other):
## return NotImplemented
## def __rshift__(self, other):
## return NotImplemented
## def __and__(self, other):
## return NotImplemented
## def __xor__(self, other):
## return NotImplemented
## def __or__(self, other):
## return NotImplemented
def __div__(self, other):
return self.div(other)[0]
def __truediv__(self, other):
return self.div(other)[0]
def __radd__(self, other):
try:
return other.add(self)
except AttributeError:
return Polynom([other]).add(self)
def __rsub__(self, other):
try:
return other.sub(self)
except AttributeError:
return Polynom([other]).sub(self)
def __rmul__(self, other):
try:
return other.mul(self)
except AttributeError:
return Polynom([other]).mul(self)
def __rdiv__(self, other):
#other / self
try:
return other.div(self)[0]
except AttributeError:
return Polynom([other]).div(self)[0]
def __rtruediv__(self, other):
try:
return other.div(self)[0]
except AttributeError:
return Polynom([other]).div(self)[0]
def __rfloordiv__(self, other):
try:
return other.div(self)[0]
except AttributeError:
return Polynom([other]).div(self)[0]
def __rmod__(self, other):
try:
return other.div(self)[1]
except AttributeError:
return Polynom([other]).div(self)[1]
def __rdivmod__(self, other):
try:
return other.div(self)
except AttributeError:
return Polynom([other]).div(self)
## def __rpow__(self, other):
## return NotImplemented
## def __rlshift__(self, other):
## return NotImplemented
## def __rrshift__(self, other):
## return NotImplemented
## def __rand__(self, other):
## return NotImplemented
## def __rxor__(self, other):
## return NotImplemented
## def __ror__(self, other):
## return NotImplemented
#Correct behavour of i-methods - Modify self and return the result
#If not possible do not define these - will be handled automatically
## def __iadd__(self, other):
## return NotImplemented
## def __isub__(self, other):
## return NotImplemented
## def __imul__(self, other):
## return NotImplemented
## def __idiv__(self, other):
## return NotImplemented
## def __itruediv__(self, other):
## return NotImplemented
## def __ifloordiv__(self, other):
## return NotImplemented
## def __imod__(self, other):
## return NotImplemented
## def __ipow__(self, other, modulo = None):
## return NotImplemented
## def __ilshift__(self, other):
## return NotImplemented
## def __irshift__(self, other):
## return NotImplemented
## def __iand__(self, other):
## return NotImplemented
## def __ixor__(self, other):
## return NotImplemented
## def __ior__(self, other):
## return NotImplemented
def __neg__(self):
return Polynom([0]) - self
def __pos__(self):
return self.copy()
## def __abs__(self):
## pass
## def __invert__(self):
## pass
## def __complex__(self):
## pass
## def __int__(self):
## pass
## def __long__(self):
## pass
## def __float__(self):
## pass
## def __oct__(self):
## pass
## def __hex__(self):
## pass
## def __index__(self):
## pass
## def __coerce__(self, other):
## pass
#With Statement Context Managers
## def __enter__(self):
## pass
## def __exit__(self, exc_type, exc_value, traceback):
## pass
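# Illustrative, self-contained demo of the API listed in the module docstring.
# Not part of the original module; the coefficient lists are assumed examples.
if __name__ == '__main__':
    p = Polynom([1, 0, 2])     # 2 * x**2 + 1
    q = Polynom([1, 1])        # x + 1
    print p + q                # -> 2 * x**2 + x + 2
    print p * q                # product is again a Polynom
    print divmod(p, q)         # -> (quotient, remainder) pair of Polynom objects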
| mit | 6,761,289,637,767,737,000 | 29.611253 | 131 | 0.498463 | false | 3.693963 | false | false | false |
rackerlabs/django-DefectDojo | dojo/tools/nikto/parser.py | 2 | 5143 | __author__ = 'aaronweaver'
import re
from defusedxml import ElementTree as ET
import hashlib
from urllib.parse import urlparse
from dojo.models import Finding, Endpoint
class NiktoXMLParser(object):
def __init__(self, filename, test):
dupes = dict()
self.items = ()
if filename is None:
self.items = ()
return
tree = ET.parse(filename)
root = tree.getroot()
scan = root.find('scandetails')
# New versions of Nikto have a new file type (nxvmlversion="1.2") which adds an additional niktoscan tag
# This find statement below is to support new file format while not breaking older Nikto scan files versions.
if scan is None:
scan = root.find('./niktoscan/scandetails')
for item in scan.findall('item'):
# Title
titleText = None
description = item.find("description").text
# Cut the title down to the first sentence
sentences = re.split(
r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', description)
if len(sentences) > 0:
titleText = sentences[0][:900]
else:
titleText = description[:900]
# Url
ip = item.find("iplink").text
# Remove the port numbers for 80/443
ip = ip.replace(":80", "")
ip = ip.replace(":443", "")
# Severity
severity = "Info" # Nikto doesn't assign severity, default to Info
# Description
description = "\n \n".join((("Host: " + ip),
("Description: " + item.find("description").text),
("HTTP Method: " + item.attrib["method"]),
))
mitigation = "N/A"
impact = "N/A"
references = "N/A"
dupe_key = hashlib.md5(description.encode("utf-8")).hexdigest()
if dupe_key in dupes:
finding = dupes[dupe_key]
if finding.description:
finding.description = finding.description + "\nHost:" + ip + "\n" + description
self.process_endpoints(finding, ip)
dupes[dupe_key] = finding
else:
dupes[dupe_key] = True
finding = Finding(title=titleText,
test=test,
active=False,
verified=False,
description=description,
severity=severity,
numerical_severity=Finding.get_numerical_severity(
severity),
mitigation=mitigation,
impact=impact,
references=references,
url='N/A',
dynamic_finding=True)
dupes[dupe_key] = finding
self.process_endpoints(finding, ip)
self.items = list(dupes.values())
def process_endpoints(self, finding, host):
protocol = "http"
query = ""
fragment = ""
path = ""
url = urlparse(host)
if url:
path = url.path
rhost = re.search(
"(http|https|ftp)\://([a-zA-Z0-9\.\-]+(\:[a-zA-Z0-9\.&%\$\-]+)*@)*((25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])|localhost|([a-zA-Z0-9\-]+\.)*[a-zA-Z0-9\-]+\.(com|edu|gov|int|mil|net|org|biz|arpa|info|name|pro|aero|coop|museum|[a-zA-Z]{2}))[\:]*([0-9]+)*([/]*($|[a-zA-Z0-9\.\,\?\'\\\+&%\$#\=~_\-]+)).*?$",
host)
protocol = rhost.group(1)
host = rhost.group(4)
try:
            dupe_endpoint = Endpoint.objects.get(protocol=protocol,
                                                 host=host,
                                                 query=query,
                                                 fragment=fragment,
                                                 path=path,
                                                 product=finding.test.engagement.product)
except Endpoint.DoesNotExist:
dupe_endpoint = None
if not dupe_endpoint:
endpoint = Endpoint(protocol=protocol,
host=host,
query=query,
fragment=fragment,
path=path,
product=finding.test.engagement.product)
else:
endpoint = dupe_endpoint
if not dupe_endpoint:
endpoints = [endpoint]
else:
endpoints = [endpoint, dupe_endpoint]
finding.unsaved_endpoints = finding.unsaved_endpoints + endpoints
| bsd-3-clause | -7,994,457,130,637,868,000 | 38.561538 | 534 | 0.445849 | false | 4.036892 | false | false | false |
Alshain-Oy/Cloudsnake-Application-Server | code_examples/libHashRing_caps.py | 1 | 2782 | #!/usr/bin/env python
# Cloudsnake Application server
# Licensed under Apache License, see license.txt
# Author: Markus Gronholm <[email protected]> Alshain Oy
#import hashlib, bisect, copy
class HashRing( object ):
def __init__( self, replicas = 1, track_changes = False ):
self.replicas = replicas
self.ring = {}
self.keys = []
self.invert_ring = {}
self.accessed_keys = set()
self.key_mapping = {}
self.saved_mapping = {}
self.invalid_nodes = set()
self.track_changes = track_changes
def invalidate_node( self, node ):
self.invalid_nodes.add( node )
def validate_node( self, node ):
self.invalid_nodes.discard( node )
def get_invalid_keys( self ):
out = []
for node in self.invalid_nodes:
out.extend( self.invert_ring[ node ] )
return out
def save_state( self ):
for (key, item) in self.key_mapping.items():
self.saved_mapping[ key ] = item
def generate_key( self, key ):
return cloudSnake.modules.hashlib.md5( key ).hexdigest()
def compute_changes( self ):
self.compute_mapping()
changes = []
for key in self.accessed_keys:
if self.saved_mapping[ key ] != self.key_mapping[ key ]:
changes.append( ( key, self.saved_mapping[ key ], self.key_mapping[ key ] ) )
return changes
def add_node( self, node ):
self.invert_ring[ node ] = []
if self.track_changes:
self.save_state()
for i in range( self.replicas ):
key = self.generate_key( str( i ) + "+" + str( node ) )
self.ring[ key ] = node
cloudSnake.modules.bisect.insort( self.keys, key )
self.invert_ring[ node ].append( key )
if self.track_changes:
return self.compute_changes()
return True
def remove_node( self, node ):
if self.track_changes:
self.save_state()
keys = self.invert_ring[ node ]
for key in keys:
self.keys.remove( key )
del self.invert_ring[ node ]
if self.track_changes:
return self.compute_changes()
else:
return True
def _raw_get_node( self, key ):
pos = cloudSnake.modules.bisect.bisect_right( self.keys, key )
node_key = self.keys[ pos - 1 ]
return self.ring[ node_key ]
def get_node( self, skey ):
key = self.generate_key( skey )
self.accessed_keys.add( key )
        valid_keys = list( self.keys )  # copy so invalid keys are not removed from self.keys
invalid_keys = self.get_invalid_keys()
for ikey in invalid_keys:
valid_keys.remove( ikey )
pos = cloudSnake.modules.bisect.bisect_right( valid_keys, key )
node_key = valid_keys[ pos - 1 ]
if self.track_changes:
self.key_mapping[ key ] = self.ring[ node_key ]
return self.ring[ node_key ]
def get_keys_for_node( self, node ):
return self.invert_ring[ node ]
def compute_mapping( self ):
for key in self.accessed_keys:
self.key_mapping[ key ] = self._raw_get_node( key )
| apache-2.0 | 5,255,672,477,656,797,000 | 21.435484 | 81 | 0.646298 | false | 2.978587 | false | false | false |
alirizakeles/tendenci | tendenci/apps/site_settings/context_processors.py | 1 | 3303 | from django.core.cache import cache
from django.conf import settings as d_settings
from django.template import Context, Template, TemplateDoesNotExist
from django.template.loader import get_template
from tendenci import __version__ as version
from tendenci.apps.site_settings.models import Setting
from tendenci.apps.site_settings.cache import SETTING_PRE_KEY
def settings(request):
"""Context processor for settings
"""
key = [d_settings.CACHE_PRE_KEY, SETTING_PRE_KEY, 'all']
key = '.'.join(key)
settings = cache.get(key)
if not settings:
settings = Setting.objects.all()
is_set = cache.add(key, settings)
if not is_set:
cache.set(key, settings)
contexts = {}
for setting in settings:
context_key = [setting.scope, setting.scope_category,
setting.name]
context_key = '_'.join(context_key)
value = setting.get_value().strip()
if setting.data_type == 'boolean':
value = value[0].lower() == 't'
if setting.data_type == 'int':
if value.strip():
try: # catch possible errors when int() is called
value = int(value.strip())
except ValueError:
value = 0
else:
value = 0 # default to 0
# Handle context for the social_media addon's
# contact_message setting
if setting.name == 'contact_message':
page_url = request.build_absolute_uri()
message_context = {'page_url': page_url}
message_context = Context(message_context)
message_template = Template(value)
value = message_template.render(message_context)
contexts[context_key.upper()] = value
contexts['TENDENCI_VERSION'] = version
contexts['USE_I18N'] = d_settings.USE_I18N
contexts['LOGIN_URL'] = d_settings.LOGIN_URL
contexts['LOGOUT_URL'] = d_settings.LOGOUT_URL
return contexts
def app_dropdown(request):
"""
Context processor for getting the template
needed for a module setting dropdown
"""
context = {}
path = request.get_full_path().strip('/')
path = path.split('/')
if len(path) < 3:
context.update({'ADMIN_MENU_APP_TEMPLATE_DROPDOWN': 'site_settings/top_nav.html'})
else:
if path[0] == 'settings' and path[1] == 'module':
try:
get_template(path[2]+'/top_nav.html')
context.update({'ADMIN_MENU_APP_TEMPLATE_DROPDOWN': path[2]+'/top_nav.html'})
except TemplateDoesNotExist:
context.update({'ADMIN_MENU_APP_TEMPLATE_DROPDOWN': 'site_settings/top_nav.html'})
# special case profile setting as users
if path[2] == 'users':
context.update({'ADMIN_MENU_APP_TEMPLATE_DROPDOWN': 'profiles/top_nav.html'})
if path[2] == 'groups':
context.update({'ADMIN_MENU_APP_TEMPLATE_DROPDOWN': 'user_groups/top_nav.html'})
if path[2] == 'make_payment':
context.update({'ADMIN_MENU_APP_TEMPLATE_DROPDOWN': 'make_payments/top_nav.html'})
else:
context.update({'ADMIN_MENU_APP_TEMPLATE_DROPDOWN': 'site_settings/top_nav.html'})
return context
| gpl-3.0 | -9,005,293,677,698,286,000 | 33.768421 | 98 | 0.598244 | false | 3.979518 | false | false | false |
DantestyleXD/MVM5B_BOT | plugins/msg.py | 1 | 1336 | # -*- coding: utf-8 -*-
from config import *
print(Color(
'{autored}[{/red}{autoyellow}+{/yellow}{autored}]{/red} {autocyan} msg.py importado.{/cyan}'))
@bot.message_handler(commands=['msg'])
def command_msg(m):
cid = m.chat.id
uid = m.from_user.id
try:
send_udp('msg')
except Exception as e:
bot.send_message(52033876, send_exception(e), parse_mode="Markdown")
if not is_recent(m):
return None
if is_admin(uid):
if len(m.text.split(' ')) >= 3:
if isint(m.text.split(' ')[1]):
try:
bot.send_message(
m.text.split(' ')[1], ' '.join(
m.text.split(' ')[
2:]))
except:
bot.send_message(
cid, "Error. No se pudo enviar mensaje, quizá ya no es usuario.")
else:
bot.send_message(
cid, "Éxito. Mensaje enviado satisfactoriamente.")
else:
bot.send_message(
cid, "Error. Debes introducir un número como ID.")
else:
bot.send_message(
cid,
"Error. Debes introducir `/msg <ID> <Mensaje>`",
parse_mode="Markdown")
| gpl-2.0 | 4,006,866,989,434,939,000 | 32.325 | 99 | 0.458365 | false | 3.622283 | false | false | false |
mysociety/pombola | pombola/core/management/merge.py | 1 | 5929 | # This base class is to make it easier to write management commands
# for merging objects in Pombola (e.g. Person and Organisation at the
# moment).
from optparse import make_option
import sys
from django.contrib.contenttypes.models import ContentType
from django.core.management.base import BaseCommand, CommandError
from django.core.urlresolvers import reverse
from django.db import transaction
from slug_helpers.models import SlugRedirect
from images.models import Image
import pombola.core.models as core_models
def check_basic_fields(basic_fields, to_keep, to_delete):
"""Return False if any data might be lost on merging"""
safe_to_delete = True
for basic_field in basic_fields:
if basic_field == 'summary':
# We can't just check equality of summary fields because
# they are MarkupField fields which don't have equality
# helpfully defined (and they're always different objects
# between two different speakers), so instead, check for
# equality of the rendered content of the summary.
delete_value = to_delete.summary.rendered
keep_value = to_keep.summary.rendered
else:
delete_value = getattr(to_delete, basic_field)
keep_value = getattr(to_keep, basic_field)
if keep_value != delete_value:
# i.e. there's some data that might be lost:
safe_to_delete = False
message = "Mismatch in '%s': '%s' ({%d}) and '%s' (%d)"
print >> sys.stderr, message % (basic_field,
keep_value,
to_keep.id,
delete_value,
to_delete.id)
return safe_to_delete
class MergeCommandBase(BaseCommand):
option_list = BaseCommand.option_list + (
make_option("--keep-object", dest="keep_object", type="string",
help="The ID or slug of the object to retain",
metavar="OBJECT-ID"),
make_option("--delete-object", dest="delete_object", type="string",
help="The ID or slug of the object to delete",
metavar="OBJECT-ID"),
make_option('--noinput', dest='interactive',
action='store_false', default=True,
help="Do NOT prompt the user for input of any kind"),
make_option("--quiet", dest="quiet",
help="Suppress progress output",
default=False, action='store_true'))
admin_url_name = None
basic_fields_to_check = ()
model_class = None
def model_specific_merge(self, to_keep, to_delete, **options):
pass
def get_by_slug_or_id(self, identifier):
try:
return self.model_class.objects.get(slug=identifier)
# AttributeError catches the case where there is no slug field.
        except (self.model_class.DoesNotExist, AttributeError):
try:
object_id = int(identifier)
except ValueError:
                raise self.model_class.DoesNotExist(
                    "Object matching query does not exist."
                )
return self.model_class.objects.get(pk=object_id)
@transaction.atomic
def handle(self, *args, **options):
if not options['keep_object']:
raise CommandError("You must specify --keep-object")
if not options['delete_object']:
raise CommandError("You must specify --delete-object")
if args:
message = "Don't supply arguments, only --keep-object and --delete-object"
raise CommandError(message)
to_keep = self.get_by_slug_or_id(options['keep_object'])
to_delete = self.get_by_slug_or_id(options['delete_object'])
to_keep_admin_url = reverse(self.admin_url_name, args=(to_keep.id,))
if to_keep.id == to_delete.id:
raise CommandError("--keep-object and --delete-object are the same")
print "Going to keep:", to_keep, "with ID", to_keep.id
print "Going to delete:", to_delete, "with ID", to_delete.id
if options['interactive']:
answer = raw_input('Do you wish to continue? (y/[n]): ')
if answer != 'y':
raise CommandError("Command halted by user, no changes made")
if not check_basic_fields(
self.basic_fields_to_check,
to_keep,
to_delete,
):
raise CommandError("You must resolve differences in the above fields")
content_type = ContentType.objects.get_for_model(self.model_class)
self.model_specific_merge(to_keep, to_delete, **options)
# Replace the object on all models with generic foreign keys in core
for model in (core_models.Contact,
core_models.Identifier,
core_models.InformationSource):
model.objects.filter(content_type=content_type,
object_id=to_delete.id) \
.update(object_id=to_keep.id)
# Add any images for the object to delete as non-primary
# images for the object to keep:
Image.objects.filter(content_type=content_type,
object_id=to_delete.id) \
.update(is_primary=False,
object_id=to_keep.id)
# Make sure the old slug redirects to the object to keep:
SlugRedirect.objects.create(
new_object=to_keep,
old_object_slug=to_delete.slug,
)
# Finally delete the now unnecessary object:
to_delete.delete()
if not options['quiet']:
print "Now check the remaining object (", to_keep_admin_url, ")"
print "for any duplicate information."
| agpl-3.0 | -3,477,062,285,332,210,000 | 39.333333 | 86 | 0.580368 | false | 4.395107 | false | false | false |
leesdolphin/rentme | swagger/__main__.py | 1 | 22084 | import asyncio
import json
from pprint import pprint as pp
import re
from urllib.parse import urljoin
import aioutils.aiohttp
import aiohttp
import bs4
from bs4 import BeautifulSoup
import more_itertools
from aioutils.task_queues import SizeBoundedTaskList
from .parse_types import load_enum_into_item, parse_type_format
class TypesEncoder(json.JSONEncoder):
def default(self, o):
if hasattr(o, 'items'):
return dict(o)
else:
            return super().default(o)
error_definitions = {
'ErrorResult': {
'type': 'object',
'properties': {
'Request': {'type': 'string'},
'ErrorDescription': {'type': 'string'},
'Error': {'$ref': '#/definitions/Error'},
},
},
'Error': {
'type': 'object',
'properties': {
'Code': {'type': 'string'},
'UserDescription': {'type': 'string'},
'DeveloperDescription': {'type': 'string'},
'ErrorData': {
'type': 'array',
'items': {'$ref': '#/definitions/ErrorDataItem'},
},
},
},
'ErrorDataItem': {
'type': 'object',
'properties': {
'Name': {'type': 'string'},
'Value': {'type': 'string'},
},
},
}
standard_responses = {
'304': {
'description': 'Used with caching to indicate that the cached copy is'
' still valid.'
},
'400': {
'description': 'The request is believed to be invalid in some way. The'
' response body will contain an error message. You'
' should display the error message to the user.',
'schema': {'type': 'object', '$ref': '#/definitions/ErrorResult'},
},
'401': {
'description': 'An OAuth authentication failure occurred. You should'
' ask the user to log in again.',
'schema': {'type': 'object', '$ref': '#/definitions/ErrorResult'},
},
'429': {
'description': 'Your rate limit has been exceeded. Your rate limit'
' will reset at the start of the next hour. You should'
' not attempt to make any more calls until then.',
'schema': {'type': 'object', '$ref': '#/definitions/ErrorResult'},
},
'500': {
'description': 'A server error occurred. You should display a generic'
' “whoops” error message to the user.',
'schema': {'type': 'object', '$ref': '#/definitions/ErrorResult'},
},
'503': {
'description': 'Planned server maintenance is underway. General error'
' details and auction extension details are provided in'
' the response. You should consume this information to'
' inform the end user.',
'schema': {'type': 'object', '$ref': '#/definitions/ErrorResult'},
},
}
class DefinitionContainer():
def __init__(self):
self.definitions = {}
self.reverse = {}
def add_definition(self, prefered_name, definition):
if not prefered_name.isidentifier():
print("Invalid identifier {!r}".format(prefered_name))
rev_lookup = json.dumps(definition, indent=2, sort_keys=True, cls=TypesEncoder)
rev_names = self.reverse.setdefault(rev_lookup, [])
if prefered_name in rev_names:
return prefered_name
elif prefered_name not in self.definitions:
self.reverse[rev_lookup].append(prefered_name)
self.definitions[prefered_name] = definition
return prefered_name
attempts = 0
while attempts < 10:
new_name = prefered_name + str(attempts)
if new_name in rev_names:
return new_name
elif new_name not in self.definitions:
self.reverse[rev_lookup].append(new_name)
self.definitions[new_name] = definition
return new_name
attempts += 1
        raise Exception('Failed to generate unique name for'
                        ' model {}'.format(prefered_name))
def iter_heading_contents(children):
heading_tags = frozenset({'h1', 'h2', 'h3', 'h4'})
last_heading = None
last_table = None
last_paragraphs = []
expanded_children = []
for child in children:
if child.name == 'div':
div_children = child.contents
child_tag_names = {c.name for c in div_children}
if heading_tags & child_tag_names:
expanded_children += div_children
else:
expanded_children.append(child)
for child in expanded_children:
if child.name in heading_tags:
if last_paragraphs or last_table or last_heading:
yield last_heading, last_table, last_paragraphs
last_heading = child
last_paragraphs = []
last_table = None
elif not child.name:
last_paragraphs.append(child)
elif child.name == 'tr' and last_table:
last_table.append(child)
elif child.name == 'table':
last_table = child
elif child.find('table'):
last_table = child.find('table')
else:
last_paragraphs.append(child)
if last_paragraphs or last_table or last_heading:
yield last_heading, last_table, last_paragraphs
def safe_add(orig, *new):
orig = dict(orig)
for new_dict in new:
for key, value in dict(new_dict).items():
if key in orig:
if value != orig[key]:
print('Warning. Key already defined, ', key)
# from pprint import pformat
# import difflib
# print(''.join(difflib.ndiff(
# pformat(orig[key]).splitlines(keepends=True),
# pformat(value).splitlines(keepends=True),
# )))
else:
orig[key] = value
return orig
def definition_union(orig, *new):
out = dict(orig)
for new_dict in new:
for key, value in dict(new_dict).items():
if key not in out:
out[key] = value
else:
new_props = value['properties']
out_props = out[key]['properties']
out_props.update(new_props)
return out
def split_definition_paragraphs(paragraphs):
paragraphs = iter(paragraphs)
def_line = None
lines = []
for para in paragraphs:
if def_line is None:
ptext = text(para)
if ptext:
def_line = ptext
else:
lines.append(para)
assert def_line
return def_line, paragraphs_to_markdown(*lines)
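# Illustrative behaviour (p() stands for a hypothetical bs4 <p> element): the first
# paragraph with visible text becomes the definition line, the rest the description.
#   split_definition_paragraphs([p('<Member>'), p('Basic member profile data.')])
#   -> ('<Member>', 'Basic member profile data.')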
def paragraphs_to_markdown(*paras, indent=0):
paragraphs = []
for item in paras:
if item.name in ['ul', 'ol']:
lst = []
prefix = ' - ' if item.name == 'ul' else '1. '
for li in item.children:
if li.name == 'li':
lst.append(prefix + paragraphs_to_markdown(
li, indent=indent + 3))
paragraphs.append('\n'.join(lst))
        elif item.name is None or not item.find(['ul', 'ol']):
paragraphs.append(text(item))
else:
paragraphs.append(paragraphs_to_markdown(
*item.children, indent=indent))
paragraphs = filter(lambda s: s.strip(), paragraphs)
if indent != 0:
new_paras = []
i_chars = ' ' * indent
for para in paragraphs:
para = '\n'.join(i_chars + line for line in para.splitlines())
new_paras.append(para)
paragraphs = new_paras
return '\n\n'.join(paragraphs)
def text(*elms, one_line=True, strip=True, sep=' '):
    """Flatten bs4 elements/strings into plain text, dropping comments and
    <script>/<style> content and normalising runs of whitespace."""
text_elms = []
for elm in elms:
if elm.name is None:
child_elms = [elm]
else:
child_elms = elm.children
for e in child_elms:
if isinstance(e, bs4.NavigableString):
if isinstance(e, bs4.Comment):
continue
txt = str(e)
txt = re.sub(r'[ \n\t]+', ' ', txt)
text_elms.append(txt)
elif e.name == 'br':
text_elms.append(' ' if one_line else '\n')
elif e.name not in ['script', 'style']:
text_elms.append(text(e, one_line=one_line, strip=False))
text_elms.append(sep)
t = ''.join(text_elms)
t = re.sub(r'[ ]+', ' ', t)
if not one_line:
t = re.sub(r'[ ]*\n[ ]*', '\n', t)
if strip:
t = t.strip()
return t
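# Rough examples (assuming bs4-parsed fragments):
#   text(BeautifulSoup('<td>Rate <b>limit</b></td>', 'html.parser'))  # -> 'Rate limit'
#   <br> becomes a space with one_line=True (the default), a newline otherwise.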
async def generate_swagger_from_docs(session, url, definitions):
KNOWN_BAD_HEADINGS = {
'Request Builder',
'Request',
'Response',
'Examples',
'Example XML Request (switch to JSON)',
'Example JSON Request (switch to XML)',
'Example XML Response (switch to JSON)',
'Example JSON Response (switch to XML)',
}
txt = None
while txt is None:
try:
async with session.get(url) as o:
txt = await o.text()
except aiohttp.ServerDisconnectedError:
txt = None
print('Server disconnect for', url)
continue
txt = re.sub(r"""</table>
</td>
</tr>
<p>\[/tm_private\]\s*</.*>(\n\s*</table>)?""", '<!-- [/tm_private] -->', txt)
soup = BeautifulSoup(txt, 'html.parser')
for selector in ['.site-tools', '.primary-tools', '.crumbs', '.sprite', '.site-footer', '.hide', '.site-header', '.xml-message', '.json-message', '#requestBuilderForm', '#responseBody']:
for tag in soup.select(selector):
tag.decompose()
for tag_name in ['script', 'link', 'meta', 'style', 'pre']:
for tag in soup.find_all(tag_name):
tag.decompose()
txt = soup.prettify()
txt = re.sub(r""" </div>
</div>
</div>
</div>""", '', txt)
txt = re.sub(r'</(body|html)>', '', txt)
soup = BeautifulSoup(txt, 'html.parser')
# u = url.replace('https://developer.trademe.co.nz/api-reference/', '').replace('api-index/', '').replace('/', '-')
content = soup.select('div.generated-content', limit=1)[0]
content_iter = iter(iter_heading_contents(content.children))
path = {
'externalDocs': {
'description': 'Original TradeMe Documentation',
'url': url
},
}
params = []
metadata = None
response = None
for heading, table, paragraphs in content_iter:
if heading is None:
metadata = parse_metadata(table)
path['produces'] = convert_supported_formats_to_mime(
metadata['Supported Formats'])
path['description'] = paragraphs_to_markdown(*paragraphs)
continue
heading_text = text(heading)
if heading_text in ['URL parameters', 'Query String parameters']:
if heading_text == 'URL parameters':
in_type = 'path'
elif heading_text == 'Query String parameters':
in_type = 'query'
else:
                raise Exception('Unknown Heading')
params += parse_params(in_type, table)
elif heading_text in ['POST Data', 'Returns']:
name, desc = split_definition_paragraphs(paragraphs)
dfn_obj = parse_type_format(name)
dfn_ref = get_refname(dfn_obj)
if dfn_ref:
dfn_obj = parse_response(dfn_obj, desc, definitions, table=table)
else:
dfn_obj['description'] = desc
if heading_text == 'POST Data':
params += [{
'in': 'body',
'schema': dfn_obj,
}]
elif heading_text == 'Returns':
response = {
'description': desc,
'schema': dfn_obj,
}
else:
                raise Exception('Unknown Heading')
elif heading_text in KNOWN_BAD_HEADINGS:
continue
else:
print(heading_text)
raise Exception()
path['responses'] = safe_add({
'200': response,
}, standard_responses)
return {
metadata['URL'].replace('https://api.trademe.co.nz/v1', ''): {
metadata['HTTP Method'].lower(): path,
'parameters': params,
}
}
def convert_supported_formats_to_mime(supported_formats):
formats = map(str.strip, supported_formats.split(','))
format_mapping = {
'JSON': 'application/json',
'XML': 'text/xml'
}
mime_types = []
for fmt in formats:
if fmt in format_mapping:
mime_types.append(format_mapping[fmt])
elif fmt.upper() in format_mapping:
mime_types.append(format_mapping[fmt.upper()])
else:
            raise ValueError('Unsupported format: ' + fmt)
return mime_types
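# For reference: convert_supported_formats_to_mime('JSON, XML')
# returns ['application/json', 'text/xml'].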
def parse_metadata(table):
data = {}
for row in table.find_all('tr'):
key = text(row.find('th'))
value = text(row.find('td'))
if key.endswith('?'):
value = (value == 'Yes')
key = key[:-1]
data[key] = value
return data
def parse_params(in_type, table):
table_iter = iter(iter_parse_nested_table(table))
params = []
for t, key, value, desc in table_iter:
if t != 'kv':
print('NOTKV', t, key, value, desc)
raise Exception('not kv')
data = parse_type_format(value)
data['name'] = key
data['description'] = desc
if in_type:
data['in'] = in_type
if 'enum' in data:
enum_row = next(table_iter)
data = load_enum_into_item(enum_row, data)
if '$ref' in data:
print('Unsupported type', data['$ref'])
raise Exception()
params.append(data)
return params
def get_refname(data):
try:
return data.ref_name
except AttributeError:
return None
def parse_response(dfn_obj, docs, definitions, *, table=None, table_iter=None):
    """Recursively convert a (possibly nested) response/POST-data table into a
    swagger schema, registering object definitions and returning dfn_obj."""
if table_iter is None:
assert table is not None
table_iter = iter(iter_parse_nested_table(table))
else:
assert table is None
table_iter = more_itertools.peekable(table_iter)
this_dfn = {}
for t, key, value, desc in table_iter:
if t != 'kv':
print('NOTKV', t, key, value, desc)
print(this_dfn)
            raise Exception('Not KV')
data = parse_type_format(value)
ref_name = get_refname(data)
data['description'] = desc
if 'enum' in data:
enum_row = next(table_iter)
data = load_enum_into_item(enum_row, data)
if 'enum' in data.get('items', []):
enum_row = next(table_iter)
data['items'] = load_enum_into_item(enum_row, data['items'])
elif ref_name:
if table_iter.peek([None])[0] == 'nested':
t, _, values, _ = next(table_iter)
if values is not None:
data = parse_response(data, desc, definitions, table_iter=values)
else:
print('xx', table_iter.peek([None]))
this_dfn[key] = data
dfn_obj.ref_name = definitions.add_definition(get_refname(dfn_obj), {
'type': 'object',
'properties': this_dfn,
})
return dfn_obj
def iter_parse_nested_table(table):
    """Yield ('kv', key, value, description), ('nested', ...) or ('enum', ...)
    tuples for each row of a documentation table."""
for row in filter(lambda e: e.name == 'tr', table.children):
td = row.find('td')
next_td = td.find_next_sibling('td') if td else None
if not next_td:
if td.find('table'):
yield ('nested', None,
iter_parse_nested_table(td.find('table')), None)
else:
assert text(td) == '(This type has already been defined)'
yield ('nested', None, None, None)
elif 'colspan' in next_td.attrs:
yield ('enum', None, parse_enum_table(next_td.find('table')), None)
elif row.find('th'):
key = text(row.find('th'))
value = text(td)
description = text(next_td)
yield ('kv', key, value, description)
else:
raise Exception()
def parse_enum_table(table):
return list(iter_parse_enum_table(table))
def iter_parse_enum_table(table):
enum_values = set()
for row in table.find_all('tr'):
tds = row.find_all('td')
if len(tds) == 2:
name = text(tds[0])
value = None
description = text(tds[1])
ev = name
elif len(tds) == 3:
name = text(tds[0])
value = text(tds[1])
description = text(tds[2])
ev = value
else:
continue
if ev not in enum_values:
enum_values.add(ev)
yield (name, value, description)
async def iter_api_index(session):
url = 'https://developer.trademe.co.nz/api-reference/api-index/'
async with session.get(url) as o:
soup = BeautifulSoup(await o.text(), 'lxml')
x = []
for item in soup.select('.content tr'):
if '(deprecated)' in text(item):
continue
link = item.find('a')
if link and 'href' in link.attrs:
href = urljoin(url, link.attrs['href'])
if '/api-reference/' in href:
x.append(href)
return x
async def iter_api_methods_page(session, url):
if not url.startswith('http'):
url = 'https://developer.trademe.co.nz/api-reference/' + url + '/'
async with session.get(url) as o:
soup = BeautifulSoup(await o.text(), 'lxml')
x = []
for item in soup.select('div.generated-content li'):
if '(deprecated)' in text(item):
continue
link = item.find('a')
if link and 'href' in link.attrs:
href = urljoin(url, link.attrs['href'])
if '/api-reference/' in href:
x.append(href)
return x
async def download_swagger_for_urls(session, urls, definitions=None):
if not definitions:
definitions = DefinitionContainer()
paths = {}
urls = sorted(set(urls))
async with SizeBoundedTaskList(5) as tl:
for url in urls:
await tl.add_task(generate_swagger_from_docs(
session,
url,
definitions
))
for doc_task in tl.as_completed():
gen_path = await doc_task
# TODO: union paths taking into account the http method and url.
paths = safe_add(paths, gen_path)
return paths, definitions.definitions
async def main():
paths = {}
async with aioutils.aiohttp.CachingClientSession(
cache_strategy=aioutils.aiohttp.OnDiskCachingStrategy(
cache_folder='./.cache')
) as session:
# urls = await iter_api_index(session)
# paths, defs = await download_swagger_for_urls(session, [
# 'https://developer.trademe.co.nz/api-reference/listing-methods/retrieve-the-details-of-a-single-listing/',
# ])
paths, defs = await download_swagger_for_urls(session, [
'https://developer.trademe.co.nz/api-reference/membership-methods/retrieve-member-profile/',
# 'https://developer.trademe.co.nz/api-reference/search-methods/rental-search/',
# 'https://developer.trademe.co.nz/api-reference/search-methods/flatmate-search/',
])
# _, extra_defs = await download_swagger_for_urls(session, ['https://developer.trademe.co.nz/api-reference/selling-methods/edit-an-item/'])
for existing in (
# 'Address',
# 'Agency',
# 'Agent',
# 'Attribute',
# 'AttributeOption',
# 'AttributeRange',
# 'Bid',
# 'BidCollection',
# 'Branding',
# 'BroadbandTechnology',
# 'Charity',
# 'ContactDetails',
# 'CurrentShippingPromotion',
# 'Dealer',
# 'DealerShowroom',
# 'DealershipPhoneNumbers',
# 'Dealership',
# 'DealershipListingCounts',
# 'EmbeddedContent',
# 'FixedPriceOfferDetails',
# 'FixedPriceOfferRecipient',
# 'GeographicLocation',
# 'LargeBannerImage',
# 'ListedItemDetail',
# 'Member',
# 'MemberRequestInformation',
# 'MotorWebBasicReport',
# 'OpenHome',
# 'Option',
# 'OptionSetValues',
# 'OptionSet',
# 'Photo',
# 'PhotoUrl',
# 'Question',
# 'Questions',
# 'RefundDetails',
# 'Sale',
# 'ShippingOption',
# 'SimpleMemberProfile',
# 'SponsorLink',
# 'Store',
# 'StorePromotion',
'Variant',
# 'VariantDefinition',
):
defs.pop(existing, None)
print(list(defs))
swagger = {
'swagger': '2.0',
'info': {
'title': 'TradeMe API',
'version': '0.0',
},
'schemes': ['https'],
'host': 'api.trademe.co.nz',
'basePath': '/v1/',
'paths': paths,
'definitions': defs,
}
with open('swagger.json', 'w') as f:
json.dump(swagger, f, sort_keys=True, indent=2, cls=TypesEncoder)
with open('swagger.json') as f:
names = set()
for line in f:
if '#/definitions/' in line:
pos = line.index('#/definitions/') + len('#/definitions/')
name = line[pos:-3]
names.add(name)
for name in sorted(names):
if name not in defs:
print(name)
if __name__ == '__main__':
asyncio.get_event_loop().run_until_complete(main())
| agpl-3.0 | 2,496,726,057,175,298,600 | 32.865031 | 190 | 0.526404 | false | 3.992767 | false | false | false |
boudewijnrempt/HyvesDesktop | 3rdparty/socorro/scripts/config/commonconfig.py | 1 | 1521 | import socorro.lib.ConfigurationManager as cm
import datetime
databaseHost = cm.Option()
databaseHost.doc = 'the hostname of the database servers'
databaseHost.default = '127.0.0.1'
databaseName = cm.Option()
databaseName.doc = 'the name of the database within the server'
databaseName.default = 'socorro'
databaseUserName = cm.Option()
databaseUserName.doc = 'the user name for the database servers'
databaseUserName.default = 'socorro'
databasePassword = cm.Option()
databasePassword.doc = 'the password for the database user'
databasePassword.default = 'socorro'
storageRoot = cm.Option()
storageRoot.doc = 'the root of the file system where dumps are found'
storageRoot.default = '/var/socorro/toBeProcessed/'
deferredStorageRoot = cm.Option()
deferredStorageRoot.doc = 'the root of the file system where dumps are found'
deferredStorageRoot.default = '/var/socorro/toBeDeferred/'
dumpDirPrefix = cm.Option()
dumpDirPrefix.doc = 'dump directory names begin with this prefix'
dumpDirPrefix.default = 'bp_'
jsonFileSuffix = cm.Option()
jsonFileSuffix.doc = 'the suffix used to identify a json file'
jsonFileSuffix.default = '.json'
dumpFileSuffix = cm.Option()
dumpFileSuffix.doc = 'the suffix used to identify a dump file'
dumpFileSuffix.default = '.dump'
processorCheckInTime = cm.Option()
processorCheckInTime.doc = 'the time after which a processor is considered dead (HH:MM:SS)'
processorCheckInTime.default = "00:05:00"
processorCheckInTime.fromStringConverter = lambda x: str(cm.timeDeltaConverter(x))
| gpl-2.0 | -4,978,995,620,722,430,000 | 34.372093 | 91 | 0.78238 | false | 3.504608 | false | false | false |
andarms/pyweek22 | bulletml/collision.py | 1 | 3119 | """Simple collision check.
This module provides simple collision checking appropriate for
shmups. It provides routines to check whether two moving circles
collided during the past frame.
An equivalent C-based version will be used automatically if it was
compiled and installed with the module. If available, it will be noted
in the docstrings for the functions.
Basic Usage:
from bulletml.collision import collides
for bullet in bullets:
if collides(player, bullet): ... # Kill the player.
"""
from __future__ import division
def overlaps(a, b):
"""Return true if two circles are overlapping.
Usually, you'll want to use the 'collides' method instead, but
this one can be useful for just checking to see if the player has
entered an area or hit a stationary oject.
(This function is unoptimized.)
"""
dx = a.x - b.x
dy = a.y - b.y
try:
radius = a.radius + b.radius
except AttributeError:
radius = getattr(a, 'radius', 0.5) + getattr(b, 'radius', 0.5)
return dx * dx + dy * dy <= radius * radius
def collides(a, b):
"""Return true if the two moving circles collide.
a and b should have the following attributes:
x, y - required, current position
px, py - not required, defaults to x, y, previous frame position
radius - not required, defaults to 0.5
(This function is unoptimized.)
"""
# Current locations.
xa = a.x
xb = b.x
ya = a.y
yb = b.y
# Treat b as a point, we only need one radius.
try:
radius = a.radius + b.radius
except AttributeError:
radius = getattr(a, 'radius', 0.5) + getattr(b, 'radius', 0.5)
# Previous frame locations.
    try: pxa = a.px
    except AttributeError: pxa = xa
    try: pya = a.py
    except AttributeError: pya = ya
    try: pxb = b.px
    except AttributeError: pxb = xb
    try: pyb = b.py
    except AttributeError: pyb = yb
# Translate b's final position to be relative to a's start.
# And now, circle/line collision.
dir_x = pxa + (xb - xa) - pxb
dir_y = pya + (yb - ya) - pyb
diff_x = pxa - pxb
diff_y = pya - pyb
if (dir_x < 0.0001 and dir_x > -0.0001
and dir_y < 0.0001 and dir_y > -0.0001):
# b did not move relative to a, so do point/circle.
return diff_x * diff_x + diff_y * diff_y < radius * radius
# dot(diff, dir) / dot(dir, dir)
t = (diff_x * dir_x + diff_y * dir_y) / (dir_x * dir_x + dir_y * dir_y)
if t < 0:
t = 0
elif t > 1:
t = 1
dist_x = pxa - (pxb + dir_x * t)
dist_y = pya - (pyb + dir_y * t)
# dist_sq < radius_sq
return dist_x * dist_x + dist_y * dist_y <= radius * radius
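# Minimal usage sketch (illustrative; any objects carrying the attributes above work):
#   class _Circle(object):
#       def __init__(self, x, y, radius=0.5):
#           self.x, self.y, self.radius = x, y, radius
#   collides(_Circle(0, 0, 1.0), _Circle(0.4, 0.3))  # True  - centres 0.5 apart, radii sum 1.5
#   collides(_Circle(0, 0, 1.0), _Circle(3, 4))      # False - centres 5 apart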
def collides_all(a, others):
"""Filter the second argument to those that collide with the first.
This is equivalent to filter(lambda o: collides(a, o), others),
but is much faster when the compiled extension is available (which
it is not currently).
"""
return filter(lambda o: collides(a, o), others)
try:
from bulletml._collision import collides, overlaps, collides_all
except ImportError:
pass
| mit | -1,800,408,118,301,877,000 | 26.848214 | 75 | 0.621994 | false | 3.34298 | false | false | false |
linuxwhatelse/pyloader | tests/test_dlable.py | 1 | 1916 | from context import pyloader
import os
import json
import unittest
_current = os.path.dirname(os.path.abspath(__file__))
paths = {
'writable': os.path.join(_current, 'downloads', 'write_access'),
'not_writable': os.path.join(_current, 'downloads', 'no_write_access')
}
class TestDLable(unittest.TestCase):
def test_access_writable(self):
try:
pyloader.DLable(
'http://url.doesnt.matter',
paths['writable']
)
self.assertTrue(True)
except IOError:
self.assertTrue(False)
def test_access_writable_none_existant(self):
try:
pyloader.DLable(
'http://url.doesnt.matter',
os.path.join(paths['writable'], 'sub')
)
self.assertTrue(True)
except IOError:
self.assertTrue(False)
def test_access_not_writeable(self):
self.assertRaises(IOError,
pyloader.DLable,
'http://url.doesnt.matter',
paths['not_writable'])
def test_serialize_proper(self):
item = pyloader.DLable(
'http://url.doesnt.matter',
paths['writable']
)
try:
data = item.to_json()
pyloader.DLable.from_json(data)
self.assertTrue(True)
except Exception:
self.assertTrue(False)
def test_serialize_missing_required(self):
item = pyloader.DLable(
'http://url.doesnt.matter',
paths['writable']
)
data = item.to_json()
# Remove a required argument
data = json.loads(data)
del data['target_dir']
data = json.dumps(data)
self.assertRaises(TypeError,
pyloader.DLable.from_json, data)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 8,621,170,405,861,588,000 | 23.564103 | 74 | 0.529749 | false | 4.138229 | true | false | false |
ingfy/FisherSimulation | gui/worldmap.py | 1 | 7210 | import wx
import math
from BufferedCanvas import BufferedCanvas
def gradient(min_color, max_color, fraction):
return tuple(a + (b - a) * fraction for a, b in zip(min_color, max_color))
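# For reference: gradient((0, 128, 0), (128, 0, 0), 0.5) == (64.0, 64.0, 0.0)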
class WorldMap(BufferedCanvas):
min_vote_color = (0, 128, 0)
max_vote_color = (128, 0, 0)
def __init__(self, parent):
self._size = (600, 600)
self._data = None
BufferedCanvas.__init__(self, parent, size = self._size)
self.SetBackgroundColour("white")
self._agent_colors = {}
self._complaints = []
self.num_max_complaints = 10
self.BufferBmp = None
self.update()
def set_map(self, map):
self._data = {"map": map}
def reset_votes(self):
self._agent_colors = {}
self._complaints = []
def add_votes(self, complaints):
self._complaints = complaints
votes_by_agent = {}
for v in self._complaints:
if not v.agent_id in votes_by_agent:
votes_by_agent[v.agent_id] = 0
votes_by_agent[v.agent_id] += 1
for a in votes_by_agent:
self._agent_colors[a] = gradient(
WorldMap.min_vote_color,
WorldMap.max_vote_color,
float(votes_by_agent[a]) / self.num_max_complaints
)
def draw(self, dc):
dc.Clear()
dc.SetBackground(wx.Brush(wx.Colour(255, 255, 255), wx.SOLID))
if self._data is None: return
try:
map = self._data["map"]
num_hor, num_ver = (len(map.grid), len(map.grid[0]))
w, h = self.GetSize()
cell_w, cell_h = (float(w) / num_hor, float(h) / num_hor)
grid_color = wx.Colour(0, 0, 0)
dc.SetPen(wx.Pen(grid_color, 1))
# Draw grid
## Horizontal lines
for x in xrange(num_hor + 1):
dc.DrawLine(cell_w * x, 0, cell_w * x, h)
## Vertical lines
for y in xrange(num_ver + 1):
dc.DrawLine(0, cell_h * y, w, cell_h * y)
fish_color = wx.Colour(0, 0, 255)
fish_pen = wx.Pen(fish_color, 1)
fish_brush = wx.Brush(fish_color, wx.SOLID)
boat_color = wx.Colour(100, 100, 100)
aquaculture_border_color = wx.Colour(0, 0, 0)
aquaculture_fill_color = wx.Colour(200, 200, 200)
aquaculture_pen = wx.Pen(aquaculture_border_color, 1)
aquaculture_brush = wx.Brush(aquaculture_fill_color, wx.SOLID)
land_color = wx.Colour(0, 255, 0)
            land_pen = wx.Pen(land_color, 1)
land_brush = wx.Brush(land_color, wx.SOLID)
blocked_color = wx.Colour(255, 0, 0)
# Draw entities
for i in xrange(num_hor):
for j in xrange(num_ver):
x, y = (cell_w * i, cell_h * j)
#if map.grid[i][j].spawning:
draw_fish_top_left(dc, map.grid[i][j].quality, x, y,
cell_w, cell_h, fish_pen, fish_brush)
if map.grid[i][j].blocked:
draw_blocked(dc, x, y, cell_w, cell_h, blocked_color)
if map.grid[i][j].fisherman:
color = next((self._agent_colors[e.id] for e in
map.grid[i][j].fishermen if e.id in
self._agent_colors), WorldMap.min_vote_color)
draw_boat_bottom_right(dc, x, y, cell_w, cell_h,
color, map.grid[i][j].num_fishermen)
if map.grid[i][j].aquaculture:
draw_aquaculture_center(dc, x + cell_w / 2,
y + cell_h / 2, cell_w, cell_h, aquaculture_pen,
aquaculture_brush)
if map.grid[i][j].land:
draw_land(dc, x, y, cell_w, cell_h, land_pen,
land_brush)
return True
except Exception, e:
print e
return False
def draw_blocked(dc, x, y, cell_w, cell_h, color):
dc.SetPen(wx.Pen(color, 2))
dc.DrawLine(x, y, x + cell_w, y + cell_h)
dc.DrawLine(x + cell_w, y, x, y + cell_h)
def draw_land(dc, x, y, cell_w, cell_h, p, b):
dc.SetPen(p)
dc.SetBrush(b)
dc.DrawRectangle(x, y, cell_w, cell_h)
def draw_aquaculture_center(dc, x, y, cell_w, cell_h, p, b):
scale = min(cell_w, cell_h)
corners = 10
dc.SetPen(p)
dc.SetBrush(b)
points = [wx.Point(
x + scale / 2 * math.sin(2 * math.pi * p / corners),
y + scale / 2 * math.cos(2 * math.pi * p / corners)
) for p in xrange(corners)]
dc.DrawPolygon(points)
def draw_boat_center(dc, x, y, cell_w, cell_h, color, num):
scale = min(cell_w, cell_h)
dc.SetPen(wx.Pen(color, 1))
dc.SetBrush(wx.Brush(color, wx.SOLID))
# Draw boat bottom
dc.DrawArc(x - scale / 3, y, x + scale / 3, y, x, y)
# Draw sail
dc.DrawPolygon([wx.Point(x - scale / 4, y - scale / 8),
wx.Point(x + scale / 4, y - scale / 8),
wx.Point(x, y - scale / 8 - scale / 4)])
# Draw mast
dc.DrawLine(x, y, x, y - scale / 8)
if num > 1:
dc.SetFont(wx.Font(
pointSize=scale/3,
family=wx.FONTFAMILY_DEFAULT,
style=wx.FONTSTYLE_NORMAL,
weight=wx.FONTWEIGHT_BOLD))
dc.SetTextForeground(wx.Colour(255, 255, 125))
text = str(num)
tw, th = dc.GetTextExtent(text)
dc.DrawText(text, (x - tw / 2),
(y + scale / 6 - th / 2))
def draw_boat_bottom_right(dc, x, y , cell_w, cell_h, color, num):
scale = min(cell_w, cell_h)
ox = cell_w - scale / 3
oy = cell_h - scale / 8 - cell_h / 4
draw_boat_center(dc, ox + x, oy + y, cell_w, cell_h, color, num)
def draw_fish_center(dc, size, x, y, cell_w, cell_h, p, b):
scale = min(cell_w, cell_h) * size
dc.SetPen(p)
dc.SetBrush(b)
# Draw body
dc.DrawArc(x - scale / 3, y, x + scale / 3, y, x, y - scale / 6)
dc.DrawArc(x + scale / 3, y, x - scale / 3, y, x, y + scale / 6)
## right tip is located at (x + cell_w / 3, y)
# Draw tail
dc.DrawPolygon([wx.Point(x + scale / 3 + scale / 5, y - scale / 5),
wx.Point(x + scale / 3, y),
wx.Point(x + scale / 3 + scale / 5, y + scale / 5)])
def draw_fish_top_left(dc, size, x, y, cell_w, cell_h, p, b): # Offset from top left corner
scale = min(cell_w, cell_h)
ox = scale / 3
oy = scale / 5
draw_fish_center(dc, size, ox + x, oy + y, cell_w, cell_h, p, b) | mit | -2,952,585,843,266,592,300 | 36.983784 | 92 | 0.468932 | false | 3.234634 | false | false | false |
rrader/cdr-tools | generator/cdrgen/test.py | 1 | 8689 | import csv
from io import StringIO
import numpy as np
from sklearn.cluster import MiniBatchKMeans, KMeans
from cdrgen.generate import CDRStream
from cdrgen.sources import UniformSource, UserProfileSource, UserProfile, UserProfileChangeBehaviorSource
from cdrgen.utils import asterisk_like, csv_to_cdr, time_of_day, day_of_week, window, grouper, RATES_1,\
it_merge, RATES_2, poisson_interval, moving_average_exponential, RATES_1m
import matplotlib.pyplot as plt
def test(source):
s = CDRStream(asterisk_like, source)
s.start()
hours = np.zeros(24)
l = []
days = 1
prev = 0
pattern = None
for st in s:
cdr = csv_to_cdr(list(csv.reader(StringIO(st), delimiter=','))[0])
if time_of_day(cdr.start) < prev:
days += 1
if days > 14 and pattern is None:
pattern = hours/days
hours = np.zeros(24)
if days > 14 and days%7 == 0:
print(np.linalg.norm(hours/(days-14) - pattern))
h = time_of_day(cdr.start)//60//60
hours[h] += 1
prev = time_of_day(cdr.start)
#l.append(h)
fingerprint = hours/days
print(fingerprint)
users = {}
# values needed to recalculate in real time to
# minimize all values: alarms rate, history length ALPHA and ALARM_THRESHOLD
ALARM_THRESHOLD = 1. # multiply limits
ALPHA_FREQ = 0.8 # mean multipler
ALPHA_WEEKS = 0.8
HISTORY = 2 # in weeks
CURRENT_WINDOW = 5 # to approximate current frequency
#=====
MIN_THRESHOLD = 9.e-6
APPROX_WINDOW = 1 # to approximate weekly frequency
TIME_DISCRETIZATION = 60*60
alarms = 0
class Pattern(object):
    """Per-user 24x7 calling-frequency pattern built from a short history of weeks."""
    def __init__(self, user):
self.src = user
self.data = np.zeros(shape=(HISTORY, 7, 24)) # patterns 24x7 (history and one current)
self.current = np.zeros(CURRENT_WINDOW)
self.week_history = np.zeros(shape=(7, (24*60*60)//(TIME_DISCRETIZATION//APPROX_WINDOW)))
self.last_day_of_week = 0
self.weeks = 0
self.class_num = None
def extract_week_history(self):
return self.week_history
def maintain(self, cdr):
"""
Maintaining should be continuous
Calls have to be sorted by cdr.start time
"""
time = time_of_day(cdr.start)//(TIME_DISCRETIZATION//APPROX_WINDOW)
day = day_of_week(cdr.start)
if self.last_day_of_week != day and day == 0: # week switched
self.data = np.roll(self.data, 1, axis=0)
self.data[0] = self.extract_week_history()
self.week_history = np.zeros(shape=(7, (24*60*60)//(TIME_DISCRETIZATION//APPROX_WINDOW)))
self.weeks += 1
self.last_day_of_week = day
self.current = np.roll(self.current, 1) # for instantaneous frequency
self.current[0] = cdr.start
# new freq calc
current = np.roll(self.current, 1)
current[0] = cdr.start
diffs = np.array([e[0]-e[1] for e in zip(current, current[1:])])
current_freq = (60*60)/moving_average_exponential(diffs, ALPHA_FREQ)
self.week_history[day, time] = max(self.week_history[day, time], current_freq)
def is_conform(self, cdr):
# FIXME: pattern should no check conforming, it's another task
day = day_of_week(cdr.start)
freq = self.get_pattern()[day][time_of_day(cdr.start)//60//60]
current = np.roll(self.current, 1)
current[0] = cdr.start
diffs = np.array([e[0]-e[1] for e in zip(current, current[1:])])
current_freq = (60*60)/moving_average_exponential(diffs, ALPHA_FREQ)
limits = poisson_interval(freq, 1-0.997) # float
if not (current_freq <= max(1.0, limits[1]*ALARM_THRESHOLD)):
print(freq, current_freq, max(1, limits[1]*ALARM_THRESHOLD), )
return current_freq <= max(1.0, limits[1]*ALARM_THRESHOLD)
def is_converged(self):
return self.weeks >= HISTORY # FIXME
def alarm(self, cdr):
global alarms
alarms += 1
print("ALARM: user {} behavior changed".format(cdr.src))
def classify(self, class_num):
self.class_num = class_num
def get_pattern(self):
return moving_average_exponential(self.data, ALPHA_WEEKS)
def plot(self):
row_labels = list('MTWTFSS')
hours = list('0123456789AB')
column_labels = ["{}am".format(x) for x in hours] + \
["{}pm".format(x) for x in hours]
data = self.get_pattern()
fig, ax = plt.subplots()
ax.pcolor(data.transpose(), cmap=plt.cm.Blues)
# put the major ticks at the middle of each cell
ax.set_xticks(np.arange(data.shape[0])+0.5, minor=False)
ax.set_yticks(np.arange(data.shape[1])+0.5, minor=False)
# want a more natural, table-like display
ax.invert_yaxis()
ax.xaxis.tick_top()
ax.set_xticklabels(row_labels, minor=False)
ax.set_yticklabels(column_labels, minor=False)
plt.show()
def plot_pattern(self):
print(alarms)
plt.plot(list(range(24)), self.get_pattern()[0], 'yo-')
plt.plot(np.asarray(np.matrix(RATES_1[0])[:,0]).reshape(-1)//60//60,
np.asarray(np.matrix(RATES_1[0])[:,1]).reshape(-1)*60*60, 'ro-')
plt.show()
def process(source):
"""
Simple processing
"""
s = CDRStream(asterisk_like, source)
s.start()
for st in s:
cdr = csv_to_cdr(list(csv.reader(StringIO(st), delimiter=','))[0])
if not users.get(cdr.src):
users[cdr.src] = Pattern(cdr.src)
pattern = users[cdr.src]
if pattern.is_converged() and not pattern.is_conform(cdr):
pattern.alarm(cdr)
pattern.maintain(cdr)
def recalculate(time):
patterns = [p for p in users.values() if p.is_converged()]
if len(patterns) < 10:
return
X = np.matrix([x.get_pattern().ravel() for x in patterns])
km = KMeans(n_clusters=2, init='k-means++')
km.fit(X)
predicted = km.predict(X)
print(predicted)
for i,item in enumerate(predicted):
patterns[i].classify(item)
recalculate.km_time = time
def process_2(source):
s = CDRStream(asterisk_like, source)
s.start()
recalculate.km_time = 0
for st in s:
cdr = csv_to_cdr(list(csv.reader(StringIO(st), delimiter=','))[0])
if not users.get(cdr.src):
users[cdr.src] = Pattern(cdr.src)
pattern = users[cdr.src]
if pattern.is_converged() and not pattern.is_conform(cdr):
pattern.alarm(cdr)
pattern.maintain(cdr)
if cdr.start - recalculate.km_time >= 24*60*60*7:
# Once a week recalculate clusters
recalculate(cdr.start)
recalculate(cdr.start)
def test_uniform():
test(UniformSource(0, 24*60*60, rate=0.1))
def test_daily():
    # Autoregressive integrated moving average (ARIMA)
    # https://docs.google.com/viewer?url=http%3A%2F%2Fjmlda.org%2Fpapers%2Fdoc%2F2011%2Fno1%2FFadeevEtAl2011Autoreg.pdf
TIME = 24*60*60*7*4*2
p1 = [UserProfileSource(0, TIME, profile=UserProfile(RATES_1, 10, 0.1)) for x in range(10)]
p2 = [UserProfileSource(0, TIME, profile=UserProfile(RATES_2, 10, 0.1)) for x in range(5)]
profiles = p1 + p2
process_2(it_merge(*profiles, sort=lambda x: x[2]))
def test_one(rates):
TIME = 24*60*60*7*4*2
process_2(UserProfileSource(0, TIME, profile=UserProfile(rates, 10, 0.1)))
list(users.values())[0].plot()
def test_change(rates, rates2):
TIME = 24*60*60*7*4*2
process_2(UserProfileChangeBehaviorSource(0, TIME, profile=UserProfile(rates, 10, 0.1),
profile2=UserProfile(rates2, 10, 0.1),
when_to_change=TIME//2))
def test_change_group(rates, rates2, rates3, rates4):
TIME = 24*60*60*7*4*2
p1 = [UserProfileChangeBehaviorSource(0, TIME, profile=UserProfile(rates, 10, 0.1),
profile2=UserProfile(rates2, 10, 0.1),
when_to_change=TIME//2) for x in range(10)]
p2 = [UserProfileChangeBehaviorSource(0, TIME, profile=UserProfile(rates3, 10, 0.1),
profile2=UserProfile(rates4, 10, 0.1),
when_to_change=TIME//2) for x in range(5)]
profiles = p1 + p2
process_2(it_merge(*profiles, sort=lambda x: x[2]))
print(alarms)
if __name__ == "__main__":
test_one(RATES_1m)
#test_change(RATES_1, RATES_1m)
#test_daily()
#test_change_group(RATES_1, RATES_1m, RATES_2, RATES_2) | mit | -7,144,471,955,658,401,000 | 35.154812 | 119 | 0.600579 | false | 3.181149 | true | false | false |
TOCyna/tabelinha | flask/lib/python2.7/site-packages/flask_wtf/recaptcha/widgets.py | 32 | 1798 | # -*- coding: utf-8 -*-
from flask import current_app, Markup
from flask import json
JSONEncoder = json.JSONEncoder
RECAPTCHA_HTML = u'''
<script src='https://www.google.com/recaptcha/api.js'></script>
<div class="g-recaptcha" data-sitekey="%(public_key)s"></div>
<noscript>
<div style="width: 302px; height: 352px;">
<div style="width: 302px; height: 352px; position: relative;">
<div style="width: 302px; height: 352px; position: absolute;">
<iframe src="https://www.google.com/recaptcha/api/fallback?k=%(public_key)s"
frameborder="0" scrolling="no"
style="width: 302px; height:352px; border-style: none;">
</iframe>
</div>
<div style="width: 250px; height: 80px; position: absolute; border-style: none;
bottom: 21px; left: 25px; margin: 0px; padding: 0px; right: 25px;">
<textarea id="g-recaptcha-response" name="g-recaptcha-response"
class="g-recaptcha-response"
style="width: 250px; height: 80px; border: 1px solid #c1c1c1;
margin: 0px; padding: 0px; resize: none;" value="">
</textarea>
</div>
</div>
</div>
</noscript>
'''
__all__ = ["RecaptchaWidget"]
class RecaptchaWidget(object):
def recaptcha_html(self, public_key):
html = current_app.config.get('RECAPTCHA_HTML', RECAPTCHA_HTML)
return Markup(html % dict(
public_key=public_key
))
def __call__(self, field, error=None, **kwargs):
"""Returns the recaptcha input HTML."""
try:
public_key = current_app.config['RECAPTCHA_PUBLIC_KEY']
except KeyError:
raise RuntimeError("RECAPTCHA_PUBLIC_KEY config not set")
return self.recaptcha_html(public_key)
| gpl-2.0 | 6,034,015,994,903,162,000 | 33.576923 | 85 | 0.603448 | false | 3.444444 | false | false | false |
audax/kll | funcparserlib/lexer.py | 5 | 4412 | # -*- coding: utf-8 -*-
# Copyright (c) 2008/2013 Andrey Vlasovskikh
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__all__ = ['make_tokenizer', 'Token', 'LexerError']
import re
class LexerError(Exception):
def __init__(self, place, msg):
self.place = place
self.msg = msg
def __str__(self):
s = 'cannot tokenize data'
line, pos = self.place
return '%s: %d,%d: "%s"' % (s, line, pos, self.msg)
class Token(object):
def __init__(self, type, value, start=None, end=None):
self.type = type
self.value = value
self.start = start
self.end = end
def __repr__(self):
return 'Token(%r, %r)' % (self.type, self.value)
def __eq__(self, other):
# FIXME: Case sensitivity is assumed here
return self.type == other.type and self.value == other.value
def _pos_str(self):
if self.start is None or self.end is None:
return ''
else:
sl, sp = self.start
el, ep = self.end
return '%d,%d-%d,%d:' % (sl, sp, el, ep)
def __str__(self):
s = "%s %s '%s'" % (self._pos_str(), self.type, self.value)
return s.strip()
@property
def name(self):
return self.value
def pformat(self):
return "%s %s '%s'" % (self._pos_str().ljust(20),
self.type.ljust(14),
self.value)
def make_tokenizer(specs):
"""[(str, (str, int?))] -> (str -> Iterable(Token))"""
def compile_spec(spec):
name, args = spec
return name, re.compile(*args)
compiled = [compile_spec(s) for s in specs]
def match_specs(specs, str, i, position):
line, pos = position
for type, regexp in specs:
m = regexp.match(str, i)
if m is not None:
value = m.group()
nls = value.count('\n')
n_line = line + nls
if nls == 0:
n_pos = pos + len(value)
else:
n_pos = len(value) - value.rfind('\n') - 1
return Token(type, value, (line, pos + 1), (n_line, n_pos))
else:
errline = str.splitlines()[line - 1]
raise LexerError((line, pos + 1), errline)
def f(str):
length = len(str)
line, pos = 1, 0
i = 0
while i < length:
t = match_specs(compiled, str, i, (line, pos))
yield t
line, pos = t.end
i += len(t.value)
return f
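# Hedged usage sketch (illustrative, in the spirit of the example spec below):
#   tokenize = make_tokenizer([
#       ('NUMBER', (r'[0-9]+',)),
#       ('OP', (r'[+\-*/]',)),
#       ('SPACE', (r'\s+',)),
#   ])
#   [t for t in tokenize('1 + 2') if t.type != 'SPACE']
#   # -> [Token('NUMBER', '1'), Token('OP', '+'), Token('NUMBER', '2')]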
# This is an example of a token spec. See also [this article][1] for a
# discussion of searching for multiline comments using regexps (including `*?`).
#
# [1]: http://ostermiller.org/findcomment.html
_example_token_specs = [
('COMMENT', (r'\(\*(.|[\r\n])*?\*\)', re.MULTILINE)),
('COMMENT', (r'\{(.|[\r\n])*?\}', re.MULTILINE)),
('COMMENT', (r'//.*',)),
('NL', (r'[\r\n]+',)),
('SPACE', (r'[ \t\r\n]+',)),
('NAME', (r'[A-Za-z_][A-Za-z_0-9]*',)),
('REAL', (r'[0-9]+\.[0-9]*([Ee][+\-]?[0-9]+)*',)),
('INT', (r'[0-9]+',)),
('INT', (r'\$[0-9A-Fa-f]+',)),
('OP', (r'(\.\.)|(<>)|(<=)|(>=)|(:=)|[;,=\(\):\[\]\.+\-<>\*/@\^]',)),
('STRING', (r"'([^']|(''))*'",)),
('CHAR', (r'#[0-9]+',)),
('CHAR', (r'#\$[0-9A-Fa-f]+',)),
]
#tokenize = make_tokenizer(_example_token_specs)
| gpl-3.0 | 1,815,964,259,012,011,500 | 32.172932 | 80 | 0.538985 | false | 3.471282 | false | false | false |
crazystick/py-hub-ctrl | hub_ctrl.py | 1 | 8063 | #! /usr/bin/python2
"""
hub_ctrl.py - a tool to control port power/led of USB hub
Copyright (C) 2006, 2011 Free Software Initiative of Japan
Author: NIIBE Yutaka <[email protected]>
This file is a part of Gnuk, a GnuPG USB Token implementation.
Gnuk is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Gnuk is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Modified 2014 Paul Adams - updated to be compatible with pyusb 1.0.0b1
"""
import usb.core
USB_RT_HUB = (usb.TYPE_CLASS | usb.RECIP_DEVICE)
USB_RT_PORT = (usb.TYPE_CLASS | usb.RECIP_OTHER)
USB_PORT_FEAT_RESET = 4
USB_PORT_FEAT_POWER = 8
USB_PORT_FEAT_INDICATOR = 22
USB_DIR_IN = 0x80 # device to host
COMMAND_SET_NONE = 0
COMMAND_SET_LED = 1
COMMAND_SET_POWER = 2
HUB_LED_GREEN = 2
def find_hubs(listing, verbose, busnum=None, devnum=None, hub=None):
number_of_hubs_with_feature = 0
hubs = []
devices = usb.core.find(find_all=True, bDeviceClass=usb.CLASS_HUB)
for dev in devices:
printout_enable = 0
if (listing
or (verbose
and ((dev.bus == busnum and dev.address == devnum)
or hub == number_of_hubs_with_feature))):
printout_enable = 1
desc = None
# Get USB Hub descriptor
desc = dev.ctrl_transfer(USB_DIR_IN | USB_RT_HUB,
usb.REQ_GET_DESCRIPTOR,
wValue = usb.DT_HUB << 8,
wIndex = 0,
data_or_wLength = 1024, timeout = 1000)
if not desc:
continue
# desc[3] is lower byte of wHubCharacteristics
if (desc[3] & 0x80) == 0 and (desc[3] & 0x03) >= 2:
            # Hub doesn't have features of controlling port power/indicator
continue
if printout_enable:
print "Hub #%d at %s:%03d" % (number_of_hubs_with_feature,
dev.bus, dev.address)
if (desc[3] & 0x03) == 0:
print " INFO: ganged power switching."
elif (desc[3] & 0x03) == 1:
print " INFO: individual power switching."
elif (desc[3] & 0x03) == 2 or (desc[3] & 0x03) == 3:
print " WARN: no power switching."
if (desc[3] & 0x80) == 0:
print " WARN: Port indicators are NOT supported."
hubs.append({ 'busnum' : dev.bus, 'devnum' : dev.address,
'indicator_support' : (desc[3] & 0x80) == 0x80,
'dev' : dev, 'num_ports' : desc[2] })
number_of_hubs_with_feature += 1
return hubs
def hub_port_status(handle, num_ports):
print " Hub Port Status:"
for i in range(num_ports):
port = i + 1
status = handle.ctrl_transfer(USB_RT_PORT | usb.ENDPOINT_IN,
usb.REQ_GET_STATUS,
wValue = 0,
wIndex = port, data_or_wLength = 4,
timeout = 1000)
print " Port %d: %02x%02x.%02x%02x" % (port, status[3], status[2],
status[1], status[0]),
if status[1] & 0x10:
print " indicator",
if status[1] & 0x08:
print " test" ,
if status[1] & 0x04:
print " highspeed",
if status[1] & 0x02:
print " lowspeed",
if status[1] & 0x01:
print " power",
if status[0] & 0x10:
print " RESET",
if status[0] & 0x08:
print " oc",
if status[0] & 0x04:
print " suspend",
if status[0] & 0x02:
print " enable",
if status[0] & 0x01:
print " connect",
print
import sys
COMMAND_SET_NONE = 0
COMMAND_SET_LED = 1
COMMAND_SET_POWER = 2
HUB_LED_GREEN = 2
def usage(progname):
print >> sys.stderr, """Usage: %s [{-h HUBNUM | -b BUSNUM -d DEVNUM}]
[-P PORT] [{-p [VALUE]|-l [VALUE]}]
""" % progname
def exit_with_usage(progname):
usage(progname)
exit(1)
if __name__ == '__main__':
busnum = None
devnum = None
listing = False
verbose = False
hub = None
port = 1
cmd = COMMAND_SET_NONE
if len(sys.argv) == 1:
listing = True
else:
try:
while len(sys.argv) >= 2:
option = sys.argv[1]
sys.argv.pop(1)
if option == '-h':
if busnum != None or devnum != None:
exit_with_usage(sys.argv[0])
hub = int(sys.argv[1])
sys.argv.pop(1)
elif option == '-b':
busnum = int(sys.argv[1])
sys.argv.pop(1)
elif option == '-d':
devnum = int(sys.argv[1])
sys.argv.pop(1)
elif option == '-P':
port = int(sys.argv[1])
sys.argv.pop(1)
elif option == '-l':
if cmd != COMMAND_SET_NONE:
exit_with_usage(sys.argv[0])
if len(sys.argv) > 1:
value = int(sys.argv[1])
sys.argv.pop(1)
else:
value = HUB_LED_GREEN
cmd = COMMAND_SET_LED
elif option == '-p':
if cmd != COMMAND_SET_NONE:
exit_with_usage(sys.argv[0])
if len(sys.argv) > 1:
value = int(sys.argv[1])
sys.argv.pop(1)
else:
value = 0
cmd = COMMAND_SET_POWER
elif option == '-v':
verbose = True
#if len(sys.argv) == 1:
# listing = True
else:
exit_with_usage(sys.argv[0])
except:
exit_with_usage(sys.argv[0])
if ((busnum != None and devnum == None)
or (busnum == None and devnum != None)):
exit_with_usage(sys.argv[0])
if hub == None and busnum == None:
hub = 0 # Default hub = 0
#if cmd == COMMAND_SET_NONE:
# cmd = COMMAND_SET_POWER
hubs = find_hubs(listing, verbose, busnum, devnum, hub)
if len(hubs) == 0:
print >> sys.stderr, "No hubs found."
exit(1)
if listing:
exit(0)
if hub == None:
for h in hubs:
if h['busnum'] == busnum and h['devnum'] == devnum:
dev_hub = h['dev']
nports = h['num_ports']
else:
dev_hub = hubs[hub]['dev']
nports = hubs[hub]['num_ports']
if cmd != COMMAND_SET_NONE:
if cmd == COMMAND_SET_POWER:
feature = USB_PORT_FEAT_POWER
index = port
if value:
request = usb.REQ_SET_FEATURE
else:
request = usb.REQ_CLEAR_FEATURE
else:
request = usb.REQ_SET_FEATURE
feature = USB_PORT_FEAT_INDICATOR
index = (value << 8) | port
if verbose:
print "Send control message (REQUEST=%d, FEATURE=%d, INDEX=%d) " % (request, feature, index)
dev_hub.ctrl_transfer(USB_RT_PORT, request, wValue = feature, wIndex=index, data_or_wLength=None, timeout=1000)
if verbose:
hub_port_status(dev_hub, nports)
| gpl-2.0 | 7,415,764,359,881,032,000 | 31.643725 | 119 | 0.488776 | false | 3.675023 | false | false | false |
guillaume-florent/aoc-utils | aocutils/fixes.py | 1 | 5033 | # coding: utf-8
r"""Fixing methods for shapes, faces, tolerance, continuity and
curve resampling
"""
import logging
# import ast
from OCC.Core.GCPnts import GCPnts_UniformDeflection
from OCC.Core.GeomAbs import GeomAbs_C2
from OCC.Core.GeomAbs import GeomAbs_C0, GeomAbs_C1 # dynamically built code
from OCC.Core.GeomAPI import GeomAPI_PointsToBSpline
from OCC.Core.ShapeFix import ShapeFix_ShapeTolerance, ShapeFix_Shape, \
ShapeFix_Face
from OCC.Core.ShapeUpgrade import ShapeUpgrade_ShapeDivideContinuity
from aocutils.tolerance import OCCUTILS_DEFAULT_TOLERANCE, \
OCCUTILS_FIXING_TOLERANCE
from aocutils.common import AssertIsDone
from aocutils.collections import point_list_to_tcolgp_array1_of_pnt
from aocutils.geom.curve import Curve
logger = logging.getLogger(__name__)
def fix_shape(shp, tolerance=OCCUTILS_FIXING_TOLERANCE):
r"""Fix a shape
Parameters
----------
shp : OCC.TopoDS.TopoDS_Shape
tolerance : float
Returns
-------
OCC.TopoDS.TopoDS_Shape
"""
fix = ShapeFix_Shape(shp)
# Returns (modifiable) the mode for applying fixes of ShapeFix_Shell,
# by default True
fix.SetFixFreeShellMode(True)
sf = fix.FixShellTool().GetObject()
sf.SetFixOrientationMode(True)
fix.LimitTolerance(tolerance)
fix.Perform() # Iterates on sub- shape and performs fixes.
return fix.Shape()
def fix_face(face, tolerance=OCCUTILS_FIXING_TOLERANCE):
r"""Fix a face
This operator allows to perform various fixes on face and its wires:
- fixes provided by ShapeFix_Wire,
- fixing orientation of wires,
- addition of natural bounds,
- fixing of missing seam edge,
- detection and removal of null-area wires.
Parameters
----------
face : ShapeFix_Face
tolerance : float
Returns
-------
OCC.TopoDS.TopoDS_Face
"""
fix = ShapeFix_Face(face)
fix.SetMaxTolerance(tolerance)
# Performs all the fixes, depending on modes
# Function Status returns the status of last call to Perform()
# ShapeExtend_OK : face was OK, nothing done
# ShapeExtend_DONE1: some wires are fixed
# ShapeExtend_DONE2: orientation of wires fixed
# ShapeExtend_DONE3: missing seam added
# ShapeExtend_DONE4: small area wire removed
# ShapeExtend_DONE5: natural bounds added
# ShapeExtend_FAIL1: some fails during fixing wires
# ShapeExtend_FAIL2: cannot fix orientation of wires
# ShapeExtend_FAIL3: cannot add missing seam
# ShapeExtend_FAIL4: cannot remove small area wire.
fix.Perform()
return fix.Face() # assumes no FixMissingSeam involved
def fix_tolerance(shape, tolerance=OCCUTILS_DEFAULT_TOLERANCE):
r"""Sets (enforces) tolerances in a shape to the given value.
Modifies tolerances of sub-shapes (vertices, edges, faces)
Parameters
----------
shape : OCC.TopoDS.TopoDS_Shape
tolerance : float
"""
# void SetTolerance (const TopoDS_Shape &shape, const Standard_Real preci,
# const TopAbs_ShapeEnum styp=TopAbs_SHAPE) const
ShapeFix_ShapeTolerance().SetTolerance(shape, tolerance)
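# Hedged usage sketch (assumes `shp` is a valid OCC.TopoDS.TopoDS_Shape obtained elsewhere):
#   healed = fix_shape(shp)                      # run the generic ShapeFix pipeline
#   fix_tolerance(healed, tolerance=1e-6)        # then clamp sub-shape tolerances in place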
def fix_continuity(edge, continuity=1):
r"""Fix the continuity of an edge
Parameters
----------
edge : OCC.TopoDS.TopoDS_Edge
continuity : int
Returns
-------
OCC.TopoDS.TopoDS_Shape
The upgrade resulting shape
"""
# ShapeUpgrade_ShapeDivideContinuity :
# API Tool for converting shapes with C0 geometry into C1 ones
shape_upgrade = ShapeUpgrade_ShapeDivideContinuity(edge)
continuity_constant = eval('GeomAbs_C' + str(continuity))
shape_upgrade.SetBoundaryCriterion(continuity_constant)
shape_upgrade.Perform()
return shape_upgrade.Result()
def resample_curve_uniform_deflection(curve,
deflection=0.5,
degree_min=3,
degree_max=8,
continuity=GeomAbs_C2,
tolerance=OCCUTILS_DEFAULT_TOLERANCE):
r"""Fits a bspline through the samples on curve
Parameters
----------
curve : OCC.TopoDS.TopoDS_Wire, OCC.TopoDS.TopoDS_Edge, curve
deflection : float
degree_min : int
degree_max : int
continuity : GeomAbs_C*
tolerance : float
Returns
-------
OCC.Geom.Geom_Curve
The resampled curve
"""
# crv = aocutils.convert.adapt.to_adaptor_3d(curve)
crv = Curve(curve).to_adaptor_3d()
defl = GCPnts_UniformDeflection(crv, deflection)
with AssertIsDone(defl, 'failed to compute UniformDeflection'):
logger.info('Number of points : %i' % defl.NbPoints())
sampled_pnts = [defl.Value(i) for i in range(1, defl.NbPoints())]
resampled_curve = GeomAPI_PointsToBSpline(
point_list_to_tcolgp_array1_of_pnt(sampled_pnts),
degree_min,
degree_max,
continuity,
tolerance)
return resampled_curve.Curve().GetObject()
| lgpl-3.0 | 7,753,895,359,617,903,000 | 28.781065 | 79 | 0.667197 | false | 3.577114 | false | false | false |
tw-ddis/tweet_parser | tweet_parser/getter_methods/tweet_links.py | 1 | 4932 | # Copyright 2018 Twitter, Inc.
# Licensed under the MIT License
# https://opensource.org/licenses/MIT
from tweet_parser.tweet_checking import is_original_format
def get_tweet_links(tweet):
"""
Get the links that are included in the Tweet as "urls"
(if there are no links in the Tweet, this returns an empty list)
This includes links that are included in quoted or retweeted Tweets
Returns unrolled or expanded_url information if it is available
Args:
tweet (Tweet): A Tweet object (must be a Tweet obj, not a dict)
Returns:
list (list of dicts): A list of dictionaries containing information
about urls. Each dictionary entity can have these keys; without
unwound url or expanded url Twitter data enrichments many of these
fields will be missing. \n
More information about the Twitter url enrichments at:
http://support.gnip.com/enrichments/expanded_urls.html and
http://support.gnip.com/enrichments/enhanced_urls.html
Example:
>>> result = [
... {
... # url that shows up in the tweet text
... 'display_url': "https://twitter.com/RobotPrinc...",
... # long (expanded) url
... 'expanded_url': "https://twitter.com/RobotPrincessFi",
... # characters where the display link is
... 'indices': [55, 88],
... 'unwound': {
... # description from the linked webpage
... 'description': "the Twitter profile of RobotPrincessFi",
... 'status': 200,
... # title of the webpage
... 'title': "the Twitter profile of RobotPrincessFi",
... # long (expanded) url}
... 'url': "https://twitter.com/RobotPrincessFi"},
... # the url that tweet directs to, often t.co
... 'url': "t.co/1234"}]
"""
if is_original_format(tweet):
# get the urls from the Tweet
try:
tweet_urls = tweet["entities"]["urls"]
except KeyError:
tweet_urls = []
# get the urls from the quote-tweet
if tweet.quoted_tweet is not None:
tweet_urls += tweet.quoted_tweet.tweet_links
# get the urls from the retweet
if tweet.retweeted_tweet is not None:
tweet_urls += tweet.retweeted_tweet.tweet_links
return tweet_urls
else:
# try to get normal urls
try:
tweet_urls = tweet["twitter_entities"]["urls"]
except KeyError:
tweet_urls = []
# get the urls from the quote-tweet
if tweet.quoted_tweet is not None:
tweet_urls += tweet.quoted_tweet.tweet_links
# get the urls from the retweet
if tweet.retweeted_tweet is not None:
tweet_urls += tweet.retweeted_tweet.tweet_links
# otherwise, we're now going to combine the urls to try to
# to get the same format as the og format urls, try to get enriched urls
try:
gnip_tweet_urls = {x["url"]: x for x in tweet["gnip"]["urls"]}
gnip_tweet_exp_urls = {x["expanded_url"]: x for x in tweet["gnip"]["urls"]}
except KeyError:
return tweet_urls
key_mappings = {"expanded_url": "url",
"expanded_status": "status",
"expanded_url_title": "title",
"expanded_url_description": "description"}
tweet_urls_expanded = []
for url in tweet_urls:
expanded_url = url
if url["url"] in gnip_tweet_urls:
expanded_url["unwound"] = {key_mappings[key]: value for key, value in gnip_tweet_urls[url["url"]].items() if key != "url"}
elif url.get("expanded_url", "UNAVAILABLE") in gnip_tweet_exp_urls:
expanded_url["unwound"] = {key_mappings[key]: value for key, value in gnip_tweet_urls[url["expanded_url"]].items() if key != "url"}
tweet_urls_expanded.append(expanded_url)
return tweet_urls_expanded
def get_most_unrolled_urls(tweet):
"""
For each url included in the Tweet "urls", get the most unrolled
version available. Only return 1 url string per url in tweet.tweet_links
In order of preference for "most unrolled"
(keys from the dict at tweet.tweet_links): \n
1. `unwound`/`url` \n
2. `expanded_url` \n
3. `url`
Args:
tweet (Tweet): A Tweet object or dict
Returns:
list (list of strings): a list of the most unrolled url available
"""
unrolled_urls = []
for url in get_tweet_links(tweet):
if url.get("unwound", {"url": None}).get("url", None) is not None:
unrolled_urls.append(url["unwound"]["url"])
elif url.get("expanded_url", None) is not None:
unrolled_urls.append(url["expanded_url"])
else:
unrolled_urls.append(url["url"])
return unrolled_urls
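# Illustrative only: for the example url dict shown in get_tweet_links() above,
# get_most_unrolled_urls(tweet) would yield ["https://twitter.com/RobotPrincessFi"],
# since the unwound url is preferred over expanded_url and the t.co url.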
| mit | 8,992,743,042,978,524,000 | 41.153846 | 147 | 0.589011 | false | 3.898814 | false | false | false |
chuckSMASH/django-geohashing | geohashing/views.py | 1 | 1055 | """
API views for the geohashing app
"""
import datetime
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .models import Day
from . import serializers
@api_view(['GET',])
def get_geohash(request, year, month, day, format=None):
"""
Return the geohash for a given day if we have a record for it.
Will return '400 Bad Request' if year, month and day don't
combine to form a valid date.
Will return '404 Not Found' if geohash data for this date is not found.
"""
try:
year = int(year)
month = int(month)
day = int(day)
geohash_date = datetime.date(year, month, day)
except (ValueError, TypeError):
return Response(status=status.HTTP_400_BAD_REQUEST)
try:
geohash = Day.objects.get(geohash_date=geohash_date)
except Day.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = serializers.DaySerializer(geohash)
return Response(serializer.data)
| mit | -2,380,809,722,700,656,000 | 26.763158 | 75 | 0.688152 | false | 3.808664 | false | false | false |
arbenson/mrtsqr | dumbo/hyy-python-hadoop/examples/TestText.py | 1 | 1261 | #!/usr/bin/env python
# ========================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hadoop.io.SequenceFile import CompressionType
from hadoop.io import Text
from hadoop.io import SequenceFile
def writeData(writer):
key = Text()
value = Text()
key.set('Key')
value.set('Value')
writer.append(key, value)
if __name__ == '__main__':
writer = SequenceFile.createWriter('test.seq', Text, Text)
writeData(writer)
writer.close()
| bsd-2-clause | -4,914,641,547,376,163,000 | 35.028571 | 74 | 0.694687 | false | 4.245791 | false | false | false |
nickfishman/autobahn-tests | pubsub/server.py | 1 | 1277 | import argparse
import sys
from twisted.python import log
from twisted.internet import reactor
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.websocket import listenWS
from autobahn.wamp import WampServerFactory, \
WampServerProtocol
parser = argparse.ArgumentParser(
"Basic autobahn pubsub server",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-b", "--base_uri", type=str, help="autobahn prefix uri to use", default="http://autobahn-pubsub/channels/")
parser.add_argument("-d", "--debug", action='store_true', help="whether to enable debugging", default=False)
parser.add_argument("-u", "--websocket_url", type=str, help="autobahn websocket url to use", default="ws://localhost:9000")
ARGS = parser.parse_args()
class PubSubServer(WampServerProtocol):
def onSessionOpen(self):
self.registerForPubSub(ARGS.base_uri, True)
if __name__ == '__main__':
log.startLogging(sys.stdout)
factory = WampServerFactory(ARGS.websocket_url, debugWamp=ARGS.debug)
factory.protocol = PubSubServer
factory.setProtocolOptions(allowHixie76 = True)
listenWS(factory)
webdir = File(".")
web = Site(webdir)
reactor.listenTCP(8080, web)
reactor.run() | mit | 2,841,313,547,885,588,500 | 31.769231 | 128 | 0.732185 | false | 3.617564 | false | false | false |
gsi-upm/SmartSim | smartbody/data/examples/GazeDemo.py | 1 | 4382 | print "|--------------------------------------------|"
print "| Starting Gaze Demo |"
print "|--------------------------------------------|"
# Add asset paths
scene.addAssetPath('mesh', 'mesh')
scene.addAssetPath("script", "behaviorsets")
scene.addAssetPath('motion', 'ChrBrad')
scene.addAssetPath('motion', 'ChrRachel')
scene.addAssetPath('script','scripts')
scene.addAssetPath('script','examples')
scene.loadAssets()
# Set scene parameters and camera
print 'Configuring scene parameters and camera'
scene.setScale(1.0)
scene.setBoolAttribute('internalAudio', True)
scene.run('default-viewer.py')
camera = getCamera()
camera.setEye(0, 1.68, 2.58)
camera.setCenter(0, 0.89, -0.14)
camera.setUpVector(SrVec(0, 1, 0))
camera.setScale(1)
camera.setFov(1.0472)
camera.setFarPlane(100)
camera.setNearPlane(0.1)
camera.setAspectRatio(0.966897)
scene.getPawn('camera').setPosition(SrVec(0, -5, 0))
# Set joint map for Brad and Rachel
print 'Setting up joint map for Brad and Rachel'
scene.run('zebra2-map.py')
zebra2Map = scene.getJointMapManager().getJointMap('zebra2')
bradSkeleton = scene.getSkeleton('ChrBrad.sk')
zebra2Map.applySkeleton(bradSkeleton)
zebra2Map.applyMotionRecurse('ChrBrad')
rachelSkeleton = scene.getSkeleton('ChrRachel.sk')
zebra2Map.applySkeleton(rachelSkeleton)
zebra2Map.applyMotionRecurse('ChrRachel')
# Setting up Brad and Rachel
print 'Setting up Brad'
brad = scene.createCharacter('ChrBrad', '')
bradSkeleton = scene.createSkeleton('ChrBrad.sk')
brad.setSkeleton(bradSkeleton)
bradPos = SrVec(.35, 0, 0)
brad.setPosition(bradPos)
brad.setHPR(SrVec(-17, 0, 0))
brad.createStandardControllers()
# Deformable mesh
brad.setVec3Attribute('deformableMeshScale', .01, .01, .01)
brad.setStringAttribute('deformableMesh', 'ChrBrad.dae')
# setup gestures
scene.run('BehaviorSetGestures.py')
setupBehaviorSet()
retargetBehaviorSet('ChrBrad')
bml.execBML('ChrBrad', '<body posture="ChrBrad@Idle01" ready="0" relax="0"/>')
print 'Setting up Rachel'
rachel = scene.createCharacter('ChrRachel', '')
rachelSkeleton = scene.createSkeleton('ChrRachel.sk')
rachel.setSkeleton(rachelSkeleton)
rachelPos = SrVec(-.35, 0, 0)
rachel.setPosition(rachelPos)
rachel.setHPR(SrVec(17, 0, 0))
rachel.createStandardControllers()
# Deformable mesh
rachel.setVec3Attribute('deformableMeshScale', .01, .01, .01)
rachel.setStringAttribute('deformableMesh', 'ChrRachel.dae')
# setup gestures
scene.run('BehaviorSetFemaleGestures.py')
setupBehaviorSet()
retargetBehaviorSet('ChrRachel')
bml.execBML('ChrRachel', '<body posture="ChrConnor@IdleStand01" ready=".2" relax=".2"/>')
# Add pawns in scene
print 'Adding pawn to scene'
gazeTarget = scene.createPawn('gazeTarget')
gazeTarget.setPosition(SrVec(0.75, 1.54, 0.33))
# Turn on GPU deformable geometry for all
for name in scene.getCharacterNames():
scene.getCharacter(name).setStringAttribute("displayType", "GPUmesh")
# Make characters gaze at pawn
bml.execBML('ChrRachel', '<gaze sbm:joint-range="EYES CHEST" target="gazeTarget"/>')
# Variables to move pawn
gazeX = -2
gazeZ = 2
dirX = 1
dirZ = 1
speed = 0.005
lastTime = -8
class GazeDemo(SBScript):
def update(self, time):
global gazeX, gazeZ, dirX, dirZ, speed, lastTime
# Change direction when hit border
if gazeX > 2:
dirX = -1
elif gazeX < -2:
dirX = 1
if gazeZ > 2:
dirZ = -1
        elif gazeZ < 0:
dirZ = 1
gazeX = gazeX + speed * dirX
gazeZ = gazeZ + speed * dirZ
gazeTarget.setPosition(SrVec(gazeX, 2, gazeZ))
diff = time - lastTime
if diff > 10:
diff = 0
lastTime = time
#Gaze at joints
bml.execBMLAt(0, 'ChrBrad', '<gaze target="ChrRachel:base" sbm:joint-speed="800" sbm:joint-smooth="0.2"/>')
bml.execBMLAt(2, 'ChrBrad', '<gaze target="ChrBrad:l_wrist" sbm:joint-speed="800" sbm:joint-smooth="0.2"/>')
bml.execBMLAt(4, 'ChrBrad', '<gaze target="ChrBrad:r_ankle" sbm:joint-speed="800" sbm:joint-smooth="0.2"/>')
bml.execBMLAt(6, 'ChrBrad', '<gaze target="ChrRachel:l_wrist" sbm:joint-speed="800" sbm:joint-smooth="0.2"/>')
bml.execBMLAt(8, 'ChrBrad', '<gaze target="ChrRachel:spine4" sbm:joint-speed="800" sbm:joint-smooth="0.2"/>')
# Run the update script
scene.removeScript('gazedemo')
gazedemo = GazeDemo()
scene.addScript('gazedemo', gazedemo)
| apache-2.0 | 9,040,150,471,584,670,000 | 31.19697 | 113 | 0.693063 | false | 2.5083 | false | false | false |
ccubed/EvenniaGames | FSuns/server/conf/settings.py | 1 | 5190 | """
Evennia settings file.
The full options are found in the default settings file found here:
{settings_default}
Note: Don't copy more from the default file than you actually intend to
change; this will make sure that you don't overload upstream updates
unnecessarily.
"""
# Use the defaults from Evennia unless explicitly overridden
import os
from evennia.settings_default import *
from machina import get_apps as get_machina_apps
from machina import MACHINA_MAIN_TEMPLATE_DIR
from machina import MACHINA_MAIN_STATIC_DIR
######################################################################
# Evennia base server config
######################################################################
# This is the name of your game. Make it catchy!
SERVERNAME = 'Radial Blur'
# Path to the game directory (use EVENNIA_DIR to refer to the
# core evennia library)
GAME_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Place to put log files
LOG_DIR = os.path.join(GAME_DIR, "server", "logs")
SERVER_LOG_FILE = os.path.join(LOG_DIR, 'server.log')
PORTAL_LOG_FILE = os.path.join(LOG_DIR, 'portal.log')
HTTP_LOG_FILE = os.path.join(LOG_DIR, 'http_requests.log')
# Other defaults
PROTOTYPE_MODULES = ("world.prototypes",)
######################################################################
# Evennia Database config
######################################################################
# Database config syntax:
# ENGINE - path to the database backend. Possible choices are:
# 'django.db.backends.sqlite3', (default)
# 'django.db.backends.mysql',
# 'django.db.backends.postgresql_psycopg2',
# 'django.db.backends.oracle' (untested).
# NAME - database name, or path to the db file for sqlite3
# USER - db admin (unused in sqlite3)
# PASSWORD - db admin password (unused in sqlite3)
# HOST - empty string is localhost (unused in sqlite3)
# PORT - empty string defaults to localhost (unused in sqlite3)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(GAME_DIR, "server", "evennia.db3"),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': ''
}}
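# A minimal commented-out sketch of the same block for PostgreSQL, using one of
# the engine choices listed above; the NAME, USER and PASSWORD values are
# placeholders, not settings used by this game.
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.postgresql_psycopg2',
#         'NAME': 'evennia_db',
#         'USER': 'evennia_user',
#         'PASSWORD': 'change-me',
#         'HOST': 'localhost',
#         'PORT': '5432'
#     }}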
######################################################################
# Django web features
# (don't remove these entries, they are needed to override the default
# locations with your actual GAME_DIR locations at run-time)
######################################################################
# Absolute path to the directory that holds file uploads from web apps.
# Example: "/home/media/media.lawrence.com"
MEDIA_ROOT = os.path.join(GAME_DIR, "web", "media")
# The master urlconf file that contains all of the sub-branches to the
# applications. Change this to add your own URLs to the website.
ROOT_URLCONF = 'web.urls'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure
# to use a trailing slash. Django1.4+ will look for admin files under
# STATIC_URL/admin.
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(GAME_DIR, "web", "static")
# Directories from which static files will be gathered from.
STATICFILES_DIRS = (
os.path.join(GAME_DIR, "web", "static_overrides"),
os.path.join(EVENNIA_DIR, "web", "static"),
MACHINA_MAIN_STATIC_DIR,
)
# We setup the location of the website template as well as the admin site.
TEMPLATE_DIRS = (
os.path.join(GAME_DIR, "web", "template_overrides", ACTIVE_TEMPLATE),
os.path.join(GAME_DIR, "web", "template_overrides"),
os.path.join(EVENNIA_DIR, "web", "templates", ACTIVE_TEMPLATE),
os.path.join(EVENNIA_DIR, "web", "templates"),
MACHINA_MAIN_TEMPLATE_DIR,
)
# Installed Apps
INSTALLED_APPS += (
'django.contrib.humanize',
'markdown_deux',
'bootstrapform',
'helpdesk',
'django.contrib.messages',
'mptt',
'haystack',
'widget_tweaks',
'django_markdown',
'happenings',
'machina',
'machina.apps.forum',
'machina.apps.forum_conversation',
'machina.apps.forum_conversation.forum_attachments',
'machina.apps.forum_conversation.forum_polls',
'machina.apps.forum_feeds',
'machina.apps.forum_moderation',
'machina.apps.forum_search',
'machina.apps.forum_tracking',
'machina.apps.forum_member',
'machina.apps.forum_permission',
'bootstrap3',
'jquery'
)
TEMPLATE_CONTEXT_PROCESSORS += (
'machina.core.context_processors.metadata',
)
MIDDLEWARE_CLASSES += (
'machina.apps.forum_permission.middleware.ForumPermissionMiddleware',
)
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'machina_attachments': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': '/tmp',
}
}
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
},
}
# The secret key is randomly seeded upon creation. It is used to sign
# Django's cookies. Do not share this with anyone. Changing it will
# log out all active web browsing sessions. Game web client sessions
# may survive.
SECRET_KEY = '2uX-V:tTFfYdZcQ@oJ"wH+NO{1jk9"?83KUsp,g4'
| mit | -3,653,495,970,787,921,400 | 32.057325 | 87 | 0.634682 | false | 3.509128 | false | false | false |
frankosan/pypers | pypers/steps/utils/split.py | 1 | 2381 | import os
from pypers.core.step import CmdLineStep
from pypers.utils import utils
class Split(CmdLineStep):
spec = {
"name": "split",
"version": "1.0",
"descr": [
"Splits an input file in several chuncks"
],
"args":
{
"inputs": [
{
"name" : "input_file",
"type" : "file",
"descr" : "input file name",
},
{
"name" : "nchunks",
"type" : "int",
"descr" : "number of chunks in which the input file get splitted",
},
],
"outputs": [
{
"name" : "output_files",
"type" : "file",
"descr" : "output file names",
}
],
"params" : [
{
"name" : "prefix",
"value" : "chunk_",
"descr" : "string prefix on the output files",
"readonly" : True,
},
{
"name" : "extension",
"value" : ".bed",
"descr" : "extension added to the splitted files",
"readonly" : True,
}
]
},
"cmd": [
"/usr/bin/split -l {{line_chunks}} --suffix-length=4 -d {{input_file}} {{full_prefix}}",
]
}
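    # The {{line_chunks}}, {{input_file}} and {{full_prefix}} placeholders in the
    # command above correspond to instance attributes of the same names; process()
    # below sets them before self.render() builds the final command line.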
def process(self):
with open(self.input_file) as fh:
lines = len(fh.readlines())
self.line_chunks = int(lines / self.nchunks)
self.full_prefix = os.path.join(self.output_dir, self.prefix)
self.submit_cmd(self.render())
self.output_files = []
for filename in os.listdir(self.output_dir):
if filename.startswith(self.prefix):
original_path = os.path.join(self.output_dir, filename)
new_path = original_path + self.extension
os.rename(original_path, new_path)
self.output_files.append(new_path)
self.meta['job']['input_file'] = []
for output_file in self.output_files:
self.meta['job']['input_file'].append(self.input_file)
| gpl-3.0 | 2,558,659,611,822,990,300 | 29.922078 | 100 | 0.409492 | false | 4.297834 | false | false | false |
dsibournemouth/autoweka | scripts/boxplot.py | 2 | 2652 | #!/usr/bin/python
import argparse
import os
import sqlite3
import sys
import matplotlib as mpl
mpl.use('Agg')
mpl.rc('font', family='Liberation Serif')
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import rcParams
from pylab import *
from collections import OrderedDict
from config import *
def main():
parser = argparse.ArgumentParser(prog=os.path.basename(__file__))
globals().update(load_config(parser))
parser.add_argument('--dataset', choices=datasets, required=False)
parser.add_argument('--error', choices=['error', 'test_error', 'full_cv_error'], required=True)
args = parser.parse_args()
selected_datasets = [args.dataset] if args.dataset else datasets
type_error = args.error
for dataset in selected_datasets:
conn = sqlite3.connect(database_file)
c = conn.cursor()
query = "SELECT strategy,generation,%s FROM results WHERE dataset='%s' AND %s<100000" % (
type_error, dataset, type_error)
results = c.execute(query).fetchall()
conn.close()
if not results:
raise Exception('No results')
data = dict()
for row in results:
key = "%s-%s" % (row[0], row[1])
if key == 'DEFAULT-CV':
key = 'WEKA-DEF'
if key == 'RAND-CV':
key = 'RAND'
if key == 'SMAC-CV': # TO REMOVE
key = 'SMAC'
if key == 'TPE-CV': # TO REMOVE
key = 'TPE'
if key not in data:
data[key] = []
try:
data[key].append(float(row[2]))
except Exception, e:
print "[ERROR] ", e, " -- ", row[2]
# data = OrderedDict(sorted(data.items(), key=lambda t: t[0]))
labels = ['RAND', 'SMAC', 'TPE']
data = [data['RAND'], data['SMAC'], data['TPE']]
fig, ax = plt.subplots(figsize=(6, 2))
ax.set_aspect(6)
fig.canvas.draw()
# bp = plt.boxplot(data.values(), vert=False, whis='range') # , labels=data.keys())
# ytickNames = plt.setp(ax, yticklabels=data.keys())
bp = plt.boxplot(data[::-1], vert=False, whis='range', widths=0.8) # , labels=data.keys())
ytickNames = plt.setp(ax, yticklabels=labels[::-1])
plt.setp(ytickNames, fontsize=10)
xlim(0, 100)
plt.margins(0.05, 0.05)
xlabel('% misclassification')
# ylabel('Strategy')
title(dataset)
tight_layout()
savefig('../plots%s/boxplot.%s.%s.png' % (suffix, type_error, dataset))
# show()
if __name__ == "__main__":
main()
| gpl-3.0 | -6,118,261,727,262,887,000 | 28.797753 | 99 | 0.556184 | false | 3.569314 | false | false | false |
google-aai/tf-serving-k8s-tutorial | testing/estimator_eager_example.py | 1 | 1165 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample tensorflow eager execution for helper functions.
Make sure to create a new virtual environment (Python 3, TF 1.5)
before executing this!
"""
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from helper_functions import preprocess_image
tfe.enable_eager_execution()
# Test preprocess_image
with open("../client/cat_sample.jpg", "rb") as imageFile:
jpeg_str = imageFile.read()
result = preprocess_image(jpeg_str)
assert result.shape == (224, 224, 3)
assert tf.reduce_max(result) <= 0.5 # Notice tf functions here!
assert tf.reduce_min(result) >= -0.5
| apache-2.0 | -6,809,092,252,593,347,000 | 33.264706 | 74 | 0.747639 | false | 3.770227 | false | false | false |
douzepouze/django-markdown-tag | setup.py | 1 | 1405 | #!/usr/bin/env python
import sys
import os
from setuptools import setup, find_packages
_top_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(_top_dir, "lib"))
try:
import markdown_tag
finally:
del sys.path[0]
README = open(os.path.join(_top_dir, 'README.md')).read()
setup(name='django-markdown-tag',
version=markdown_tag.__version__,
description="a Django app that provides template tags for using Markdown (using the python-markdown processor)",
long_description=README,
classifiers=[c.strip() for c in """
Development Status :: 5 - Production/Stable
Environment :: Web Environment
Framework :: Django
Intended Audience :: Developers
License :: OSI Approved :: MIT License
Operating System :: OS Independent
Programming Language :: Python :: 2
Topic :: Internet :: WWW/HTTP
""".split('\n') if c.strip()],
keywords='django markdown text markup html',
author='Trent Mick, Steffen Görtz',
author_email='[email protected], [email protected]',
maintainer='Steffen Görtz',
maintainer_email='[email protected]',
url='http://github.com/douzepouze/django-markdown-tag',
license='MIT',
install_requires = ['markdown'],
packages=["markdown_tag"],
package_dir={"": "lib"},
include_package_data=True,
zip_safe=False,
)
| mit | -2,465,374,674,965,124,600 | 32.404762 | 116 | 0.66144 | false | 3.597436 | false | false | false |
homhei/glance | glance/utils/memcached.py | 1 | 1958 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#vim: tabstop=4 shiftwidth=4 softtabstop=4
#Created on 2013-8-17
#Copyright 2013 nuoqingyun xuqifeng
import datetime
import calendar
import time
from oslo.config import cfg
memcache_opts = [
cfg.ListOpt('memcached_servers',
default=['127.0.0.1:11211'],
help='Memcached servers or None for in process cache.'),
]
CONF = cfg.CONF
CONF.register_opts(memcache_opts)
def get_client(memcached_servers=None):
client_cls = Client
if not memcached_servers:
memcached_servers = CONF.memcached_servers
if memcached_servers:
try:
import memcache
client_cls = memcache.Client
except ImportError:
pass
return client_cls(memcached_servers, debug=0)
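# If the python-memcache client is unavailable or no servers are configured,
# the Client class below acts as a simple in-process stand-in that mimics the
# small subset of the memcache interface used here (get/set/add/incr/delete).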
class Client(object):
def __init__(self, *args, **kwargs):
self.cache = {}
def get(self, key):
now = time.time()
for k in self.cache.keys():
(timeout, _value) = self.cache[k]
if timeout and now >= timeout:
del self.cache[k]
return self.cache.get(key, (0, None))[1]
def set(self, key, value, time=0, min_compress_len=0):
timeout = 0
if time != 0:
timeout = calendar.timegm((datetime.datetime.utcnow()).timetuple()) + time
self.cache[key] = (timeout, value)
return True
def add(self, key, value, time=0, min_compress_len=0):
if self.get(key) is not None:
return False
return self.set(key, value, time, min_compress_len)
def incr(self, key, delta=1):
value = self.get(key)
if value is None:
return None
new_value = int(value) + delta
self.cache[key] = (self.cache[key][0], str(new_value))
return new_value
def delete(self, key, time=0):
if key in self.cache:
del self.cache[key]
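# A minimal usage sketch of the in-process cache; it only runs when this module
# is executed directly, and the keys and values below are illustrative.
if __name__ == "__main__":
    cache = Client(["127.0.0.1:11211"], debug=0)
    cache.set("greeting", "hello", time=60)   # expires 60 seconds from now
    print cache.get("greeting")               # -> hello
    cache.set("hits", "1")
    print cache.incr("hits", delta=2)         # -> 3
    cache.delete("greeting")
    print cache.get("greeting")               # -> None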
| apache-2.0 | -6,917,844,398,799,297,000 | 25.821918 | 86 | 0.578141 | false | 3.673546 | false | false | false |
darvelo/chime | test/unit/app.py | 1 | 149653 | # -- coding: utf-8 --
from __future__ import absolute_import
from unittest import main, TestCase
from tempfile import mkdtemp
from os.path import join, exists, dirname, isdir, abspath, sep
from urlparse import urlparse, urljoin
from os import environ, mkdir
from shutil import rmtree, copytree
from re import search, sub
import random
from datetime import date, timedelta, datetime
import sys
from chime.repo_functions import ChimeRepo
from slugify import slugify
from multiprocessing import Process
import json
import time
import logging
import tempfile
logging.disable(logging.CRITICAL)
repo_root = abspath(join(dirname(__file__), '..'))
sys.path.insert(0, repo_root)
from box.util.rotunicode import RotUnicode
from httmock import response, HTTMock
from mock import MagicMock, patch
from bs4 import Comment
from chime import (
create_app, repo_functions, google_api_functions, view_functions,
publish, errors)
from chime import constants
from unit.chime_test_client import ChimeTestClient
import codecs
codecs.register(RotUnicode.search_function)
# these patterns help us search the HTML of a response to determine if the expected page loaded
PATTERN_BRANCH_COMMENT = u'<!-- branch: {} -->'
PATTERN_AUTHOR_COMMENT = u'<!-- author: {} -->'
PATTERN_TASK_COMMENT = u'<!-- task: {} -->'
PATTERN_TEMPLATE_COMMENT = u'<!-- template name: {} -->'
PATTERN_FILE_COMMENT = u'<!-- file type: {file_type}, file name: {file_name}, file title: {file_title} -->'
PATTERN_OVERVIEW_ITEM_CREATED = u'<p>The "{created_name}" {created_type} was created by {author_email}.</p>'
PATTERN_OVERVIEW_ACTIVITY_STARTED = u'<p>The "{activity_name}" activity was started by {author_email}.</p>'
PATTERN_OVERVIEW_COMMENT_BODY = u'<div class="comment__body">{comment_body}</div>'
PATTERN_OVERVIEW_ITEM_DELETED = u'<p>The "{deleted_name}" {deleted_type} {deleted_also}was deleted by {author_email}.</p>'
PATTERN_FLASH_TASK_DELETED = u'You deleted the "{description}" activity!'
PATTERN_FLASH_SAVED_CATEGORY = u'<li class="flash flash--notice">Saved changes to the {title} topic! Remember to submit this change for feedback when you\'re ready to go live.</li>'
PATTERN_FLASH_CREATED_CATEGORY = u'Created a new topic named {title}! Remember to submit this change for feedback when you\'re ready to go live.'
PATTERN_FLASH_CREATED_ARTICLE = u'Created a new article named {title}! Remember to submit this change for feedback when you\'re ready to go live.'
PATTERN_FLASH_SAVED_ARTICLE = u'Saved changes to the {title} article! Remember to submit this change for feedback when you\'re ready to go live.'
PATTERN_FLASH_DELETED_ARTICLE = u'The "{title}" article was deleted! Remember to submit this change for feedback when you\'re ready to go live.'
PATTERN_FORM_CATEGORY_TITLE = u'<input name="en-title" type="text" value="{title}" class="directory-modify__name" placeholder="Crime Statistics and Maps">'
PATTERN_FORM_CATEGORY_DESCRIPTION = u'<textarea name="en-description" class="directory-modify__description" placeholder="Crime statistics and reports by district and map">{description}</textarea>'
# review stuff
PATTERN_REQUEST_FEEDBACK_BUTTON = u'<button class="toolbar__item button button--orange" type="submit" name="request_feedback" value="Request Feedback">Request Feedback</button>'
PATTERN_UNREVIEWED_EDITS_LINK = u'<a href="/tree/{branch_name}/" class="toolbar__item button">Unreviewed Edits</a>'
PATTERN_ENDORSE_BUTTON = u'<button class="toolbar__item button button--green" type="submit" name="endorse_edits" value="Endorse Edits">Endorse Edits</button>'
PATTERN_FEEDBACK_REQUESTED_LINK = u'<a href="/tree/{branch_name}/" class="toolbar__item button">Feedback requested</a>'
PATTERN_PUBLISH_BUTTON = u'<button class="toolbar__item button button--blue" type="submit" name="merge" value="Publish">Publish</button>'
PATTERN_READY_TO_PUBLISH_LINK = u'<a href="/tree/{branch_name}/" class="toolbar__item button">Ready to publish</a>'
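# These patterns are typically filled in with str.format() and then checked
# against a rendered page, as the tests below do; the values shown here are
# illustrative:
#
#   self.assertTrue(PATTERN_TASK_COMMENT.format(task_description) in response.data)
#   self.assertTrue(PATTERN_BRANCH_COMMENT.format(branch_name) in response.data)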
class TestAppConfig (TestCase):
# in TestAppConfig
def test_missing_values(self):
self.assertRaises(KeyError, lambda: create_app({}))
# in TestAppConfig
def test_present_values(self):
create_app_environ = {}
create_app_environ['RUNNING_STATE_DIR'] = 'Yo'
create_app_environ['GA_CLIENT_ID'] = 'Yo'
create_app_environ['GA_CLIENT_SECRET'] = 'Yo'
create_app_environ['LIVE_SITE_URL'] = 'Hey'
create_app_environ['BROWSERID_URL'] = 'Hey'
create_app(create_app_environ)
# in TestAppConfig
def test_error_template_args(self):
''' Default error template args are generated as expected
'''
create_app_environ = {}
create_app_environ['RUNNING_STATE_DIR'] = 'Yo'
create_app_environ['GA_CLIENT_ID'] = 'Yo'
create_app_environ['GA_CLIENT_SECRET'] = 'Yo'
create_app_environ['BROWSERID_URL'] = 'Hey'
create_app_environ['LIVE_SITE_URL'] = 'Hey'
fake_support_email = u'[email protected]'
fake_support_phone_number = u'(123) 456-7890'
create_app_environ['SUPPORT_EMAIL_ADDRESS'] = fake_support_email
create_app_environ['SUPPORT_PHONE_NUMBER'] = fake_support_phone_number
app = create_app(create_app_environ)
template_args = errors.common_error_template_args(app.config)
self.assertEqual(len(template_args), 3)
self.assertTrue('activities_path' in template_args)
self.assertTrue('support_email' in template_args)
self.assertTrue('support_phone_number' in template_args)
self.assertEqual(template_args['support_email'], fake_support_email)
self.assertEqual(template_args['support_phone_number'], fake_support_phone_number)
# in TestAppConfig
def test_for_constant_name_conflicts(self):
''' None of the constant names defined in constants.py conflict with reserved config variable names
'''
flask_reserved_config_names = ['DEBUG', 'TESTING', 'PROPAGATE_EXCEPTIONS', 'PRESERVE_CONTEXT_ON_EXCEPTION', 'SECRET_KEY', 'SESSION_COOKIE_NAME', 'SESSION_COOKIE_DOMAIN', 'SESSION_COOKIE_PATH', 'SESSION_COOKIE_HTTPONLY', 'SESSION_COOKIE_SECURE', 'PERMANENT_SESSION_LIFETIME', 'USE_X_SENDFILE', 'LOGGER_NAME', 'SERVER_NAME', 'APPLICATION_ROOT', 'MAX_CONTENT_LENGTH', 'SEND_FILE_MAX_AGE_DEFAULT', 'TRAP_HTTP_EXCEPTIONS', 'TRAP_BAD_REQUEST_ERRORS', 'PREFERRED_URL_SCHEME', 'JSON_AS_ASCII', 'JSON_SORT_KEYS', 'JSONIFY_PRETTYPRINT_REGULAR']
chime_reserved_config_names = ['RUNNING_STATE_DIR', 'REPO_PATH', 'WORK_PATH', 'AUTH_DATA_HREF', 'BROWSERID_URL', 'GA_CLIENT_ID', 'GA_CLIENT_SECRET', 'GA_REDIRECT_URI', 'SUPPORT_EMAIL_ADDRESS', 'SUPPORT_PHONE_NUMBER', 'GDOCS_CLIENT_ID', 'GDOCS_CLIENT_SECRET', 'GITHUB_CLIENT_ID', 'GITHUB_CLIENT_SECRET', 'LIVE_SITE_URL', 'PUBLISH_SERVICE_URL']
check_names = flask_reserved_config_names + chime_reserved_config_names
for reserved_name in check_names:
self.assertFalse(hasattr(constants, reserved_name), u'The reserved config variable name {} is present in constants!'.format(reserved_name))
class TestApp (TestCase):
def setUp(self):
self.old_tempdir, tempfile.tempdir = tempfile.tempdir, mkdtemp(prefix='chime-TestApp-')
self.work_path = mkdtemp(prefix='chime-repo-clones-')
self.publish_path = mkdtemp(prefix='chime-publish-path-')
repo_path = dirname(abspath(__file__)) + '/../test-app.git'
temp_repo_dir = mkdtemp(prefix='chime-root')
temp_repo_path = temp_repo_dir + '/test-app.git'
copytree(repo_path, temp_repo_path)
self.origin = ChimeRepo(temp_repo_path)
repo_functions.ignore_task_metadata_on_merge(self.origin)
self.clone1 = self.origin.clone(mkdtemp(prefix='chime-'))
repo_functions.ignore_task_metadata_on_merge(self.clone1)
fake_author_email = u'[email protected]'
self.session = dict(email=fake_author_email)
environ['GIT_AUTHOR_NAME'] = ' '
environ['GIT_COMMITTER_NAME'] = ' '
environ['GIT_AUTHOR_EMAIL'] = self.session['email']
environ['GIT_COMMITTER_EMAIL'] = self.session['email']
create_app_environ = {}
create_app_environ['SINGLE_USER'] = 'Yes'
create_app_environ['GA_CLIENT_ID'] = 'client_id'
create_app_environ['GA_CLIENT_SECRET'] = 'meow_secret'
self.ga_config_dir = mkdtemp(prefix='chime-config-')
create_app_environ['RUNNING_STATE_DIR'] = self.ga_config_dir
create_app_environ['WORK_PATH'] = self.work_path
create_app_environ['REPO_PATH'] = temp_repo_path
create_app_environ['AUTH_DATA_HREF'] = 'http://example.com/auth.csv'
create_app_environ['BROWSERID_URL'] = 'http://localhost'
create_app_environ['LIVE_SITE_URL'] = 'http://example.org/'
create_app_environ['PUBLISH_PATH'] = self.publish_path
create_app_environ['SUPPORT_EMAIL_ADDRESS'] = u'[email protected]'
create_app_environ['SUPPORT_PHONE_NUMBER'] = u'(123) 456-7890'
self.app = create_app(create_app_environ)
# write a tmp config file
config_values = {
"access_token": "meowser_token",
"refresh_token": "refresh_meows",
"profile_id": "12345678",
"project_domain": ""
}
with self.app.app_context():
google_api_functions.write_ga_config(config_values, self.app.config['RUNNING_STATE_DIR'])
        # pin random.choice so generated branch names are deterministic in tests
        random.choice = MagicMock(return_value="P")
self.test_client = self.app.test_client()
def tearDown(self):
rmtree(tempfile.tempdir)
tempfile.tempdir = self.old_tempdir
def auth_csv_example_disallowed(self, url, request):
if url.geturl() == 'http://example.com/auth.csv':
return response(200, '''Email domain,Organization\n''')
raise Exception('Asked for unknown URL ' + url.geturl())
def auth_csv_example_allowed(self, url, request):
if url.geturl() == 'http://example.com/auth.csv':
return response(200, '''Email domain,Organization\nexample.com,Example Org\n*,Anyone''')
raise Exception('Asked for unknown URL ' + url.geturl())
def mock_persona_verify_erica(self, url, request):
if url.geturl() == 'https://verifier.login.persona.org/verify':
return response(200, '''{"status": "okay", "email": "[email protected]"}''', headers=dict(Link='<https://api.github.com/user/337792/repos?page=1>; rel="prev", <https://api.github.com/user/337792/repos?page=1>; rel="first"'))
else:
return self.auth_csv_example_allowed(url, request)
def mock_persona_verify_non_roman(self, url, request):
if url.geturl() == 'https://verifier.login.persona.org/verify':
return response(200, '''{"status": "okay", "email": "੯ूᵕू ໒꒱ƶƵ@快速狐狸.com"}''', headers=dict(Link='<https://api.github.com/user/337792/repos?page=1>; rel="prev", <https://api.github.com/user/337792/repos?page=1>; rel="first"'))
else:
return self.auth_csv_example_allowed(url, request)
def mock_persona_verify_frances(self, url, request):
if url.geturl() == 'https://verifier.login.persona.org/verify':
return response(200, '''{"status": "okay", "email": "[email protected]"}''', headers=dict(Link='<https://api.github.com/user/337792/repos?page=1>; rel="prev", <https://api.github.com/user/337792/repos?page=1>; rel="first"'))
else:
return self.auth_csv_example_allowed(url, request)
def mock_persona_verify_william(self, url, request):
if url.geturl() == 'https://verifier.login.persona.org/verify':
return response(200, '''{"status": "okay", "email": "[email protected]"}''', headers=dict(Link='<https://api.github.com/user/337792/repos?page=1>; rel="prev", <https://api.github.com/user/337792/repos?page=1>; rel="first"'))
else:
return self.auth_csv_example_allowed(url, request)
def mock_google_authorization(self, url, request):
if 'https://accounts.google.com/o/oauth2/auth' in url.geturl():
return response(200, '''{"access_token": "meowser_token", "token_type": "meowser_type", "refresh_token": "refresh_meows", "expires_in": 3920}''')
else:
return self.auth_csv_example_allowed(url, request)
def mock_successful_google_callback(self, url, request):
if google_api_functions.GOOGLE_ANALYTICS_TOKENS_URL in url.geturl():
return response(200, '''{"access_token": "meowser_token", "token_type": "meowser_type", "refresh_token": "refresh_meows", "expires_in": 3920}''')
elif google_api_functions.GOOGLE_PLUS_WHOAMI_URL in url.geturl():
return response(200, '''{"displayName": "Jane Doe", "emails": [{"type": "account", "value": "[email protected]"}]}''')
elif google_api_functions.GOOGLE_ANALYTICS_PROPERTIES_URL in url.geturl():
return response(200, '''{"items": [{"defaultProfileId": "12345678", "name": "Property One", "websiteUrl": "http://propertyone.example.com"}, {"defaultProfileId": "87654321", "name": "Property Two", "websiteUrl": "http://propertytwo.example.com"}]}''')
else:
return self.auth_csv_example_allowed(url, request)
def mock_failed_google_callback(self, url, request):
if google_api_functions.GOOGLE_ANALYTICS_TOKENS_URL in url.geturl():
return response(500, '''{}''')
elif google_api_functions.GOOGLE_PLUS_WHOAMI_URL in url.geturl():
return response(200, '''{"displayName": "Jane Doe", "emails": [{"type": "account", "value": "[email protected]"}]}''')
elif google_api_functions.GOOGLE_ANALYTICS_PROPERTIES_URL in url.geturl():
return response(200, '''{"items": [{"defaultProfileId": "12345678", "name": "Property One", "websiteUrl": "http://propertyone.example.com"}, {"defaultProfileId": "87654321", "name": "Property Two", "websiteUrl": "http://propertytwo.example.com"}]}''')
else:
return self.auth_csv_example_allowed(url, request)
def mock_google_invalid_credentials_response(self, url, request):
if 'https://www.googleapis.com/analytics/' in url.geturl() or google_api_functions.GOOGLE_ANALYTICS_PROPERTIES_URL in url.geturl():
return response(401, '''{"error": {"code": 401, "message": "Invalid Credentials", "errors": [{"locationType": "header", "domain": "global", "message": "Invalid Credentials", "reason": "authError", "location": "Authorization"}]}}''')
elif google_api_functions.GOOGLE_PLUS_WHOAMI_URL in url.geturl():
return response(403, '''{"error": {"code": 403, "message": "Access Not Configured. The API (Google+ API) is not enabled for your project. Please use the Google Developers Console to update your configuration.", "errors": [{"domain": "usageLimits", "message": "Access Not Configured. The API (Google+ API) is not enabled for your project. Please use the Google Developers Console to update your configuration.", "reason": "accessNotConfigured", "extendedHelp": "https://console.developers.google.com"}]}}''')
else:
return self.auth_csv_example_allowed(url, request)
def mock_google_no_properties_response(self, url, request):
if google_api_functions.GOOGLE_ANALYTICS_PROPERTIES_URL in url.geturl():
return response(200, '''{"kind": "analytics#webproperties", "username": "[email protected]", "totalResults": 0, "startIndex": 1, "itemsPerPage": 1000, "items": []}''')
elif google_api_functions.GOOGLE_PLUS_WHOAMI_URL in url.geturl():
return response(200, '''{"displayName": "Jane Doe", "emails": [{"type": "account", "value": "[email protected]"}]}''')
else:
return self.auth_csv_example_allowed(url, request)
def mock_google_analytics(self, url, request):
start_date = (date.today() - timedelta(days=7)).isoformat()
end_date = date.today().isoformat()
url_string = url.geturl()
if 'ids=ga%3A12345678' in url_string and 'end-date=' + end_date in url_string and 'start-date=' + start_date in url_string and 'filters=ga%3ApagePath%3D~%28hello.html%7Chello%29' in url_string:
return response(200, '''{"ga:previousPagePath": "/about/", "ga:pagePath": "/lib/", "ga:pageViews": "12", "ga:avgTimeOnPage": "56.17", "ga:exiteRate": "43.75", "totalsForAllResults": {"ga:pageViews": "24", "ga:avgTimeOnPage": "67.36363636363636"}}''')
else:
return self.auth_csv_example_allowed(url, request)
def mock_internal_server_error(self, url, request):
from flask import abort
abort(500)
def mock_exception(self, url, request):
raise Exception(u'This is a generic exception.')
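    # The mock handlers above follow the pattern httmock expects: each receives a
    # parsed url and the outgoing request and returns a canned response. Tests
    # activate them with a context manager, for example:
    #
    #   with HTTMock(self.mock_persona_verify_erica):
    #       self.test_client.post('/sign-in', data={'assertion': '[email protected]'})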
# in TestApp
def test_no_cache_headers(self):
''' The expected no-cache headers are in the server response.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.test_client, self)
erica.sign_in(email='[email protected]')
erica.open_link('/')
# The static no-cache headers are as expected
self.assertEqual(erica.headers['Cache-Control'], 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0')
self.assertEqual(erica.headers['Pragma'], 'no-cache')
self.assertEqual(erica.headers['Expires'], '-1')
# The last modified date is within 10 seconds of now
last_modified = datetime.strptime(erica.headers['Last-Modified'], '%Y-%m-%d %H:%M:%S.%f')
delta = datetime.now() - last_modified
self.assertTrue(delta.seconds < 10)
# in TestApp
def test_bad_login(self):
''' Check basic log in / log out flow without talking to Persona.
'''
response = self.test_client.get('/')
self.assertFalse('[email protected]' in response.data)
with HTTMock(self.mock_persona_verify_erica):
response = self.test_client.post('/sign-in', data={'assertion': '[email protected]'})
self.assertEqual(response.status_code, 200)
with HTTMock(self.auth_csv_example_disallowed):
response = self.test_client.get('/')
self.assertFalse('Create' in response.data)
# in TestApp
def test_login(self):
''' Check basic log in / log out flow without talking to Persona.
'''
response = self.test_client.get('/')
self.assertFalse('Start' in response.data)
with HTTMock(self.mock_persona_verify_erica):
response = self.test_client.post('/sign-in', data={'assertion': '[email protected]'})
self.assertEqual(response.status_code, 200)
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.get('/')
self.assertTrue('Start' in response.data)
self.assertTrue('http://example.org' in response.data, 'Should see LIVE_SITE_URL in response')
response = self.test_client.post('/sign-out')
self.assertEqual(response.status_code, 200)
response = self.test_client.get('/')
self.assertFalse('Start' in response.data)
# in TestApp
def test_login_splat(self):
''' Check basic log in / log out flow without talking to Persona.
'''
response = self.test_client.get('/')
self.assertFalse('Start' in response.data)
with HTTMock(self.mock_persona_verify_william):
response = self.test_client.post('/sign-in', data={'assertion': '[email protected]'})
self.assertEqual(response.status_code, 200)
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.get('/')
self.assertTrue('Start' in response.data)
# in TestApp
def test_default_auth_href_warning(self):
''' Check basic log in / log out flow without talking to Persona.
'''
with patch('chime.view_functions.AUTH_DATA_HREF_DEFAULT', new='http://example.com/auth.csv'):
response = self.test_client.get('/not-allowed')
expected = 'Your Chime <code>AUTH_DATA_HREF</code> is set to default value.'
self.assertTrue(expected in response.data, 'Should see a warning')
# in TestApp
@patch('chime.view_functions.AUTH_CHECK_LIFESPAN', new=1.0)
def test_login_timeout(self):
''' Check basic log in / log out flow with auth check lifespan.
'''
response = self.test_client.get('/')
self.assertFalse('Start' in response.data)
with HTTMock(self.mock_persona_verify_erica):
response = self.test_client.post('/sign-in', data={'assertion': '[email protected]'})
self.assertEqual(response.status_code, 200)
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.get('/')
self.assertTrue('Start' in response.data)
with patch('chime.view_functions.get_auth_data_file') as get_auth_data_file:
# Show that email status does not require a call to auth CSV.
response = self.test_client.get('/')
self.assertEqual(response.status_code, 200, 'Should have worked')
self.assertEqual(get_auth_data_file.call_count, 0, 'Should not have called get_auth_data_file()')
# Show that a call to auth CSV was made, outside the timeout period.
time.sleep(1.1)
response = self.test_client.get('/')
self.assertEqual(get_auth_data_file.call_count, 1, 'Should have called get_auth_data_file()')
with HTTMock(self.auth_csv_example_allowed):
# Show that email status was correctly updatedw with call to CSV.
response = self.test_client.get('/')
self.assertEqual(response.status_code, 200, 'Should have worked')
response = self.test_client.post('/sign-out')
self.assertEqual(response.status_code, 200)
response = self.test_client.get('/')
self.assertFalse('Start' in response.data)
# in TestApp
def test_need_description_to_start_activity(self):
''' You need a description to start a new activity
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.test_client, self)
erica.sign_in(email='[email protected]')
pattern_template_comment_stripped = sub(ur'<!--|-->', u'', PATTERN_TEMPLATE_COMMENT)
flash_message_text = u'Please describe what you\'re doing when you start a new activity!'
# start a new task without a description
erica.start_task(description=u'')
# the activities-list template reloaded
comments = erica.soup.findAll(text=lambda text: isinstance(text, Comment))
self.assertTrue(pattern_template_comment_stripped.format(u'activities-list') in comments)
# verify that there's a flash message warning about submitting an empty description
self.assertEqual(flash_message_text, erica.soup.find('li', class_='flash').text)
# in TestApp
def test_whitespace_stripped_from_description(self):
''' Carriage returns, tabs, spaces are stripped from task descriptions before they're saved.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.test_client, self)
erica.sign_in(email='[email protected]')
# start a new task with a lot of random whitespace
task_description = u'I think\n\r\n\rI am so \t\t\t coool!!\n\n\nYeah.\n\nOK\n\rERWEREW dkkdk'
task_description_stripped = u'I think I am so coool!! Yeah. OK ERWEREW dkkdk'
erica.start_task(description=task_description)
# the stripped comment is in the HTML
pattern_task_comment_stripped = sub(ur'<!--|-->', u'', PATTERN_TASK_COMMENT)
comments = erica.soup.findAll(text=lambda text: isinstance(text, Comment))
self.assertTrue(pattern_task_comment_stripped.format(task_description_stripped) in comments)
# the stripped comment is in the task metadata
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email='[email protected]')
task_metadata = repo_functions.get_task_metadata_for_branch(repo, erica.get_branch_name())
self.assertEqual(task_description_stripped, task_metadata['task_description'])
# in TestApp
def test_notification_on_create_category(self):
''' You get a flash notification when you create a category
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('[email protected]')
# Start a new task
erica.start_task(description=u'Lick Water Droplets From Leaves for Leopard Geckos')
# Get the branch name
branch_name = erica.get_branch_name()
# Enter the "other" folder
other_slug = u'other'
erica.follow_link(href='/tree/{}/edit/{}/'.format(branch_name, other_slug))
# Create a category
category_name = u'Rubber Plants'
category_slug = slugify(category_name)
erica.add_category(category_name=category_name)
# the category is correctly represented on the page
self.assertIsNotNone(erica.soup.find(lambda tag: bool(tag.name == 'a' and category_name in tag.text)))
self.assertIsNotNone(erica.soup.find(lambda tag: bool(tag.name == 'a' and category_slug in tag['href'])))
# a flash message appeared
self.assertEqual(PATTERN_FLASH_CREATED_CATEGORY.format(title=category_name), erica.soup.find('li', class_='flash').text)
# in TestApp
def test_notifications_on_create_edit_and_delete_article(self):
''' You get a flash notification when you create an article
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('[email protected]')
# Start a new task
erica.start_task(description=u'Lick Water Droplets From Leaves for Leopard Geckos')
# Get the branch name
branch_name = erica.get_branch_name()
# Enter the "other" folder
other_slug = u'other'
erica.follow_link(href='/tree/{}/edit/{}/'.format(branch_name, other_slug))
# Create a category and sub-category
category_name = u'Rubber Plants'
subcategory_name = u'Leaves'
erica.add_category(category_name=category_name)
erica.add_subcategory(subcategory_name=subcategory_name)
subcategory_path = erica.path
# Create an article
article_name = u'Water Droplets'
erica.add_article(article_name=article_name)
# a flash message appeared
self.assertEqual(PATTERN_FLASH_CREATED_ARTICLE.format(title=article_name), erica.soup.find('li', class_='flash').text)
# edit the article
erica.edit_article(title_str=article_name, body_str=u'Watch out for poisonous insects.')
# a flash message appeared
self.assertEqual(PATTERN_FLASH_SAVED_ARTICLE.format(title=article_name), erica.soup.find('li', class_='flash').text)
# delete the article
erica.open_link(subcategory_path)
erica.delete_article(article_name)
# a flash message appeared
self.assertEqual(PATTERN_FLASH_DELETED_ARTICLE.format(title=article_name), erica.soup.find('li', class_='flash').text)
# in TestApp
def test_branches(self):
''' Check basic branching functionality.
'''
fake_task_description = u'do things for somebody else'
fake_author_email = u'[email protected]'
fake_endorser_email = u'[email protected]'
fake_page_slug = u'hello'
fake_page_path = u'{}/index.{}'.format(fake_page_slug, view_functions.CONTENT_FILE_EXTENSION)
fake_page_content = u'People of earth we salute you.'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# create a new branch
response = self.test_client.post('/start', data={'task_description': fake_task_description}, follow_redirects=True)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('articles-list') in response.data)
self.assertTrue(PATTERN_TASK_COMMENT.format(fake_task_description) in response.data)
self.assertTrue(PATTERN_AUTHOR_COMMENT.format(fake_author_email) in response.data)
# extract the generated branch name from the returned HTML
generated_branch_search = search(r'<!-- branch: (.{{{}}}) -->'.format(repo_functions.BRANCH_NAME_LENGTH), response.data)
self.assertIsNotNone(generated_branch_search)
try:
generated_branch_name = generated_branch_search.group(1)
except AttributeError:
raise Exception('No match for generated branch name.')
with HTTMock(self.mock_google_analytics):
# create a new file
response = self.test_client.post('/tree/{}/edit/'.format(generated_branch_name),
data={'action': 'create', 'create_what': view_functions.ARTICLE_LAYOUT, 'request_path': fake_page_slug},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(fake_page_path in response.data)
# get the index page for the branch and verify that the new file is listed
response = self.test_client.get('/tree/{}/edit/'.format(generated_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_BRANCH_COMMENT.format(generated_branch_name) in response.data)
self.assertTrue(PATTERN_FILE_COMMENT.format(**{"file_name": fake_page_slug, "file_title": fake_page_slug, "file_type": view_functions.ARTICLE_LAYOUT}) in response.data)
# get the edit page for the new file and extract the hexsha value
response = self.test_client.get('/tree/{}/edit/{}'.format(generated_branch_name, fake_page_path))
self.assertEqual(response.status_code, 200)
self.assertTrue(fake_page_path in response.data)
hexsha = search(r'<input name="hexsha" value="(\w+)"', response.data).group(1)
# now save the file with new content
response = self.test_client.post('/tree/{}/save/{}'.format(generated_branch_name, fake_page_path),
data={'layout': view_functions.ARTICLE_LAYOUT, 'hexsha': hexsha,
'en-title': 'Greetings',
'en-body': u'{}\n'.format(fake_page_content),
'fr-title': '', 'fr-body': '',
'url-slug': u'{}/index'.format(fake_page_slug)},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(fake_page_path in response.data)
self.assertTrue(fake_page_content in response.data)
# Check that English and French forms are both present.
self.assertTrue('name="fr-title"' in response.data)
self.assertTrue('name="en-title"' in response.data)
# Verify that navigation tabs are in the correct order.
self.assertTrue(response.data.index('id="fr-nav"') < response.data.index('id="en-nav"'))
# Request feedback on the change
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name), data={'comment_text': u'', 'request_feedback': u'Request Feedback'}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(u'{} {}'.format(fake_author_email, repo_functions.ACTIVITY_FEEDBACK_MESSAGE) in response.data)
#
#
# Log in as a different person
with HTTMock(self.mock_persona_verify_frances):
self.test_client.post('/sign-in', data={'assertion': fake_endorser_email})
# Endorse the change
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name), data={'comment_text': u'', 'endorse_edits': 'Endorse Edits'}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(u'{} {}'.format(fake_endorser_email, repo_functions.ACTIVITY_ENDORSED_MESSAGE) in response.data)
# And publish the change!
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name), data={'comment_text': u'', 'merge': 'Publish'}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
# should've been redirected to the front page
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('activities-list') in response.data)
# the activity we just published should be listed under 'recently published activities'
self.assertTrue(generated_branch_name in response.data)
self.assertTrue(response.data.find(generated_branch_name) > response.data.find(u'Recently Published Activities'))
# Look in the published directory and see if the words are there.
with open(join(self.publish_path, fake_page_slug, 'index.html')) as file:
self.assertTrue(fake_page_content in file.read())
# in TestApp
def test_delete_strange_tasks(self):
''' Delete a task that you can see on the activity list but haven't viewed or edited.
'''
fake_author_email = u'[email protected]'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
disposable_task_description = u'unimportant task for unimportant person'
response = self.test_client.post('/start', data={'task_description': disposable_task_description}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('articles-list') in response.data)
self.assertTrue(PATTERN_TASK_COMMENT.format(disposable_task_description) in response.data)
# create a branch programmatically on our pre-made clone
check_task_description = u'Creating a Star Child for Ancient Aliens'
check_branch = repo_functions.get_start_branch(self.clone1, 'master', check_task_description, fake_author_email)
self.assertTrue(check_branch.name in self.clone1.branches)
self.assertTrue(check_branch.name in self.origin.branches)
# verify that the branch doesn't exist in our new clone
with self.app.app_context():
with self.app.test_request_context():
from flask import session
session['email'] = fake_author_email
new_clone = view_functions.get_repo(flask_app=self.app)
self.assertFalse(check_branch.name in new_clone.branches)
# load the activity list and verify that the branch is visible there
response = self.test_client.get('/', follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(check_branch.name in response.data)
# Delete the activity
response = self.test_client.post('/update', data={'abandon': 'Delete', 'branch': '{}'.format(check_branch.name)}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertFalse(check_branch.name in response.data)
# in TestApp
def test_review_process(self):
''' Check the review process
'''
fake_task_description = u'groom pets for pet owners'
fake_author_email = u'[email protected]'
fake_endorser_email = u'[email protected]'
fake_page_slug = u'hello'
# log in
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# create a new branch
response = self.test_client.post('/start', data={'task_description': fake_task_description}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('articles-list') in response.data)
# extract the generated branch name from the returned HTML
generated_branch_search = search(r'<!-- branch: (.{{{}}}) -->'.format(repo_functions.BRANCH_NAME_LENGTH), response.data)
self.assertIsNotNone(generated_branch_search)
try:
generated_branch_name = generated_branch_search.group(1)
except AttributeError:
raise Exception('No match for generated branch name.')
# create a new file
response = self.test_client.post('/tree/{}/edit/'.format(generated_branch_name),
data={'action': 'create', 'create_what': view_functions.ARTICLE_LAYOUT, 'request_path': fake_page_slug},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# get the edit page for the branch
response = self.test_client.get('/tree/{}/edit/'.format(generated_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that there's a 'request feedback' button
self.assertTrue(PATTERN_REQUEST_FEEDBACK_BUTTON in response.data)
# get the overview page for the branch
response = self.test_client.get('/tree/{}/'.format(generated_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that there's a 'request feedback' button
self.assertTrue(PATTERN_REQUEST_FEEDBACK_BUTTON in response.data)
# get the activity list page
response = self.test_client.get('/', follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that there's an unreviewed edits link
self.assertTrue(PATTERN_UNREVIEWED_EDITS_LINK.format(branch_name=generated_branch_name) in response.data)
# Request feedback on the change
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name), data={'comment_text': u'', 'request_feedback': u'Request Feedback'}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(u'{} {}'.format(fake_author_email, repo_functions.ACTIVITY_FEEDBACK_MESSAGE) in response.data)
#
#
# Log in as a different person
with HTTMock(self.mock_persona_verify_frances):
self.test_client.post('/sign-in', data={'assertion': fake_endorser_email})
with HTTMock(self.auth_csv_example_allowed):
# get the edit page for the branch
response = self.test_client.get('/tree/{}/edit/'.format(generated_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that there's a 'Endorse Edits' button
self.assertTrue(PATTERN_ENDORSE_BUTTON in response.data)
# get the overview page for the branch
response = self.test_client.get('/tree/{}/'.format(generated_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that there's a 'Endorse Edits' button
self.assertTrue(PATTERN_ENDORSE_BUTTON in response.data)
# get the activity list page
response = self.test_client.get('/', follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that there's a feedback requested link
self.assertTrue(PATTERN_FEEDBACK_REQUESTED_LINK.format(branch_name=generated_branch_name) in response.data)
# Endorse the change
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name), data={'comment_text': u'', 'endorse_edits': 'Endorse Edits'}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(u'{} {}'.format(fake_endorser_email, repo_functions.ACTIVITY_ENDORSED_MESSAGE) in response.data)
# log back in as the original editor
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# get the edit page for the branch
response = self.test_client.get('/tree/{}/edit/'.format(generated_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that there's a 'publish' button
self.assertTrue(PATTERN_PUBLISH_BUTTON in response.data)
# get the overview page for the branch
response = self.test_client.get('/tree/{}/'.format(generated_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that there's a 'publish' button
self.assertTrue(PATTERN_PUBLISH_BUTTON in response.data)
# get the activity list page
response = self.test_client.get('/', follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that there's an 'ready to publish' link
self.assertTrue(PATTERN_READY_TO_PUBLISH_LINK.format(branch_name=generated_branch_name) in response.data)
# And publish the change!
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name), data={'comment_text': u'', 'merge': 'Publish'}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
# should've been redirected to the front page
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('activities-list') in response.data)
# the activity we just published should be listed under 'recently published activities'
self.assertTrue(generated_branch_name in response.data)
self.assertTrue(response.data.find(generated_branch_name) > response.data.find(u'Recently Published Activities'))
# in TestApp
def test_get_request_does_not_create_branch(self):
''' Navigating to a made-up URL should not create a branch
'''
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': '[email protected]'})
with HTTMock(self.auth_csv_example_allowed):
fake_branch_name = 'this-should-not-create-a-branch'
#
# edit
#
response = self.test_client.get('/tree/{}/edit/'.format(fake_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 404)
self.assertTrue(view_functions.MESSAGE_ACTIVITY_DELETED in response.data)
# the branch path should not be in the returned HTML
self.assertFalse(PATTERN_BRANCH_COMMENT.format(fake_branch_name) in response.data)
# the branch name should not be in the origin's branches list
self.assertFalse(fake_branch_name in self.origin.branches)
#
# history
#
response = self.test_client.get('/tree/{}/history/'.format(fake_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 404)
self.assertTrue(view_functions.MESSAGE_ACTIVITY_DELETED in response.data)
# the branch path should not be in the returned HTML
self.assertFalse(PATTERN_BRANCH_COMMENT.format(fake_branch_name) in response.data)
# the branch name should not be in the origin's branches list
self.assertFalse(fake_branch_name in self.origin.branches)
#
# view
#
response = self.test_client.get('/tree/{}/view/'.format(fake_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 404)
self.assertTrue(view_functions.MESSAGE_ACTIVITY_DELETED in response.data)
# the branch path should not be in the returned HTML
self.assertFalse(PATTERN_BRANCH_COMMENT.format(fake_branch_name) in response.data)
# the branch name should not be in the origin's branches list
self.assertFalse(fake_branch_name in self.origin.branches)
# in TestApp
def test_post_request_does_not_create_branch(self):
''' Certain POSTs to a made-up URL should not create a branch
'''
fake_page_slug = u'hello'
fake_page_path = u'{}/index.{}'.format(fake_page_slug, view_functions.CONTENT_FILE_EXTENSION)
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': '[email protected]'})
with HTTMock(self.auth_csv_example_allowed):
#
# try creating an article in a non-existent branch
#
fake_branch_name = repo_functions.make_branch_name()
response = self.test_client.post('/tree/{}/edit/'.format(fake_branch_name), data={'action': 'create', 'create_what': view_functions.ARTICLE_LAYOUT, 'request_path': fake_page_slug}, follow_redirects=True)
self.assertEqual(response.status_code, 404)
self.assertTrue(view_functions.MESSAGE_ACTIVITY_DELETED in response.data)
# the branch name should not be in the origin's branches list
self.assertFalse(fake_branch_name in self.origin.branches)
#
# create a branch then delete it right before a POSTing a save command
#
fake_task_description = u'Doing fake stuff for Nobody'
response = self.test_client.post('/start', data={'task_description': fake_task_description}, follow_redirects=True)
# we should be on the new task's edit page
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('articles-list') in response.data)
self.assertTrue(PATTERN_TASK_COMMENT.format(fake_task_description) in response.data)
# extract the generated branch name from the returned HTML
generated_branch_search = search(r'<!-- branch: (.{{{}}}) -->'.format(repo_functions.BRANCH_NAME_LENGTH), response.data)
self.assertIsNotNone(generated_branch_search)
try:
generated_branch_name = generated_branch_search.group(1)
except AttributeError:
raise Exception('No match for generated branch name.')
# create a new article
response = self.test_client.post('/tree/{}/edit/'.format(generated_branch_name), data={'action': 'create', 'create_what': view_functions.ARTICLE_LAYOUT, 'request_path': fake_page_slug}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('article-edit') in response.data)
# load the article list and verify that the new article is listed
response = self.test_client.get('/tree/{}/edit/'.format(generated_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('articles-list') in response.data)
self.assertTrue(PATTERN_BRANCH_COMMENT.format(generated_branch_name) in response.data)
self.assertTrue(PATTERN_FILE_COMMENT.format(**{"file_name": fake_page_slug, "file_title": fake_page_slug, "file_type": view_functions.ARTICLE_LAYOUT}) in response.data)
# load the article edit page and grab the hexsha from the form
response = self.test_client.get('/tree/{}/edit/{}'.format(generated_branch_name, fake_page_path))
self.assertEqual(response.status_code, 200)
hexsha = search(r'<input name="hexsha" value="(\w+)"', response.data).group(1)
# delete the branch
response = self.test_client.post('/update', data={'abandon': 'Delete', 'branch': '{}'.format(generated_branch_name)}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertFalse(generated_branch_name in response.data)
# try submitting a change to the article
response = self.test_client.post('/tree/{}/save/{}'.format(generated_branch_name, fake_page_path), data={'layout': view_functions.ARTICLE_LAYOUT, 'hexsha': hexsha, 'en-title': 'Greetings', 'en-body': 'Hello world.\n', 'fr-title': '', 'fr-body': '', 'url-slug': 'hello'}, follow_redirects=True)
self.assertEqual(response.status_code, 404)
self.assertTrue(view_functions.MESSAGE_ACTIVITY_DELETED in response.data)
# the task name should not be in the returned HTML
self.assertFalse(PATTERN_BRANCH_COMMENT.format(fake_task_description) in response.data)
# the branch name should not be in the origin's branches list
self.assertFalse('{}'.format(generated_branch_name) in self.origin.branches)
# in TestApp
def test_accessing_local_branch_fetches_remote(self):
''' GETting or POSTing to a URL that indicates a branch that exists remotely but not locally
fetches the remote branch and allows access
'''
fake_author_email = u'[email protected]'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
disposable_task_description = u'unimportant task for unimportant person'
response = self.test_client.post('/start', data={'task_description': disposable_task_description}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('articles-list') in response.data)
self.assertTrue(PATTERN_TASK_COMMENT.format(disposable_task_description) in response.data)
# create a branch programmatically on our pre-made clone
check_task_description = u'the branch we are checking for for just me'
check_branch = repo_functions.get_start_branch(self.clone1, 'master', check_task_description, fake_author_email)
self.assertTrue(check_branch.name in self.clone1.branches)
self.assertTrue(check_branch.name in self.origin.branches)
# verify that the branch doesn't exist in our new clone
with self.app.app_context():
with self.app.test_request_context():
from flask import session
session['email'] = fake_author_email
new_clone = view_functions.get_repo(flask_app=self.app)
self.assertFalse(check_branch.name in new_clone.branches)
# request an edit page for the check branch through the http interface
response = self.test_client.get('/tree/{}/edit/'.format(check_branch.name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
# the task description should be in the returned HTML
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('articles-list') in response.data)
self.assertTrue(PATTERN_TASK_COMMENT.format(check_task_description) in response.data)
# the branch name should now be in the new clone's branches list
self.assertTrue(check_branch.name in new_clone.branches)
# in TestApp
def test_git_merge_strategy_implemented(self):
''' The Git merge strategy has been implemented for a new clone.
'''
fake_author_email = u'[email protected]'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# create a new clone via get_repo
with self.app.app_context():
with self.app.test_request_context():
from flask import session
session['email'] = fake_author_email
new_clone = view_functions.get_repo(flask_app=self.app)
# check for the config setting
self.assertEqual(new_clone.config_reader().get_value('merge "ignored"', 'driver'), True)
# check for the attributes setting
attributes_path = join(new_clone.git_dir, 'info/attributes')
self.assertTrue(exists(attributes_path))
with open(attributes_path, 'r') as file:
content = file.read().decode("utf-8")
self.assertEqual(content, u'{} merge=ignored'.format(repo_functions.TASK_METADATA_FILENAME))
# in TestApp
def test_task_metadata_should_exist(self):
''' Task metadata file should exist but doesn't
'''
fake_author_email = u'[email protected]'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
fake_task_description = u'unimportant task for unimportant person'
branch1 = repo_functions.get_start_branch(self.clone1, 'master', fake_task_description, fake_author_email)
branch1_name = branch1.name
branch1.checkout()
# verify that the most recent commit on the new branch is for the task metadata file
# by checking for the name of the file in the commit message
self.assertTrue(repo_functions.TASK_METADATA_FILENAME in branch1.commit.message)
# validate the existence of the task metadata file
self.assertTrue(repo_functions.verify_file_exists_in_branch(self.clone1, repo_functions.TASK_METADATA_FILENAME, branch1_name))
# now delete it
repo_functions.delete_task_metadata_for_branch(self.clone1, 'master')
self.assertFalse(repo_functions.verify_file_exists_in_branch(self.clone1, repo_functions.TASK_METADATA_FILENAME, branch1_name))
# verify that we can load a functional edit page for the branch
with HTTMock(self.auth_csv_example_allowed):
# request an edit page for the check branch through the http interface
response = self.test_client.get('/tree/{}/edit/'.format(branch1_name), follow_redirects=True)
# it's a good response
self.assertEqual(response.status_code, 200)
# the branch name should be in the returned HTML
self.assertTrue(PATTERN_BRANCH_COMMENT.format(branch1_name) in response.data)
# the 'Started by' should be 'Unknown' for now
self.assertTrue(PATTERN_AUTHOR_COMMENT.format(u'unknown') in response.data)
# in TestApp
def test_google_callback_is_successful(self):
''' Ensure we get a successful page load on callback from Google authentication
'''
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': '[email protected]'})
with HTTMock(self.mock_google_authorization):
self.test_client.post('/authorize')
with HTTMock(self.mock_successful_google_callback):
response = self.test_client.get('/callback?state=PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP&code=code')
with self.app.app_context():
ga_config = google_api_functions.read_ga_config(self.app.config['RUNNING_STATE_DIR'])
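# the tokens returned by the mocked Google callback should now be stored in the analytics config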
self.assertEqual(ga_config['access_token'], 'meowser_token')
self.assertEqual(ga_config['refresh_token'], 'refresh_meows')
self.assertTrue('/setup' in response.location)
# in TestApp
def test_analytics_setup_is_successful(self):
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': '[email protected]'})
with HTTMock(self.mock_google_authorization):
self.test_client.post('/authorize')
# mock-post the form in authorize.html to authorization-complete.html with some dummy values and check the results
response = self.test_client.post('/authorization-complete', data={'email': '[email protected]', 'name': 'Jane Doe', 'google_email': '[email protected]', 'return_link': 'http://example.com', 'property': '12345678', '12345678-domain': 'http://propertyone.example.com', '12345678-name': 'Property One'})
self.assertEqual(u'200 OK', response.status)
with self.app.app_context():
ga_config = google_api_functions.read_ga_config(self.app.config['RUNNING_STATE_DIR'])
# views.authorization_complete() strips the 'http://' from the domain
self.assertEqual(ga_config['project_domain'], 'propertyone.example.com')
self.assertEqual(ga_config['profile_id'], '12345678')
# in TestApp
def test_handle_bad_analytics_response(self):
''' Verify that an unauthorized analytics response is handled correctly
'''
with HTTMock(self.mock_google_invalid_credentials_response):
with self.app.app_context():
analytics_dict = google_api_functions.fetch_google_analytics_for_page(self.app.config, u'index.html', 'meowser_token')
self.assertEqual(analytics_dict, {})
# in TestApp
def test_google_callback_fails(self):
''' Ensure that we get an appropriate error flashed when we fail to auth with google
'''
with HTTMock(self.mock_persona_verify_erica):
response = self.test_client.post('/sign-in', data={'assertion': '[email protected]'})
with HTTMock(self.mock_google_authorization):
response = self.test_client.post('/authorize')
with HTTMock(self.mock_failed_google_callback):
response = self.test_client.get('/callback?state=PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP&code=code', follow_redirects=True)
self.assertEqual(response.status_code, 200)
# find the flashed error message in the returned HTML
self.assertTrue('Google rejected authorization request' in response.data)
# in TestApp
def test_invalid_access_token(self):
''' Ensure that we get an appropriate error flashed when we have an invalid access token
'''
with HTTMock(self.mock_persona_verify_erica):
response = self.test_client.post('/sign-in', data={'assertion': '[email protected]'})
self.assertEqual(response.status_code, 200)
with HTTMock(self.mock_google_invalid_credentials_response):
response = self.test_client.get('/setup', follow_redirects=True)
self.assertEqual(response.status_code, 200)
# find the flashed error message in the returned HTML
self.assertTrue('Invalid Credentials' in response.data)
# in TestApp
def test_no_properties_found(self):
''' Ensure that we get an appropriate error flashed when no analytics properties are
associated with the authorized Google account
'''
with HTTMock(self.mock_persona_verify_erica):
response = self.test_client.post('/sign-in', data={'assertion': '[email protected]'})
self.assertEqual(response.status_code, 200)
with HTTMock(self.mock_google_no_properties_response):
response = self.test_client.get('/setup', follow_redirects=True)
self.assertEqual(response.status_code, 200)
# find the flashed error message in the returned HTML
self.assertTrue('Your Google Account is not associated with any Google Analytics properties' in response.data)
# in TestApp
def test_redirect(self):
''' Check redirect to BROWSERID_URL.
'''
with HTTMock(self.mock_persona_verify_erica):
response = self.test_client.get('/not-allowed', headers={'Host': 'wrong.local'})
expected_url = urljoin(self.app.config['BROWSERID_URL'], '/not-allowed')
self.assertEqual(response.status_code, 302)
self.assertEqual(response.headers['Location'], expected_url)
# in TestApp
def test_create_category(self):
''' Creating a new category creates a directory with an appropriate index file inside.
'''
fake_author_email = u'[email protected]'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'force a clam shell open for starfish'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a new category
page_slug = u'hello'
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': view_functions.CATEGORY_LAYOUT, 'request_path': page_slug},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# a directory was created
dir_location = join(self.clone1.working_dir, page_slug)
idx_location = u'{}/index.{}'.format(dir_location, view_functions.CONTENT_FILE_EXTENSION)
self.assertTrue(exists(dir_location) and isdir(dir_location))
# an index page was created inside
self.assertTrue(exists(idx_location))
# the directory and index page pass the category test
self.assertTrue(view_functions.is_category_dir(dir_location))
# in TestApp
def test_period_in_category_name(self):
''' Putting a period in a category or subcategory name doesn't crop it.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.test_client, self)
erica.sign_in(email='[email protected]')
# Start a new task
erica.start_task(description=u'Be Shot Hundreds Of Feet Into The Air for A Geyser Of Highly Pressurized Water')
# Get the branch name
branch_name = erica.get_branch_name()
# Enter the "other" folder
other_slug = u'other'
erica.follow_link(href='/tree/{}/edit/{}/'.format(branch_name, other_slug))
# Create a category that has a period in its name
category_name = u'Mt. Splashmore'
category_slug = slugify(category_name)
erica.add_category(category_name=category_name)
# the category is correctly represented on the page
self.assertIsNotNone(erica.soup.find(lambda tag: bool(tag.name == 'a' and category_name in tag.text)))
self.assertIsNotNone(erica.soup.find(lambda tag: bool(tag.name == 'a' and category_slug in tag['href'])))
# the category is correctly represented on disk
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email='[email protected]')
cat_location = join(repo.working_dir, u'{}/{}'.format(other_slug, category_slug))
self.assertTrue(exists(cat_location))
self.assertTrue(view_functions.is_category_dir(cat_location))
# in TestApp
def test_empty_category_or_article_name(self):
''' Submitting an empty category or article name reloads with a warning.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.test_client, self)
erica.sign_in(email='[email protected]')
pattern_template_comment_stripped = sub(ur'<!--|-->', u'', PATTERN_TEMPLATE_COMMENT)
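# strip the '<!--' and '-->' markers so the pattern can be compared against BeautifulSoup Comment node text, which omits them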
# Start a new task
erica.start_task(description=u'Deep-Fry a Buffalo in Forty Seconds for Moe')
# Get the branch name
branch_name = erica.get_branch_name()
# Enter the "other" folder
other_slug = u'other'
erica.follow_link(href='/tree/{}/edit/{}/'.format(branch_name, other_slug))
# Try to create a category with no name
category_name = u''
erica.add_category(category_name=category_name)
# the articles-list template reloaded
comments = erica.soup.findAll(text=lambda text: isinstance(text, Comment))
self.assertTrue(pattern_template_comment_stripped.format(u'articles-list') in comments)
# verify that there's a flash message warning about submitting an empty description
self.assertEqual(u'Please enter a name to create a topic!', erica.soup.find('li', class_='flash').text)
# Try to create a category with a name that slugifies to an empty string
category_name = u'(╯□)╯︵ ┻━┻'
self.assertEqual(u'', slugify(category_name))
erica.add_category(category_name=category_name)
# the articles-list template reloaded
comments = erica.soup.findAll(text=lambda text: isinstance(text, Comment))
self.assertTrue(pattern_template_comment_stripped.format(u'articles-list') in comments)
# verify that there's a flash message warning about submitting an empty description
self.assertEqual(u'{} is not an acceptable topic name!'.format(category_name), erica.soup.find('li', class_='flash').text)
# Create a category and sub-category
category_name = u'Mammals'
subcategory_name = u'Bison'
erica.add_category(category_name=category_name)
erica.add_subcategory(subcategory_name=subcategory_name)
# Try to create an article with no name
article_name = u''
erica.add_article(article_name=article_name)
# the articles-list template reloaded
comments = erica.soup.findAll(text=lambda text: isinstance(text, Comment))
self.assertTrue(pattern_template_comment_stripped.format(u'articles-list') in comments)
# verify that there's a flash message warning about submitting an empty description
self.assertEqual(u'Please enter a name to create an article!', erica.soup.find('li', class_='flash').text)
# Try to create an article with a name that slugifies to an empty string
article_name = u'(╯□)╯︵ ┻━┻'
self.assertEqual(u'', slugify(article_name))
erica.add_article(article_name=article_name)
# the articles-list template reloaded
comments = erica.soup.findAll(text=lambda text: isinstance(text, Comment))
self.assertTrue(pattern_template_comment_stripped.format(u'articles-list') in comments)
# verify that there's a flash message warning about submitting an empty description
self.assertEqual(u'{} is not an acceptable article name!'.format(article_name), erica.soup.find('li', class_='flash').text)
# in TestApp
def test_create_duplicate_category(self):
''' If we ask to create a category that exists, let's not and say we did.
'''
fake_author_email = u'[email protected]'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
working_branch = repo_functions.get_start_branch(self.clone1, 'master', u'force a clam shell open for starfish', fake_author_email)
working_branch.checkout()
# create a new category
request_data = {'action': 'create', 'create_what': view_functions.CATEGORY_LAYOUT, 'request_path': u'hello'}
response = self.test_client.post('/tree/{}/edit/'.format(working_branch.name),
data=request_data,
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# now do it again
response = self.test_client.post('/tree/{}/edit/'.format(working_branch.name),
data=request_data,
follow_redirects=True)
self.assertEqual(response.status_code, 200)
response_data = sub('&#34;', '"', response.data.decode('utf-8'))
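# double quotes in the response come back HTML-encoded as &#34;, so un-escape them before matching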
self.assertTrue(u'Topic "hello" already exists' in response_data)
# pull the changes
self.clone1.git.pull('origin', working_branch.name)
# everything looks good
dir_location = join(self.clone1.working_dir, u'hello')
idx_location = u'{}/index.{}'.format(dir_location, view_functions.CONTENT_FILE_EXTENSION)
self.assertTrue(exists(dir_location) and isdir(dir_location))
# an index page was created inside
self.assertTrue(exists(idx_location))
# the directory and index page pass the category test
self.assertTrue(view_functions.is_category_dir(dir_location))
# in TestApp
def test_delete_categories_and_articles(self):
''' Non-empty categories and articles can be deleted
'''
fake_author_email = u'[email protected]'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'vomit digestive fluid onto rotting flesh for flies'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a categories directory
categories_slug = u'categories'
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': view_functions.CATEGORY_LAYOUT, 'request_path': categories_slug},
follow_redirects=True)
# and put a new category inside it
cata_title = u'Mouth Parts'
cata_slug = slugify(cata_title)
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, categories_slug),
data={'action': 'create', 'create_what': view_functions.CATEGORY_LAYOUT, 'request_path': cata_title},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# put another category inside that
catb_title = u'Esophagus'
catb_slug = slugify(catb_title)
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, join(categories_slug, cata_slug)),
data={'action': 'create', 'create_what': view_functions.CATEGORY_LAYOUT, 'request_path': catb_title},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# and an article inside that
art_title = u'Stomach'
art_slug = slugify(art_title)
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, join(categories_slug, cata_slug, catb_slug)),
data={'action': 'create', 'create_what': view_functions.ARTICLE_LAYOUT, 'request_path': art_title},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# verify that the categories and article exist
art_location = join(self.clone1.working_dir, categories_slug, cata_slug, catb_slug, art_slug)
catb_location = join(self.clone1.working_dir, categories_slug, cata_slug, catb_slug)
cata_location = join(self.clone1.working_dir, categories_slug, cata_slug)
self.assertTrue(exists(art_location))
self.assertTrue(view_functions.is_article_dir(art_location))
# delete category a while in category b
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, join(categories_slug, cata_slug, catb_slug)),
data={'action': 'delete', 'request_path': join(categories_slug, cata_slug)},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# verify that the deleted category and article no longer exist
self.assertFalse(exists(art_location))
self.assertFalse(exists(catb_location))
self.assertFalse(exists(cata_location))
# in TestApp
def test_delete_commit_accuracy(self):
''' The record of a delete in the corresponding commit is accurate.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.test_client, self)
erica.sign_in(email='[email protected]')
# Start a new task
erica.start_task(description=u'Ferment Tuber Fibres Using Symbiotic Bacteria in the Intestines for Naked Mole Rats')
# Get the branch name
branch_name = erica.get_branch_name()
# Enter the "other" folder
erica.follow_link(href='/tree/{}/edit/other/'.format(branch_name))
# Create a category and fill it with some subcategories and articles
category_names = [u'Indigestible Cellulose']
subcategory_names = [u'Volatile Fatty Acids', u'Non-Reproducing Females', u'Arid African Deserts']
article_names = [u'Eusocial Exhibition', u'Old Enough to Eat Solid Food', u'Contributing to Extension of Tunnels', u'Foraging and Nest Building']
erica.add_category(category_name=category_names[0])
category_path = erica.path
erica.add_subcategory(subcategory_name=subcategory_names[0])
erica.open_link(category_path)
erica.add_subcategory(subcategory_name=subcategory_names[1])
erica.open_link(category_path)
erica.add_subcategory(subcategory_name=subcategory_names[2])
subcategory_path = erica.path
erica.add_article(article_name=article_names[0])
erica.open_link(subcategory_path)
erica.add_article(article_name=article_names[1])
erica.open_link(subcategory_path)
erica.add_article(article_name=article_names[2])
erica.open_link(subcategory_path)
erica.add_article(article_name=article_names[3])
# Delete the all-containing category
erica.open_link(category_path)
erica.follow_modify_category_link(category_names[0])
erica.delete_category()
# get and check the history
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email='[email protected]')
activity_history = view_functions.make_activity_history(repo=repo)
delete_history = json.loads(activity_history[0]['commit_body'])
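# the latest commit's body is a JSON list of dicts, one per item removed by the delete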
for item in delete_history:
self.assertEqual(item['action'], u'delete')
if item['title'] in category_names:
self.assertEqual(item['display_type'], u'category')
category_names.remove(item['title'])
elif item['title'] in subcategory_names:
self.assertEqual(item['display_type'], u'category')
subcategory_names.remove(item['title'])
elif item['title'] in article_names:
self.assertEqual(item['display_type'], u'article')
article_names.remove(item['title'])
# every category, subcategory, and article name should have been matched and removed from its list
self.assertEqual(len(category_names), 0)
self.assertEqual(len(subcategory_names), 0)
self.assertEqual(len(article_names), 0)
# in TestApp
def test_delete_article(self):
''' An article can be deleted
'''
fake_author_email = u'[email protected]'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'Remove Small Organic Particles From Seawater Passing Over Outspread Tentacles for Sea Anemones'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create an article
art_title = u'Zooplankters'
art_slug = slugify(art_title)
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': view_functions.ARTICLE_LAYOUT, 'request_path': art_title},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# verify that the article exists
art_location = join(self.clone1.working_dir, art_slug)
self.assertTrue(exists(art_location))
self.assertTrue(view_functions.is_article_dir(art_location))
# delete the article
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, art_slug),
data={'action': 'delete', 'request_path': art_slug},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# verify that the deleted category and article no longer exist
self.assertFalse(exists(art_location))
# in TestApp
def test_article_creation_with_unicode_via_web_interface(self):
''' An article with unicode in its title is created as expected.
'''
fake_author_email = u'[email protected]'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'eviscerate a salmon for baby grizzly bears'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a new article
art_title = u'快速狐狸'
art_slug = slugify(art_title)
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name), data={'action': 'create', 'create_what': view_functions.ARTICLE_LAYOUT, 'request_path': art_title}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format(u'article-edit') in response.data.decode('utf-8'))
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# a directory was created
dir_location = join(self.clone1.working_dir, art_slug)
idx_location = u'{}/index.{}'.format(dir_location, view_functions.CONTENT_FILE_EXTENSION)
self.assertTrue(exists(dir_location) and isdir(dir_location))
# an index page was created inside
self.assertTrue(exists(idx_location))
# the directory and index page pass the article test
self.assertTrue(view_functions.is_article_dir(dir_location))
# the title saved in the index front matter is the same text that was used to create the article
self.assertEqual(view_functions.get_value_from_front_matter('title', idx_location), art_title)
# the title saved in the index front matter is displayed on the article list page
response = self.test_client.get('/tree/{}/edit/'.format(working_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format(u'articles-list') in response.data.decode('utf-8'))
self.assertTrue(PATTERN_BRANCH_COMMENT.format(working_branch) in response.data.decode('utf-8'))
self.assertTrue(PATTERN_FILE_COMMENT.format(**{"file_name": art_slug, "file_title": art_title, "file_type": view_functions.ARTICLE_LAYOUT}) in response.data.decode('utf-8'))
# in TestApp
def test_save_non_roman_characters_to_article(self):
''' Adding non-roman characters to an article's title and body raises no unicode errors.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('[email protected]')
# Start a new task, topic, subtopic, article
args = 'Mermithergate for Ant Worker', 'Enoplia Nematode', 'Genus Mermis', 'Cephalotes Atratus'
erica.quick_activity_setup(*args)
# Edit the new article and give it a non-roman character title
erica.edit_article(u'快速狐狸', u'Myrmeconema ੯ूᵕू ໒꒱ƶƵ Neotropicum')
# in TestApp
def test_sign_in_with_email_containing_non_roman_characters(self):
''' Adding non-roman characters to the sign-in email raises no errors.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_non_roman):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('੯ूᵕू ໒꒱ƶƵ@快速狐狸.com')
# in TestApp
def test_new_item_has_name_and_title(self):
''' A slugified directory name and display title are created when a new category or article is created.
'''
fake_author_email = u'[email protected]'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'eviscerate a salmon for baby grizzly bears'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a new category
cat_title = u'grrowl!! Yeah'
cat_slug = slugify(cat_title)
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': view_functions.CATEGORY_LAYOUT, 'request_path': cat_title},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# a directory was created
dir_location = join(self.clone1.working_dir, cat_slug)
idx_location = u'{}/index.{}'.format(dir_location, view_functions.CONTENT_FILE_EXTENSION)
self.assertTrue(exists(dir_location) and isdir(dir_location))
# an index page was created inside
self.assertTrue(exists(idx_location))
# the directory and index page pass the category test
self.assertTrue(view_functions.is_category_dir(dir_location))
# the title saved in the index front matter is the same text that was used to create the category
self.assertEqual(view_functions.get_value_from_front_matter('title', idx_location), cat_title)
# the title saved in the index front matter is displayed on the article list page
response = self.test_client.get('/tree/{}/edit/'.format(working_branch_name), follow_redirects=True)
self.assertTrue(PATTERN_FILE_COMMENT.format(**{"file_name": cat_slug, "file_title": cat_title, "file_type": view_functions.CATEGORY_LAYOUT}) in response.data)
# create a new article
art_title = u'快速狐狸'
art_slug = slugify(art_title)
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name), data={'action': 'create', 'create_what': view_functions.ARTICLE_LAYOUT, 'request_path': art_title}, follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format(u'article-edit') in response.data.decode('utf-8'))
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# a directory was created
dir_location = join(self.clone1.working_dir, art_slug)
idx_location = u'{}/index.{}'.format(dir_location, view_functions.CONTENT_FILE_EXTENSION)
self.assertTrue(exists(dir_location) and isdir(dir_location))
# an index page was created inside
self.assertTrue(exists(idx_location))
# the directory and index page pass the article test
self.assertTrue(view_functions.is_article_dir(dir_location))
# the title saved in the index front matter is the same text that was used to create the article
self.assertEqual(view_functions.get_value_from_front_matter('title', idx_location), art_title)
# the title saved in the index front matter is displayed on the article list page
response = self.test_client.get('/tree/{}/edit/'.format(working_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format(u'articles-list') in response.data.decode('utf-8'))
self.assertTrue(PATTERN_BRANCH_COMMENT.format(working_branch) in response.data.decode('utf-8'))
self.assertTrue(PATTERN_FILE_COMMENT.format(**{"file_name": art_slug, "file_title": art_title, "file_type": view_functions.ARTICLE_LAYOUT}) in response.data.decode('utf-8'))
# in TestApp
def test_edit_category_title_and_description(self):
''' A category's title and description can be edited.
'''
fake_author_email = u'[email protected]'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'rapidly discharge black ink into the mantle cavity for squids'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a categories directory
categories_slug = u'categories'
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': view_functions.CATEGORY_LAYOUT, 'request_path': categories_slug},
follow_redirects=True)
# and put a new category inside it
cat_title = u'Bolus'
cat_slug = slugify(cat_title)
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, categories_slug),
data={'action': 'create', 'create_what': view_functions.CATEGORY_LAYOUT, 'request_path': cat_title},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# get the hexsha
hexsha = self.clone1.commit().hexsha
# get the modify page and verify that the form renders with the correct values
cat_path = join(categories_slug, cat_slug, u'index.{}'.format(view_functions.CONTENT_FILE_EXTENSION))
response = self.test_client.get('/tree/{}/modify/{}'.format(working_branch_name, view_functions.strip_index_file(cat_path)), follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(PATTERN_FORM_CATEGORY_TITLE.format(title=cat_title) in response.data)
self.assertTrue(PATTERN_FORM_CATEGORY_DESCRIPTION.format(description=u'') in response.data)
# now save a new title and description for the category
new_cat_title = u'Caecum'
cat_description = u'An intraperitoneal pouch, that is considered to be the beginning of the large intestine.'
response = self.test_client.post('/tree/{}/modify/{}'.format(working_branch_name, cat_path),
data={'layout': view_functions.CATEGORY_LAYOUT, 'hexsha': hexsha, 'url-slug': u'{}/{}/'.format(categories_slug, cat_slug),
'en-title': new_cat_title, 'en-description': cat_description, 'order': u'0', 'save': u''},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# check the returned HTML for the description and title values (format will change as pages are designed)
response_data = sub('&#39;', '\'', response.data.decode('utf-8'))
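# apostrophes in the response come back HTML-encoded as &#39;, so un-escape them before matching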
self.assertTrue(PATTERN_FLASH_SAVED_CATEGORY.format(title=new_cat_title) in response_data)
self.assertTrue(PATTERN_FORM_CATEGORY_DESCRIPTION.format(description=cat_description) in response_data)
self.assertTrue(PATTERN_FORM_CATEGORY_TITLE.format(title=new_cat_title) in response_data)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# a directory was created
dir_location = join(self.clone1.working_dir, categories_slug, cat_slug)
idx_location = u'{}/index.{}'.format(dir_location, view_functions.CONTENT_FILE_EXTENSION)
self.assertTrue(exists(dir_location) and isdir(dir_location))
# an index page was created inside
self.assertTrue(exists(idx_location))
# the directory and index page pass the category test
self.assertTrue(view_functions.is_category_dir(dir_location))
# the title and description saved in the index front matter is the same text that was used to create the category
self.assertEqual(view_functions.get_value_from_front_matter('title', idx_location), new_cat_title)
self.assertEqual(view_functions.get_value_from_front_matter('description', idx_location), cat_description)
# the title saved in the index front matter is displayed on the article list page
response = self.test_client.get('/tree/{}/edit/{}'.format(working_branch_name, categories_slug), follow_redirects=True)
self.assertTrue(PATTERN_FILE_COMMENT.format(**{"file_name": cat_slug, "file_title": new_cat_title, "file_type": view_functions.CATEGORY_LAYOUT}) in response.data)
# in TestApp
def test_delete_category(self):
''' A category can be deleted
'''
fake_author_email = u'[email protected]'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'clasp with front legs and draw up the hind end for geometridae'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a categories directory
categories_slug = u'categories'
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': view_functions.CATEGORY_LAYOUT, 'request_path': categories_slug},
follow_redirects=True)
# and put a new category inside it
cat_title = u'Soybean Looper'
cat_slug = slugify(cat_title)
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, categories_slug),
data={'action': 'create', 'create_what': view_functions.CATEGORY_LAYOUT, 'request_path': cat_title},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# get the hexsha
hexsha = self.clone1.commit().hexsha
# now delete the category
cat_description = u''
url_slug = u'{}/{}/'.format(categories_slug, cat_slug)
response = self.test_client.post('/tree/{}/modify/{}'.format(working_branch_name, url_slug.rstrip('/')),
data={'layout': view_functions.CATEGORY_LAYOUT, 'hexsha': hexsha, 'url-slug': url_slug,
'en-title': cat_title, 'en-description': cat_description, 'order': u'0', 'delete': u''},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# check the returned HTML for the description and title values (format will change as pages are designed)
response_data = sub('&#34;', '"', response.data.decode('utf-8'))
self.assertTrue(u'<li class="flash flash--notice">The "{}" topic was deleted</li>'.format(cat_title) in response_data)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# the directory was deleted
dir_location = join(self.clone1.working_dir, categories_slug, cat_slug)
self.assertFalse(exists(dir_location) and isdir(dir_location))
# the title is not displayed on the article list page
response = self.test_client.get('/tree/{}/edit/{}'.format(working_branch_name, categories_slug), follow_redirects=True)
self.assertFalse(PATTERN_FILE_COMMENT.format(**{"file_name": cat_slug, "file_title": cat_title, "file_type": view_functions.CATEGORY_LAYOUT}) in response.data)
# in TestApp
def test_set_and_retrieve_order_and_description(self):
''' Order and description can be set to and retrieved from an article's or category's front matter.
'''
fake_author_email = u'[email protected]'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'regurgitate partially digested worms and grubs for baby birds'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a categories directory
categories_slug = u'categories'
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': view_functions.CATEGORY_LAYOUT, 'request_path': categories_slug},
follow_redirects=True)
# and put a new category inside it
cat_title = u'Small Intestine'
cat_slug = slugify(cat_title)
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, categories_slug),
data={'action': 'create', 'create_what': view_functions.CATEGORY_LAYOUT, 'request_path': cat_title},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# get the hexsha
hexsha = self.clone1.commit().hexsha
# now save some values into the category's index page's front matter
new_cat_title = u'The Small Intestine'
cat_description = u'The part of the GI tract following the stomach and followed by the large intestine where much of the digestion and absorption of food takes place.'
cat_order = 3
cat_path = join(categories_slug, cat_slug, u'index.{}'.format(view_functions.CONTENT_FILE_EXTENSION))
response = self.test_client.post('/tree/{}/save/{}'.format(working_branch_name, cat_path),
data={'layout': view_functions.CATEGORY_LAYOUT, 'hexsha': hexsha,
'en-title': new_cat_title, 'en-description': cat_description, 'order': cat_order},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# check the returned HTML for the description and order values (format will change as pages are designed)
self.assertTrue(u'<input name="en-description" type="hidden" value="{}" />'.format(cat_description) in response.data)
self.assertTrue(u'<input name="order" type="hidden" value="{}" />'.format(cat_order) in response.data)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# a directory was created
dir_location = join(self.clone1.working_dir, categories_slug, cat_slug)
idx_location = u'{}/index.{}'.format(dir_location, view_functions.CONTENT_FILE_EXTENSION)
self.assertTrue(exists(dir_location) and isdir(dir_location))
# an index page was created inside
self.assertTrue(exists(idx_location))
# the directory and index page pass the category test
self.assertTrue(view_functions.is_category_dir(dir_location))
# the title saved in the index front matter is the same text that was used to create the category
self.assertEqual(view_functions.get_value_from_front_matter('title', idx_location), new_cat_title)
# check order and description
self.assertEqual(view_functions.get_value_from_front_matter('order', idx_location), cat_order)
self.assertEqual(view_functions.get_value_from_front_matter('description', idx_location), cat_description)
# the title saved in the index front matter is displayed on the article list page
response = self.test_client.get('/tree/{}/edit/{}'.format(working_branch_name, categories_slug), follow_redirects=True)
self.assertTrue(PATTERN_FILE_COMMENT.format(**{"file_name": cat_slug, "file_title": new_cat_title, "file_type": view_functions.CATEGORY_LAYOUT}) in response.data)
# in TestApp
def test_column_navigation_structure(self):
''' The column navigation structure matches the structure of the site.
'''
fake_author_email = u'[email protected]'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'force a clam shell open for starfish'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create some nested categories
slug_hello = u'hello'
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': view_functions.CATEGORY_LAYOUT, 'request_path': slug_hello},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
slug_world = u'world'
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, slug_hello),
data={'action': 'create', 'create_what': view_functions.CATEGORY_LAYOUT, 'request_path': slug_world},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
slug_how = u'how'
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, sep.join([slug_hello, slug_world])),
data={'action': 'create', 'create_what': view_functions.CATEGORY_LAYOUT, 'request_path': slug_how},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
slug_are = u'are'
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, sep.join([slug_hello, slug_world, slug_how])),
data={'action': 'create', 'create_what': view_functions.CATEGORY_LAYOUT, 'request_path': slug_are},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# get the columns
dir_columns = view_functions.make_directory_columns(self.clone1, working_branch_name, sep.join([slug_hello, slug_world, slug_how, slug_are]))
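# make_directory_columns returns one column per level of the requested path, each listing the files visible at that level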
# test that the contents match our expectations
self.assertEqual(len(dir_columns), 4)
self.assertEqual(len(dir_columns[0]['files']), 6)
expected = {'hello': u'category', 'img': u'folder', 'index.md': u'file', 'other': u'folder', 'other.md': u'file', 'sub': u'folder'}
for item in dir_columns[0]['files']:
self.assertTrue(item['name'] in expected)
self.assertTrue(expected[item['name']] == item['display_type'])
self.assertTrue(dir_columns[1]['files'][0]['name'] == slug_world)
self.assertTrue(dir_columns[2]['files'][0]['name'] == slug_how)
self.assertTrue(dir_columns[3]['files'][0]['name'] == slug_are)
# in TestApp
def test_activity_overview_page_is_accurate(self):
''' The activity history page accurately displays the activity history
'''
fake_author_email = u'[email protected]'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'deposit eggs in a syconium for fig wasp larvae'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
title_fig_zh = u'无花果'
slug_fig_zh = u'wu-hua-guo'
title_syconium = u'Syconium'
slug_syconium = u'syconium'
title_ostiole = u'Ostiole'
title_fig_en = u'Fig'
title_fig_bn = u'Dumur'
create_details = [
(u'', title_fig_zh, view_functions.CATEGORY_LAYOUT),
(slug_fig_zh, title_syconium, view_functions.CATEGORY_LAYOUT),
(u'{}/{}'.format(slug_fig_zh, slug_syconium), title_ostiole, view_functions.ARTICLE_LAYOUT),
(u'', title_fig_en, view_functions.CATEGORY_LAYOUT),
(u'', title_fig_bn, view_functions.CATEGORY_LAYOUT)
]
for detail in create_details:
response = self.test_client.post('/tree/{}/edit/{}'.format(working_branch_name, detail[0]),
data={'action': 'create', 'create_what': detail[2], 'request_path': detail[1]},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# add a comment
comment_text = u'The flowers provide a safe haven and nourishment for the next generation of wasps. ᙙᙖ'
response = self.test_client.post('/tree/{}/'.format(working_branch_name),
data={'comment': 'Comment', 'comment_text': comment_text},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# delete a directory
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'delete', 'request_path': slug_fig_zh},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# get the activity history page
response = self.test_client.get('/tree/{}/'.format(working_branch_name), follow_redirects=True)
# TODO: for some reason (encoding?) my double-quotes are being replaced by &#34; in the returned HTML
response_data = sub('&#34;', '"', response.data.decode('utf-8'))
# make sure everything we did above is shown on the activity page
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('activity-overview') in response_data)
self.assertTrue(PATTERN_OVERVIEW_ACTIVITY_STARTED.format(**{"activity_name": task_description, "author_email": fake_author_email}) in response_data)
self.assertTrue(PATTERN_OVERVIEW_COMMENT_BODY.format(**{"comment_body": comment_text}) in response_data)
self.assertTrue(PATTERN_OVERVIEW_ITEM_DELETED.format(**{"deleted_name": title_fig_zh, "deleted_type": view_functions.file_display_name(view_functions.CATEGORY_LAYOUT), "deleted_also": u'(containing 1 topic and 1 article) ', "author_email": fake_author_email}) in response_data)
for detail in create_details:
self.assertTrue(PATTERN_OVERVIEW_ITEM_CREATED.format(**{"created_name": detail[1], "created_type": detail[2], "author_email": fake_author_email}), response_data)
# in TestApp
def test_activity_history_summary_accuracy(self):
''' The summary of an activity's history is displayed as expected.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.test_client, self)
erica.sign_in(email='[email protected]')
# Start a new task
erica.start_task(description=u'Parasitize with Ichneumonidae for Moth Larvae')
# Get the branch name
branch_name = erica.get_branch_name()
# Load the activity overview page
erica.follow_link(href='/tree/{}'.format(branch_name))
# there shouldn't be a summary yet
summary_div = erica.soup.find("div", class_="activity-summary")
self.assertIsNone(summary_div)
# Load the "other" folder
erica.open_link(url='/tree/{}/edit/other/'.format(branch_name))
# Create a category, sub-category, article
category_name = u'Antennae Segments'
subcategory_name = u'Short Ovipositors'
article_names = [u'Inject Eggs Directly Into a Host Body', u'A Technique Of Celestial Navigation Called Transverse Orientation']
erica.add_category(category_name=category_name)
erica.add_subcategory(subcategory_name=subcategory_name)
subcategory_path = erica.path
erica.add_article(article_name=article_names[0])
# edit the article
erica.edit_article(title_str=article_names[0], body_str=u'Inject venom along with the egg')
# create another article and delete it
erica.open_link(subcategory_path)
erica.add_article(article_name=article_names[1])
erica.open_link(subcategory_path)
erica.delete_article(article_names[1])
# Load the activity overview page
erica.open_link(url='/tree/{}/'.format(branch_name))
# there is a summary
summary_div = erica.soup.find("div", class_="activity-summary")
self.assertIsNotNone(summary_div)
# it's right about what's changed
self.assertIsNotNone(summary_div.find(lambda tag: bool(tag.name == 'a' and '2 articles and 2 topics' in tag.text)))
# grab all the table rows
check_rows = summary_div.find_all('tr')
# make sure they match what we did above
category_row = check_rows.pop()
category_cells = category_row.find_all('td')
self.assertIsNotNone(category_cells[0].find('a'))
self.assertEqual(category_cells[0].text, category_name)
self.assertEqual(category_cells[1].text, u'Category')
self.assertEqual(category_cells[2].text, u'Created')
subcategory_row = check_rows.pop()
subcategory_cells = subcategory_row.find_all('td')
self.assertIsNotNone(subcategory_cells[0].find('a'))
self.assertEqual(subcategory_cells[0].text, subcategory_name)
self.assertEqual(subcategory_cells[1].text, u'Category')
self.assertEqual(subcategory_cells[2].text, u'Created')
article_1_row = check_rows.pop()
article_1_cells = article_1_row.find_all('td')
self.assertIsNotNone(article_1_cells[0].find('a'))
self.assertEqual(article_1_cells[0].text, article_names[0])
self.assertEqual(article_1_cells[1].text, u'Article')
self.assertEqual(article_1_cells[2].text, u'Created, Edited')
article_2_row = check_rows.pop()
article_2_cells = article_2_row.find_all('td')
self.assertIsNone(article_2_cells[0].find('a'))
self.assertEqual(article_2_cells[0].text, article_names[1])
self.assertEqual(article_2_cells[1].text, u'Article')
self.assertEqual(article_2_cells[2].text, u'Created, Deleted')
# only the header row's left
self.assertEqual(len(check_rows), 1)
# in TestApp
def test_create_page_creates_directory_containing_index(self):
''' Creating a new page creates a directory with an editable index file inside.
'''
fake_author_email = u'[email protected]'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'filter plankton from sea water for humpback whales'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a new page
page_slug = u'hello'
page_path = u'{}/index.{}'.format(page_slug, view_functions.CONTENT_FILE_EXTENSION)
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': view_functions.ARTICLE_LAYOUT, 'request_path': page_slug},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(page_path in response.data)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# a directory was created
dir_location = join(self.clone1.working_dir, page_slug)
idx_location = u'{}/index.{}'.format(dir_location, view_functions.CONTENT_FILE_EXTENSION)
self.assertTrue(exists(dir_location) and isdir(dir_location))
# an index page was created inside
self.assertTrue(exists(idx_location))
# the directory and index page pass the article test
self.assertTrue(view_functions.is_article_dir(dir_location))
# in TestApp
def test_can_rename_editable_directories(self):
''' Can rename an editable directory.
'''
fake_author_email = u'[email protected]'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'filter plankton from sea water for humpback whales'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a new page
page_slug = u'hello'
page_path = u'{}/index.{}'.format(page_slug, view_functions.CONTENT_FILE_EXTENSION)
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': view_functions.ARTICLE_LAYOUT, 'request_path': page_slug},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(page_path in response.data)
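            # grab the commit hexsha embedded in the edit form; the save request below has to send it back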
hexsha = search(r'<input name="hexsha" value="(\w+)"', response.data).group(1)
# now save the file with new content
new_page_slug = u'goodbye'
new_page_path = u'{}/index.{}'.format(new_page_slug, view_functions.CONTENT_FILE_EXTENSION)
response = self.test_client.post('/tree/{}/save/{}'.format(working_branch_name, page_path),
data={'layout': view_functions.ARTICLE_LAYOUT, 'hexsha': hexsha,
'en-title': u'',
'en-body': u'',
'fr-title': u'', 'fr-body': u'',
'url-slug': u'{}'.format(new_page_slug)},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(new_page_path in response.data)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# the old directory is gone
old_dir_location = join(self.clone1.working_dir, page_slug)
self.assertFalse(exists(old_dir_location))
# the new directory exists and is properly structured
new_dir_location = join(self.clone1.working_dir, new_page_slug)
self.assertTrue(exists(new_dir_location) and isdir(new_dir_location))
# an index page is inside
idx_location = u'{}/index.{}'.format(new_dir_location, view_functions.CONTENT_FILE_EXTENSION)
self.assertTrue(exists(idx_location))
# the directory and index page pass the editable test
self.assertTrue(view_functions.is_article_dir(new_dir_location))
# in TestApp
    def test_cannot_move_a_directory_inside_itself(self):
''' Can't rename an editable directory in a way which moves it inside itself
'''
fake_author_email = u'[email protected]'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'filter plankton from sea water for humpback whales'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a new page
page_slug = u'hello'
page_path = u'{}/index.{}'.format(page_slug, view_functions.CONTENT_FILE_EXTENSION)
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': view_functions.ARTICLE_LAYOUT, 'request_path': page_slug},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(page_path in response.data)
hexsha = search(r'<input name="hexsha" value="(\w+)"', response.data).group(1)
# now save the file with new content
new_page_slug = u'hello/is/better/than/goodbye'
new_page_path = u'{}/index.{}'.format(new_page_slug, view_functions.CONTENT_FILE_EXTENSION)
response = self.test_client.post('/tree/{}/save/{}'.format(working_branch_name, page_path),
data={'layout': view_functions.ARTICLE_LAYOUT, 'hexsha': hexsha,
'en-title': u'',
'en-body': u'',
'fr-title': u'', 'fr-body': u'',
'url-slug': u'{}'.format(new_page_slug)},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
# the new page shouldn't have been created
self.assertFalse(new_page_path in response.data)
            # there should be a flashed error message
self.assertTrue(u'I cannot move a directory inside itself!' in response.data)
# pull the changes
self.clone1.git.pull('origin', working_branch_name)
# the old directory is not gone
old_dir_location = join(self.clone1.working_dir, page_slug)
self.assertTrue(exists(old_dir_location))
# the new directory doesn't exist
new_dir_location = join(self.clone1.working_dir, new_page_slug)
self.assertFalse(exists(new_dir_location) and isdir(new_dir_location))
# in TestApp
def test_editable_directories_are_shown_as_articles(self):
''' Editable directories (directories containing only an editable index file) are displayed as articles.
'''
fake_author_email = u'[email protected]'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'filter plankton from sea water for humpback whales'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# create a new page
page_slug = u'hello'
page_path = u'{}/index.{}'.format(page_slug, view_functions.CONTENT_FILE_EXTENSION)
response = self.test_client.post('/tree/{}/edit/'.format(working_branch_name),
data={'action': 'create', 'create_what': view_functions.ARTICLE_LAYOUT, 'request_path': page_slug},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(page_path in response.data)
# load the index page
response = self.test_client.get('/tree/{}/edit/'.format(working_branch_name), follow_redirects=True)
self.assertEqual(response.status_code, 200)
# verify that the new folder is represented as a file in the HTML
self.assertTrue(PATTERN_BRANCH_COMMENT.format(working_branch_name) in response.data)
self.assertTrue(PATTERN_FILE_COMMENT.format(**{"file_name": page_slug, "file_title": page_slug, "file_type": view_functions.ARTICLE_LAYOUT}) in response.data)
# in TestApp
def test_page_not_found_error(self):
''' A 404 page is generated when we get an address that doesn't exist
'''
fake_author_email = u'[email protected]'
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_author_email})
with HTTMock(self.auth_csv_example_allowed):
# start a new branch via the http interface
# invokes view_functions/get_repo which creates a clone
task_description = u'drink quinine for mosquitos'
working_branch = repo_functions.get_start_branch(self.clone1, 'master', task_description, fake_author_email)
self.assertTrue(working_branch.name in self.clone1.branches)
self.assertTrue(working_branch.name in self.origin.branches)
working_branch_name = working_branch.name
working_branch.checkout()
# get a non-existent page
response = self.test_client.get('tree/{}/malaria'.format(working_branch_name), follow_redirects=True)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('error-404') in response.data)
# these values are set in setUp() above
self.assertTrue(u'[email protected]' in response.data)
self.assertTrue(u'(123) 456-7890' in response.data)
# in TestApp
def test_garbage_edit_url_raises_page_not_found(self):
''' A 404 page is generated when we get an edit address that doesn't exist
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('[email protected]')
# Start a new task
erica.start_task(description=u'Take Malarone for People Susceptible to Malaria')
# Get the branch name
branch_name = erica.get_branch_name()
# Enter the "other" folder
other_slug = u'other'
erica.follow_link(href='/tree/{}/edit/{}/'.format(branch_name, other_slug))
# Create a category
category_name = u'Rubber Plants'
category_slug = slugify(category_name)
erica.add_category(category_name=category_name)
# Try to load a non-existent page within the category
erica.open_link(url='/tree/{}/edit/{}/malaria'.format(branch_name, category_slug), expected_status_code=404)
# in TestApp
def test_garbage_view_url_raises_page_not_found(self):
''' A 404 page is generated when we get a view address that doesn't exist
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('[email protected]')
# Start a new task
erica.start_task(description=u'Chew Mulberry Leaves for Silkworms')
# Get the branch name
branch_name = erica.get_branch_name()
# Enter the "other" folder
other_slug = u'other'
erica.follow_link(href='/tree/{}/edit/{}/'.format(branch_name, other_slug))
# Create a category
category_name = u'Bombyx Mori'
category_slug = slugify(category_name)
erica.add_category(category_name=category_name)
# Try to load a non-existent asset within the other folder
erica.open_link(url='/tree/{}/view/{}/{}/missing.jpg'.format(branch_name, other_slug, category_slug), expected_status_code=404)
# in TestApp
def test_internal_server_error(self):
''' A 500 page is generated when we provoke a server error
'''
with HTTMock(self.mock_persona_verify_erica):
response = self.test_client.post('/sign-in', data={'assertion': '[email protected]'})
with HTTMock(self.mock_internal_server_error):
response = self.test_client.get('/', follow_redirects=True)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('error-500') in response.data)
# these values are set in setUp() above
self.assertTrue(u'[email protected]' in response.data)
self.assertTrue(u'(123) 456-7890' in response.data)
# in TestApp
def test_exception_error(self):
''' A 500 page is generated when we provoke an uncaught exception
'''
with HTTMock(self.mock_persona_verify_erica):
response = self.test_client.post('/sign-in', data={'assertion': '[email protected]'})
with HTTMock(self.mock_exception):
response = self.test_client.get('/', follow_redirects=True)
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('error-500') in response.data)
# these values are set in setUp() above
self.assertTrue(u'[email protected]' in response.data)
self.assertTrue(u'(123) 456-7890' in response.data)
# in TestApp
def test_merge_conflict_error(self):
''' We get a merge conflict error page when there's a merge conflict
'''
fake_task_description_1 = u'do things for somebody else'
fake_task_description_2 = u'do other things for somebody even else'
fake_email_1 = u'[email protected]'
fake_email_2 = u'[email protected]'
fake_page_slug = u'hello'
fake_page_path = u'{}/index.{}'.format(fake_page_slug, view_functions.CONTENT_FILE_EXTENSION)
fake_page_content_1 = u'Hello world.'
fake_page_content_2 = u'Hello moon.'
#
#
# Log in as person 1
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_email_1})
with HTTMock(self.auth_csv_example_allowed):
# create a new branch
response = self.test_client.post('/start', data={'task_description': fake_task_description_1}, follow_redirects=True)
# extract the generated branch name from the returned HTML
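            # the doubled braces are literal braces escaped for str.format, so the final pattern's
            # quantifier is (.{N}) with N taken from repo_functions.BRANCH_NAME_LENGTH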
generated_branch_search = search(r'<!-- branch: (.{{{}}}) -->'.format(repo_functions.BRANCH_NAME_LENGTH), response.data)
self.assertIsNotNone(generated_branch_search)
try:
generated_branch_name_1 = generated_branch_search.group(1)
except AttributeError:
raise Exception('No match for generated branch name.')
with HTTMock(self.mock_google_analytics):
# create a new file
response = self.test_client.post('/tree/{}/edit/'.format(generated_branch_name_1),
data={'action': 'create', 'create_what': view_functions.ARTICLE_LAYOUT, 'request_path': fake_page_slug},
follow_redirects=True)
# get the edit page for the new file and extract the hexsha value
response = self.test_client.get('/tree/{}/edit/{}'.format(generated_branch_name_1, fake_page_path))
hexsha = search(r'<input name="hexsha" value="(\w+)"', response.data).group(1)
# now save the file with new content
response = self.test_client.post('/tree/{}/save/{}'.format(generated_branch_name_1, fake_page_path),
data={'layout': view_functions.ARTICLE_LAYOUT, 'hexsha': hexsha,
'en-title': 'Greetings',
'en-body': u'{}\n'.format(fake_page_content_1),
'url-slug': u'{}/index'.format(fake_page_slug)},
follow_redirects=True)
# Request feedback on person 1's change
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name_1), data={'comment_text': u'', 'request_feedback': u'Request Feedback'}, follow_redirects=True)
#
#
# Log in as person 2
with HTTMock(self.mock_persona_verify_frances):
self.test_client.post('/sign-in', data={'assertion': fake_email_2})
with HTTMock(self.auth_csv_example_allowed):
# create a new branch
response = self.test_client.post('/start', data={'task_description': fake_task_description_2}, follow_redirects=True)
# extract the generated branch name from the returned HTML
generated_branch_search = search(r'<!-- branch: (.{{{}}}) -->'.format(repo_functions.BRANCH_NAME_LENGTH), response.data)
try:
generated_branch_name_2 = generated_branch_search.group(1)
except AttributeError:
raise Exception('No match for generated branch name.')
with HTTMock(self.mock_google_analytics):
# create a new file
response = self.test_client.post('/tree/{}/edit/'.format(generated_branch_name_2),
data={'action': 'create', 'create_what': view_functions.ARTICLE_LAYOUT, 'request_path': fake_page_slug},
follow_redirects=True)
# get the edit page for the new file and extract the hexsha value
response = self.test_client.get('/tree/{}/edit/{}'.format(generated_branch_name_2, fake_page_path))
hexsha = search(r'<input name="hexsha" value="(\w+)"', response.data).group(1)
# now save the file with new content
fake_new_title = u'Bloople'
response = self.test_client.post('/tree/{}/save/{}'.format(generated_branch_name_2, fake_page_path),
data={'layout': view_functions.ARTICLE_LAYOUT, 'hexsha': hexsha,
'en-title': fake_new_title,
'en-body': u'{}\n'.format(fake_page_content_2),
'url-slug': u'{}/index'.format(fake_page_slug)},
follow_redirects=True)
# Request feedback on person 2's change
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name_2), data={'comment_text': u'', 'request_feedback': u'Request Feedback'}, follow_redirects=True)
# Endorse person 1's change
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name_1), data={'comment_text': u'', 'endorse_edits': 'Endorse Edits'}, follow_redirects=True)
# And publish person 1's change!
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name_1), data={'comment_text': u'', 'merge': 'Publish'}, follow_redirects=True)
#
#
# Log in as person 1
with HTTMock(self.mock_persona_verify_erica):
self.test_client.post('/sign-in', data={'assertion': fake_email_1})
# Endorse person 2's change
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name_2), data={'comment_text': u'', 'endorse_edits': 'Endorse Edits'}, follow_redirects=True)
# And publish person 2's change!
with HTTMock(self.auth_csv_example_allowed):
response = self.test_client.post('/tree/{}/'.format(generated_branch_name_2), data={'comment_text': u'', 'merge': 'Publish'}, follow_redirects=True)
# verify that we got an error page about the merge conflict
self.assertTrue(PATTERN_TEMPLATE_COMMENT.format('error-500') in response.data)
self.assertTrue(u'MergeConflict' in response.data)
self.assertTrue(u'{}/index.{}'.format(fake_page_slug, view_functions.CONTENT_FILE_EXTENSION) in response.data)
self.assertTrue(u'<td><a href="/tree/{}/edit/{}/">{}</a></td>'.format(generated_branch_name_2, fake_page_slug, fake_new_title))
self.assertTrue(u'<td>Article</td>' in response.data)
self.assertTrue(u'<td>Edited</td>' in response.data)
# these values are set in setUp() above
self.assertTrue(u'[email protected]' in response.data)
self.assertTrue(u'(123) 456-7890' in response.data)
# in TestApp
def test_redirect_into_solo_folder(self):
''' Loading a folder with a sole non-article or -category directory in it redirects to the contents of that directory.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in('[email protected]')
# Start a new task
erica.start_task(description=u'Be Shot Hundreds Of Feet Into The Air for A Geyser Of Highly Pressurized Water')
# Get the branch name
branch_name = erica.get_branch_name()
# create a directory containing only another directory
repo = view_functions.get_repo(repo_path=self.app.config['REPO_PATH'], work_path=self.app.config['WORK_PATH'], email='[email protected]')
testing_slug = u'testing'
categories_slug = u'categories'
mkdir(join(repo.working_dir, testing_slug))
mkdir(join(repo.working_dir, testing_slug, categories_slug))
# open the top level directory
erica.open_link(url='/tree/{}/edit/'.format(branch_name))
# enter the 'testing' directory
erica.follow_link(href='/tree/{}/edit/{}/'.format(branch_name, testing_slug))
# we should've automatically been redirected into the 'categories' directory
self.assertEqual(erica.path, '/tree/{}/edit/{}/'.format(branch_name, join(testing_slug, categories_slug)))
# in TestApp
def test_article_preview(self):
''' Check edit process with a user previewing their article.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in('[email protected]')
# Start a new task, "Diving for Dollars".
frances.start_task(description=u'Diving for Dollars')
branch_name = frances.get_branch_name()
# Look for an "other" link that we know about - is it a category?
frances.follow_link('/tree/{}/edit/other/'.format(branch_name))
# Create a new category "Ninjas", subcategory "Flipping Out", and article "So Awesome".
frances.add_category('Ninjas')
frances.add_subcategory('Flipping Out')
frances.add_article('So Awesome')
edit_path = frances.path
# Preview the new article.
frances.preview_article('So, So Awesome', 'It was the best of times.')
expected_path = '/tree/{}/view/other/ninjas/flipping-out/so-awesome'.format(branch_name)
self.assertTrue(frances.path.startswith(expected_path), 'Should be on a preview path')
self.assertTrue('best of times' in str(frances.soup), 'Should see current content there')
# Look back at the edit form.
frances.open_link(edit_path)
self.assertTrue('best of times' in str(frances.soup), 'Should see current content there, too')
# in TestApp
def test_alpha_sort_in_admin(self):
''' Make sure items are sorted alphabetically in the Chime admin interface
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in('[email protected]')
# Start a new task
frances.start_task(description=u'Crunching Beetles for Trap-Door Spiders')
branch_name = frances.get_branch_name()
# Look for an "other" link that we know about - is it a category?
frances.follow_link('/tree/{}/edit/other/'.format(branch_name))
# Create a bunch of new categories
frances.add_categories(['Anthicidae', 'Scydmaenidae', 'Paussinae', 'Bostrychidae', 'Scolytidae', 'Anobiidae', 'Meloidae', 'Dermestidae', 'Silphidae'])
# The categories should be sorted by title on the page
rendered_categories = [tag.text for tag in frances.soup.find_all('a', class_='category')]
sorted_categories = sorted(rendered_categories)
self.assertEqual(rendered_categories, sorted_categories)
# in TestApp
def test_overload_front_page(self):
''' Try to overload the front page with multiple simultaneous requests.
'''
with HTTMock(self.auth_csv_example_allowed):
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in('[email protected]')
# Start a new task
frances.start_task(description=u'Beating Crunches for Door-Spider Traps')
# hit the front page a bunch of times
times = 20
pros = []
for blip in range(times):
process = Process(target=frances.open_link, args=('/',))
process.start()
pros.append(process)
# wait until the processes are done
for process in pros:
process.join()
# raise if any errors were raised
for process in pros:
self.assertEqual(0, process.exitcode, u'A process that was trying to load the front page failed!')
# in TestApp
def test_published_activities_displayed(self):
''' Published activities are displayed on the activities list page.
'''
with HTTMock(self.auth_csv_example_allowed):
erica_email = u'[email protected]'
frances_email = u'[email protected]'
with HTTMock(self.mock_persona_verify_erica):
erica = ChimeTestClient(self.app.test_client(), self)
erica.sign_in(erica_email)
with HTTMock(self.mock_persona_verify_frances):
frances = ChimeTestClient(self.app.test_client(), self)
frances.sign_in(frances_email)
# Start a new task and create a topic, subtopic and article
activity_title = u'Flicking Ants Off My Laptop'
args = activity_title, u'Flying', u'Through The Air', u'Goodbye'
branch_name = erica.quick_activity_setup(*args)
# Ask for feedback
erica.follow_link(href='/tree/{}'.format(branch_name))
erica.request_feedback()
#
# Switch users and publish the article.
#
frances.open_link(url=erica.path)
frances.approve_activity()
frances.publish_activity()
#
# Load the front page and make sure the activity is listed as published
#
erica.open_link('/')
pub_ul = erica.soup.select('ul#published-activities')[0]
# there should be an HTML comment with the branch name
comment = pub_ul.findAll(text=lambda text: isinstance(text, Comment))[0]
self.assertTrue(branch_name in comment)
pub_li = comment.find_parent('li')
# and the activity title wrapped in a p tag
self.assertIsNotNone(pub_li.find('p', text=activity_title))
class TestPublishApp (TestCase):
def setUp(self):
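        # redirect tempfile to an isolated scratch directory so tearDown can remove it wholesale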
self.old_tempdir, tempfile.tempdir = tempfile.tempdir, mkdtemp(prefix='chime-TestPublishApp-')
self.work_path = mkdtemp(prefix='chime-publish-app-')
app_args = {}
self.app = publish.create_app(app_args)
self.client = self.app.test_client()
def tearDown(self):
rmtree(tempfile.tempdir)
tempfile.tempdir = self.old_tempdir
def mock_github_request(self, url, request):
        '''
        Mock the HTTP requests made while downloading the chime-starter archive from GitHub.
        '''
_, host, path, _, _, _ = urlparse(url.geturl())
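        # mimic GitHub's download flow: github.com answers with a redirect to codeload.github.com,
        # which then serves the archive bytes from a local fixture file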
if (host, path) == ('github.com', '/chimecms/chime-starter/archive/93250f1308daef66c5809fe87fc242d092e61db7.zip'):
return response(302, '', headers={'Location': 'https://codeload.github.com/chimecms/chime-starter/tar.gz/93250f1308daef66c5809fe87fc242d092e61db7'})
if (host, path) == ('codeload.github.com', '/chimecms/chime-starter/tar.gz/93250f1308daef66c5809fe87fc242d092e61db7'):
with open(join(dirname(__file__), '93250f1308daef66c5809fe87fc242d092e61db7.zip')) as file:
return response(200, file.read(), headers={'Content-Type': 'application/zip'})
raise Exception('Unknown URL {}'.format(url.geturl()))
# in TestPublishApp
def test_webhook_post(self):
''' Check basic webhook flow.
'''
payload = '''
{
"head": "93250f1308daef66c5809fe87fc242d092e61db7",
"ref": "refs/heads/master",
"size": 1,
"commits": [
{
"sha": "93250f1308daef66c5809fe87fc242d092e61db7",
"message": "Clean up braces",
"author": {
"name": "Frances Berriman",
"email": "[email protected]"
},
"url": "https://github.com/chimecms/chime-starter/commit/93250f1308daef66c5809fe87fc242d092e61db7",
"distinct": true
}
]
}
'''
with HTTMock(self.mock_github_request):
response = self.client.post('/', data=payload)
self.assertTrue(response.status_code in range(200, 299))
# in TestPublishApp
def test_load(self):
        ''' makes sure that the file loads properly
        '''
        from chime import publish
self.assertIsNotNone(publish.logger)
if __name__ == '__main__':
main()
| bsd-3-clause | -1,849,253,213,281,568,800 | 54.81523 | 542 | 0.620662 | false | 3.973453 | true | false | false |
Yipit/excellent | excellent/backends/xl_backend.py | 1 | 4473 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright <2013> Gabriel Falcao <[email protected]>
# Copyright <2013> Suneel Chakravorty <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import copy
from xlwt import XFStyle, Alignment, Workbook
from excellent.exceptions import TooManyRowsError
from .base import BaseBackend
default_style = XFStyle()
bold_style = XFStyle()
bold_style.alignment.horz = Alignment.HORZ_RIGHT
bold_style.font.bold = True
# Excel has issues when creating too many styles/fonts, hence we use
# a cache to reuse font instances (see FAQ#13 http://poi.apache.org/faq.html)
STYLE_CACHE = {}
EXCEL_CHAR_WIDTH = 275
EXCEL_MIN_COL_WIDTH = 3000
def hash_style(style):
"""
This ugly function allows us to get a hash for xlwt Style instances. The
hash allows us to determine that two Style instances are the same, even if
they are different objects.
"""
font_attrs = ["font", "alignment", "borders", "pattern", "protection"]
attrs_hashes = [hash(frozenset(getattr(style, attr).__dict__.items())) for attr in font_attrs]
return hash(sum(attrs_hashes + [hash(style.num_format_str)]))
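# Styles built from the same attribute values hash identically, which is what lets
# STYLE_CACHE below reuse a single style instance for equivalent styles.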
def get_column_width(value):
return max(len(value) * EXCEL_CHAR_WIDTH, EXCEL_MIN_COL_WIDTH)
class XL(BaseBackend):
def __init__(self, workbook=None, default_style=default_style):
self.workbook = workbook or Workbook()
self.current_sheet = None
self.current_row = 0
self.default_style = default_style
def get_header_style(self):
return bold_style
def write_row(self, row, values, style=None, header_row=False, **kwargs):
style = style or self.default_style
if kwargs:
# If there are additional changes in kwargs, we don't want to modify
# the original style, so we make a copy
style = copy.deepcopy(style)
if 'bold' in kwargs:
style.font.bold = kwargs['bold']
if 'bottom_border' in kwargs:
style.borders.bottom = 2
if 'format_string' in kwargs and kwargs['format_string']:
style.num_format_str = kwargs['format_string']
style_hash = hash_style(style)
if style_hash in STYLE_CACHE:
style = STYLE_CACHE[style_hash]
else:
STYLE_CACHE[style_hash] = style
for index, value in enumerate(values):
if header_row:
column_width = get_column_width(value=value)
self.current_sheet.col(index).width = column_width
row.write(index, value, style)
def write(self, data, output, style=None, **kwargs):
if not self.current_sheet:
self.use_sheet('Sheet1')
header_style = self.get_header_style()
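        # enumeration starts at the sheet's current offset; the header occupies row 0 and each
        # data row is written one row below its enumeration index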
for i, row in enumerate(data, self.current_row):
            if self.current_row == 0:
self.write_row(self.get_row(0), row.keys(), header_style, header_row=True, **kwargs)
self.write_row(self.get_row(i + 1), row.values(), style=style, header_row=False, **kwargs)
self.current_row = i + 1
def get_row(self, row_index):
sheet = self.current_sheet
try:
return sheet.row(row_index)
except ValueError:
# The max number of rows have been written
raise TooManyRowsError()
def get_sheets(self):
return self.workbook._Workbook__worksheets
def get_or_create_sheet(self, name):
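        # returns (sheet, index of the last written row): reuses an existing sheet by name,
        # otherwise adds an empty one starting at row 0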
for sheet in self.get_sheets():
if sheet.name == name:
return sheet, sheet.rows and max(sheet.rows.keys()) or 0
return self.workbook.add_sheet(name), 0
def use_sheet(self, name):
self.current_sheet, self.current_row = self.get_or_create_sheet(name)
def save(self, output):
self.workbook.save(output)
super(XL, self).save(output)
| gpl-3.0 | 1,392,645,152,268,931,300 | 35.072581 | 103 | 0.646546 | false | 3.666393 | false | false | false |
GabrielBrascher/cloudstack | test/integration/component/test_ldap_auto_import.py | 3 | 22445 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.cloudstackAPI import (
updateConfiguration,
deleteAccount,
addLdapConfiguration,
linkDomainToLdap,
deleteLdapConfiguration,
disableAccount)
from marvin.lib.common import get_domain
from marvin.lib.base import (Account,
Configurations,
Domain)
from marvin.cloudstackAPI import login
from marvin.lib.utils import (cleanup_resources)
from nose.plugins.attrib import attr
import telnetlib
import random
import string
def randomword(length):
return ''.join(random.choice(string.lowercase) for i in range(length))
def addLdapConfiguration1(cls, ldapConfiguration):
"""
    :param ldapConfiguration: dict of LDAP server and bind settings taken from the test configuration
"""
cls.chkConfig = checkLdapConfiguration(cls, ldapConfiguration)
if not cls.chkConfig:
return 0
# Setup Global settings
Configurations.update(
cls.apiClient,
name="ldap.basedn",
value=ldapConfiguration['basedn']
)
Configurations.update(
cls.apiClient,
name="ldap.bind.password",
value=ldapConfiguration['bindpassword']
)
Configurations.update(
cls.apiClient,
name="ldap.bind.principal",
value=ldapConfiguration['principal']
)
Configurations.update(
cls.apiClient,
name="ldap.email.attribute",
value=ldapConfiguration['emailAttribute']
)
Configurations.update(
cls.apiClient,
name="ldap.user.object",
value=ldapConfiguration['userObject']
)
Configurations.update(
cls.apiClient,
name="ldap.username.attribute",
value=ldapConfiguration['usernameAttribute']
)
Configurations.update(
cls.apiClient,
name="ldap.nested.groups.enable",
value="true"
)
ldapServer = addLdapConfiguration.addLdapConfigurationCmd()
ldapServer.hostname = ldapConfiguration['hostname']
ldapServer.port = ldapConfiguration['port']
cls.debug("calling addLdapConfiguration API command")
try:
cls.apiClient.addLdapConfiguration(ldapServer)
cls.debug("addLdapConfiguration was successful")
return 1
except Exception as e:
cls.debug(
"addLdapConfiguration failed %s Check the Passed passed"
" ldap attributes" %
e)
cls.reason = "addLdapConfiguration failed %s Check the Passed " \
"passed ldap attributes" % e
raise Exception(
"addLdapConfiguration failed %s Check the Passed passed"
" ldap attributes" %
e)
return 1
def checklogin(cls, username, password, domain, method):
"""
    :param username: name of the user attempting to log in
    :param password: password for the user
    :param domain: domain the user belongs to
    :param method: HTTP method to use for the login request
"""
cls.debug("Attempting to login.")
try:
loginParams = login.loginCmd()
loginParams.username = username
loginParams.password = password
loginParams.domain = domain
loginRes = cls.apiClient.login(loginParams, method)
cls.debug("login response %s" % loginRes)
if loginRes is None:
cls.debug("login not successful")
return 0
else:
cls.debug("login successful")
return 1
except Exception as p:
cls.debug("login operation failed %s" % p)
cls.reason = "Login operation Failed %s" % p
def checkLdapConfiguration(cls, ldapConfiguration):
"""This function checks whether the passed ldap server in
the configuration is up and running or not.
"""
flag = False
try:
tn = telnetlib.Telnet(
ldapConfiguration['hostname'],
ldapConfiguration['port'],
timeout=15)
if tn is not None:
tn.set_debuglevel(1)
print tn.msg("Connected to the server")
cls.debug(
"Ldap Server is Up and listening on the port %s" %
tn.msg("Connected to the server"))
flag = True
tn.close()
except Exception as e:
cls.debug(
"Not able to reach the LDAP server ,"
"please check the Services on LDAP %s and exception is %s" %
((ldapConfiguration['hostname']), e))
cls.reason = "Not able to reach the LDAP server ,please check" \
" the Services on LDAP %s and exception is %s" \
% ((ldapConfiguration['hostname']), e)
return flag
class TestLdap(cloudstackTestCase):
"""
LDAP AutoImport smoke tests
"""
@classmethod
def setUpClass(cls):
"""
:type cls: object
"""
testClient = super(TestLdap, cls).getClsTestClient()
cls.api_client = testClient.getApiClient()
cls.services = testClient.getParsedTestDataConfig()
cls.cleanup = []
cls.domain = get_domain(cls.api_client)
cls.delflag = 0
cls.reason = ""
cls.apiClient = cls.testClient.getApiClient()
try:
cls.ldapconfRes = addLdapConfiguration1(
cls, cls.services["configurableData"]["ldap_configuration"])
except Exception as e:
raise Exception("Configuring LDAP failed. Check attributes")
cls.cleanup.append(cls.ldapconfRes)
@classmethod
def tearDownClass(cls):
"""
#cleanup includes : delete normal account, remove ldap configuration
:type cls: object
"""
testClient = super(TestLdap, cls).getClsTestClient()
cls.api_client = testClient.getApiClient()
cls.services = testClient.getParsedTestDataConfig()
if cls.ldapconfRes == 1:
ldapserver = deleteLdapConfiguration.deleteLdapConfigurationCmd()
ldapserver.hostname = cls.services["configurableData"][
"ldap_configuration"]["hostname"]
try:
cls.apiClient.deleteLdapConfiguration(ldapserver)
cls.debug("deleteLdapConfiguration was successful")
return 1
except Exception as e:
cls.debug("deleteLdapConfiguration failed %s" % e)
return 0
def setUp(self):
self.user = self.services["configurableData"]["link_ldap_details"]["linkLdapUsername"]
self.password = self.services["configurableData"]["link_ldap_details"]["linkLdapPassword"]
self.delflag1 = 0
self.delflag2 = 0
self.delflag3 = 0
self.delflag4 = 0
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
self.parent_domain = Domain.create(
self.apiclient,
services=self.services["domain"],
parentdomainid=self.domain.id)
self.ldaplink = linkDomainToLdap.linkDomainToLdapCmd()
self.ldaplink.domainid = self.parent_domain.id
self.ldaplink.accounttype = self.services[
"configurableData"]["link_ldap_details"]["accounttype"]
self.ldaplink.name = self.services[
"configurableData"]["link_ldap_details"]["name"]
self.ldaplink.type = self.services[
"configurableData"]["link_ldap_details"]["type"]
if self.services["configurableData"][
"link_ldap_details"]["admin"] is not None:
self.ldaplink.admin = self.services[
"configurableData"]["link_ldap_details"]["admin"]
if self.ldaplink.domainid == "" or self.ldaplink.accounttype == "" \
or self.ldaplink.name == "" \
or self.ldaplink.type == "":
self.debug(
"Please rerun the test by providing "
"values in link_ldap configuration user details")
self.skipTest(
"Please rerun the test by providing "
"proper values in configuration file(link ldap)")
else:
self.delflag1 = 1
self.ldaplinkRes = self.apiClient.linkDomainToLdap(self.ldaplink)
self.assertEquals(
self.delflag1,
1,
"Linking LDAP failed,please check the configuration")
loginRes = checklogin(self,
self.user, self.password,
self.parent_domain.name,
method="POST")
self.debug(loginRes)
self.assertEquals(loginRes, 1, self.reason)
lsap_user = Account.list(self.api_client,
domainid=self.parent_domain.id,
name=self.user
)
self.ldapacctID = lsap_user[0].id
def tearDown(self):
try:
self.parent_domain.delete(self.apiclient, cleanup=True)
except Exception as e:
raise Exception(
"Warning: Exception during cleanup of domain : %s" % e)
try:
# Clean up, terminate the created instance, volumes and snapshots
cleanup_resources(self.apiclient, self.cleanup)
pass
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "basic"], required_hardware="true")
def test_01_ldap(self):
"""Check the linkDomainToLdap functionality"""
self.domain1 = Domain.create(
self.apiclient,
services=self.services["domain"],
parentdomainid=self.domain.id)
self.ldaplink4 = linkDomainToLdap.linkDomainToLdapCmd()
self.ldaplink4.domainid = self.domain1.id
self.ldaplink4.accounttype = self.services[
"configurableData"]["link_ldap_details"]["accounttype"]
self.ldaplink4.name = self.services[
"configurableData"]["link_ldap_details"]["name"]
self.ldaplink4.type = self.services[
"configurableData"]["link_ldap_details"]["type"]
if self.services["configurableData"][
"link_ldap_details"]["admin"] is not None:
self.ldaplink4.admin = self.services[
"configurableData"]["link_ldap_details"]["admin"]
try:
self.ldaplinkRes4 = self.apiClient.linkDomainToLdap(self.ldaplink4)
except Exception as e:
raise Exception(
"Linking LDAP failed,please check the configuration")
try:
self.domain1.delete(self.apiclient)
except Exception as e:
raise Exception(
"Warning: Exception during deletion of domain : %s" % e)
@attr(tags=["advanced", "basic"], required_hardware="true")
def test_02_ldap(self):
"""User is both in LDAP and imported into CS(i.e already logged in
once.So just check the log in again)"""
loginRes = checklogin(
self,
self.user,
self.password,
self.parent_domain.name,
method="POST")
self.debug(loginRes)
self.assertEquals(loginRes, 1, self.reason)
@attr(tags=["advanced", "basic"], required_hardware="true")
def test_03_ldap(self):
"""User in LDAP, wrong password --> login should fail"""
loginRes = checklogin(
self,
self.user,
randomword(8),
self.parent_domain.name,
method="POST")
self.debug(loginRes)
self.assertEquals(loginRes, None, self.reason)
@attr(tags=["advanced", "basic"], required_hardware="true")
def test_04_ldap(self):
"""User is only present locally, password is wrong --> login should
fail"""
loginRes = checklogin(
self,
self.services["configurableData"]["ldap_account"]["username"],
randomword(10),
"",
method="POST")
self.debug(loginRes)
self.assertEquals(loginRes, None, self.reason)
@attr(tags=["advanced", "basic"], required_hardware="true")
def test_05_ldap(self):
"""user is not present anywhere --> login should fail"""
loginRes = checklogin(self, randomword(10), randomword(10),
self.parent_domain.name,
method="POST")
self.debug(loginRes)
self.assertEquals(loginRes, None, self.reason)
@attr(tags=["advanced", "basic"], required_hardware="true")
def test_06_ldap(self):
"""Delete the LDAP user from CS and try to login --> User should be
created again"""
try:
deleteAcct2 = deleteAccount.deleteAccountCmd()
deleteAcct2.id = self.ldapacctID
acct_name = self.services["configurableData"][
"link_ldap_details"]["linkLdapUsername"]
self.apiClient.deleteAccount(deleteAcct2)
self.debug(
"Deleted the the following account name %s:" %
acct_name)
except Exception as e:
raise Exception(
"Warning: Exception during deleting "
"ldap imported account : %s" %
e)
loginRes = checklogin(
self,
self.user,
self.password,
self.parent_domain.name,
method="POST")
self.debug(loginRes)
self.assertEquals(loginRes, 1, self.reason)
@attr(tags=["advanced", "basic"], required_hardware="true")
def test_07_ldap(self):
"""Lock the user from CS and attempt to login --> login should fail"""
self.lockAcct = disableAccount.disableAccountCmd()
self.lockAcct.lock = 'true'
self.lockAcct.account = self.services["configurableData"][
"ldap_account"]["username"]
self.lockAcct.domainid = self.parent_domain.id
self.apiClient.disableAccount(self.lockAcct)
loginRes = checklogin(
self,
self.user,
self.password,
self.parent_domain.name,
method="POST")
self.debug(loginRes)
self.assertEquals(loginRes, None, self.reason)
@attr(tags=["advanced", "basic"], required_hardware="true")
def test_08_ldap(self):
"""Create different domains and link all of them to LDAP. Check
login in each domain --> login should be successful"""
try:
loginRes = checklogin(
self,
self.user,
self.password,
self.parent_domain.name,
method="POST")
self.debug(loginRes)
self.assertEquals(loginRes, 1, self.reason)
self.domain2 = Domain.create(
self.apiclient,
services=self.services["domain"],
parentdomainid=self.domain.id)
# here link ldap to domain
self.ldaplink2 = linkDomainToLdap.linkDomainToLdapCmd()
self.ldaplink2.domainid = self.domain2.id
self.ldaplink2.accounttype = self.services[
"configurableData"]["link_ldap_details"]["accounttype"]
self.ldaplink2.name = self.services[
"configurableData"]["link_ldap_details"]["name"]
self.ldaplink2.type = self.services[
"configurableData"]["link_ldap_details"]["type"]
if self.services["configurableData"][
"link_ldap_details"]["admin"] is not None:
self.ldaplink2.admin = self.services[
"configurableData"]["link_ldap_details"]["admin"]
if self.ldaplink2.domainid == "" \
or self.ldaplink2.accounttype == "" \
or self.ldaplink2.name == "" \
or self.ldaplink2.type == "":
self.debug(
"Please rerun the test by providing"
" values in link_ldap configuration user details")
self.skipTest(
"Please rerun the test by providing "
"proper values in configuration file(link ldap)")
else:
self.delflag2 = 1
self.ldaplinkRes2 = self.apiClient.linkDomainToLdap(
self.ldaplink2)
self.assertEquals(
self.delflag2,
1,
"Linking LDAP failed,please check the configuration")
loginRes = checklogin(
self,
self.user,
self.password,
self.domain2.name,
method="POST")
self.debug(loginRes)
self.assertEquals(loginRes, 1, self.reason)
self.domain3 = Domain.create(
self.apiclient,
services=self.services["domain"],
parentdomainid=self.domain.id)
# here link ldap to domain
self.ldaplink3 = linkDomainToLdap.linkDomainToLdapCmd()
self.ldaplink3.domainid = self.domain3.id
self.ldaplink3.accounttype = self.services[
"configurableData"]["link_ldap_details"]["accounttype"]
self.ldaplink3.name = self.services[
"configurableData"]["link_ldap_details"]["name"]
self.ldaplink3.type = self.services[
"configurableData"]["link_ldap_details"]["type"]
if self.services["configurableData"][
"link_ldap_details"]["admin"] is not None:
self.ldaplink3.admin = self.services[
"configurableData"]["link_ldap_details"]["admin"]
if self.ldaplink3.domainid == "" \
or self.ldaplink3.accounttype == "" \
or self.ldaplink3.name == "" \
or self.ldaplink3.type == "":
self.debug(
"Please rerun the test by providing"
" values in link_ldap configuration user details")
self.skipTest(
"Please rerun the test by providing "
"proper values in configuration file(link ldap)")
else:
self.delflag3 = 1
self.ldaplinkRes3 = self.apiClient.linkDomainToLdap(
self.ldaplink3)
self.assertEquals(
self.delflag3,
1,
"Linking LDAP failed,please check the configuration")
loginRes = checklogin(
self,
self.user,
self.password,
self.domain2.name,
method="POST")
self.debug(loginRes)
self.assertEquals(loginRes, 1, self.reason)
finally:
try:
self.domain2.delete(self.apiclient, cleanup=True)
except Exception as e:
raise Exception(
"Warning: Exception during deletion of domain : %s" % e)
try:
self.domain3.delete(self.apiclient, cleanup=True)
except Exception as e:
raise Exception(
"Warning: Exception during deletion of domain : %s" % e)
return
@attr(tags=["advanced", "basic"], required_hardware="true")
def test_09_ldap(self):
""" Enable nested groups and try to login with a user that is in
nested group --> login should be successful"""
if self.services["configurableData"]["link_ldap_details"]["linkLdapNestedUser"] == "":
self.skipTest("No nested user mentioned")
updateConfigurationCmd = updateConfiguration.updateConfigurationCmd()
updateConfigurationCmd.name = "ldap.nested.groups.enable"
updateConfigurationCmd.value = 'true'
self.apiClient.updateConfiguration(updateConfigurationCmd)
loginRes = checklogin(
self,
self.services["configurableData"]["link_ldap_details"]["linkLdapNestedUser"],
self.services["configurableData"]["link_ldap_details"]["linkLdapNestedPassword"],
self.parent_domain.name,
method="POST")
self.debug(loginRes)
self.assertEquals(loginRes, 1, self.reason)
@attr(tags=["advanced", "basic"], required_hardware="true")
def test_10_ldap(self):
"""Check db tables"""
db_check = 1
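        # resolve the domain's internal id, then confirm the ldap_trust_map row carries the same
        # type, group name and account type that were used when linking the domain to LDAP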
domainID = self.dbclient.execute(
"SELECT id FROM domain WHERE uuid=" + "'" +
self.parent_domain.id + "'" + ";",
db="cloud")
dbChecking = self.dbclient.execute(
"SELECT type,name,account_type "
"FROM ldap_trust_map WHERE domain_id=" + "'" +
str(domainID[0][0]) + "'" + ";",
db="cloud")
if dbChecking is not None and str(
dbChecking[0][0]) == \
self.services["configurableData"]["link_ldap_details"]["type"] \
and str(
dbChecking[0][1]) == \
self.services["configurableData"]["link_ldap_details"]["name"] \
and str(
dbChecking[0][2]) == \
self.services["configurableData"]["link_ldap_details"]["accounttype"]:
db_check = 0
self.assertEquals(db_check, 0, "DB check failed")
@attr(tags=["advanced", "basic"], required_hardware="true")
def test_11_ldap(self):
"""Password/domain empty --> login should fail"""
loginRes = checklogin(
self,
"", "", self.parent_domain.name, method="POST")
self.debug(loginRes)
self.assertEquals(loginRes, None, self.reason)
| apache-2.0 | 5,542,287,763,679,164,000 | 36.533445 | 98 | 0.573624 | false | 4.434894 | true | false | false |
bzhou26/leetcode_sol | p102_Binary_Tree_Level_Order_Traversal.py | 1 | 1201 | '''
- Leetcode problem: 102
- Difficulty: Medium
- Brief problem description:
Given a binary tree, return the level order traversal of its nodes' values. (ie, from left to right, level by level).
For example:
Given binary tree [3,9,20,null,null,15,7],
3
/ \
9 20
/ \
15 7
return its level order traversal as:
[
[3],
[9,20],
[15,7]
]
- Solution Summary:
BFS solution with deque
- Used Resources:
--- Bo Zhou
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def levelOrder(self, root: TreeNode) -> List[List[int]]:
result = []
q = collections.deque()
if root:
q.append(root)
while q:
n = len(q)
currentLevel = []
for i in range(n):
node = q.popleft()
currentLevel.append(node.val)
if node.left:
q.append(node.left)
if node.right:
q.append(node.right)
result.append(currentLevel)
return result
| mit | -3,779,501,425,278,980,000 | 19.016667 | 117 | 0.537885 | false | 3.511696 | false | false | false |
bardin-lab/readtagger | readtagger/cli/plot_coverage.py | 1 | 1420 | import click
from matplotlib.style import available
from readtagger.plot_coverage import plot_coverage_in_regions
from readtagger import VERSION
@click.command('Plot relative coverage for alignment files.')
@click.option('-f',
'--file',
type=(str, str, int),
multiple=True,
help="File, label and number of total reads in file.")
@click.argument('output_path')
@click.option('-c',
'--cores',
help='Cores to use when calculating coverage',
default=1)
@click.option('-r',
'--regions',
help='Regions to plot. If not specified plots all contigs.')
@click.option('-k',
'--plot_kind',
default='area',
type=click.Choice(['area', 'line']),
help='Kind of plot.')
@click.option('-s',
'--style',
type=click.Choice(available),
default='ggplot')
@click.version_option(version=VERSION)
def plot_coverage(**kwargs):
"""Plot coverage differences between file1 and file2."""
file_tuples = kwargs.pop('file')
kwargs['files'] = [_[0] for _ in file_tuples]
kwargs['labels'] = [_[1] for _ in file_tuples]
kwargs['total_reads'] = [_[2] for _ in file_tuples]
regions = kwargs.get('regions')
if regions:
kwargs['regions'] = regions.split(',')
plot_coverage_in_regions(**kwargs)
| mit | -8,497,225,324,329,508,000 | 33.634146 | 74 | 0.575352 | false | 4.045584 | false | false | false |
metacloud/python-troveclient | troveclient/utils.py | 1 | 8166 | # Copyright 2011 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import os
import sys
import uuid
import simplejson as json
import six
import prettytable
from troveclient.openstack.common.apiclient import exceptions
from troveclient.openstack.common import strutils
def arg(*args, **kwargs):
"""Decorator for CLI args."""
def _decorator(func):
add_arg(func, *args, **kwargs)
return func
return _decorator
def env(*vars, **kwargs):
"""
    Returns the value of the first environment variable in 'vars' that is set and non-empty.
    If none are, returns '' or the 'default' keyword argument.
"""
for v in vars:
value = os.environ.get(v, None)
if value:
return value
return kwargs.get('default', '')
def add_arg(f, *args, **kwargs):
"""Bind CLI arguments to a shell.py `do_foo` function."""
if not hasattr(f, 'arguments'):
f.arguments = []
# NOTE(sirp): avoid dups that can occur when the module is shared across
# tests.
if (args, kwargs) not in f.arguments:
# Because of the semantics of decorator composition if we just append
# to the options list positional options will appear to be backwards.
f.arguments.insert(0, (args, kwargs))
def unauthenticated(f):
"""
Adds 'unauthenticated' attribute to decorated function.
Usage:
@unauthenticated
def mymethod(f):
...
"""
f.unauthenticated = True
return f
def isunauthenticated(f):
"""
Checks to see if the function is marked as not requiring authentication
with the @unauthenticated decorator. Returns True if decorator is
set to True, False otherwise.
"""
return getattr(f, 'unauthenticated', False)
def service_type(stype):
"""
Adds 'service_type' attribute to decorated function.
Usage:
@service_type('database')
def mymethod(f):
...
"""
def inner(f):
f.service_type = stype
return f
return inner
def get_service_type(f):
"""
Retrieves service type from function
"""
return getattr(f, 'service_type', None)
def translate_keys(collection, convert):
for item in collection:
keys = list(item.__dict__.keys())
for from_key, to_key in convert:
if from_key in keys and to_key not in keys:
setattr(item, to_key, item._info[from_key])
def _output_override(objs, print_as):
"""
    If an output override global flag is set, print using that override;
    otherwise raise BaseException to indicate that no printing was overridden.
"""
if 'json_output' in globals() and json_output:
if print_as == 'list':
new_objs = []
for o in objs:
new_objs.append(o._info)
elif print_as == 'dict':
new_objs = objs
# pretty print the json
print(json.dumps(new_objs, indent=' '))
else:
raise BaseException('No valid output override')
def _print(pt, order):
if sys.version_info >= (3, 0):
print(pt.get_string(sortby=order))
else:
print(strutils.safe_encode(pt.get_string(sortby=order)))
def print_list(objs, fields, formatters={}, order_by=None, obj_is_dict=False):
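    # renders a list of API objects (or plain dicts) as a left-aligned PrettyTable,
    # honouring the JSON output override when it is active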
try:
_output_override(objs, 'list')
return
except BaseException:
pass
mixed_case_fields = []
pt = prettytable.PrettyTable([f for f in fields], caching=False)
pt.aligns = ['l' for f in fields]
for o in objs:
row = []
for field in fields:
if field in formatters:
row.append(formatters[field](o))
else:
if field in mixed_case_fields:
field_name = field.replace(' ', '_')
else:
field_name = field.lower().replace(' ', '_')
if not obj_is_dict:
data = getattr(o, field_name, '')
else:
data = o.get(field_name, '')
row.append(data)
pt.add_row(row)
if order_by is None:
order_by = fields[0]
_print(pt, order_by)
def print_dict(d, property="Property"):
try:
_output_override(d, 'dict')
return
except BaseException:
pass
pt = prettytable.PrettyTable([property, 'Value'], caching=False)
pt.aligns = ['l', 'l']
[pt.add_row(list(r)) for r in six.iteritems(d)]
_print(pt, property)
def find_resource(manager, name_or_id):
"""Helper for the _find_* methods."""
# first try to get entity as integer id
try:
if isinstance(name_or_id, int) or name_or_id.isdigit():
return manager.get(int(name_or_id))
except exceptions.NotFound:
pass
if sys.version_info <= (3, 0):
name_or_id = strutils.safe_decode(name_or_id)
# now try to get entity as uuid
try:
uuid.UUID(name_or_id)
return manager.get(name_or_id)
except (ValueError, exceptions.NotFound):
pass
try:
try:
return manager.find(human_id=name_or_id)
except exceptions.NotFound:
pass
# finally try to find entity by name
try:
return manager.find(name=name_or_id)
except exceptions.NotFound:
try:
return manager.find(display_name=name_or_id)
except (UnicodeDecodeError, exceptions.NotFound):
try:
# Instances does not have name, but display_name
return manager.find(display_name=name_or_id)
except exceptions.NotFound:
msg = "No %s with a name or ID of '%s' exists." % \
(manager.resource_class.__name__.lower(), name_or_id)
raise exceptions.CommandError(msg)
except exceptions.NoUniqueMatch:
msg = ("Multiple %s matches found for '%s', use an ID to be more"
" specific." % (manager.resource_class.__name__.lower(),
name_or_id))
raise exceptions.CommandError(msg)
class HookableMixin(object):
"""Mixin so classes can register and run hooks."""
_hooks_map = {}
@classmethod
def add_hook(cls, hook_type, hook_func):
if hook_type not in cls._hooks_map:
cls._hooks_map[hook_type] = []
cls._hooks_map[hook_type].append(hook_func)
@classmethod
def run_hooks(cls, hook_type, *args, **kwargs):
hook_funcs = cls._hooks_map.get(hook_type) or []
for hook_func in hook_funcs:
hook_func(*args, **kwargs)
def safe_issubclass(*args):
"""Like issubclass, but will just return False if not a class."""
try:
if issubclass(*args):
return True
except TypeError:
pass
return False
# http://code.activestate.com/recipes/
# 577257-slugify-make-a-string-usable-in-a-url-or-filename/
def slugify(value):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
From Django's "django/template/defaultfilters.py".
Make use strutils.to_slug from openstack common
"""
return strutils.to_slug(value, incoming=None, errors="strict")
def is_uuid_like(val):
"""Returns validation of a value as a UUID.
For our purposes, a UUID is a canonical form string:
aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
"""
try:
return str(uuid.UUID(val)) == val
except (TypeError, ValueError, AttributeError):
return False
| apache-2.0 | 6,266,213,611,314,955,000 | 27.652632 | 78 | 0.600906 | false | 3.977594 | false | false | false |
Sylvermyst-Technologies/BreezyNS | src/pycfdmesh/mesh.py | 1 | 13794 | '''
This file is a part of BreezyNS - a simple, general-purpose 2D airflow calculator.
Copyright (c) 2013, Brendan Gray
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
Created on 26 Nov 2013
@author: AlphanumericSheepPig
'''
import math
from pycfdmesh.geometry import Point, BoundingBox, PointList#, Polygon
from pycfdalg.usefulstuff import removeDuplicates
class Element():
'''
The Element object is a Quadtree. Each element may potentially be split into four smaller elements.
If the element is polled for anything, then it will return it's value only if it is a leaf. Otherwise,
it will poll its children instead and pass on the result.
'''
def __init__(self, center, cellSize, maxCellSize, minCellSize, parent = None):
self.isLeaf = True
self.isSolid = None
self.isBoundary = False
self.Boundary = None
self.parent = parent
self.children = []
self.cellSize = cellSize
self.maxCellSize = maxCellSize
self.minCellSize = minCellSize
self.center = center
self.boundingBox = BoundingBox(center, cellSize/2)
# We need a simple, direction-agnostic way of storing our neighbours.
self.neighbours = {'up':None, 'down':None, 'left':None, 'right':None}
#print(" Created new element at",center,"with size",cellSize)
def getBoundingBox(self):
return self.boundingBox
def getNeighbour(self, direction):
'''
        Returns the neighbouring element in the given direction, by finding the element (cellSize+minCellSize)/2 away.
"direction" is a string that's either 'up', 'down', 'left' or 'right'.
'''
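        # step half this cell's size to reach its edge, plus half the minimum cell size so the
        # query point is guaranteed to land inside the neighbour even if it is a smallest-size leaf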
distance = (self.cellSize+self.minCellSize)/2
if direction == 'up':
queryPoint = Point(0,1)
elif direction == 'down':
queryPoint = Point(0,-1)
elif direction == 'left':
queryPoint = Point(-1,0)
elif direction == 'right':
queryPoint = Point(1,0)
else:
# This also serves as a check to see that the direction is a valid key for the neighbour dict.
raise Exception("Error: Cannot interpret direction given while trying to find a neighbour.")
neighbourLocation = self.center + queryPoint.scaledBy(distance)
# If we've found a neighbour before, then we can save time by querying them directly to see if it's changed.
# If we don't have a neighbour yet, we'll ask our parent if they can find our neighbour for us.
if self.neighbours[direction]:
self.neighbours[direction] = self.neighbours[direction].getElementAtPoint(neighbourLocation)
return self.neighbours[direction]
else:
self.neighbours[direction] = self.parent.getElementAtPoint(neighbourLocation)
return self.neighbours[direction]
def getAllElements(self):
'''
Returns a list of all leaf elements within the current element.
'''
if self.isLeaf:
if self.isSolid:
return []
return [self]
else:
elementList = []
for c in self.children:
elementList += c.getAllElements()
return elementList
def getElementAtPoint(self, point):
'''
Gets the leaf element that contains a point.
'''
        # Start off by checking if this element contains the point. If not, there's no need to go further, but
# presumably, some element somewhere needs an answer. So, we ask the parent to find it for us.
if not self.boundingBox.containsPoint(point):
return self.parent.getElementAtPoint(point)
if self.isLeaf:
if self.isSolid:
return None
return self
else:
for child in self.children:
# We MUST ensure that we only poll the child that definitely contains the point, otherwise
                # it will poll its parent (this instance), which will poll the child again, and so on.
if child.boundingBox.containsPoint(point):
return child.getElementAtPoint(point)
def getPointList(self):
'''
        If it's a leaf, this returns the points defining the corners of the cell. Otherwise, it
returns a list of points for all of its children.
'''
if self.isLeaf:
if self.isSolid:
return PointList()
return self.boundingBox.getPointList()
else:
pointList = PointList()
for child in self.children:
pointList += child.getPointList()
return pointList
def getPolygons(self):
'''
Returns a list of Polygon objects defining each leaf element.
'''
if self.isLeaf:
if self.isSolid:
return []
return [self.boundingBox.getPolygon()]
else:
polyList = []
for child in self.children:
polyList += child.getPolygons()
return polyList
def split(self):
if self.isLeaf:
newCellSize = self.cellSize/2
if newCellSize > self.minCellSize:
self.isLeaf = False
topLeft = Point(self.center.x - newCellSize/2, self.center.y + newCellSize/2)
topRight = Point(self.center.x + newCellSize/2, self.center.y + newCellSize/2)
bottomRight = Point(self.center.x + newCellSize/2, self.center.y - newCellSize/2)
bottomLeft = Point(self.center.x - newCellSize/2, self.center.y - newCellSize/2)
self.children.append(Element(topLeft, newCellSize, self.maxCellSize, self.minCellSize, self))
self.children.append(Element(topRight, newCellSize, self.maxCellSize, self.minCellSize, self))
self.children.append(Element(bottomRight, newCellSize, self.maxCellSize, self.minCellSize, self))
self.children.append(Element(bottomLeft, newCellSize, self.maxCellSize, self.minCellSize, self))
for c in self.children:
c.fixNeighbourCellSizes()
def getNeighbours(self):
directions = ['up','down','left','right']
neighbours = []
for d in directions:
neighbours.append(self.getNeighbour(d))
return neighbours
def fixNeighbourCellSizes(self):
'''
Checks the cell size of all neighbouring elements. If any of them are larger than twice the current cell size,
        then they are refined until they meet this criterion.
'''
directions = ['up','down','left','right']
for d in directions:
n = self.getNeighbour(d)
if n: # There won't be any neighbour on the edge.
#print ("Checking",n, "since it's a neighbour of",self)
while self.isLeaf and n.cellSize > 2*self.cellSize:
#print (" ",n,"is too large.")
n.split()
n = self.getNeighbour(d)
def __repr__(self):
if self.isSolid:
solid = "Solid. "
else:
solid = ""
if self.isBoundary:
boundary = " Boundary. "
else:
boundary = ""
return "Element at "+str(self.center)+" with size "+str(self.cellSize)+". "+solid+boundary
class Mesh():
'''
The mesh object contains a uniform cartesian grid of the largest possible cell size.
"Mesh.elements" contains a list of the root Elements.
    The (i,j)th root element is given by "Mesh.elements[i*verticalCellCount + j]", matching the construction order in __init__.
'''
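    # Hedged illustration of the layout described above (the counts are assumptions for
    # the example only): with horizontalCellCount=3 and verticalCellCount=2, the root
    # element in column i=2, row j=1 sits at elements[2*2 + 1] == elements[5], the same
    # i*verticalCellCount + j lookup used by getElementAtPoint() below.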
def __init__(self, bottomLeft, horizontalCellCount, verticalCellCount, maxCellSize, minCellSize):
'''
"bottomLeft" is a Point, and an n x m mesh of square cells with dimension maxCellSize is generated above
and to the right of this point, where n is given by "horizontalCellCount" and m is "verticalCellCount".
'''
self.horizontalCellCount = horizontalCellCount
self.verticalCellCount = verticalCellCount
self.maxCellSize = maxCellSize
self.minCellSize = minCellSize
elements = []
for i in range(horizontalCellCount):
for j in range(verticalCellCount):
center = bottomLeft + Point(maxCellSize/2 + i*maxCellSize, maxCellSize/2 + j*maxCellSize)
elements.append(Element(center, maxCellSize, maxCellSize, minCellSize, self))
self.elements = elements
self.bottomLeft = bottomLeft
width = horizontalCellCount*maxCellSize/2
height = verticalCellCount*maxCellSize/2
center = bottomLeft + Point(width, height)
self.boundingBox = BoundingBox(center, width, height)
def getElementAtPoint(self, point):
'''
Returns a leaf Element which contains the point.
'''
# First, we check that the point does fall inside the mesh.
if not self.boundingBox.containsPoint(point):
return None
# Since the root elements in the mesh have a fixed size and spatial arrangement, it's simple to
# figure out which root element a point is in without having to poll any other elements.
# Start by converting the point in (x,y) in global units into a relative coord (i,j) measured in cell counts.
relativeLocation = (point - self.bottomLeft).scaledBy(1/self.maxCellSize)
i = math.floor(relativeLocation.x)
j = math.floor(relativeLocation.y)
# Figure out which element that is.
e = self.elements[i*self.verticalCellCount+j]
# As a safety net, we check that the element does contain the point. If it doesn't, we risk infinite
# recursion. There's probably something wrong if that happens, so let's raise an exception.
if e.boundingBox.containsPoint(point):
return e.getElementAtPoint(point)
else:
print("Need to query an element",e,"for a point ",point,", but it's the wrong element.")
raise Exception("Fatal Error: Parent mesh attempted to query an element for a point it did not contain.")
def getPolygons(self):
'''
Returns a list of Polygon objects defining each leaf element.
'''
polyList = []
for e in self.elements:
polyList += e.getPolygons()
return polyList
def getElementsAroundPoint(self, point, distance=None):
'''
Returns a list of elements containing the element at point, and the four elements distance in each direction.
If distance is not specified, it defaults to minCellSize/2
'''
if not distance:
distance = self.minCellSize/2
up = self.getElementAtPoint(point + Point( 0, 1).scaledBy(distance))
down = self.getElementAtPoint(point + Point( 0,-1).scaledBy(distance))
left = self.getElementAtPoint(point + Point(-1, 0).scaledBy(distance))
right = self.getElementAtPoint(point + Point( 1, 0).scaledBy(distance))
center = self.getElementAtPoint(point)
return removeDuplicates([up, down, left, right, center])
def refineAlongLine(self, line):
# We calculate the number of steps needed to ensure that we don't miss any cells.
nSteps = math.ceil(line.length()/self.minCellSize)
if nSteps < 1:
print('Got a line of length',line.length())
# nSteps is the number of line segments. Number of points to check is nSteps + 1
for i in range(nSteps+1):
thisPoint = line.startPoint + (line.endPoint-line.startPoint).scaledBy(i/nSteps)
e = self.getElementAtPoint(thisPoint)
while e and e.cellSize/2 > e.minCellSize:
for element in self.getElementsAroundPoint(thisPoint):
element.split()
e = self.getElementAtPoint(thisPoint)
def refineAlongPolygon(self, polygon):
counter = 0
for line in polygon.lines:
counter += 1
# print(" Resolving along line",counter,":", line)
self.refineAlongLine(line)
# print(" Detecting solid cells")
self.markSolidCells(polygon)
def getAllElements(self):
elementList = []
for e in self.elements:
elementList += e.getAllElements()
return elementList
def markSolidCells(self, polygon):
'''
Marks all elements in polygon as solid.
'''
for e in self.getAllElements():
            if e.isSolid is None and polygon.containsBoundingBox(e.getBoundingBox()):
e.isSolid = True
else:
e.isSolid = False
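# Hedged usage sketch (not part of the original module; the grid dimensions, cell sizes
# and query point below are illustrative assumptions only).
if __name__ == '__main__':
    # Build a 4 x 4 grid of 1.0-unit root cells anchored at the origin, allowing
    # refinement down to 0.125-unit leaf cells.
    demo_mesh = Mesh(Point(0, 0), 4, 4, 1.0, 0.125)
    # Fetch the leaf element containing an arbitrary interior point.
    demo_element = demo_mesh.getElementAtPoint(Point(1.3, 2.7))
    print(demo_element)
    # Splitting replaces the leaf with four children; neighbouring cells are kept
    # within a factor of two in size by fixNeighbourCellSizes().
    demo_element.split()
    print(len(demo_mesh.getAllElements()), 'leaf elements after one split')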
# --- end of module (license: MIT) ---

# --- shepdelacreme/ansible :: lib/ansible/modules/cloud/scaleway/scaleway_server_facts.py ---
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2018, Yanis Guenane <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: scaleway_server_facts
short_description: Gather facts about the Scaleway servers available.
description:
- Gather facts about the Scaleway servers available.
version_added: "2.7"
author:
- "Yanis Guenane (@Spredzy)"
- "Remy Leone (@sieben)"
extends_documentation_fragment: scaleway
options:
region:
version_added: "2.8"
description:
- Scaleway region to use (for example par1).
required: true
choices:
- ams1
- EMEA-NL-EVS
- par1
- EMEA-FR-PAR1
'''
EXAMPLES = r'''
- name: Gather Scaleway servers facts
scaleway_server_facts:
region: par1
'''
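# A hedged usage note (the task and variable names below are assumptions, not part of the
# module's documented examples): because the module returns its data under ansible_facts,
# a play can read the gathered servers either from the injected "scaleway_server_facts"
# fact or from a registered result, e.g.
#
#   - name: Gather Scaleway servers facts
#     scaleway_server_facts:
#       region: par1
#     register: scw_result
#
#   - name: Show the gathered servers
#     debug:
#       var: scw_result.ansible_facts.scaleway_server_facts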
RETURN = r'''
---
scaleway_server_facts:
description: Response from Scaleway API
returned: success
type: complex
contains:
"scaleway_server_facts": [
{
"arch": "x86_64",
"boot_type": "local",
"bootscript": {
"architecture": "x86_64",
"bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
"default": true,
"dtb": "",
"id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9",
"initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
"kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127",
"organization": "11111111-1111-4111-8111-111111111111",
"public": true,
"title": "x86_64 mainline 4.4.127 rev1"
},
"commercial_type": "START1-XS",
"creation_date": "2018-08-14T21:36:56.271545+00:00",
"dynamic_ip_required": false,
"enable_ipv6": false,
"extra_networks": [],
"hostname": "scw-e0d256",
"id": "12f19bc7-108c-4517-954c-e6b3d0311363",
"image": {
"arch": "x86_64",
"creation_date": "2018-04-26T12:42:21.619844+00:00",
"default_bootscript": {
"architecture": "x86_64",
"bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
"default": true,
"dtb": "",
"id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9",
"initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
"kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127",
"organization": "11111111-1111-4111-8111-111111111111",
"public": true,
"title": "x86_64 mainline 4.4.127 rev1"
},
"extra_volumes": [],
"from_server": null,
"id": "67375eb1-f14d-4f02-bb42-6119cecbde51",
"modification_date": "2018-04-26T12:49:07.573004+00:00",
"name": "Ubuntu Xenial",
"organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
"public": true,
"root_volume": {
"id": "020b8d61-3867-4a0e-84a4-445c5393e05d",
"name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42",
"size": 25000000000,
"volume_type": "l_ssd"
},
"state": "available"
},
"ipv6": null,
"location": {
"cluster_id": "5",
"hypervisor_id": "412",
"node_id": "2",
"platform_id": "13",
"zone_id": "par1"
},
"maintenances": [],
"modification_date": "2018-08-14T21:37:28.630882+00:00",
"name": "scw-e0d256",
"organization": "3f709602-5e6c-4619-b80c-e841c89734af",
"private_ip": "10.14.222.131",
"protected": false,
"public_ip": {
"address": "163.172.170.197",
"dynamic": false,
"id": "ea081794-a581-4495-8451-386ddaf0a451"
},
"security_group": {
"id": "a37379d2-d8b0-4668-9cfb-1233fc436f7e",
"name": "Default security group"
},
"state": "running",
"state_detail": "booted",
"tags": [],
"volumes": {
"0": {
"creation_date": "2018-08-14T21:36:56.271545+00:00",
"export_uri": "device://dev/vda",
"id": "68386fae-4f55-4fbf-aabb-953036a85872",
"modification_date": "2018-08-14T21:36:56.271545+00:00",
"name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42",
"organization": "3f709602-5e6c-4619-b80c-e841c89734af",
"server": {
"id": "12f19bc7-108c-4517-954c-e6b3d0311363",
"name": "scw-e0d256"
},
"size": 25000000000,
"state": "available",
"volume_type": "l_ssd"
}
}
}
]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.scaleway import (
Scaleway,
ScalewayException,
scaleway_argument_spec,
SCALEWAY_LOCATION,
)
class ScalewayServerFacts(Scaleway):
def __init__(self, module):
super(ScalewayServerFacts, self).__init__(module)
self.name = 'servers'
region = module.params["region"]
self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
def main():
argument_spec = scaleway_argument_spec()
argument_spec.update(dict(
region=dict(required=True, choices=SCALEWAY_LOCATION.keys()),
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
try:
module.exit_json(
ansible_facts={'scaleway_server_facts': ScalewayServerFacts(module).get_resources()}
)
except ScalewayException as exc:
module.fail_json(msg=exc.message)
if __name__ == '__main__':
main()
# --- end of module (license: GPL-3.0) ---

# --- TheMOOCAgency/edx-platform :: lms/djangoapps/instructor/views/instructor_dashboard.py ---
"""
Instructor Dashboard Views
"""
import logging
import datetime
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
import uuid
import pytz
import json
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST,require_GET
from django.utils.translation import ugettext as _, ugettext_noop
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from edxmako.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.utils.html import escape
from django.http import Http404, HttpResponseServerError,HttpResponse
from django.conf import settings
from util.json_request import JsonResponse
from mock import patch
from openedx.core.lib.xblock_utils import wrap_xblock
from openedx.core.lib.url_utils import quote_slashes
from xmodule.html_module import HtmlDescriptor
from xmodule.modulestore.django import modulestore
from xmodule.tabs import CourseTab
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from courseware.access import has_access
from courseware.courses import get_course_by_id, get_studio_url
from django_comment_client.utils import has_forum_access
from django_comment_common.models import FORUM_ROLE_ADMINISTRATOR
from openedx.core.djangoapps.course_groups.cohorts import get_course_cohorts, is_course_cohorted, DEFAULT_COHORT_NAME
from student.models import CourseEnrollment,User,CourseEnrollment,CourseEnrollmentAllowed,UserPreprofile
from shoppingcart.models import Coupon, PaidCourseRegistration, CourseRegCodeItem
from course_modes.models import CourseMode, CourseModesArchive
from student.roles import CourseFinanceAdminRole, CourseSalesAdminRole
from certificates.models import (
CertificateGenerationConfiguration,
CertificateWhitelist,
GeneratedCertificate,
CertificateStatuses,
CertificateGenerationHistory,
CertificateInvalidation,
)
from certificates import api as certs_api
from bulk_email.models import BulkEmailFlag
from class_dashboard.dashboard_data import get_section_display_name, get_array_section_has_problem
from .tools import get_units_with_due_date, title_or_url
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangolib.markup import HTML, Text
#GEOFFREY
from course_progress.helpers import get_overall_progress
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from courseware.courses import get_course_by_id
from django.db import connection,connections
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from course_progress.helpers import get_overall_progress
from lms.djangoapps.grades.new.course_grade import CourseGradeFactory
#GEOFFREY 2
from courseware.models import StudentModule
from course_api.blocks.api import get_blocks
from course_api.blocks.views import BlocksInCourseView,BlocksView
from django.db.models import Q
from lms.djangoapps.tma_grade_tracking.models import dashboardStats
from xlwt import *
import os
#GEOFFREY
log = logging.getLogger(__name__)
from pprint import pformat
#AGATHE
from course_progress.helpers import get_overall_progress
from course_progress.models import StudentCourseProgress
class InstructorDashboardTab(CourseTab):
"""
Defines the Instructor Dashboard view type that is shown as a course tab.
"""
type = "instructor"
title = ugettext_noop('Instructor')
view_name = "instructor_dashboard"
is_dynamic = True # The "Instructor" tab is instead dynamically added when it is enabled
@classmethod
def is_enabled(cls, course, user=None):
"""
Returns true if the specified user has staff access.
"""
return bool(user and has_access(user, 'staff', course, course.id))
def show_analytics_dashboard_message(course_key):
"""
Defines whether or not the analytics dashboard URL should be displayed.
Arguments:
course_key (CourseLocator): The course locator to display the analytics dashboard message on.
"""
if hasattr(course_key, 'ccx'):
ccx_analytics_enabled = settings.FEATURES.get('ENABLE_CCX_ANALYTICS_DASHBOARD_URL', False)
return settings.ANALYTICS_DASHBOARD_URL and ccx_analytics_enabled
return settings.ANALYTICS_DASHBOARD_URL
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def instructor_dashboard_2(request, course_id):
""" Display the instructor dashboard for a course. """
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
log.error(u"Unable to find course with course key %s while loading the Instructor Dashboard.", course_id)
return HttpResponseServerError()
course = get_course_by_id(course_key, depth=0)
access = {
'admin': request.user.is_staff,
'instructor': bool(has_access(request.user, 'instructor', course)),
'finance_admin': CourseFinanceAdminRole(course_key).has_user(request.user),
'sales_admin': CourseSalesAdminRole(course_key).has_user(request.user),
'staff': bool(has_access(request.user, 'staff', course)),
'forum_admin': has_forum_access(request.user, course_key, FORUM_ROLE_ADMINISTRATOR),
}
if not access['staff']:
raise Http404()
is_white_label = CourseMode.is_white_label(course_key)
reports_enabled = configuration_helpers.get_value('SHOW_ECOMMERCE_REPORTS', False)
sections = [
_section_course_info(course, access),
_section_membership(course, access, is_white_label),
_section_cohort_management(course, access),
_section_student_admin(course, access),
_section_data_download(course, access),
]
analytics_dashboard_message = None
if show_analytics_dashboard_message(course_key):
# Construct a URL to the external analytics dashboard
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link_start = HTML("<a href=\"{}\" target=\"_blank\">").format(analytics_dashboard_url)
analytics_dashboard_message = _(
"To gain insights into student enrollment and participation {link_start}"
"visit {analytics_dashboard_name}, our new course analytics product{link_end}."
)
analytics_dashboard_message = Text(analytics_dashboard_message).format(
link_start=link_start, link_end=HTML("</a>"), analytics_dashboard_name=settings.ANALYTICS_DASHBOARD_NAME)
# Temporarily show the "Analytics" section until we have a better way of linking to Insights
sections.append(_section_analytics(course, access))
# Check if there is corresponding entry in the CourseMode Table related to the Instructor Dashboard course
course_mode_has_price = False
paid_modes = CourseMode.paid_modes_for_course(course_key)
if len(paid_modes) == 1:
course_mode_has_price = True
elif len(paid_modes) > 1:
log.error(
u"Course %s has %s course modes with payment options. Course must only have "
u"one paid course mode to enable eCommerce options.",
unicode(course_key), len(paid_modes)
)
if settings.FEATURES.get('INDIVIDUAL_DUE_DATES') and access['instructor']:
sections.insert(3, _section_extensions(course))
# Gate access to course email by feature flag & by course-specific authorization
if BulkEmailFlag.feature_enabled(course_key):
sections.append(_section_send_email(course, access))
    # Gate access to Metrics tab by feature flag and staff authorization
if settings.FEATURES['CLASS_DASHBOARD'] and access['staff']:
sections.append(_section_metrics(course, access))
# Gate access to Ecommerce tab
if course_mode_has_price and (access['finance_admin'] or access['sales_admin']):
sections.append(_section_e_commerce(course, access, paid_modes[0], is_white_label, reports_enabled))
# Gate access to Special Exam tab depending if either timed exams or proctored exams
# are enabled in the course
    # NOTE: For now, if we only have proctored exams enabled, then only platform Staff
# (user.is_staff) will be able to view the special exams tab. This may
# change in the future
can_see_special_exams = (
((course.enable_proctored_exams and request.user.is_staff) or course.enable_timed_exams) and
settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False)
)
if can_see_special_exams:
sections.append(_section_special_exams(course, access))
# Certificates panel
# This is used to generate example certificates
# and enable self-generated certificates for a course.
# Note: This is hidden for all CCXs
certs_enabled = CertificateGenerationConfiguration.current().enabled and not hasattr(course_key, 'ccx')
if certs_enabled and access['admin']:
sections.append(_section_certificates(course))
disable_buttons = not _is_small_course(course_key)
certificate_white_list = CertificateWhitelist.get_certificate_white_list(course_key)
generate_certificate_exceptions_url = reverse( # pylint: disable=invalid-name
'generate_certificate_exceptions',
kwargs={'course_id': unicode(course_key), 'generate_for': ''}
)
generate_bulk_certificate_exceptions_url = reverse( # pylint: disable=invalid-name
'generate_bulk_certificate_exceptions',
kwargs={'course_id': unicode(course_key)}
)
certificate_exception_view_url = reverse(
'certificate_exception_view',
kwargs={'course_id': unicode(course_key)}
)
certificate_invalidation_view_url = reverse( # pylint: disable=invalid-name
'certificate_invalidation_view',
kwargs={'course_id': unicode(course_key)}
)
certificate_invalidations = CertificateInvalidation.get_certificate_invalidations(course_key)
context = {
'course': course,
'studio_url': get_studio_url(course, 'course'),
'sections': sections,
'disable_buttons': disable_buttons,
'analytics_dashboard_message': analytics_dashboard_message,
'certificate_white_list': certificate_white_list,
'certificate_invalidations': certificate_invalidations,
'generate_certificate_exceptions_url': generate_certificate_exceptions_url,
'generate_bulk_certificate_exceptions_url': generate_bulk_certificate_exceptions_url,
'certificate_exception_view_url': certificate_exception_view_url,
'certificate_invalidation_view_url': certificate_invalidation_view_url,
}
return render_to_response('instructor/instructor_dashboard_2/instructor_dashboard_2.html', context)
## Section functions starting with _section return a dictionary of section data.
## The dictionary must include at least {
## 'section_key': 'circus_expo'
## 'section_display_name': 'Circus Expo'
## }
## section_key will be used as a css attribute, javascript tie-in, and template import filename.
## section_display_name will be used to generate link titles in the nav bar.
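## A hedged sketch of the minimal contract described above (the section name and key are
## hypothetical, not an actual dashboard section):
##
##     def _section_example(course, access):
##         """ Provide data for a hypothetical dashboard section """
##         return {
##             'section_key': 'example',
##             'section_display_name': _('Example'),
##             'access': access,
##         }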
def _section_e_commerce(course, access, paid_mode, coupons_enabled, reports_enabled):
""" Provide data for the corresponding dashboard section """
course_key = course.id
coupons = Coupon.objects.filter(course_id=course_key).order_by('-is_active')
course_price = paid_mode.min_price
total_amount = None
if access['finance_admin']:
single_purchase_total = PaidCourseRegistration.get_total_amount_of_purchased_item(course_key)
bulk_purchase_total = CourseRegCodeItem.get_total_amount_of_purchased_item(course_key)
total_amount = single_purchase_total + bulk_purchase_total
section_data = {
'section_key': 'e-commerce',
'section_display_name': _('E-Commerce'),
'access': access,
'course_id': unicode(course_key),
'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
'ajax_remove_coupon_url': reverse('remove_coupon', kwargs={'course_id': unicode(course_key)}),
'ajax_get_coupon_info': reverse('get_coupon_info', kwargs={'course_id': unicode(course_key)}),
'get_user_invoice_preference_url': reverse('get_user_invoice_preference', kwargs={'course_id': unicode(course_key)}),
'sale_validation_url': reverse('sale_validation', kwargs={'course_id': unicode(course_key)}),
'ajax_update_coupon': reverse('update_coupon', kwargs={'course_id': unicode(course_key)}),
'ajax_add_coupon': reverse('add_coupon', kwargs={'course_id': unicode(course_key)}),
'get_sale_records_url': reverse('get_sale_records', kwargs={'course_id': unicode(course_key)}),
'get_sale_order_records_url': reverse('get_sale_order_records', kwargs={'course_id': unicode(course_key)}),
'instructor_url': reverse('instructor_dashboard', kwargs={'course_id': unicode(course_key)}),
'get_registration_code_csv_url': reverse('get_registration_codes', kwargs={'course_id': unicode(course_key)}),
'generate_registration_code_csv_url': reverse('generate_registration_codes', kwargs={'course_id': unicode(course_key)}),
'active_registration_code_csv_url': reverse('active_registration_codes', kwargs={'course_id': unicode(course_key)}),
'spent_registration_code_csv_url': reverse('spent_registration_codes', kwargs={'course_id': unicode(course_key)}),
'set_course_mode_url': reverse('set_course_mode_price', kwargs={'course_id': unicode(course_key)}),
'download_coupon_codes_url': reverse('get_coupon_codes', kwargs={'course_id': unicode(course_key)}),
'enrollment_report_url': reverse('get_enrollment_report', kwargs={'course_id': unicode(course_key)}),
'exec_summary_report_url': reverse('get_exec_summary_report', kwargs={'course_id': unicode(course_key)}),
'list_financial_report_downloads_url': reverse('list_financial_report_downloads',
kwargs={'course_id': unicode(course_key)}),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'look_up_registration_code': reverse('look_up_registration_code', kwargs={'course_id': unicode(course_key)}),
'coupons': coupons,
'sales_admin': access['sales_admin'],
'coupons_enabled': coupons_enabled,
'reports_enabled': reports_enabled,
'course_price': course_price,
'total_amount': total_amount
}
return section_data
def _section_special_exams(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'special_exams',
'section_display_name': _('Special Exams'),
'access': access,
'course_id': unicode(course_key)
}
return section_data
def _section_certificates(course):
"""Section information for the certificates panel.
The certificates panel allows global staff to generate
example certificates and enable self-generated certificates
for a course.
Arguments:
course (Course)
Returns:
dict
"""
example_cert_status = None
html_cert_enabled = certs_api.has_html_certificates_enabled(course.id, course)
if html_cert_enabled:
can_enable_for_course = True
else:
example_cert_status = certs_api.example_certificates_status(course.id)
# Allow the user to enable self-generated certificates for students
# *only* once a set of example certificates has been successfully generated.
# If certificates have been misconfigured for the course (for example, if
# the PDF template hasn't been uploaded yet), then we don't want
# to turn on self-generated certificates for students!
can_enable_for_course = (
example_cert_status is not None and
all(
cert_status['status'] == 'success'
for cert_status in example_cert_status
)
)
instructor_generation_enabled = settings.FEATURES.get('CERTIFICATES_INSTRUCTOR_GENERATION', False)
certificate_statuses_with_count = {
certificate['status']: certificate['count']
for certificate in GeneratedCertificate.get_unique_statuses(course_key=course.id)
}
return {
'section_key': 'certificates',
'section_display_name': _('Certificates'),
'example_certificate_status': example_cert_status,
'can_enable_for_course': can_enable_for_course,
'enabled_for_course': certs_api.cert_generation_enabled(course.id),
'is_self_paced': course.self_paced,
'instructor_generation_enabled': instructor_generation_enabled,
'html_cert_enabled': html_cert_enabled,
'active_certificate': certs_api.get_active_web_certificate(course),
'certificate_statuses_with_count': certificate_statuses_with_count,
'status': CertificateStatuses,
'certificate_generation_history':
CertificateGenerationHistory.objects.filter(course_id=course.id).order_by("-created"),
'urls': {
'generate_example_certificates': reverse(
'generate_example_certificates',
kwargs={'course_id': course.id}
),
'enable_certificate_generation': reverse(
'enable_certificate_generation',
kwargs={'course_id': course.id}
),
'start_certificate_generation': reverse(
'start_certificate_generation',
kwargs={'course_id': course.id}
),
'start_certificate_regeneration': reverse(
'start_certificate_regeneration',
kwargs={'course_id': course.id}
),
'list_instructor_tasks_url': reverse(
'list_instructor_tasks',
kwargs={'course_id': course.id}
),
}
}
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_POST
@login_required
def set_course_mode_price(request, course_id):
"""
set the new course price and add new entry in the CourseModesArchive Table
"""
try:
course_price = int(request.POST['course_price'])
except ValueError:
return JsonResponse(
{'message': _("Please Enter the numeric value for the course price")},
status=400) # status code 400: Bad Request
currency = request.POST['currency']
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course_honor_mode = CourseMode.objects.filter(mode_slug='honor', course_id=course_key)
if not course_honor_mode:
return JsonResponse(
{'message': _("CourseMode with the mode slug({mode_slug}) DoesNotExist").format(mode_slug='honor')},
status=400) # status code 400: Bad Request
CourseModesArchive.objects.create(
course_id=course_id, mode_slug='honor', mode_display_name='Honor Code Certificate',
min_price=course_honor_mode[0].min_price, currency=course_honor_mode[0].currency,
expiration_datetime=datetime.datetime.now(pytz.utc), expiration_date=datetime.date.today()
)
course_honor_mode.update(
min_price=course_price,
currency=currency
)
return JsonResponse({'message': _("CourseMode price updated successfully")})
def _section_course_info(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'course_info',
'section_display_name': _('Course Info'),
'access': access,
'course_id': course_key,
'course_display_name': course.display_name,
'has_started': course.has_started(),
'has_ended': course.has_ended(),
'start_date': course.start,
'end_date': course.end,
'num_sections': len(course.children),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
}
if settings.FEATURES.get('DISPLAY_ANALYTICS_ENROLLMENTS'):
section_data['enrollment_count'] = CourseEnrollment.objects.enrollment_counts(course_key)
if show_analytics_dashboard_message(course_key):
# dashboard_link is already made safe in _get_dashboard_link
dashboard_link = _get_dashboard_link(course_key)
# so we can use Text() here so it's not double-escaped and rendering HTML on the front-end
message = Text(_("Enrollment data is now available in {dashboard_link}.")).format(dashboard_link=dashboard_link)
section_data['enrollment_message'] = message
if settings.FEATURES.get('ENABLE_SYSADMIN_DASHBOARD'):
section_data['detailed_gitlogs_url'] = reverse('gitlogs_detail', kwargs={'course_id': unicode(course_key)})
try:
sorted_cutoffs = sorted(course.grade_cutoffs.items(), key=lambda i: i[1], reverse=True)
advance = lambda memo, (letter, score): "{}: {}, ".format(letter, score) + memo
section_data['grade_cutoffs'] = reduce(advance, sorted_cutoffs, "")[:-2]
except Exception: # pylint: disable=broad-except
section_data['grade_cutoffs'] = "Not Available"
try:
section_data['course_errors'] = [(escape(a), '') for (a, _unused) in modulestore().get_course_errors(course.id)]
except Exception: # pylint: disable=broad-except
section_data['course_errors'] = [('Error fetching errors', '')]
return section_data
def _section_membership(course, access, is_white_label):
""" Provide data for the corresponding dashboard section """
course_key = course.id
ccx_enabled = settings.FEATURES.get('CUSTOM_COURSES_EDX', False) and course.enable_ccx
section_data = {
'section_key': 'membership',
'section_display_name': _('Membership'),
'access': access,
'ccx_is_enabled': ccx_enabled,
'is_white_label': is_white_label,
'enroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'unenroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'upload_student_csv_button_url': reverse('register_and_enroll_students', kwargs={'course_id': unicode(course_key)}),
'modify_beta_testers_button_url': reverse('bulk_beta_modify_access', kwargs={'course_id': unicode(course_key)}),
'list_course_role_members_url': reverse('list_course_role_members', kwargs={'course_id': unicode(course_key)}),
'modify_access_url': reverse('modify_access', kwargs={'course_id': unicode(course_key)}),
'list_forum_members_url': reverse('list_forum_members', kwargs={'course_id': unicode(course_key)}),
'update_forum_role_membership_url': reverse('update_forum_role_membership', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def _section_cohort_management(course, access):
""" Provide data for the corresponding cohort management section """
course_key = course.id
ccx_enabled = hasattr(course_key, 'ccx')
section_data = {
'section_key': 'cohort_management',
'section_display_name': _('Cohorts'),
'access': access,
'ccx_is_enabled': ccx_enabled,
'course_cohort_settings_url': reverse(
'course_cohort_settings',
kwargs={'course_key_string': unicode(course_key)}
),
'cohorts_url': reverse('cohorts', kwargs={'course_key_string': unicode(course_key)}),
'upload_cohorts_csv_url': reverse('add_users_to_cohorts', kwargs={'course_id': unicode(course_key)}),
'discussion_topics_url': reverse('cohort_discussion_topics', kwargs={'course_key_string': unicode(course_key)}),
'verified_track_cohorting_url': reverse(
'verified_track_cohorting', kwargs={'course_key_string': unicode(course_key)}
),
}
return section_data
def _is_small_course(course_key):
""" Compares against MAX_ENROLLMENT_INSTR_BUTTONS to determine if course enrollment is considered small. """
is_small_course = False
enrollment_count = CourseEnrollment.objects.num_enrolled_in(course_key)
max_enrollment_for_buttons = settings.FEATURES.get("MAX_ENROLLMENT_INSTR_BUTTONS")
if max_enrollment_for_buttons is not None:
is_small_course = enrollment_count <= max_enrollment_for_buttons
return is_small_course
def _section_student_admin(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
is_small_course = _is_small_course(course_key)
section_data = {
'section_key': 'student_admin',
'section_display_name': _('Student Admin'),
'access': access,
'is_small_course': is_small_course,
'get_student_progress_url_url': reverse('get_student_progress_url', kwargs={'course_id': unicode(course_key)}),
'enrollment_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'reset_student_attempts_url': reverse('reset_student_attempts', kwargs={'course_id': unicode(course_key)}),
'reset_student_attempts_for_entrance_exam_url': reverse(
'reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(course_key)},
),
'rescore_problem_url': reverse('rescore_problem', kwargs={'course_id': unicode(course_key)}),
'rescore_entrance_exam_url': reverse('rescore_entrance_exam', kwargs={'course_id': unicode(course_key)}),
'student_can_skip_entrance_exam_url': reverse(
'mark_student_can_skip_entrance_exam',
kwargs={'course_id': unicode(course_key)},
),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'list_entrace_exam_instructor_tasks_url': reverse('list_entrance_exam_instructor_tasks',
kwargs={'course_id': unicode(course_key)}),
'spoc_gradebook_url': reverse('spoc_gradebook', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def _section_extensions(course):
""" Provide data for the corresponding dashboard section """
section_data = {
'section_key': 'extensions',
'section_display_name': _('Extensions'),
'units_with_due_dates': [(title_or_url(unit), unicode(unit.location))
for unit in get_units_with_due_date(course)],
'change_due_date_url': reverse('change_due_date', kwargs={'course_id': unicode(course.id)}),
'reset_due_date_url': reverse('reset_due_date', kwargs={'course_id': unicode(course.id)}),
'show_unit_extensions_url': reverse('show_unit_extensions', kwargs={'course_id': unicode(course.id)}),
'show_student_extensions_url': reverse('show_student_extensions', kwargs={'course_id': unicode(course.id)}),
}
return section_data
def _section_data_download(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
show_proctored_report_button = (
settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False) and
course.enable_proctored_exams
)
section_data = {
'section_key': 'data_download',
'section_display_name': _('Data Download'),
'access': access,
'show_generate_proctored_exam_report_button': show_proctored_report_button,
'get_problem_responses_url': reverse('get_problem_responses', kwargs={'course_id': unicode(course_key)}),
'get_grading_config_url': reverse('get_grading_config', kwargs={'course_id': unicode(course_key)}),
'get_students_features_url': reverse('get_students_features', kwargs={'course_id': unicode(course_key)}),
'get_issued_certificates_url': reverse(
'get_issued_certificates', kwargs={'course_id': unicode(course_key)}
),
'get_students_who_may_enroll_url': reverse(
'get_students_who_may_enroll', kwargs={'course_id': unicode(course_key)}
),
'get_anon_ids_url': reverse('get_anon_ids', kwargs={'course_id': unicode(course_key)}),
'list_proctored_results_url': reverse('get_proctored_exam_results', kwargs={'course_id': unicode(course_key)}),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'list_report_downloads_url': reverse('list_report_downloads', kwargs={'course_id': unicode(course_key)}),
'calculate_grades_csv_url': reverse('calculate_grades_csv', kwargs={'course_id': unicode(course_key)}),
'problem_grade_report_url': reverse('problem_grade_report', kwargs={'course_id': unicode(course_key)}),
'course_has_survey': True if course.course_survey_name else False,
'course_survey_results_url': reverse('get_course_survey_results', kwargs={'course_id': unicode(course_key)}),
'export_ora2_data_url': reverse('export_ora2_data', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def null_applicable_aside_types(block): # pylint: disable=unused-argument
"""
get_aside method for monkey-patching into applicable_aside_types
while rendering an HtmlDescriptor for email text editing. This returns
an empty list.
"""
return []
def _section_send_email(course, access):
""" Provide data for the corresponding bulk email section """
course_key = course.id
# Monkey-patch applicable_aside_types to return no asides for the duration of this render
with patch.object(course.runtime, 'applicable_aside_types', null_applicable_aside_types):
# This HtmlDescriptor is only being used to generate a nice text editor.
html_module = HtmlDescriptor(
course.system,
DictFieldData({'data': ''}),
ScopeIds(None, None, None, course_key.make_usage_key('html', 'fake'))
)
fragment = course.system.render(html_module, 'studio_view')
fragment = wrap_xblock(
'LmsRuntime', html_module, 'studio_view', fragment, None,
extra_data={"course-id": unicode(course_key)},
usage_id_serializer=lambda usage_id: quote_slashes(unicode(usage_id)),
# Generate a new request_token here at random, because this module isn't connected to any other
# xblock rendering.
request_token=uuid.uuid1().get_hex()
)
cohorts = []
if is_course_cohorted(course_key):
cohorts = get_course_cohorts(course)
email_editor = fragment.content
section_data = {
'section_key': 'send_email',
'section_display_name': _('Email'),
'access': access,
'send_email': reverse('send_email', kwargs={'course_id': unicode(course_key)}),
'editor': email_editor,
'cohorts': cohorts,
'default_cohort_name': DEFAULT_COHORT_NAME,
'list_instructor_tasks_url': reverse(
'list_instructor_tasks', kwargs={'course_id': unicode(course_key)}
),
'email_background_tasks_url': reverse(
'list_background_email_tasks', kwargs={'course_id': unicode(course_key)}
),
'email_content_history_url': reverse(
'list_email_content', kwargs={'course_id': unicode(course_key)}
),
}
return section_data
def _get_dashboard_link(course_key):
""" Construct a URL to the external analytics dashboard """
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link = HTML(u"<a href=\"{0}\" target=\"_blank\">{1}</a>").format(
analytics_dashboard_url, settings.ANALYTICS_DASHBOARD_NAME
)
return link
def _section_analytics(course, access):
""" Provide data for the corresponding dashboard section """
section_data = {
'section_key': 'instructor_analytics',
'section_display_name': _('Analytics'),
'access': access,
'course_id': unicode(course.id),
}
return section_data
def _section_metrics(course, access):
"""Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'metrics',
'section_display_name': _('Metrics'),
'access': access,
'course_id': unicode(course_key),
'sub_section_display_name': get_section_display_name(course_key),
'section_has_problem': get_array_section_has_problem(course_key),
'get_students_opened_subsection_url': reverse('get_students_opened_subsection'),
'get_students_problem_grades_url': reverse('get_students_problem_grades'),
'post_metrics_data_csv_url': reverse('post_metrics_data_csv'),
}
return section_data
# GEOFFREY STAT DASHBOARD
@login_required
def stat_dashboard(request, course_id):
#GET course_key
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course_key_modulestore = CourseKey.from_string(course_id)
#course_module
course_module = modulestore().get_course(course_key, depth=0)
#course cutoff
course_cutoff = course_module.grade_cutoffs['Pass']
#GET COURSE
course = get_course_by_id(course_key)
#overview
overview = CourseOverview.get_from_id(course_key)
#Get all course-enrollment
row = User.objects.raw('SELECT a.id ,a.email FROM auth_user a,student_courseenrollment b WHERE a.id=b.user_id AND b.course_id=%s' ,[course_id])
invite = CourseEnrollmentAllowed.objects.all().filter(course_id=course_key)
participant_list = []
all_user = 0
for _user in row:
participant_list.append(_user.email)
all_user = all_user + 1
for _u in invite:
if not str(_u.email) in str(participant_list):
all_user = all_user + 1
#number of user who started the course
user_course_started = 0
#number of users who completed the entire quiz
users_completed_quiz = 0
#count passed
num_passed = 0
#add course average grade
course_average_grade = 0
course_average_grade_global = 0
#number of user who finished the course
user_finished = 0
# Users who completed the quiz entirely
user_completed_quiz = 0
user_completed_quiz_list = []
#course_structure
course_structure = get_course_structure(request,course_id)
course_usage_key = modulestore().make_course_usage_key(course_key)
blocks = get_blocks(request,course_usage_key,depth='all',requested_fields=['display_name','children'])
# Users who completed the quiz (overall_progress equals 100.0 only if user completed the quiz)
for user in row:
overall_progress = get_overall_progress(user.id, course_key)
if overall_progress == 100.0:
users_completed_quiz = users_completed_quiz + 1
user_completed_quiz_list.append(user.username)
# connect mongodb return values:
mongo_persist = dashboardStats()
collection = mongo_persist.connect()
find_mongo_persist_course = mongo_persist.find_by_course_id(collection,course_id)
for n in row:
user_id = n.id
users = User.objects.get(pk=user_id)
try:
users_info = find_mongo_persist_course['users_info']
for key, value in users_info.iteritems():
#log.info("user_info key:"+pformat(key)+" value"+pformat(value))
_passed = value['passed']
_percent = value['percent']
user_course_started = user_course_started + 1
# Average grade of all users who completed the quiz
_username = value['username']
if _username in user_completed_quiz_list:
course_average_grade_global = course_average_grade_global + (_percent * 100)
# Average grade of users who passed the quiz
if _passed:
course_average_grade = course_average_grade + (_percent * 100)
user_finished = user_finished + 1
if _percent >= course_cutoff:
num_passed = num_passed + 1
except:
pass
#return context
if user_finished != 0:
final_course_average_grade = round((course_average_grade / user_finished),1)
else :
final_course_average_grade=0.0
if users_completed_quiz !=0:
course_average_grade_global = round((course_average_grade_global / users_completed_quiz), 1)
else :
course_average_grade_global=0.0
#store problems components order
problem_components=[]
for chapter in course_structure:
for section in chapter['children']:
for vertical in section['children']:
for component in vertical['children']:
if 'problem' in str(component):
problem_components.append(str(component))
context = {
"course_id":course_id,
"course":course,
"row":row,
'course_module':course_module,
"all_user":all_user,
"num_passed":num_passed,
"user_course_started":user_course_started,
'course_average_grade':final_course_average_grade,
'course_average_grade_global': course_average_grade_global,
'user_finished':user_finished,
'course_structure':course_structure,
'overview':overview,
'language_course':get_course_langue(course.language),
'problem_components':problem_components
}
return render_to_response('courseware/stat.html', context)
@ensure_csrf_cookie
@login_required
def get_dashboard_username(request,course_id,email):
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
row = User.objects.raw('SELECT a.id,a.email,a.first_name,a.last_name FROM auth_user a,student_courseenrollment b WHERE a.id=b.user_id AND b.course_id=%s' ,[course_id])
emails = []
email = str(email).lower()
for n in row:
low = [
n.email.lower(),
n.first_name.lower(),
n.last_name.lower()
]
if email in str(low).lower():
q = {
"values" : [
n.email,
n.first_name,
n.last_name
],
"id":n.email
}
emails.append(q)
response = JsonResponse({
"usernames":emails,
"email":email
})
return response
@ensure_csrf_cookie
@login_required
def stat_dashboard_username(request, course_id, email):
try:
# get users info
users = User.objects.get(email=email)
#user_email
user_email = users.email
lvl_1 = ''
lvl_2 = ''
lvl_3 = ''
lvl_4 = ''
try:
preprofile = UserPreprofile.objects.filter(email=user_email).first()
lvl_1 = preprofile.level_1
lvl_2 = preprofile.level_2
lvl_3 = preprofile.level_3
lvl_4 = preprofile.level_4
except:
pass
#ordered course
course_grade = []
ordered_course_grade=[]
quiz_order=get_quiz_structure(request, course_id)
# get user id
user_id= users.id
# get course_key from url's param
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
# get course from course_key
course = get_course_by_id(course_key)
# get all courses block of the site
course_block = StudentModule.objects.all().filter(student_id=user_id,course_id=course_key,max_grade__isnull=False)
# var of grades / course_structure
course_grade = []
# get course_users_info
course_user_info = CourseGradeFactory().create(users, course)
# user info responses
user_info = [
{'Score':str(course_user_info.percent * 100)+'%'},
{'First_name':users.first_name},
{'Last_name':users.last_name},
{'Email':users.email},
{'Niveau_1':lvl_1},
{'Niveau_2':lvl_2},
{'Niveau_3':lvl_3},
{'Niveau_4':lvl_4}
]
for n in course_block:
q = {}
usage_key = n.module_state_key
block_view = BlocksView()
block_name = get_blocks(request,usage_key,depth='all',requested_fields=['display_name'])
root = block_name['root']
display_name = block_name['blocks'][root]['display_name']
q['earned'] = n.grade
q['possible'] = n.max_grade
q['display_name'] = display_name
q['root'] = root
course_grade.append(q)
#Order blocks
for id in quiz_order:
for block in course_grade :
if block['root']==str(id):
ordered_course_grade.append(block)
return JsonResponse({
"course_id":course_id,
"email":email,
"user_id":user_id,
"course_grade": ordered_course_grade,
"user_info": user_info,
"quiz_order":quiz_order
})
except:
return JsonResponse({
"course_id":course_id,
"username":username,
"user_id": '',
"course_grade": [],
"user_info": '',
})
@login_required
def get_course_structure(request, course_id):
course_key = CourseKey.from_string(course_id)
course_usage_key = modulestore().make_course_usage_key(course_key)
blocks = get_blocks(request,course_usage_key,depth='all',requested_fields=['display_name','children'])
root = blocks['root']
blocks_overviews = []
try:
children = blocks['blocks'][root]['children']
for z in children:
q = {}
child = blocks['blocks'][z]
q['display_name'] = child['display_name']
q['id'] = child['id']
try:
sub_section = child['children']
q['children'] = []
for s in sub_section:
sub_ = blocks['blocks'][s]
a = {}
a['id'] = sub_['id']
a['display_name'] = sub_['display_name']
vertical = sub_['children']
try:
a['children'] = []
for v in vertical:
unit = blocks['blocks'][v]
w = {}
w['id'] = unit['id']
w['display_name'] = unit['display_name']
try:
w['children'] = unit['children']
except:
w['children'] = []
a['children'].append(w)
except:
a['children'] = []
q['children'].append(a)
except:
q['children'] = []
blocks_overviews.append(q)
except:
children = ''
return blocks_overviews
@ensure_csrf_cookie
@login_required
@require_POST
def get_course_blocks_grade(request,course_id):
data = json.loads(request.body)
data_id = data.get('data_id')
course_block = StudentModule.objects.raw("SELECT id,AVG(grade) AS moyenne,count(id) AS total,MAX(max_grade) AS max_grade,course_id,module_id FROM courseware_studentmodule WHERE course_id = %s AND max_grade IS NOT NULL AND grade <= max_grade GROUP BY module_id", [course_id])
course_grade = {}
for n in course_block:
usage_key = n.module_state_key
block_view = BlocksView()
try:
block_name = get_blocks(request,usage_key,depth='all',requested_fields=['display_name'])
root = block_name['root']
for z in data_id:
if root in z.get('id'):
if not root in course_grade:
course_grade[root] = {}
course_grade[root]['moyenne'] = n.moyenne
course_grade[root]['total'] = n.total
course_grade[root]['max_grade'] = n.max_grade
course_grade[root]['course_id'] = str(n.course_id)
course_grade[root]['module_id'] = str(n.module_state_key)
course_grade[root]['display_name'] = block_name['blocks'][root]['display_name']
course_grade[root]['vertical_name'] = z.get('title')
except:
pass
return JsonResponse({'course_grade':course_grade})
def get_result_page_info(request,course_id):
response = JsonResponse({
"course_id":course_id
})
return response
@ensure_csrf_cookie
@login_required
@require_GET
def get_course_users(request,course_id):
#Get all course-enrollment
"""
UserPreprofile
CourseEnrollment
CourseEnrollmentAllowed
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
invite = CourseEnrollmentAllowed.objects.all().filter(course_id=course_key)
enroll = CourseEnrollment.objects.all().filter(course_id=course_key)
users = []
for _ui in invite:
email = _ui.email
if not str(email) in str(users):
q = {}
q['email'] = email
q['statut'] = 'sent'
q['Nom'] = ''
q['Prenom'] = ''
q['Niveau 1'] = ''
q['Niveau 2'] = ''
q['Niveau 3'] = ''
q['Niveau 4'] = ''
users.append(q)
for _ue in enroll:
try:
email = User.objects.get(pk=_ue.user_id).email
if not str(email) in str(users):
q = {}
q['email'] = email
q['statut'] = 'accepted'
q['Nom'] = ''
q['Prenom'] = ''
q['Niveau 1'] = ''
q['Niveau 2'] = ''
q['Niveau 3'] = ''
q['Niveau 4'] = ''
users.append(q)
else:
for user in users:
if user['email'] == email:
user['statut'] = 'accepted'
except:
pass
for user in users:
try:
email = user['email']
profile = UserPreprofile.objects.filter(email=email).first()
user['Nom'] = profile.last_name
user['Prenom'] = profile.first_name
user['Niveau 1'] = profile.level_1
user['Niveau 2'] = profile.level_2
user['Niveau 3'] = profile.level_3
user['Niveau 4'] = profile.level_4
except:
pass
filename = '{}_registered_users.xls'.format(course_id).replace('+','_')
filepath = '/edx/var/edxapp/'+filename
HEADERS = (u"Nom",u"Prenom",u"Adresse email",u"Niveau 1",u"Niveau 2",u"Niveau 3",u"Niveau 4",u"Statut")
wb = Workbook(encoding='utf-8')
sheet = wb.add_sheet('Users')
for i, header in enumerate(HEADERS):
sheet.write(0, i, header)
j = 0
for i in range(len(users)):
j=j+1
try:
sheet.write(j, 0, users[i]['Nom'])
except:
sheet.write(j, 0, ' ')
try:
sheet.write(j, 1, users[i]['Prenom'])
except:
sheet.write(j, 1, ' ')
try:
sheet.write(j, 2, users[i]['email'])
except:
sheet.write(j, 2, ' ')
try:
sheet.write(j, 3, users[i]['Niveau 1'])
except:
sheet.write(j, 3, ' ')
try:
sheet.write(j, 4, users[i]['Niveau 2'])
except:
sheet.write(j, 4, ' ')
try:
sheet.write(j, 5, users[i]['Niveau 3'])
except:
sheet.write(j, 5, ' ')
try:
sheet.write(j, 6, users[i]['Niveau 4'])
except:
sheet.write(j, 6, ' ')
try:
sheet.write(j, 7, users[i]['statut'])
except:
sheet.write(j, 7, ' ')
wb.save(filepath)
context = {
'filename':filename,
'users':str(users)
}
return JsonResponse(context)
def download_xls(request,filename):
full_path = '/edx/var/edxapp/'+filename
_file = open(full_path,'r')
_content = _file.read()
response = HttpResponse(_content, content_type="application/vnd.ms-excel")
response['Content-Disposition'] = "attachment; filename="+filename
os.remove(full_path)
return response
#generate current_course grade reports
@ensure_csrf_cookie
@login_required
@require_GET
def get_course_users_grades(request,course_id):
# connect mongodb return values:
mongo_persist = dashboardStats()
collection = mongo_persist.connect()
find_mongo_persist_course = mongo_persist.find_by_course_id(collection,course_id)
# get users saved data
users_info = find_mongo_persist_course.get('users_info')
#get users id
users_id = users_info.keys()
q = {
'title': [
'email','first name','last name'
],
'users': []
}
k = 0
for _user_id in users_id:
#try:
current = users_info[_user_id]
user = User.objects.get(pk=users_info[str(_user_id)]["user_id"])
percent = str(current["percent"] * 100)+'%'
summary = current["summary"]["section_breakdown"]
user_info = {
'email':user.email,
'first_name':user.first_name,
'last_name':user.last_name,
'percent': percent,
'grades':[]
}
for section in summary:
if k == 0:
if not section['label'] in q['title']:
q['title'].append(section['label'])
_section = {
'label':section['label'],
'percent':str(section['percent'] * 100)+'%'
}
user_info['grades'].append(_section)
q['users'].append(user_info)
k = k + 1
"""
except:
pass
"""
if not 'final grade' in q['title']:
q['title'].append('final grade')
filename = '{}_grades_reports.xls'.format(course_id).replace('+','_')
filepath = '/edx/var/edxapp/'+filename
HEADERS = q['title']
wb = Workbook(encoding='utf-8')
sheet = wb.add_sheet('Grades')
for i, header in enumerate(HEADERS):
sheet.write(0, i, header)
j = 0
for i in range(len(q['users'])):
j=j+1
try:
sheet.write(j, 0, q['users'][i]['email'])
except:
sheet.write(j, 0, ' ')
try:
sheet.write(j, 1, q['users'][i]['first_name'])
except:
sheet.write(j, 1, ' ')
try:
sheet.write(j, 2, q['users'][i]['last_name'])
except:
sheet.write(j, 2, ' ')
d = 2
for grade in q['users'][i]['grades']:
d = d + 1
try:
sheet.write(j, d, grade['percent'])
except:
sheet.write(j, d, ' ')
d = d + 1
sheet.write(j, d, q['users'][i]['percent'])
wb.save(filepath)
context = {
'filename':filename,
'course_id':course_id
}
return JsonResponse(context)
def download_grades(request,filename):
full_path = '/edx/var/edxapp/'+filename
_file = open(full_path,'r')
_content = _file.read()
response = HttpResponse(_content, content_type="application/vnd.ms-excel")
response['Content-Disposition'] = "attachment; filename="+filename
os.remove(full_path)
return response
def get_list_lang():
    language_options_tuple = settings.ALL_LANGUAGES
    language_options_dict = {}
    for lang, label in language_options_tuple:
        language_options_dict[lang] = label
    return language_options_dict
def get_course_langue(lang_code):
    language_options_dict = get_list_lang()
    course_language = language_options_dict[lang_code]
    return course_language
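# Usage sketch for the two helpers above (assumes the 'fr' code is present in
# settings.ALL_LANGUAGES, as in a stock Django/edX configuration):
#
#   get_list_lang()['fr']     # -> u'French' (label as configured)
#   get_course_langue('fr')   # -> u'French'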
def get_quiz_structure(request, course_id):
    course_key = CourseKey.from_string(course_id)
    course_usage_key = modulestore().make_course_usage_key(course_key)
    course_blocks = get_blocks(
        request, course_usage_key, depth='all',
        requested_fields=['display_name', 'children'])
    blocks_list = []
    for block in course_blocks['blocks']:
        children = course_blocks['blocks'][block].get('children')
        # keep the children of the block whose first child is a problem block
        if children and "problem" in children[0]:
            blocks_list = children
    return blocks_list
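# get_quiz_structure() returns the 'children' list of the matched block, i.e. a
# list of block usage-key strings.  A hypothetical example of the shape (the
# ids below are made up):
#
#   [u'block-v1:ORG+COURSE+RUN+type@problem+block@0123abcd',
#    u'block-v1:ORG+COURSE+RUN+type@problem+block@4567efgh']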
# engine/base.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import with_statement
"""Defines :class:`.Connection` and :class:`.Engine`.
"""
import sys
from .. import exc, util, log, interfaces
from ..sql import util as sql_util
from .interfaces import Connectable, ExceptionContext
from .util import _distill_params
import contextlib
class Connection(Connectable):
"""Provides high-level functionality for a wrapped DB-API connection.
Provides execution support for string-based SQL statements as well as
:class:`.ClauseElement`, :class:`.Compiled` and :class:`.DefaultGenerator`
objects. Provides a :meth:`begin` method to return :class:`.Transaction`
objects.
The Connection object is **not** thread-safe. While a Connection can be
shared among threads using properly synchronized access, it is still
possible that the underlying DBAPI connection may not support shared
access between threads. Check the DBAPI documentation for details.
The Connection object represents a single dbapi connection checked out
    from the connection pool. In this state, the connection pool has no effect
upon the connection, including its expiration or timeout state. For the
connection pool to properly manage connections, connections should be
returned to the connection pool (i.e. ``connection.close()``) whenever the
connection is not in use.
.. index::
single: thread safety; Connection
"""
def __init__(self, engine, connection=None, close_with_result=False,
_branch_from=None, _execution_options=None,
_dispatch=None,
_has_events=None):
"""Construct a new Connection.
The constructor here is not public and is only called only by an
:class:`.Engine`. See :meth:`.Engine.connect` and
:meth:`.Engine.contextual_connect` methods.
"""
self.engine = engine
self.dialect = engine.dialect
self.__branch_from = _branch_from
self.__branch = _branch_from is not None
if _branch_from:
self.__connection = connection
self._execution_options = _execution_options
self._echo = _branch_from._echo
self.should_close_with_result = False
self.dispatch = _dispatch
self._has_events = _branch_from._has_events
else:
self.__connection = connection \
if connection is not None else engine.raw_connection()
self.__transaction = None
self.__savepoint_seq = 0
self.should_close_with_result = close_with_result
self.__invalid = False
self.__can_reconnect = True
self._echo = self.engine._should_log_info()
if _has_events is None:
# if _has_events is sent explicitly as False,
# then don't join the dispatch of the engine; we don't
# want to handle any of the engine's events in that case.
self.dispatch = self.dispatch._join(engine.dispatch)
self._has_events = _has_events or (
_has_events is None and engine._has_events)
assert not _execution_options
self._execution_options = engine._execution_options
if self._has_events or self.engine._has_events:
self.dispatch.engine_connect(self, self.__branch)
def _branch(self):
"""Return a new Connection which references this Connection's
engine and connection; but does not have close_with_result enabled,
and also whose close() method does nothing.
The Core uses this very sparingly, only in the case of
custom SQL default functions that are to be INSERTed as the
primary key of a row where we need to get the value back, so we have
to invoke it distinctly - this is a very uncommon case.
Userland code accesses _branch() when the connect() or
contextual_connect() methods are called. The branched connection
acts as much as possible like the parent, except that it stays
connected when a close() event occurs.
"""
if self.__branch_from:
return self.__branch_from._branch()
else:
return self.engine._connection_cls(
self.engine,
self.__connection,
_branch_from=self,
_execution_options=self._execution_options,
_has_events=self._has_events,
_dispatch=self.dispatch)
@property
def _root(self):
"""return the 'root' connection.
Returns 'self' if this connection is not a branch, else
returns the root connection from which we ultimately branched.
"""
if self.__branch_from:
return self.__branch_from
else:
return self
def _clone(self):
"""Create a shallow copy of this Connection.
"""
c = self.__class__.__new__(self.__class__)
c.__dict__ = self.__dict__.copy()
return c
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def execution_options(self, **opt):
""" Set non-SQL options for the connection which take effect
during execution.
The method returns a copy of this :class:`.Connection` which references
the same underlying DBAPI connection, but also defines the given
execution options which will take effect for a call to
:meth:`execute`. As the new :class:`.Connection` references the same
underlying resource, it's usually a good idea to ensure that the copies
will be discarded immediately, which is implicit if used as in::
result = connection.execution_options(stream_results=True).\\
execute(stmt)
Note that any key/value can be passed to
:meth:`.Connection.execution_options`, and it will be stored in the
``_execution_options`` dictionary of the :class:`.Connection`. It
is suitable for usage by end-user schemes to communicate with
event listeners, for example.
The keywords that are currently recognized by SQLAlchemy itself
include all those listed under :meth:`.Executable.execution_options`,
as well as others that are specific to :class:`.Connection`.
:param autocommit: Available on: Connection, statement.
When True, a COMMIT will be invoked after execution
when executed in 'autocommit' mode, i.e. when an explicit
transaction is not begun on the connection. Note that DBAPI
connections by default are always in a transaction - SQLAlchemy uses
rules applied to different kinds of statements to determine if
COMMIT will be invoked in order to provide its "autocommit" feature.
Typically, all INSERT/UPDATE/DELETE statements as well as
CREATE/DROP statements have autocommit behavior enabled; SELECT
constructs do not. Use this option when invoking a SELECT or other
specific SQL construct where COMMIT is desired (typically when
calling stored procedures and such), and an explicit
transaction is not in progress.
:param compiled_cache: Available on: Connection.
A dictionary where :class:`.Compiled` objects
will be cached when the :class:`.Connection` compiles a clause
expression into a :class:`.Compiled` object.
It is the user's responsibility to
manage the size of this dictionary, which will have keys
corresponding to the dialect, clause element, the column
names within the VALUES or SET clause of an INSERT or UPDATE,
as well as the "batch" mode for an INSERT or UPDATE statement.
The format of this dictionary is not guaranteed to stay the
same in future releases.
Note that the ORM makes use of its own "compiled" caches for
some operations, including flush operations. The caching
used by the ORM internally supersedes a cache dictionary
specified here.
:param isolation_level: Available on: :class:`.Connection`.
Set the transaction isolation level for
the lifespan of this :class:`.Connection` object (*not* the
          underlying DBAPI connection, for which the level is reset
to its original setting upon termination of this
:class:`.Connection` object).
Valid values include
those string values accepted by the
:paramref:`.create_engine.isolation_level`
parameter passed to :func:`.create_engine`. These levels are
semi-database specific; see individual dialect documentation for
valid levels.
Note that this option necessarily affects the underlying
DBAPI connection for the lifespan of the originating
:class:`.Connection`, and is not per-execution. This
setting is not removed until the underlying DBAPI connection
is returned to the connection pool, i.e.
the :meth:`.Connection.close` method is called.
.. warning:: The ``isolation_level`` execution option should
**not** be used when a transaction is already established, that
is, the :meth:`.Connection.begin` method or similar has been
called. A database cannot change the isolation level on a
transaction in progress, and different DBAPIs and/or
SQLAlchemy dialects may implicitly roll back or commit
the transaction, or not affect the connection at all.
.. versionchanged:: 0.9.9 A warning is emitted when the
``isolation_level`` execution option is used after a
transaction has been started with :meth:`.Connection.begin`
or similar.
.. note:: The ``isolation_level`` execution option is implicitly
reset if the :class:`.Connection` is invalidated, e.g. via
the :meth:`.Connection.invalidate` method, or if a
disconnection error occurs. The new connection produced after
the invalidation will not have the isolation level re-applied
to it automatically.
.. seealso::
:paramref:`.create_engine.isolation_level`
- set per :class:`.Engine` isolation level
:meth:`.Connection.get_isolation_level` - view current level
:ref:`SQLite Transaction Isolation <sqlite_isolation_level>`
:ref:`Postgresql Transaction Isolation <postgresql_isolation_level>`
:ref:`MySQL Transaction Isolation <mysql_isolation_level>`
:ref:`session_transaction_isolation` - for the ORM
:param no_parameters: When ``True``, if the final parameter
list or dictionary is totally empty, will invoke the
statement on the cursor as ``cursor.execute(statement)``,
not passing the parameter collection at all.
Some DBAPIs such as psycopg2 and mysql-python consider
percent signs as significant only when parameters are
present; this option allows code to generate SQL
containing percent signs (and possibly other characters)
that is neutral regarding whether it's executed by the DBAPI
or piped into a script that's later invoked by
command line tools.
.. versionadded:: 0.7.6
:param stream_results: Available on: Connection, statement.
Indicate to the dialect that results should be
"streamed" and not pre-buffered, if possible. This is a limitation
of many DBAPIs. The flag is currently understood only by the
psycopg2 dialect.
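        A further sketch, pinning the isolation level for this
        :class:`.Connection` (the level string is illustrative; valid values
        are dialect-specific)::

            conn = connection.execution_options(
                isolation_level="READ COMMITTED")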
"""
c = self._clone()
c._execution_options = c._execution_options.union(opt)
if self._has_events or self.engine._has_events:
self.dispatch.set_connection_execution_options(c, opt)
self.dialect.set_connection_execution_options(c, opt)
return c
@property
def closed(self):
"""Return True if this connection is closed."""
return '_Connection__connection' not in self.__dict__ \
and not self.__can_reconnect
@property
def invalidated(self):
"""Return True if this connection was invalidated."""
return self._root.__invalid
@property
def connection(self):
"""The underlying DB-API connection managed by this Connection.
.. seealso::
:ref:`dbapi_connections`
"""
try:
return self.__connection
except AttributeError:
try:
return self._revalidate_connection()
except Exception as e:
self._handle_dbapi_exception(e, None, None, None, None)
def get_isolation_level(self):
"""Return the current isolation level assigned to this
:class:`.Connection`.
This will typically be the default isolation level as determined
by the dialect, unless if the
:paramref:`.Connection.execution_options.isolation_level`
feature has been used to alter the isolation level on a
per-:class:`.Connection` basis.
This attribute will typically perform a live SQL operation in order
to procure the current isolation level, so the value returned is the
actual level on the underlying DBAPI connection regardless of how
this state was set. Compare to the
:attr:`.Connection.default_isolation_level` accessor
which returns the dialect-level setting without performing a SQL
query.
.. versionadded:: 0.9.9
.. seealso::
:attr:`.Connection.default_isolation_level` - view default level
:paramref:`.create_engine.isolation_level`
- set per :class:`.Engine` isolation level
:paramref:`.Connection.execution_options.isolation_level`
- set per :class:`.Connection` isolation level
"""
try:
return self.dialect.get_isolation_level(self.connection)
except Exception as e:
self._handle_dbapi_exception(e, None, None, None, None)
@property
def default_isolation_level(self):
"""The default isolation level assigned to this :class:`.Connection`.
This is the isolation level setting that the :class:`.Connection`
has when first procured via the :meth:`.Engine.connect` method.
This level stays in place until the
:paramref:`.Connection.execution_options.isolation_level` is used
to change the setting on a per-:class:`.Connection` basis.
Unlike :meth:`.Connection.get_isolation_level`, this attribute is set
ahead of time from the first connection procured by the dialect,
so SQL query is not invoked when this accessor is called.
.. versionadded:: 0.9.9
.. seealso::
:meth:`.Connection.get_isolation_level` - view current level
:paramref:`.create_engine.isolation_level`
- set per :class:`.Engine` isolation level
:paramref:`.Connection.execution_options.isolation_level`
- set per :class:`.Connection` isolation level
"""
return self.dialect.default_isolation_level
def _revalidate_connection(self):
if self.__branch_from:
return self.__branch_from._revalidate_connection()
if self.__can_reconnect and self.__invalid:
if self.__transaction is not None:
raise exc.InvalidRequestError(
"Can't reconnect until invalid "
"transaction is rolled back")
self.__connection = self.engine.raw_connection(_connection=self)
self.__invalid = False
return self.__connection
raise exc.ResourceClosedError("This Connection is closed")
@property
def _connection_is_valid(self):
# use getattr() for is_valid to support exceptions raised in
# dialect initializer, where the connection is not wrapped in
# _ConnectionFairy
return getattr(self.__connection, 'is_valid', False)
@property
def _still_open_and_connection_is_valid(self):
return \
not self.closed and \
not self.invalidated and \
getattr(self.__connection, 'is_valid', False)
@property
def info(self):
"""Info dictionary associated with the underlying DBAPI connection
referred to by this :class:`.Connection`, allowing user-defined
data to be associated with the connection.
The data here will follow along with the DBAPI connection including
after it is returned to the connection pool and used again
in subsequent instances of :class:`.Connection`.
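        E.g., a sketch (the key name is arbitrary)::

            conn.info['current_shard'] = 'shard_1'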
"""
return self.connection.info
def connect(self):
"""Returns a branched version of this :class:`.Connection`.
The :meth:`.Connection.close` method on the returned
:class:`.Connection` can be called and this
:class:`.Connection` will remain open.
This method provides usage symmetry with
:meth:`.Engine.connect`, including for usage
with context managers.
"""
return self._branch()
def contextual_connect(self, **kwargs):
"""Returns a branched version of this :class:`.Connection`.
The :meth:`.Connection.close` method on the returned
:class:`.Connection` can be called and this
:class:`.Connection` will remain open.
This method provides usage symmetry with
:meth:`.Engine.contextual_connect`, including for usage
with context managers.
"""
return self._branch()
def invalidate(self, exception=None):
"""Invalidate the underlying DBAPI connection associated with
this :class:`.Connection`.
The underlying DBAPI connection is literally closed (if
possible), and is discarded. Its source connection pool will
typically lazily create a new connection to replace it.
Upon the next use (where "use" typically means using the
:meth:`.Connection.execute` method or similar),
this :class:`.Connection` will attempt to
procure a new DBAPI connection using the services of the
        :class:`.Pool` as a source of connectivity (e.g. a "reconnection").
If a transaction was in progress (e.g. the
:meth:`.Connection.begin` method has been called) when
:meth:`.Connection.invalidate` method is called, at the DBAPI
level all state associated with this transaction is lost, as
the DBAPI connection is closed. The :class:`.Connection`
will not allow a reconnection to proceed until the
:class:`.Transaction` object is ended, by calling the
:meth:`.Transaction.rollback` method; until that point, any attempt at
continuing to use the :class:`.Connection` will raise an
:class:`~sqlalchemy.exc.InvalidRequestError`.
This is to prevent applications from accidentally
continuing an ongoing transactional operations despite the
fact that the transaction has been lost due to an
invalidation.
The :meth:`.Connection.invalidate` method, just like auto-invalidation,
will at the connection pool level invoke the
:meth:`.PoolEvents.invalidate` event.
.. seealso::
:ref:`pool_connection_invalidation`
"""
if self.invalidated:
return
if self.closed:
raise exc.ResourceClosedError("This Connection is closed")
if self._root._connection_is_valid:
self._root.__connection.invalidate(exception)
del self._root.__connection
self._root.__invalid = True
def detach(self):
"""Detach the underlying DB-API connection from its connection pool.
E.g.::
with engine.connect() as conn:
conn.detach()
conn.execute("SET search_path TO schema1, schema2")
# work with connection
# connection is fully closed (since we used "with:", can
# also call .close())
This :class:`.Connection` instance will remain usable. When closed
(or exited from a context manager context as above),
the DB-API connection will be literally closed and not
returned to its originating pool.
This method can be used to insulate the rest of an application
from a modified state on a connection (such as a transaction
isolation level or similar).
"""
self.__connection.detach()
def begin(self):
"""Begin a transaction and return a transaction handle.
The returned object is an instance of :class:`.Transaction`.
This object represents the "scope" of the transaction,
which completes when either the :meth:`.Transaction.rollback`
or :meth:`.Transaction.commit` method is called.
Nested calls to :meth:`.begin` on the same :class:`.Connection`
will return new :class:`.Transaction` objects that represent
an emulated transaction within the scope of the enclosing
transaction, that is::
trans = conn.begin() # outermost transaction
trans2 = conn.begin() # "nested"
trans2.commit() # does nothing
trans.commit() # actually commits
Calls to :meth:`.Transaction.commit` only have an effect
when invoked via the outermost :class:`.Transaction` object, though the
:meth:`.Transaction.rollback` method of any of the
:class:`.Transaction` objects will roll back the
transaction.
See also:
:meth:`.Connection.begin_nested` - use a SAVEPOINT
:meth:`.Connection.begin_twophase` - use a two phase /XID transaction
:meth:`.Engine.begin` - context manager available from
:class:`.Engine`.
"""
if self.__branch_from:
return self.__branch_from.begin()
if self.__transaction is None:
self.__transaction = RootTransaction(self)
return self.__transaction
else:
return Transaction(self, self.__transaction)
def begin_nested(self):
"""Begin a nested transaction and return a transaction handle.
The returned object is an instance of :class:`.NestedTransaction`.
Nested transactions require SAVEPOINT support in the
underlying database. Any transaction in the hierarchy may
``commit`` and ``rollback``, however the outermost transaction
still controls the overall ``commit`` or ``rollback`` of the
transaction of a whole.
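        A minimal usage sketch (the table and values are illustrative, not
        part of this module)::

            trans = conn.begin()             # outermost transaction
            savepoint = conn.begin_nested()  # emits SAVEPOINT
            try:
                conn.execute(table.insert(), {"id": 1})
                savepoint.commit()           # emits RELEASE SAVEPOINT
            except Exception:
                savepoint.rollback()         # emits ROLLBACK TO SAVEPOINT
            trans.commit()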
See also :meth:`.Connection.begin`,
:meth:`.Connection.begin_twophase`.
"""
if self.__branch_from:
return self.__branch_from.begin_nested()
if self.__transaction is None:
self.__transaction = RootTransaction(self)
else:
self.__transaction = NestedTransaction(self, self.__transaction)
return self.__transaction
def begin_twophase(self, xid=None):
"""Begin a two-phase or XA transaction and return a transaction
handle.
The returned object is an instance of :class:`.TwoPhaseTransaction`,
which in addition to the methods provided by
:class:`.Transaction`, also provides a
:meth:`~.TwoPhaseTransaction.prepare` method.
:param xid: the two phase transaction id. If not supplied, a
random id will be generated.
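        A usage sketch (assumes the target database and DBAPI support
        two-phase commit; the table is illustrative)::

            xa = conn.begin_twophase()
            conn.execute(table.insert(), {"id": 1})
            xa.prepare()   # phase one
            xa.commit()    # phase two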
See also :meth:`.Connection.begin`,
:meth:`.Connection.begin_twophase`.
"""
if self.__branch_from:
return self.__branch_from.begin_twophase(xid=xid)
if self.__transaction is not None:
raise exc.InvalidRequestError(
"Cannot start a two phase transaction when a transaction "
"is already in progress.")
if xid is None:
xid = self.engine.dialect.create_xid()
self.__transaction = TwoPhaseTransaction(self, xid)
return self.__transaction
def recover_twophase(self):
return self.engine.dialect.do_recover_twophase(self)
def rollback_prepared(self, xid, recover=False):
self.engine.dialect.do_rollback_twophase(self, xid, recover=recover)
def commit_prepared(self, xid, recover=False):
self.engine.dialect.do_commit_twophase(self, xid, recover=recover)
def in_transaction(self):
"""Return True if a transaction is in progress."""
return self._root.__transaction is not None
def _begin_impl(self, transaction):
assert not self.__branch_from
if self._echo:
self.engine.logger.info("BEGIN (implicit)")
if self._has_events or self.engine._has_events:
self.dispatch.begin(self)
try:
self.engine.dialect.do_begin(self.connection)
if self.connection._reset_agent is None:
self.connection._reset_agent = transaction
except Exception as e:
self._handle_dbapi_exception(e, None, None, None, None)
def _rollback_impl(self):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.rollback(self)
if self._still_open_and_connection_is_valid:
if self._echo:
self.engine.logger.info("ROLLBACK")
try:
self.engine.dialect.do_rollback(self.connection)
except Exception as e:
self._handle_dbapi_exception(e, None, None, None, None)
finally:
if not self.__invalid and \
self.connection._reset_agent is self.__transaction:
self.connection._reset_agent = None
self.__transaction = None
else:
self.__transaction = None
def _commit_impl(self, autocommit=False):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.commit(self)
if self._echo:
self.engine.logger.info("COMMIT")
try:
self.engine.dialect.do_commit(self.connection)
except Exception as e:
self._handle_dbapi_exception(e, None, None, None, None)
finally:
if not self.__invalid and \
self.connection._reset_agent is self.__transaction:
self.connection._reset_agent = None
self.__transaction = None
def _savepoint_impl(self, name=None):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.savepoint(self, name)
if name is None:
self.__savepoint_seq += 1
name = 'sa_savepoint_%s' % self.__savepoint_seq
if self._still_open_and_connection_is_valid:
self.engine.dialect.do_savepoint(self, name)
return name
def _rollback_to_savepoint_impl(self, name, context):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.rollback_savepoint(self, name, context)
if self._still_open_and_connection_is_valid:
self.engine.dialect.do_rollback_to_savepoint(self, name)
self.__transaction = context
def _release_savepoint_impl(self, name, context):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.release_savepoint(self, name, context)
if self._still_open_and_connection_is_valid:
self.engine.dialect.do_release_savepoint(self, name)
self.__transaction = context
def _begin_twophase_impl(self, transaction):
assert not self.__branch_from
if self._echo:
self.engine.logger.info("BEGIN TWOPHASE (implicit)")
if self._has_events or self.engine._has_events:
self.dispatch.begin_twophase(self, transaction.xid)
if self._still_open_and_connection_is_valid:
self.engine.dialect.do_begin_twophase(self, transaction.xid)
if self.connection._reset_agent is None:
self.connection._reset_agent = transaction
def _prepare_twophase_impl(self, xid):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.prepare_twophase(self, xid)
if self._still_open_and_connection_is_valid:
assert isinstance(self.__transaction, TwoPhaseTransaction)
self.engine.dialect.do_prepare_twophase(self, xid)
def _rollback_twophase_impl(self, xid, is_prepared):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.rollback_twophase(self, xid, is_prepared)
if self._still_open_and_connection_is_valid:
assert isinstance(self.__transaction, TwoPhaseTransaction)
try:
self.engine.dialect.do_rollback_twophase(
self, xid, is_prepared)
finally:
if self.connection._reset_agent is self.__transaction:
self.connection._reset_agent = None
self.__transaction = None
else:
self.__transaction = None
def _commit_twophase_impl(self, xid, is_prepared):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.commit_twophase(self, xid, is_prepared)
if self._still_open_and_connection_is_valid:
assert isinstance(self.__transaction, TwoPhaseTransaction)
try:
self.engine.dialect.do_commit_twophase(self, xid, is_prepared)
finally:
if self.connection._reset_agent is self.__transaction:
self.connection._reset_agent = None
self.__transaction = None
else:
self.__transaction = None
def _autorollback(self):
if not self._root.in_transaction():
self._root._rollback_impl()
def close(self):
"""Close this :class:`.Connection`.
This results in a release of the underlying database
resources, that is, the DBAPI connection referenced
internally. The DBAPI connection is typically restored
back to the connection-holding :class:`.Pool` referenced
by the :class:`.Engine` that produced this
:class:`.Connection`. Any transactional state present on
the DBAPI connection is also unconditionally released via
the DBAPI connection's ``rollback()`` method, regardless
of any :class:`.Transaction` object that may be
outstanding with regards to this :class:`.Connection`.
After :meth:`~.Connection.close` is called, the
:class:`.Connection` is permanently in a closed state,
and will allow no further operations.
"""
if self.__branch_from:
try:
del self.__connection
except AttributeError:
pass
finally:
self.__can_reconnect = False
return
try:
conn = self.__connection
except AttributeError:
pass
else:
conn.close()
if conn._reset_agent is self.__transaction:
conn._reset_agent = None
# the close() process can end up invalidating us,
# as the pool will call our transaction as the "reset_agent"
# for rollback(), which can then cause an invalidation
if not self.__invalid:
del self.__connection
self.__can_reconnect = False
self.__transaction = None
def scalar(self, object, *multiparams, **params):
"""Executes and returns the first column of the first row.
The underlying result/cursor is closed after execution.
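        E.g., a sketch (the query text is illustrative)::

            user_count = conn.scalar("SELECT count(*) FROM users")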
"""
return self.execute(object, *multiparams, **params).scalar()
    def execute(self, object, *multiparams, **params):
        """Executes a SQL statement construct and returns a
:class:`.ResultProxy`.
:param object: The statement to be executed. May be
one of:
* a plain string
* any :class:`.ClauseElement` construct that is also
a subclass of :class:`.Executable`, such as a
:func:`~.expression.select` construct
* a :class:`.FunctionElement`, such as that generated
by :data:`.func`, will be automatically wrapped in
a SELECT statement, which is then executed.
* a :class:`.DDLElement` object
* a :class:`.DefaultGenerator` object
* a :class:`.Compiled` object
:param \*multiparams/\**params: represent bound parameter
values to be used in the execution. Typically,
the format is either a collection of one or more
dictionaries passed to \*multiparams::
conn.execute(
table.insert(),
{"id":1, "value":"v1"},
{"id":2, "value":"v2"}
)
...or individual key/values interpreted by \**params::
conn.execute(
table.insert(), id=1, value="v1"
)
In the case that a plain SQL string is passed, and the underlying
DBAPI accepts positional bind parameters, a collection of tuples
or individual values in \*multiparams may be passed::
conn.execute(
"INSERT INTO table (id, value) VALUES (?, ?)",
(1, "v1"), (2, "v2")
)
conn.execute(
"INSERT INTO table (id, value) VALUES (?, ?)",
1, "v1"
)
Note above, the usage of a question mark "?" or other
symbol is contingent upon the "paramstyle" accepted by the DBAPI
in use, which may be any of "qmark", "named", "pyformat", "format",
"numeric". See `pep-249 <http://www.python.org/dev/peps/pep-0249/>`_
for details on paramstyle.
To execute a textual SQL statement which uses bound parameters in a
DBAPI-agnostic way, use the :func:`~.expression.text` construct.
"""
if isinstance(object, util.string_types[0]):
return self._execute_text(object, multiparams, params)
try:
meth = object._execute_on_connection
except AttributeError:
raise exc.InvalidRequestError(
"Unexecutable object type: %s" %
type(object))
else:
return meth(self, multiparams, params)
def _execute_function(self, func, multiparams, params):
"""Execute a sql.FunctionElement object."""
return self._execute_clauseelement(func.select(),
multiparams, params)
def _execute_default(self, default, multiparams, params):
"""Execute a schema.ColumnDefault object."""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_execute:
default, multiparams, params = \
fn(self, default, multiparams, params)
try:
try:
conn = self.__connection
except AttributeError:
conn = self._revalidate_connection()
dialect = self.dialect
ctx = dialect.execution_ctx_cls._init_default(
dialect, self, conn)
except Exception as e:
self._handle_dbapi_exception(e, None, None, None, None)
ret = ctx._exec_default(default, None)
if self.should_close_with_result:
self.close()
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(self,
default, multiparams, params, ret)
return ret
def _execute_ddl(self, ddl, multiparams, params):
"""Execute a schema.DDL object."""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_execute:
ddl, multiparams, params = \
fn(self, ddl, multiparams, params)
dialect = self.dialect
compiled = ddl.compile(dialect=dialect)
ret = self._execute_context(
dialect,
dialect.execution_ctx_cls._init_ddl,
compiled,
None,
compiled
)
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(self,
ddl, multiparams, params, ret)
return ret
def _execute_clauseelement(self, elem, multiparams, params):
"""Execute a sql.ClauseElement object."""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_execute:
elem, multiparams, params = \
fn(self, elem, multiparams, params)
distilled_params = _distill_params(multiparams, params)
if distilled_params:
# note this is usually dict but we support RowProxy
# as well; but dict.keys() as an iterable is OK
keys = distilled_params[0].keys()
else:
keys = []
dialect = self.dialect
if 'compiled_cache' in self._execution_options:
key = dialect, elem, tuple(sorted(keys)), len(distilled_params) > 1
compiled_sql = self._execution_options['compiled_cache'].get(key)
if compiled_sql is None:
compiled_sql = elem.compile(
dialect=dialect, column_keys=keys,
inline=len(distilled_params) > 1)
self._execution_options['compiled_cache'][key] = compiled_sql
else:
compiled_sql = elem.compile(
dialect=dialect, column_keys=keys,
inline=len(distilled_params) > 1)
ret = self._execute_context(
dialect,
dialect.execution_ctx_cls._init_compiled,
compiled_sql,
distilled_params,
compiled_sql, distilled_params
)
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(self,
elem, multiparams, params, ret)
return ret
def _execute_compiled(self, compiled, multiparams, params):
"""Execute a sql.Compiled object."""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_execute:
compiled, multiparams, params = \
fn(self, compiled, multiparams, params)
dialect = self.dialect
parameters = _distill_params(multiparams, params)
ret = self._execute_context(
dialect,
dialect.execution_ctx_cls._init_compiled,
compiled,
parameters,
compiled, parameters
)
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(self,
compiled, multiparams, params, ret)
return ret
def _execute_text(self, statement, multiparams, params):
"""Execute a string SQL statement."""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_execute:
statement, multiparams, params = \
fn(self, statement, multiparams, params)
dialect = self.dialect
parameters = _distill_params(multiparams, params)
ret = self._execute_context(
dialect,
dialect.execution_ctx_cls._init_statement,
statement,
parameters,
statement, parameters
)
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(self,
statement, multiparams, params, ret)
return ret
def _execute_context(self, dialect, constructor,
statement, parameters,
*args):
"""Create an :class:`.ExecutionContext` and execute, returning
a :class:`.ResultProxy`."""
try:
try:
conn = self.__connection
except AttributeError:
conn = self._revalidate_connection()
context = constructor(dialect, self, conn, *args)
except Exception as e:
self._handle_dbapi_exception(
e,
util.text_type(statement), parameters,
None, None)
if context.compiled:
context.pre_exec()
cursor, statement, parameters = context.cursor, \
context.statement, \
context.parameters
if not context.executemany:
parameters = parameters[0]
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_cursor_execute:
statement, parameters = \
fn(self, cursor, statement, parameters,
context, context.executemany)
if self._echo:
self.engine.logger.info(statement)
self.engine.logger.info(
"%r",
sql_util._repr_params(parameters, batches=10)
)
evt_handled = False
try:
if context.executemany:
if self.dialect._has_events:
for fn in self.dialect.dispatch.do_executemany:
if fn(cursor, statement, parameters, context):
evt_handled = True
break
if not evt_handled:
self.dialect.do_executemany(
cursor,
statement,
parameters,
context)
elif not parameters and context.no_parameters:
if self.dialect._has_events:
for fn in self.dialect.dispatch.do_execute_no_params:
if fn(cursor, statement, context):
evt_handled = True
break
if not evt_handled:
self.dialect.do_execute_no_params(
cursor,
statement,
context)
else:
if self.dialect._has_events:
for fn in self.dialect.dispatch.do_execute:
if fn(cursor, statement, parameters, context):
evt_handled = True
break
if not evt_handled:
self.dialect.do_execute(
cursor,
statement,
parameters,
context)
except Exception as e:
self._handle_dbapi_exception(
e,
statement,
parameters,
cursor,
context)
if self._has_events or self.engine._has_events:
self.dispatch.after_cursor_execute(self, cursor,
statement,
parameters,
context,
context.executemany)
if context.compiled:
context.post_exec()
if context.is_crud:
result = context._setup_crud_result_proxy()
else:
result = context.get_result_proxy()
if result._metadata is None:
result._soft_close(_autoclose_connection=False)
if context.should_autocommit and self._root.__transaction is None:
self._root._commit_impl(autocommit=True)
if result._soft_closed and self.should_close_with_result:
self.close()
return result
def _cursor_execute(self, cursor, statement, parameters, context=None):
"""Execute a statement + params on the given cursor.
Adds appropriate logging and exception handling.
This method is used by DefaultDialect for special-case
executions, such as for sequences and column defaults.
The path of statement execution in the majority of cases
terminates at _execute_context().
"""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_cursor_execute:
statement, parameters = \
fn(self, cursor, statement, parameters,
context,
False)
if self._echo:
self.engine.logger.info(statement)
self.engine.logger.info("%r", parameters)
try:
for fn in () if not self.dialect._has_events \
else self.dialect.dispatch.do_execute:
if fn(cursor, statement, parameters, context):
break
else:
self.dialect.do_execute(
cursor,
statement,
parameters,
context)
except Exception as e:
self._handle_dbapi_exception(
e,
statement,
parameters,
cursor,
context)
if self._has_events or self.engine._has_events:
self.dispatch.after_cursor_execute(self, cursor,
statement,
parameters,
context,
False)
def _safe_close_cursor(self, cursor):
"""Close the given cursor, catching exceptions
and turning into log warnings.
"""
try:
cursor.close()
except Exception:
# log the error through the connection pool's logger.
self.engine.pool.logger.error(
"Error closing cursor", exc_info=True)
_reentrant_error = False
_is_disconnect = False
def _handle_dbapi_exception(self,
e,
statement,
parameters,
cursor,
context):
exc_info = sys.exc_info()
if context and context.exception is None:
context.exception = e
if not self._is_disconnect:
self._is_disconnect = \
isinstance(e, self.dialect.dbapi.Error) and \
not self.closed and \
self.dialect.is_disconnect(
e,
self.__connection if not self.invalidated else None,
cursor)
if context:
context.is_disconnect = self._is_disconnect
invalidate_pool_on_disconnect = True
if self._reentrant_error:
util.raise_from_cause(
exc.DBAPIError.instance(statement,
parameters,
e,
self.dialect.dbapi.Error,
dialect=self.dialect),
exc_info
)
self._reentrant_error = True
try:
# non-DBAPI error - if we already got a context,
# or there's no string statement, don't wrap it
should_wrap = isinstance(e, self.dialect.dbapi.Error) or \
(statement is not None and context is None)
if should_wrap:
sqlalchemy_exception = exc.DBAPIError.instance(
statement,
parameters,
e,
self.dialect.dbapi.Error,
connection_invalidated=self._is_disconnect,
dialect=self.dialect)
else:
sqlalchemy_exception = None
newraise = None
if (self._has_events or self.engine._has_events) and \
not self._execution_options.get(
'skip_user_error_events', False):
# legacy dbapi_error event
if should_wrap and context:
self.dispatch.dbapi_error(self,
cursor,
statement,
parameters,
context,
e)
# new handle_error event
ctx = ExceptionContextImpl(
e, sqlalchemy_exception, self.engine,
self, cursor, statement,
parameters, context, self._is_disconnect)
for fn in self.dispatch.handle_error:
try:
# handler returns an exception;
# call next handler in a chain
per_fn = fn(ctx)
if per_fn is not None:
ctx.chained_exception = newraise = per_fn
except Exception as _raised:
# handler raises an exception - stop processing
newraise = _raised
break
if sqlalchemy_exception and \
self._is_disconnect != ctx.is_disconnect:
sqlalchemy_exception.connection_invalidated = \
self._is_disconnect = ctx.is_disconnect
# set up potentially user-defined value for
# invalidate pool.
invalidate_pool_on_disconnect = \
ctx.invalidate_pool_on_disconnect
if should_wrap and context:
context.handle_dbapi_exception(e)
if not self._is_disconnect:
if cursor:
self._safe_close_cursor(cursor)
self._autorollback()
if newraise:
util.raise_from_cause(newraise, exc_info)
elif should_wrap:
util.raise_from_cause(
sqlalchemy_exception,
exc_info
)
else:
util.reraise(*exc_info)
finally:
del self._reentrant_error
if self._is_disconnect:
del self._is_disconnect
if not self.invalidated:
dbapi_conn_wrapper = self.__connection
if invalidate_pool_on_disconnect:
self.engine.pool._invalidate(dbapi_conn_wrapper, e)
self.invalidate(e)
if self.should_close_with_result:
self.close()
@classmethod
def _handle_dbapi_exception_noconnection(cls, e, dialect, engine):
exc_info = sys.exc_info()
is_disconnect = dialect.is_disconnect(e, None, None)
should_wrap = isinstance(e, dialect.dbapi.Error)
if should_wrap:
sqlalchemy_exception = exc.DBAPIError.instance(
None,
None,
e,
dialect.dbapi.Error,
connection_invalidated=is_disconnect)
else:
sqlalchemy_exception = None
newraise = None
if engine._has_events:
ctx = ExceptionContextImpl(
e, sqlalchemy_exception, engine, None, None, None,
None, None, is_disconnect)
for fn in engine.dispatch.handle_error:
try:
# handler returns an exception;
# call next handler in a chain
per_fn = fn(ctx)
if per_fn is not None:
ctx.chained_exception = newraise = per_fn
except Exception as _raised:
# handler raises an exception - stop processing
newraise = _raised
break
if sqlalchemy_exception and \
is_disconnect != ctx.is_disconnect:
sqlalchemy_exception.connection_invalidated = \
is_disconnect = ctx.is_disconnect
if newraise:
util.raise_from_cause(newraise, exc_info)
elif should_wrap:
util.raise_from_cause(
sqlalchemy_exception,
exc_info
)
else:
util.reraise(*exc_info)
def default_schema_name(self):
return self.engine.dialect.get_default_schema_name(self)
def transaction(self, callable_, *args, **kwargs):
"""Execute the given function within a transaction boundary.
The function is passed this :class:`.Connection`
as the first argument, followed by the given \*args and \**kwargs,
e.g.::
def do_something(conn, x, y):
conn.execute("some statement", {'x':x, 'y':y})
conn.transaction(do_something, 5, 10)
The operations inside the function are all invoked within the
context of a single :class:`.Transaction`.
Upon success, the transaction is committed. If an
exception is raised, the transaction is rolled back
before propagating the exception.
.. note::
The :meth:`.transaction` method is superseded by
the usage of the Python ``with:`` statement, which can
be used with :meth:`.Connection.begin`::
with conn.begin():
conn.execute("some statement", {'x':5, 'y':10})
As well as with :meth:`.Engine.begin`::
with engine.begin() as conn:
conn.execute("some statement", {'x':5, 'y':10})
See also:
:meth:`.Engine.begin` - engine-level transactional
context
:meth:`.Engine.transaction` - engine-level version of
:meth:`.Connection.transaction`
"""
trans = self.begin()
try:
ret = self.run_callable(callable_, *args, **kwargs)
trans.commit()
return ret
except:
with util.safe_reraise():
trans.rollback()
def run_callable(self, callable_, *args, **kwargs):
"""Given a callable object or function, execute it, passing
a :class:`.Connection` as the first argument.
The given \*args and \**kwargs are passed subsequent
to the :class:`.Connection` argument.
This function, along with :meth:`.Engine.run_callable`,
allows a function to be run with a :class:`.Connection`
or :class:`.Engine` object without the need to know
which one is being dealt with.
"""
return callable_(self, *args, **kwargs)
def _run_visitor(self, visitorcallable, element, **kwargs):
visitorcallable(self.dialect, self,
**kwargs).traverse_single(element)
class ExceptionContextImpl(ExceptionContext):
"""Implement the :class:`.ExceptionContext` interface."""
def __init__(self, exception, sqlalchemy_exception,
engine, connection, cursor, statement, parameters,
context, is_disconnect):
self.engine = engine
self.connection = connection
self.sqlalchemy_exception = sqlalchemy_exception
self.original_exception = exception
self.execution_context = context
self.statement = statement
self.parameters = parameters
self.is_disconnect = is_disconnect
class Transaction(object):
"""Represent a database transaction in progress.
The :class:`.Transaction` object is procured by
calling the :meth:`~.Connection.begin` method of
:class:`.Connection`::
from sqlalchemy import create_engine
engine = create_engine("postgresql://scott:tiger@localhost/test")
connection = engine.connect()
trans = connection.begin()
connection.execute("insert into x (a, b) values (1, 2)")
trans.commit()
The object provides :meth:`.rollback` and :meth:`.commit`
methods in order to control transaction boundaries. It
also implements a context manager interface so that
the Python ``with`` statement can be used with the
:meth:`.Connection.begin` method::
with connection.begin():
connection.execute("insert into x (a, b) values (1, 2)")
The Transaction object is **not** threadsafe.
See also: :meth:`.Connection.begin`, :meth:`.Connection.begin_twophase`,
:meth:`.Connection.begin_nested`.
.. index::
single: thread safety; Transaction
"""
def __init__(self, connection, parent):
self.connection = connection
self._actual_parent = parent
self.is_active = True
@property
def _parent(self):
return self._actual_parent or self
def close(self):
"""Close this :class:`.Transaction`.
If this transaction is the base transaction in a begin/commit
nesting, the transaction will rollback(). Otherwise, the
method returns.
This is used to cancel a Transaction without affecting the scope of
an enclosing transaction.
"""
if not self._parent.is_active:
return
if self._parent is self:
self.rollback()
def rollback(self):
"""Roll back this :class:`.Transaction`.
"""
if not self._parent.is_active:
return
self._do_rollback()
self.is_active = False
def _do_rollback(self):
self._parent.rollback()
def commit(self):
"""Commit this :class:`.Transaction`."""
if not self._parent.is_active:
raise exc.InvalidRequestError("This transaction is inactive")
self._do_commit()
self.is_active = False
def _do_commit(self):
pass
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if type is None and self.is_active:
try:
self.commit()
except:
with util.safe_reraise():
self.rollback()
else:
self.rollback()
class RootTransaction(Transaction):
def __init__(self, connection):
super(RootTransaction, self).__init__(connection, None)
self.connection._begin_impl(self)
def _do_rollback(self):
if self.is_active:
self.connection._rollback_impl()
def _do_commit(self):
if self.is_active:
self.connection._commit_impl()
class NestedTransaction(Transaction):
"""Represent a 'nested', or SAVEPOINT transaction.
A new :class:`.NestedTransaction` object may be procured
using the :meth:`.Connection.begin_nested` method.
The interface is the same as that of :class:`.Transaction`.
"""
def __init__(self, connection, parent):
super(NestedTransaction, self).__init__(connection, parent)
self._savepoint = self.connection._savepoint_impl()
def _do_rollback(self):
if self.is_active:
self.connection._rollback_to_savepoint_impl(
self._savepoint, self._parent)
def _do_commit(self):
if self.is_active:
self.connection._release_savepoint_impl(
self._savepoint, self._parent)
class TwoPhaseTransaction(Transaction):
"""Represent a two-phase transaction.
A new :class:`.TwoPhaseTransaction` object may be procured
using the :meth:`.Connection.begin_twophase` method.
The interface is the same as that of :class:`.Transaction`
with the addition of the :meth:`prepare` method.
"""
def __init__(self, connection, xid):
super(TwoPhaseTransaction, self).__init__(connection, None)
self._is_prepared = False
self.xid = xid
self.connection._begin_twophase_impl(self)
def prepare(self):
"""Prepare this :class:`.TwoPhaseTransaction`.
After a PREPARE, the transaction can be committed.
"""
if not self._parent.is_active:
raise exc.InvalidRequestError("This transaction is inactive")
self.connection._prepare_twophase_impl(self.xid)
self._is_prepared = True
def _do_rollback(self):
self.connection._rollback_twophase_impl(self.xid, self._is_prepared)
def _do_commit(self):
self.connection._commit_twophase_impl(self.xid, self._is_prepared)
class Engine(Connectable, log.Identified):
"""
Connects a :class:`~sqlalchemy.pool.Pool` and
:class:`~sqlalchemy.engine.interfaces.Dialect` together to provide a
source of database connectivity and behavior.
An :class:`.Engine` object is instantiated publicly using the
:func:`~sqlalchemy.create_engine` function.
See also:
:doc:`/core/engines`
:ref:`connections_toplevel`
"""
_execution_options = util.immutabledict()
_has_events = False
_connection_cls = Connection
def __init__(self, pool, dialect, url,
logging_name=None, echo=None, proxy=None,
execution_options=None
):
self.pool = pool
self.url = url
self.dialect = dialect
self.pool._dialect = dialect
if logging_name:
self.logging_name = logging_name
self.echo = echo
self.engine = self
log.instance_logger(self, echoflag=echo)
if proxy:
interfaces.ConnectionProxy._adapt_listener(self, proxy)
if execution_options:
self.update_execution_options(**execution_options)
def update_execution_options(self, **opt):
"""Update the default execution_options dictionary
of this :class:`.Engine`.
The given keys/values in \**opt are added to the
default execution options that will be used for
all connections. The initial contents of this dictionary
can be sent via the ``execution_options`` parameter
to :func:`.create_engine`.
.. seealso::
:meth:`.Connection.execution_options`
:meth:`.Engine.execution_options`
"""
self._execution_options = \
self._execution_options.union(opt)
self.dispatch.set_engine_execution_options(self, opt)
self.dialect.set_engine_execution_options(self, opt)
def execution_options(self, **opt):
"""Return a new :class:`.Engine` that will provide
:class:`.Connection` objects with the given execution options.
The returned :class:`.Engine` remains related to the original
:class:`.Engine` in that it shares the same connection pool and
other state:
* The :class:`.Pool` used by the new :class:`.Engine` is the
same instance. The :meth:`.Engine.dispose` method will replace
the connection pool instance for the parent engine as well
as this one.
* Event listeners are "cascaded" - meaning, the new :class:`.Engine`
inherits the events of the parent, and new events can be associated
with the new :class:`.Engine` individually.
* The logging configuration and logging_name is copied from the parent
:class:`.Engine`.
The intent of the :meth:`.Engine.execution_options` method is
to implement "sharding" schemes where multiple :class:`.Engine`
objects refer to the same connection pool, but are differentiated
by options that would be consumed by a custom event::
primary_engine = create_engine("mysql://")
shard1 = primary_engine.execution_options(shard_id="shard1")
shard2 = primary_engine.execution_options(shard_id="shard2")
Above, the ``shard1`` engine serves as a factory for
:class:`.Connection` objects that will contain the execution option
``shard_id=shard1``, and ``shard2`` will produce :class:`.Connection`
objects that contain the execution option ``shard_id=shard2``.
An event handler can consume the above execution option to perform
a schema switch or other operation, given a connection. Below
we emit a MySQL ``use`` statement to switch databases, at the same
time keeping track of which database we've established using the
:attr:`.Connection.info` dictionary, which gives us a persistent
storage space that follows the DBAPI connection::
from sqlalchemy import event
from sqlalchemy.engine import Engine
            shards = {"default": "base", "shard_1": "db1", "shard_2": "db2"}
@event.listens_for(Engine, "before_cursor_execute")
def _switch_shard(conn, cursor, stmt,
params, context, executemany):
shard_id = conn._execution_options.get('shard_id', "default")
current_shard = conn.info.get("current_shard", None)
if current_shard != shard_id:
cursor.execute("use %s" % shards[shard_id])
conn.info["current_shard"] = shard_id
.. versionadded:: 0.8
.. seealso::
:meth:`.Connection.execution_options` - update execution options
on a :class:`.Connection` object.
:meth:`.Engine.update_execution_options` - update the execution
options for a given :class:`.Engine` in place.
"""
return OptionEngine(self, opt)
@property
def name(self):
"""String name of the :class:`~sqlalchemy.engine.interfaces.Dialect`
in use by this :class:`Engine`."""
return self.dialect.name
@property
def driver(self):
"""Driver name of the :class:`~sqlalchemy.engine.interfaces.Dialect`
in use by this :class:`Engine`."""
return self.dialect.driver
echo = log.echo_property()
def __repr__(self):
return 'Engine(%r)' % self.url
def dispose(self):
"""Dispose of the connection pool used by this :class:`.Engine`.
This has the effect of fully closing all **currently checked in**
database connections. Connections that are still checked out
will **not** be closed, however they will no longer be associated
with this :class:`.Engine`, so when they are closed individually,
eventually the :class:`.Pool` which they are associated with will
be garbage collected and they will be closed out fully, if
not already closed on checkin.
A new connection pool is created immediately after the old one has
been disposed. This new pool, like all SQLAlchemy connection pools,
does not make any actual connections to the database until one is
first requested, so as long as the :class:`.Engine` isn't used again,
no new connections will be made.
.. seealso::
:ref:`engine_disposal`
"""
self.pool.dispose()
self.pool = self.pool.recreate()
self.dispatch.engine_disposed(self)
def _execute_default(self, default):
with self.contextual_connect() as conn:
return conn._execute_default(default, (), {})
@contextlib.contextmanager
def _optional_conn_ctx_manager(self, connection=None):
if connection is None:
with self.contextual_connect() as conn:
yield conn
else:
yield connection
def _run_visitor(self, visitorcallable, element,
connection=None, **kwargs):
with self._optional_conn_ctx_manager(connection) as conn:
conn._run_visitor(visitorcallable, element, **kwargs)
class _trans_ctx(object):
def __init__(self, conn, transaction, close_with_result):
self.conn = conn
self.transaction = transaction
self.close_with_result = close_with_result
def __enter__(self):
return self.conn
def __exit__(self, type, value, traceback):
if type is not None:
self.transaction.rollback()
else:
self.transaction.commit()
if not self.close_with_result:
self.conn.close()
def begin(self, close_with_result=False):
"""Return a context manager delivering a :class:`.Connection`
with a :class:`.Transaction` established.
E.g.::
with engine.begin() as conn:
conn.execute("insert into table (x, y, z) values (1, 2, 3)")
conn.execute("my_special_procedure(5)")
Upon successful operation, the :class:`.Transaction`
is committed. If an error is raised, the :class:`.Transaction`
is rolled back.
The ``close_with_result`` flag is normally ``False``, and indicates
that the :class:`.Connection` will be closed when the operation
is complete. When set to ``True``, it indicates the
:class:`.Connection` is in "single use" mode, where the
:class:`.ResultProxy` returned by the first call to
:meth:`.Connection.execute` will close the :class:`.Connection` when
that :class:`.ResultProxy` has exhausted all result rows.
.. versionadded:: 0.7.6
See also:
:meth:`.Engine.connect` - procure a :class:`.Connection` from
an :class:`.Engine`.
:meth:`.Connection.begin` - start a :class:`.Transaction`
for a particular :class:`.Connection`.
"""
conn = self.contextual_connect(close_with_result=close_with_result)
try:
trans = conn.begin()
except:
with util.safe_reraise():
conn.close()
return Engine._trans_ctx(conn, trans, close_with_result)
def transaction(self, callable_, *args, **kwargs):
"""Execute the given function within a transaction boundary.
The function is passed a :class:`.Connection` newly procured
from :meth:`.Engine.contextual_connect` as the first argument,
followed by the given \*args and \**kwargs.
e.g.::
def do_something(conn, x, y):
conn.execute("some statement", {'x':x, 'y':y})
engine.transaction(do_something, 5, 10)
The operations inside the function are all invoked within the
context of a single :class:`.Transaction`.
Upon success, the transaction is committed. If an
exception is raised, the transaction is rolled back
before propagating the exception.
.. note::
The :meth:`.transaction` method is superseded by
the usage of the Python ``with:`` statement, which can
be used with :meth:`.Engine.begin`::
with engine.begin() as conn:
conn.execute("some statement", {'x':5, 'y':10})
See also:
:meth:`.Engine.begin` - engine-level transactional
context
:meth:`.Connection.transaction` - connection-level version of
:meth:`.Engine.transaction`
"""
with self.contextual_connect() as conn:
return conn.transaction(callable_, *args, **kwargs)
def run_callable(self, callable_, *args, **kwargs):
"""Given a callable object or function, execute it, passing
a :class:`.Connection` as the first argument.
The given \*args and \**kwargs are passed subsequent
to the :class:`.Connection` argument.
This function, along with :meth:`.Connection.run_callable`,
allows a function to be run with a :class:`.Connection`
or :class:`.Engine` object without the need to know
which one is being dealt with.
"""
with self.contextual_connect() as conn:
return conn.run_callable(callable_, *args, **kwargs)
def execute(self, statement, *multiparams, **params):
"""Executes the given construct and returns a :class:`.ResultProxy`.
The arguments are the same as those used by
:meth:`.Connection.execute`.
Here, a :class:`.Connection` is acquired using the
:meth:`~.Engine.contextual_connect` method, and the statement executed
with that connection. The returned :class:`.ResultProxy` is flagged
such that when the :class:`.ResultProxy` is exhausted and its
underlying cursor is closed, the :class:`.Connection` created here
will also be closed, which allows its associated DBAPI connection
resource to be returned to the connection pool.
"""
connection = self.contextual_connect(close_with_result=True)
return connection.execute(statement, *multiparams, **params)
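    # Sketch of the "connectionless execution" described above (statement and
    # engine names are assumed for illustration): the Connection acquired here
    # is returned to the pool once the ResultProxy is exhausted.
    #
    #     result = engine.execute("select * from my_table")
    #     for row in result:
    #         print(row)
    #     # all rows consumed -> the underlying Connection has been released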
def scalar(self, statement, *multiparams, **params):
return self.execute(statement, *multiparams, **params).scalar()
def _execute_clauseelement(self, elem, multiparams=None, params=None):
connection = self.contextual_connect(close_with_result=True)
return connection._execute_clauseelement(elem, multiparams, params)
def _execute_compiled(self, compiled, multiparams, params):
connection = self.contextual_connect(close_with_result=True)
return connection._execute_compiled(compiled, multiparams, params)
def connect(self, **kwargs):
"""Return a new :class:`.Connection` object.
The :class:`.Connection` object is a facade that uses a DBAPI
connection internally in order to communicate with the database. This
connection is procured from the connection-holding :class:`.Pool`
referenced by this :class:`.Engine`. When the
:meth:`~.Connection.close` method of the :class:`.Connection` object
is called, the underlying DBAPI connection is then returned to the
connection pool, where it may be used again in a subsequent call to
:meth:`~.Engine.connect`.
"""
return self._connection_cls(self, **kwargs)
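    # Minimal sketch (assumed statement text) of the explicit connect/close
    # cycle described above; close() returns the underlying DBAPI connection
    # to the pool rather than discarding it.
    #
    #     conn = engine.connect()
    #     try:
    #         conn.execute("select 1")
    #     finally:
    #         conn.close()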
def contextual_connect(self, close_with_result=False, **kwargs):
"""Return a :class:`.Connection` object which may be part of some
ongoing context.
By default, this method does the same thing as :meth:`.Engine.connect`.
Subclasses of :class:`.Engine` may override this method
to provide contextual behavior.
:param close_with_result: When True, the first :class:`.ResultProxy`
created by the :class:`.Connection` will call the
:meth:`.Connection.close` method of that connection as soon as any
pending result rows are exhausted. This is used to supply the
"connectionless execution" behavior provided by the
:meth:`.Engine.execute` method.
"""
return self._connection_cls(
self,
self._wrap_pool_connect(self.pool.connect, None),
close_with_result=close_with_result,
**kwargs)
def table_names(self, schema=None, connection=None):
"""Return a list of all table names available in the database.
:param schema: Optional, retrieve names from a non-default schema.
:param connection: Optional, use a specified connection. Default is
the ``contextual_connect`` for this ``Engine``.
"""
with self._optional_conn_ctx_manager(connection) as conn:
if not schema:
schema = self.dialect.default_schema_name
return self.dialect.get_table_names(conn, schema)
def has_table(self, table_name, schema=None):
"""Return True if the given backend has a table of the given name.
.. seealso::
:ref:`metadata_reflection_inspector` - detailed schema inspection
using the :class:`.Inspector` interface.
:class:`.quoted_name` - used to pass quoting information along
with a schema identifier.
"""
return self.run_callable(self.dialect.has_table, table_name, schema)
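    # Reflection sketch (the schema and table names are hypothetical):
    #
    #     names = engine.table_names(schema="reporting")
    #     if engine.has_table("orders", schema="reporting"):
    #         pass  # safe to reflect or query the table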
def _wrap_pool_connect(self, fn, connection):
dialect = self.dialect
try:
return fn()
except dialect.dbapi.Error as e:
if connection is None:
Connection._handle_dbapi_exception_noconnection(
e, dialect, self)
else:
util.reraise(*sys.exc_info())
def raw_connection(self, _connection=None):
"""Return a "raw" DBAPI connection from the connection pool.
The returned object is a proxied version of the DBAPI
connection object used by the underlying driver in use.
The object will have all the same behavior as the real DBAPI
connection, except that its ``close()`` method will result in the
connection being returned to the pool, rather than being closed
for real.
This method provides direct DBAPI connection access for
special situations when the API provided by :class:`.Connection`
is not needed. When a :class:`.Connection` object is already
present, the DBAPI connection is available using
the :attr:`.Connection.connection` accessor.
.. seealso::
:ref:`dbapi_connections`
"""
return self._wrap_pool_connect(
self.pool.unique_connection, _connection)
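    # Sketch of direct DBAPI access as described above (the procedure name is
    # made up); the proxied close() returns the connection to the pool instead
    # of really closing it.
    #
    #     dbapi_conn = engine.raw_connection()
    #     try:
    #         cursor = dbapi_conn.cursor()
    #         cursor.callproc("my_procedure")
    #         cursor.close()
    #     finally:
    #         dbapi_conn.close()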
class OptionEngine(Engine):
def __init__(self, proxied, execution_options):
self._proxied = proxied
self.url = proxied.url
self.dialect = proxied.dialect
self.logging_name = proxied.logging_name
self.echo = proxied.echo
log.instance_logger(self, echoflag=self.echo)
self.dispatch = self.dispatch._join(proxied.dispatch)
self._execution_options = proxied._execution_options
self.update_execution_options(**execution_options)
def _get_pool(self):
return self._proxied.pool
def _set_pool(self, pool):
self._proxied.pool = pool
pool = property(_get_pool, _set_pool)
def _get_has_events(self):
return self._proxied._has_events or \
self.__dict__.get('_has_events', False)
def _set_has_events(self, value):
self.__dict__['_has_events'] = value
_has_events = property(_get_has_events, _set_has_events)
| mit | -5,926,298,786,782,750,000 | 36.197751 | 84 | 0.589242 | false | 4.764992 | false | false | false |
QuantConnect/Lean | Algorithm.Python/EmaCrossFuturesFrontMonthAlgorithm.py | 3 | 3852 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from AlgorithmImports import *
### <summary>
### This example demonstrates how to implement a cross moving average for the futures front contract
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="indicator" />
### <meta name="tag" content="futures" />
class EmaCrossFuturesFrontMonthAlgorithm(QCAlgorithm):
def Initialize(self):
self.SetStartDate(2013, 10, 8)
self.SetEndDate(2013, 10, 10)
self.SetCash(1000000)
future = self.AddFuture(Futures.Metals.Gold);
# Only consider the front month contract
# Update the universe once per day to improve performance
future.SetFilter(lambda x: x.FrontMonth().OnlyApplyFilterAtMarketOpen())
# Symbol of the current contract
self.symbol = None
# Create two exponential moving averages
self.fast = ExponentialMovingAverage(100)
self.slow = ExponentialMovingAverage(300)
self.tolerance = 0.001
self.consolidator = None
# Add a custom chart to track the EMA cross
chart = Chart('EMA Cross')
chart.AddSeries(Series('Fast', SeriesType.Line, 0))
chart.AddSeries(Series('Slow', SeriesType.Line, 0))
self.AddChart(chart)
def OnData(self,slice):
holding = None if self.symbol is None else self.Portfolio.get(self.symbol)
if holding is not None:
# Buy the futures' front contract when the fast EMA is above the slow one
if self.fast.Current.Value > self.slow.Current.Value * (1 + self.tolerance):
if not holding.Invested:
self.SetHoldings(self.symbol, .1)
self.PlotEma()
elif holding.Invested:
self.Liquidate(self.symbol)
self.PlotEma()
def OnSecuritiesChanged(self, changes):
if len(changes.RemovedSecurities) > 0:
# Remove the consolidator for the previous contract
# and reset the indicators
if self.symbol is not None and self.consolidator is not None:
self.SubscriptionManager.RemoveConsolidator(self.symbol, self.consolidator)
self.fast.Reset()
self.slow.Reset()
            # We don't need to call self.Liquidate(self.symbol),
# since its positions are liquidated because the contract has expired.
# Only one security will be added: the new front contract
self.symbol = changes.AddedSecurities[0].Symbol
# Create a new consolidator and register the indicators to it
self.consolidator = self.ResolveConsolidator(self.symbol, Resolution.Minute)
self.RegisterIndicator(self.symbol, self.fast, self.consolidator)
self.RegisterIndicator(self.symbol, self.slow, self.consolidator)
# Warm up the indicators
self.WarmUpIndicator(self.symbol, self.fast, Resolution.Minute)
self.WarmUpIndicator(self.symbol, self.slow, Resolution.Minute)
self.PlotEma()
def PlotEma(self):
self.Plot('EMA Cross', 'Fast', self.fast.Current.Value)
self.Plot('EMA Cross', 'Slow', self.slow.Current.Value)
| apache-2.0 | -205,375,199,850,199,550 | 41.8 | 100 | 0.671859 | false | 3.987578 | false | false | false |
ActiveState/code | recipes/Python/578296_Treat_Win32_Registry_like_Pythdict__pure/recipe-578296.py | 1 | 7506 | # From the recipe at http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/573466
# A backwards-compatible enhancement has been made to allow full access to registry value types while still using the dictionary metaphor
# converted to use _winreg instead of Win32 API from PyWin32
"""Slightly magical Win32api Registry -> Dictionary-like-object wrapper"""
import cPickle
import _winreg
class RegistryDict(object):
def __init__(self, keyhandle = _winreg.HKEY_LOCAL_MACHINE, keypath = [], flags = _winreg.KEY_READ):
"""If flags=None, then it will create the key.. otherwise pass a _winreg.KEY_* sam"""
self.keyhandle = None
self.open(keyhandle, keypath, flags)
@staticmethod
def massageIncomingRegistryValue((obj, objtype), bReturnType=False):
r=None
if objtype == _winreg.REG_BINARY and obj[:8]=='PyPickle':
obj = obj[8:]
r = (cPickle.loads(obj), objtype)
elif objtype == _winreg.REG_NONE:
r = (None, objtype)
elif objtype in (_winreg.REG_SZ, _winreg.REG_EXPAND_SZ,
_winreg.REG_RESOURCE_LIST, _winreg.REG_LINK,
_winreg.REG_BINARY, _winreg.REG_DWORD,
_winreg.REG_DWORD_LITTLE_ENDIAN, _winreg.REG_DWORD_BIG_ENDIAN,
_winreg.REG_MULTI_SZ):
r = (obj,objtype)
        if r is None:
raise NotImplementedError, "Registry type 0x%08X not supported" % (objtype,)
if bReturnType:
return r
else:
return r[0]
def __getitem__(self, key):
bReturnType=False
if (type(key) is tuple) and (len(key)==1):
key = key[0]
bReturnType=True
# is it data?
try:
return self.massageIncomingRegistryValue(_winreg.QueryValueEx(self.keyhandle, key),bReturnType)
except:
if key == '':
# Special case: this dictionary key means "default value"
raise KeyError, key
pass
# it's probably a registry key then
try:
return RegistryDict(self.keyhandle, key, _winreg.KEY_ALL_ACCESS)
except:
pass
# must not be there
raise KeyError, key
def has_key(self, key):
return self.__contains__(key)
def __contains__(self, key):
try:
self.__getitem__(key)
return 1
except KeyError:
return 0
def copy(self):
return dict(self.iteritems())
def __repr__(self):
return repr(self.copy())
def __str__(self):
return self.__repr__()
def __cmp__(self, other):
# Do the objects have the same state?
return self.keyhandle == other.keyhandle
def __hash__(self):
raise TypeError, "RegistryDict objects are unhashable"
def clear(self):
keylist = list(self.iterkeys())
# Two-step to avoid changing the set while iterating over it
for k in keylist:
del self[k]
def iteritems_data(self):
i = 0
# yield data
try:
while 1:
s, obj, objtype = _winreg.EnumValue(self.keyhandle, i)
yield s, self.massageIncomingRegistryValue((obj, objtype))
i += 1
except:
pass
def iteritems_children(self, access=_winreg.KEY_ALL_ACCESS):
i = 0
try:
while 1:
s = _winreg.EnumKey(self.keyhandle, i)
yield s, RegistryDict(self.keyhandle, [s], access)
i += 1
except:
pass
def iteritems(self, access=_winreg.KEY_ALL_ACCESS):
# yield children
for item in self.iteritems_data():
yield item
for item in self.iteritems_children(access):
yield item
def iterkeys_data(self):
for key, value in self.iteritems_data():
yield key
def iterkeys_children(self, access=_winreg.KEY_ALL_ACCESS):
for key, value in self.iteritems_children(access):
yield key
def iterkeys(self):
for key, value in self.iteritems():
yield key
def itervalues_data(self):
for key, value in self.iteritems_data():
yield value
def itervalues_children(self, access=_winreg.KEY_ALL_ACCESS):
for key, value in self.iteritems_children(access):
yield value
def itervalues(self, access=_winreg.KEY_ALL_ACCESS):
for key, value in self.iteritems(access):
yield value
def items(self, access=_winreg.KEY_ALL_ACCESS):
return list(self.iteritems())
def keys(self):
return list(self.iterkeys())
def values(self, access=_winreg.KEY_ALL_ACCESS):
return list(self.itervalues(access))
def __delitem__(self, key):
# Delete a string value or a subkey, depending on the type
try:
item = self[key]
except:
return # Silently ignore bad keys
itemtype = type(item)
if itemtype is str:
_winreg.DeleteValue(self.keyhandle, key)
elif isinstance(item, RegistryDict):
# Delete everything in the subkey, then the subkey itself
item.clear()
_winreg.DeleteKey(self.keyhandle, key)
else:
raise ValueError, "Unknown item type in RegistryDict"
def __len__(self):
return len(self.items())
def __iter__(self):
return self.iterkeys()
def popitem(self):
try:
k, v = self.iteritems().next()
del self[k]
return k, v
except StopIteration:
raise KeyError, "RegistryDict is empty"
def get(self,key,default=None):
try:
return self.__getitem__(key)
except:
return default
def setdefault(self,key,default=None):
try:
return self.__getitem__(key)
except:
            self.__setitem__(key, default)
return default
def update(self,d):
for k,v in d.items():
self.__setitem__(k, v)
def __setitem__(self, item, value):
item = str(item)
pyvalue = type(value)
if pyvalue is tuple and len(value)==2:
valuetype = value[1]
value = value[0]
else:
if pyvalue is dict or isinstance(value, RegistryDict):
d = RegistryDict(self.keyhandle, item)
d.clear()
d.update(value)
return
if pyvalue is str:
valuetype = _winreg.REG_SZ
elif pyvalue is int:
valuetype = _winreg.REG_DWORD
else:
valuetype = _winreg.REG_BINARY
value = 'PyPickle' + cPickle.dumps(value)
_winreg.SetValueEx(self.keyhandle, item, 0, valuetype, value)
def open(self, keyhandle, keypath, flags = None):
if type(keypath) is str:
keypath = keypath.split('\\')
if flags is None:
for subkey in keypath:
keyhandle = _winreg.CreateKey(keyhandle, subkey)
else:
for subkey in keypath:
keyhandle = _winreg.OpenKeyEx(keyhandle, subkey, 0, flags)
self.keyhandle = keyhandle
def close(self):
try:
_winreg.CloseKey(self.keyhandle)
except:
pass
## end of http://code.activestate.com/recipes/573466/ }}}
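# Usage sketch (not part of the original recipe). The key path and value names
# are assumptions chosen for illustration; this requires Windows and Python 2,
# where _winreg is available.
if __name__ == '__main__':
    demo = RegistryDict(_winreg.HKEY_CURRENT_USER,
                        ['Software', 'MyDemoApp'], flags=None)  # create if missing
    demo['greeting'] = 'hello'       # stored as REG_SZ
    demo['launch_count'] = 42        # stored as REG_DWORD
    print demo.get('greeting')
    print demo.keys()
    _winreg.DeleteKey(_winreg.HKEY_CURRENT_USER, r'Software\MyDemoApp')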
| mit | 2,110,155,171,997,337,300 | 30.940426 | 127 | 0.555156 | false | 4.172318 | false | false | false |
danmergens/mi-instrument | mi/instrument/wetlabs/fluorometer/flord_d/test/test_driver.py | 4 | 19945 | """
@package mi.instrument.wetlabs.fluorometer.flord_d.test.test_driver
@file marine-integrations/mi/instrument/wetlabs/fluorometer/flort_d/driver.py
@author Tapana Gupta
@brief Test cases for flord_d driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/test_driver
$ bin/test_driver -u [-t testname]
$ bin/test_driver -i [-t testname]
$ bin/test_driver -q [-t testname]
"""
from mock import Mock
from nose.plugins.attrib import attr
from mi.core.log import get_logger
from mi.idk.unit_test import InstrumentDriverTestCase
from mi.idk.unit_test import InstrumentDriverUnitTestCase
from mi.idk.unit_test import ParameterTestConfigKey
from mi.core.instrument.chunker import StringChunker
from mi.instrument.wetlabs.fluorometer.flort_d.test.test_driver import DriverTestMixinSub
from mi.instrument.wetlabs.fluorometer.flord_d.driver import InstrumentDriver
from mi.instrument.wetlabs.fluorometer.flord_d.driver import FlordProtocol
from mi.instrument.wetlabs.fluorometer.flort_d.driver import FlordMenuParticle, FlordSampleParticle
from mi.instrument.wetlabs.fluorometer.flort_d.driver import DataParticleType
from mi.instrument.wetlabs.fluorometer.flort_d.driver import InstrumentCommands
from mi.instrument.wetlabs.fluorometer.flort_d.driver import ProtocolState
from mi.instrument.wetlabs.fluorometer.flort_d.driver import ProtocolEvent
from mi.instrument.wetlabs.fluorometer.flort_d.driver import Capability
from mi.instrument.wetlabs.fluorometer.flort_d.driver import Parameter
from mi.instrument.wetlabs.fluorometer.flort_d.driver import Prompt
from mi.instrument.wetlabs.fluorometer.flort_d.driver import FlordMenuParticleKey
from mi.instrument.wetlabs.fluorometer.flort_d.driver import FlordSampleParticleKey
from mi.instrument.wetlabs.fluorometer.flort_d.driver import NEWLINE
from mi.core.instrument.instrument_driver import DriverProtocolState, DriverConfigKey
# SAMPLE DATA FOR TESTING
from mi.instrument.wetlabs.fluorometer.flord_d.test.sample_data import SAMPLE_MNU_RESPONSE
from mi.instrument.wetlabs.fluorometer.flord_d.test.sample_data import SAMPLE_SAMPLE_RESPONSE
from mi.instrument.wetlabs.fluorometer.flord_d.test.sample_data import SAMPLE_MET_RESPONSE
from mi.core.exceptions import InstrumentCommandException, SampleException
__author__ = 'Tapana Gupta'
__license__ = 'Apache 2.0'
log = get_logger()
###
# Driver parameters for the tests
###
InstrumentDriverTestCase.initialize(
driver_module='mi.instrument.wetlabs.fluorometer.flord_d.driver',
driver_class="FlordInstrumentDriver",
instrument_agent_resource_id='3DLE2A',
instrument_agent_name='wetlabs_fluorometer_flord_d',
instrument_agent_packet_config=DataParticleType(),
driver_startup_config={
DriverConfigKey.PARAMETERS: {Parameter.RUN_WIPER_INTERVAL: '00:10:00',
Parameter.RUN_CLOCK_SYNC_INTERVAL: '00:10:00',
Parameter.RUN_ACQUIRE_STATUS_INTERVAL: '00:10:00'}}
)
#################################### RULES ####################################
# #
# Common capabilities in the base class #
# #
# Instrument specific in the derived class #
# #
# Generator spits out either stubs or comments describing test this here, #
# test that there. #
# #
# Qualification tests are driven through the instrument_agent #
# #
###############################################################################
###############################################################################
# DRIVER TEST MIXIN #
# Defines a set of constants and assert methods used for data particle #
# verification #
# #
# In python, mixin classes are classes designed such that they wouldn't be #
# able to stand on their own, but are inherited by other classes generally #
# using multiple inheritance. #
# #
# This class defines a configuration structure for testing and common assert #
# methods for validating data particles. #
###############################################################################
class FlordDriverTestMixinSub(DriverTestMixinSub):
"""
Mixin class used for storing data particle constance and common data assertion methods.
"""
# Create some short names for the parameter test config
TYPE = ParameterTestConfigKey.TYPE
READONLY = ParameterTestConfigKey.READONLY
STARTUP = ParameterTestConfigKey.STARTUP
DA = ParameterTestConfigKey.DIRECT_ACCESS
VALUE = ParameterTestConfigKey.VALUE
REQUIRED = ParameterTestConfigKey.REQUIRED
DEFAULT = ParameterTestConfigKey.DEFAULT
STATES = ParameterTestConfigKey.STATES
_Driver = InstrumentDriver
_flordD_mnu_parameters = {
FlordMenuParticleKey.SERIAL_NUM: {TYPE: unicode, VALUE: 'BBFL2W-993', REQUIRED: True},
FlordMenuParticleKey.FIRMWARE_VER: {TYPE: unicode, VALUE: 'Triplet5.20', REQUIRED: True},
FlordMenuParticleKey.AVE: {TYPE: int, VALUE: 1, REQUIRED: True},
FlordMenuParticleKey.PKT: {TYPE: int, VALUE: 0, REQUIRED: True},
FlordMenuParticleKey.M1D: {TYPE: int, VALUE: 0, REQUIRED: True},
FlordMenuParticleKey.M2D: {TYPE: int, VALUE: 0, REQUIRED: True},
FlordMenuParticleKey.M1S: {TYPE: float, VALUE: 1.000E+00, REQUIRED: True},
FlordMenuParticleKey.M2S: {TYPE: float, VALUE: 1.000E+00, REQUIRED: True},
FlordMenuParticleKey.SEQ: {TYPE: int, VALUE: 0, REQUIRED: True},
FlordMenuParticleKey.RAT: {TYPE: int, VALUE: 19200, REQUIRED: True},
FlordMenuParticleKey.SET: {TYPE: int, VALUE: 0, REQUIRED: True},
FlordMenuParticleKey.REC: {TYPE: int, VALUE: 1, REQUIRED: True},
FlordMenuParticleKey.MAN: {TYPE: int, VALUE: 0, REQUIRED: True},
FlordMenuParticleKey.INT: {TYPE: unicode, VALUE: '00:00:10', REQUIRED: True},
FlordMenuParticleKey.DAT: {TYPE: unicode, VALUE: '07/11/13', REQUIRED: True},
FlordMenuParticleKey.CLK: {TYPE: unicode, VALUE: '12:48:34', REQUIRED: True},
FlordMenuParticleKey.MST: {TYPE: unicode, VALUE: '12:48:31', REQUIRED: True},
FlordMenuParticleKey.MEM: {TYPE: int, VALUE: 4095, REQUIRED: True}
}
_flordD_sample_parameters = {
FlordSampleParticleKey.wave_beta: {TYPE: int, VALUE: 700, REQUIRED: True},
FlordSampleParticleKey.raw_sig_beta: {TYPE: int, VALUE: 4130, REQUIRED: True},
FlordSampleParticleKey.wave_chl: {TYPE: int, VALUE: 695, REQUIRED: True},
FlordSampleParticleKey.raw_sig_chl: {TYPE: int, VALUE: 1018, REQUIRED: True},
FlordSampleParticleKey.raw_temp: {TYPE: int, VALUE: 525, REQUIRED: True},
FlordSampleParticleKey.SIG_1_OFFSET: {TYPE: float, VALUE: 0, REQUIRED: True},
FlordSampleParticleKey.SIG_2_OFFSET: {TYPE: float, VALUE: 0, REQUIRED: True},
FlordSampleParticleKey.SIG_1_SCALE_FACTOR: {TYPE: int, VALUE: 0, REQUIRED: True},
FlordSampleParticleKey.SIG_2_SCALE_FACTOR: {TYPE: int, VALUE: 0, REQUIRED: True}
}
# #
# Driver Parameter Methods
# #
def assert_particle_mnu(self, data_particle, verify_values=False):
"""
Verify flortd_mnu particle
@param data_particle: FlortDMNU_ParticleKey data particle
@param verify_values: bool, should we verify parameter values
"""
self.assert_data_particle_keys(FlordMenuParticleKey, self._flordD_mnu_parameters)
self.assert_data_particle_header(data_particle, DataParticleType.FLORDD_MNU)
self.assert_data_particle_parameters(data_particle, self._flordD_mnu_parameters, verify_values)
def assert_particle_sample(self, data_particle, verify_values=False):
"""
Verify flortd_sample particle
@param data_particle: FlortDSample_ParticleKey data particle
@param verify_values: bool, should we verify parameter values
"""
self.assert_data_particle_keys(FlordSampleParticleKey, self._flordD_sample_parameters)
self.assert_data_particle_header(data_particle, DataParticleType.FLORDD_SAMPLE)
self.assert_data_particle_parameters(data_particle, self._flordD_sample_parameters, verify_values)
###############################################################################
# UNIT TESTS #
# Unit tests test the method calls and parameters using Mock. #
# #
# These tests are especially useful for testing parsers and other data #
# handling. The tests generally focus on small segments of code, like a #
# single function call, but more complex code using Mock objects. However #
# if you find yourself mocking too much maybe it is better as an #
# integration test. #
# #
# Unit tests do not start up external processes like the port agent or #
# driver process. #
###############################################################################
@attr('UNIT', group='mi')
class DriverUnitTest(InstrumentDriverUnitTestCase, FlordDriverTestMixinSub):
def setUp(self):
InstrumentDriverUnitTestCase.setUp(self)
def test_driver_enums(self):
"""
Verify that all driver enumeration has no duplicate values that might cause confusion. Also
do a little extra validation for the capabilities
"""
self.assert_enum_has_no_duplicates(DataParticleType())
self.assert_enum_has_no_duplicates(ProtocolState())
self.assert_enum_has_no_duplicates(ProtocolEvent())
self.assert_enum_has_no_duplicates(Parameter())
self.assert_enum_has_no_duplicates(InstrumentCommands())
# Test capabilities for duplicates, them verify that capabilities is a subset of protocol events
self.assert_enum_has_no_duplicates(Capability())
self.assert_enum_complete(Capability(), ProtocolEvent())
def test_driver_schema(self):
"""
Get the driver schema and verify it is configured properly
"""
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_driver_schema(driver, self._driver_parameters, self._driver_capabilities)
def test_chunker(self):
"""
Test the chunker and verify the particles created.
"""
chunker = StringChunker(FlordProtocol.sieve_function)
self.assert_chunker_sample(chunker, SAMPLE_MNU_RESPONSE)
self.assert_chunker_sample_with_noise(chunker, SAMPLE_MNU_RESPONSE)
self.assert_chunker_fragmented_sample(chunker, SAMPLE_MNU_RESPONSE, 128)
self.assert_chunker_combined_sample(chunker, SAMPLE_MNU_RESPONSE)
self.assert_chunker_sample(chunker, SAMPLE_MET_RESPONSE)
self.assert_chunker_sample_with_noise(chunker, SAMPLE_MET_RESPONSE)
self.assert_chunker_fragmented_sample(chunker, SAMPLE_MET_RESPONSE, 32)
self.assert_chunker_combined_sample(chunker, SAMPLE_MET_RESPONSE)
self.assert_chunker_sample(chunker, SAMPLE_SAMPLE_RESPONSE)
self.assert_chunker_sample_with_noise(chunker, SAMPLE_SAMPLE_RESPONSE)
self.assert_chunker_fragmented_sample(chunker, SAMPLE_SAMPLE_RESPONSE, 32)
self.assert_chunker_combined_sample(chunker, SAMPLE_SAMPLE_RESPONSE)
def test_corrupt_data_sample(self):
particle = FlordMenuParticle(SAMPLE_MNU_RESPONSE.replace('Ave 1', 'Ave foo'))
with self.assertRaises(SampleException):
particle.generate()
particle = FlordSampleParticle(SAMPLE_SAMPLE_RESPONSE.replace('700', 'foo'))
with self.assertRaises(SampleException):
particle.generate()
def test_got_data(self):
"""
Verify sample data passed through the got data method produces the correct data particles
"""
# Create and initialize the instrument driver with a mock port agent
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_initialize_driver(driver)
self.assert_raw_particle_published(driver, True)
# Start validating data particles
self.assert_particle_published(driver, SAMPLE_MNU_RESPONSE, self.assert_particle_mnu, True)
self.assert_particle_published(driver, SAMPLE_SAMPLE_RESPONSE, self.assert_particle_sample, True)
def test_protocol_filter_capabilities(self):
"""
This tests driver filter_capabilities.
Iterate through available capabilities, and verify that they can pass successfully through the filter.
Test silly made up capabilities to verify they are blocked by filter.
"""
mock_callback = Mock(spec="PortAgentClient")
protocol = FlordProtocol(Prompt, NEWLINE, mock_callback)
driver_capabilities = Capability().list()
test_capabilities = Capability().list()
# Add a bogus capability that will be filtered out.
test_capabilities.append("BOGUS_CAPABILITY")
# Verify "BOGUS_CAPABILITY was filtered out
self.assertEquals(sorted(driver_capabilities),
sorted(protocol._filter_capabilities(test_capabilities)))
def test_capabilities(self):
"""
Verify the FSM reports capabilities as expected. All states defined in this dict must
also be defined in the protocol FSM.
"""
capabilities = {
ProtocolState.UNKNOWN: [ProtocolEvent.DISCOVER],
ProtocolState.COMMAND: [ProtocolEvent.GET,
ProtocolEvent.SET,
ProtocolEvent.START_DIRECT,
ProtocolEvent.START_AUTOSAMPLE,
ProtocolEvent.ACQUIRE_STATUS,
ProtocolEvent.RUN_WIPER,
ProtocolEvent.ACQUIRE_SAMPLE,
ProtocolEvent.CLOCK_SYNC],
ProtocolState.AUTOSAMPLE: [ProtocolEvent.STOP_AUTOSAMPLE,
ProtocolEvent.RUN_WIPER_SCHEDULED,
ProtocolEvent.SCHEDULED_CLOCK_SYNC,
ProtocolEvent.SCHEDULED_ACQUIRE_STATUS,
ProtocolEvent.GET],
ProtocolState.DIRECT_ACCESS: [ProtocolEvent.STOP_DIRECT,
ProtocolEvent.EXECUTE_DIRECT]
}
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_capabilities(driver, capabilities)
def test_command_response(self):
"""
Test response with no errors
Test the general command response will raise an exception if the command is not recognized by
the instrument
"""
mock_callback = Mock()
protocol = FlordProtocol(Prompt, NEWLINE, mock_callback)
# test response with no errors
protocol._parse_command_response(SAMPLE_MNU_RESPONSE, None)
# test response with 'unrecognized command'
response = False
try:
protocol._parse_command_response('unrecognized command', None)
except InstrumentCommandException:
response = True
finally:
self.assertTrue(response)
# test correct response with error
response = False
try:
protocol._parse_command_response(SAMPLE_MET_RESPONSE + NEWLINE + 'unrecognized command', None)
except InstrumentCommandException:
response = True
finally:
self.assertTrue(response)
def test_run_wiper_response(self):
"""
Test response with no errors
Test the run wiper response will raise an exception:
1. if the command is not recognized by
2. the status of the wiper is bad
"""
mock_callback = Mock()
protocol = FlordProtocol(Prompt, NEWLINE, mock_callback)
# test response with no errors
protocol._parse_run_wiper_response('mvs 1', None)
# test response with 'unrecognized command'
response = False
try:
protocol._parse_run_wiper_response('unrecognized command', None)
except InstrumentCommandException:
response = True
finally:
self.assertTrue(response)
# test response with error
response = False
try:
protocol._parse_run_wiper_response("mvs 0" + NEWLINE, None)
except InstrumentCommandException:
response = True
finally:
self.assertTrue(response)
def test_discover_state(self):
"""
Test discovering the instrument in the COMMAND state and in the AUTOSAMPLE state
"""
mock_callback = Mock()
protocol = FlordProtocol(Prompt, NEWLINE, mock_callback)
# COMMAND state, wait for particles returns an empty list
protocol.wait_for_particles = Mock(return_value=[])
next_state, result = protocol._handler_unknown_discover()
self.assertEqual(next_state, DriverProtocolState.COMMAND)
# AUTOSAMPLE state, wait for particles returns one or more particles
protocol.wait_for_particles = Mock(return_value=[1])
next_state, result = protocol._handler_unknown_discover()
self.assertEqual(next_state, DriverProtocolState.AUTOSAMPLE)
def test_create_commands(self):
"""
Test creating different types of commands
1. command with no end of line
2. simple command with no parameters
3. command with parameter
"""
# create the operator commands
mock_callback = Mock()
protocol = FlordProtocol(Prompt, NEWLINE, mock_callback)
# !!!!!
cmd = protocol._build_no_eol_command('!!!!!')
self.assertEqual(cmd, '!!!!!')
# $met
cmd = protocol._build_simple_command('$met')
self.assertEqual(cmd, '$met' + NEWLINE)
# $mnu
cmd = protocol._build_simple_command('$mnu')
self.assertEqual(cmd, '$mnu' + NEWLINE)
# $run
cmd = protocol._build_simple_command('$run')
self.assertEqual(cmd, '$run' + NEWLINE)
# parameters
cmd = protocol._build_single_parameter_command('$ave', Parameter.MEASUREMENTS_PER_REPORTED, 14)
self.assertEqual(cmd, '$ave 14' + NEWLINE)
cmd = protocol._build_single_parameter_command('$m2d', Parameter.MEASUREMENT_2_DARK_COUNT, 34)
self.assertEqual(cmd, '$m2d 34' + NEWLINE)
cmd = protocol._build_single_parameter_command('$m1s', Parameter.MEASUREMENT_1_SLOPE, 23.1341)
self.assertEqual(cmd, '$m1s 23.1341' + NEWLINE)
cmd = protocol._build_single_parameter_command('$dat', Parameter.DATE, '041014')
self.assertEqual(cmd, '$dat 041014' + NEWLINE)
cmd = protocol._build_single_parameter_command('$clk', Parameter.TIME, '010034')
self.assertEqual(cmd, '$clk 010034' + NEWLINE)
| bsd-2-clause | -6,637,229,697,888,127,000 | 46.375297 | 110 | 0.620857 | false | 4.218486 | true | false | false |
gajim/gajim | gajim/common/modules/bits_of_binary.py | 1 | 6373 | # Copyright (C) 2018 Emmanuel Gil Peyrot <linkmauve AT linkmauve.fr>
#
# This file is part of Gajim.
#
# Gajim is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; version 3 only.
#
# Gajim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gajim. If not, see <http://www.gnu.org/licenses/>.
import logging
import hashlib
from base64 import b64decode
import nbxmpp
from nbxmpp.namespaces import Namespace
from nbxmpp.structs import StanzaHandler
from gajim.common import app
from gajim.common import configpaths
from gajim.common.modules.base import BaseModule
log = logging.getLogger('gajim.c.m.bob')
class BitsOfBinary(BaseModule):
def __init__(self, con):
BaseModule.__init__(self, con)
self.handlers = [
StanzaHandler(name='iq',
callback=self._answer_bob_request,
typ='get',
ns=Namespace.BOB),
]
# Used to track which cids are in-flight.
self.awaiting_cids = {}
def _answer_bob_request(self, _con, stanza, _properties):
self._log.info('Request from %s for BoB data', stanza.getFrom())
iq = stanza.buildReply('error')
err = nbxmpp.ErrorNode(nbxmpp.ERR_ITEM_NOT_FOUND)
iq.addChild(node=err)
self._log.info('Sending item-not-found')
self._con.connection.send(iq)
raise nbxmpp.NodeProcessed
def _on_bob_received(self, _nbxmpp_client, result, cid):
"""
Called when we receive BoB data
"""
if cid not in self.awaiting_cids:
return
if result.getType() == 'result':
            data = result.getTag('data', namespace=Namespace.BOB)
            if data is not None and data.getAttr('cid') == cid:
for func in self.awaiting_cids[cid]:
cb = func[0]
args = func[1]
pos = func[2]
bob_data = data.getData()
def recurs(node, cid, data):
if node.getData() == 'cid:' + cid:
node.setData(data)
else:
for child in node.getChildren():
recurs(child, cid, data)
recurs(args[pos], cid, bob_data)
cb(*args)
del self.awaiting_cids[cid]
return
# An error occurred, call callback without modifying data.
for func in self.awaiting_cids[cid]:
cb = func[0]
args = func[1]
cb(*args)
del self.awaiting_cids[cid]
def get_bob_data(self, cid, to, callback, args, position):
"""
Request for BoB (XEP-0231) and when data will arrive, call callback
with given args, after having replaced cid by it's data in
args[position]
"""
if cid in self.awaiting_cids:
            self.awaiting_cids[cid].append((callback, args, position))
else:
self.awaiting_cids[cid] = [(callback, args, position)]
iq = nbxmpp.Iq(to=to, typ='get')
iq.addChild(name='data', attrs={'cid': cid}, namespace=Namespace.BOB)
self._con.connection.SendAndCallForResponse(
iq, self._on_bob_received, {'cid': cid})
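# Caller sketch (illustrative only; the cid, JID and callback below are made
# up): get_bob_data() requests the data addressed by a cid and, when the IQ
# result arrives, substitutes it into args[position] before calling back.
#
#     def _draw(element):
#         pass  # render the element, now carrying the BoB payload
#
#     con.get_module('BitsOfBinary').get_bob_data(
#         'sha1+deadbeef@bob.xmpp.org', 'romeo@montague.lit/orchard',
#         _draw, [element], 0)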
def parse_bob_data(stanza):
data_node = stanza.getTag('data', namespace=Namespace.BOB)
if data_node is None:
return None
cid = data_node.getAttr('cid')
type_ = data_node.getAttr('type')
max_age = data_node.getAttr('max-age')
if max_age is not None:
try:
max_age = int(max_age)
except Exception:
log.exception(stanza)
return None
if cid is None or type_ is None:
log.warning('Invalid data node (no cid or type attr): %s', stanza)
return None
try:
algo_hash = cid.split('@')[0]
algo, hash_ = algo_hash.split('+')
except Exception:
log.exception('Invalid cid: %s', stanza)
return None
bob_data = data_node.getData()
if not bob_data:
log.warning('No data found: %s', stanza)
return None
filepath = configpaths.get('BOB') / algo_hash
if algo_hash in app.bob_cache or filepath.exists():
log.info('BoB data already cached')
return None
try:
bob_data = b64decode(bob_data)
except Exception:
log.warning('Unable to decode data')
log.exception(stanza)
return None
if len(bob_data) > 10000:
log.warning('%s: data > 10000 bytes', stanza.getFrom())
return None
try:
sha = hashlib.new(algo)
except ValueError as error:
log.warning(stanza)
log.warning(error)
return None
sha.update(bob_data)
if sha.hexdigest() != hash_:
log.warning('Invalid hash: %s', stanza)
return None
if max_age == 0:
app.bob_cache[algo_hash] = bob_data
else:
try:
with open(str(filepath), 'w+b') as file:
file.write(bob_data)
except Exception:
log.warning('Unable to save data')
log.exception(stanza)
return None
log.info('BoB data stored: %s', algo_hash)
return filepath
def store_bob_data(bob_data):
if bob_data is None:
return None
algo_hash = '%s+%s' % (bob_data.algo, bob_data.hash_)
filepath = configpaths.get('BOB') / algo_hash
if algo_hash in app.bob_cache or filepath.exists():
log.info('BoB data already cached')
return None
if bob_data.max_age == 0:
app.bob_cache[algo_hash] = bob_data.data
else:
try:
with open(str(filepath), 'w+b') as file:
file.write(bob_data.data)
except Exception:
log.exception('Unable to save data')
return None
log.info('BoB data stored: %s', algo_hash)
return filepath
def get_instance(*args, **kwargs):
return BitsOfBinary(*args, **kwargs), 'BitsOfBinary'
| gpl-3.0 | 107,577,102,495,828,690 | 30.240196 | 77 | 0.577279 | false | 3.809325 | false | false | false |
sql-viewer/SQLGlimpse | sqlviewer/urls.py | 1 | 2544 | """untitled URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url
from django.contrib import admin
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
from django.contrib.auth.views import login as login_view
from django.contrib.auth.views import logout as logout_view
from sqlviewer.glimpse import views as glimpse_views
from sqlviewer.connect import views as connect_views
import sqlviewer.glimpse.api
api = [
url(r'^api/v1/models/(?P<model_id>\d+)?$', sqlviewer.glimpse.api.ModelView.as_view()),
url(r'^api/v1/models/(?P<model_id>\d+)/versions/(?P<version_id>\d+)?$', sqlviewer.glimpse.api.VersionView.as_view()),
url(r'^api/v1/models/(?P<model_id>\d+)/versions/(?P<version_id>\d+)/tables(/)?$', sqlviewer.glimpse.api.table_details, name='api-table-details'),
url(r'^api/v1/models/(?P<model_id>\d+)/versions/(?P<version_id>\d+)/diagrams/(?P<diagram_id>\d+)?$', sqlviewer.glimpse.api.DiagramView.as_view()),
]
pages = [
url(r'^$', glimpse_views.models_list_view, name="home"),
url(r'^admin/', admin.site.urls),
url(r'^models/upload$', glimpse_views.model_upload_view, name='model_upload'),
url(r'^accounts/login/$', login_view, name='login'),
url(r'^accounts/logout/$', logout_view, name='logout'),
url(r'^models/(?P<model_id>\d+)/versions/(?P<version_id>\d+)$', glimpse_views.model_version_details_view, name='model_details'),
url(r'^models/(?P<model_id>\d+)/versions/(?P<version_id>\d+)/connect$', connect_views.connect_view, name='glimpse-connect'),
url(r'^models/(?P<model_id>\d+)/versions/(?P<version_id>\d+)/search$', glimpse_views.version_search_view, name='version_search'),
url(r'^models/(?P<model_id>\d+)/versions/(?P<version_id>\d+)/diagrams/(?P<diagram_id>\d+)$', glimpse_views.diagram_details_view, name='diagram_details'),
]
urlpatterns = pages + api
| gpl-2.0 | -6,223,715,915,570,532,000 | 52 | 157 | 0.692217 | false | 3.061372 | false | false | false |
demisto/content | Packs/MailListener_-_POP3/Integrations/MailListener_POP3/MailListener_POP3_test.py | 1 | 1261 | # -*- coding: iso-8859-1 -*-
import base64
from MailListener_POP3 import parse_mail_parts
def test_parse_mail_parts():
"""
Given
- Email data
When
- Email contains special characters
Then
- run parse_mail_parts method
    - Validate the result body.
"""
class MockEmailPart:
pass
part = MockEmailPart()
part._headers = [['content-transfer-encoding', 'quoted-printable']]
part._payload = "El Ni\xc3\xb1o"
parts = [part]
body, html, attachments = parse_mail_parts(parts)
assert body == 'El Nio'
def test_base64_mail_decode():
"""
Given
- base64 email data which could not be decoded into utf-8
When
- Email contains special characters
Then
- run parse_mail_parts method
- Validate that no exception is thrown
    - Validate the result body
"""
class MockEmailPart:
pass
test_payload = 'Foo\xbbBar=='
base_64_encoded_test_payload = base64.b64encode(test_payload)
part = MockEmailPart()
part._headers = [['content-transfer-encoding', 'base64']]
part._payload = base_64_encoded_test_payload
parts = [part]
body, html, attachments = parse_mail_parts(parts)
assert body.replace(u'\uFFFD', '?') == 'Foo?Bar=='
| mit | 4,606,614,345,251,001,300 | 22.792453 | 71 | 0.63521 | false | 3.74184 | true | false | false |
mardix/Webmaster | webmaster/app-skel/templates/advanced/views.py | 1 | 3244 | """
Webmaster: Your Application's View
"""
from webmaster import (View, flash, abort, session, request, url_for,
redirect, flash_data, get_flashed_data)
from webmaster.decorators import (route, menu, template, plugin, methods,
render_as_json, render_as_xml,
require_user_roles, login_required,
no_login_required)
from webmaster.ext import (mailer, cache, storage, recaptcha, csrf)
from webmaster.packages import (contact_page, user, publisher)
from webmaster.exceptions import (ApplicationError, ModelError, UserError)
from application import model
# ------------------------------------------------------------------------------
# /
# This is the entry point of the site
# All root based (/) endpoint could be placed in here
# It extends the contact_page module, to be accessed at '/contact'
#
@menu("Main Menu", order=1)
@plugin(contact_page.contact_page)
@route("/")
class Index(View):
@menu("Home", order=1)
def index(self):
self.meta_tags(title="Hello View!")
return {}
@menu("Boom")
@template("Index/index2.html", version="1.6.9.6")
def boom(self):
return {}
# ------------------------------------------------------------------------------
# /account
# The User Account section
# Extends user.account which forces the whole endpoint to be authenticated
# If an action needs not authentication, @no_login_required can be added
#
@menu("My Account", group_name="account", order=3, align_right=True, visible=False)
@plugin(user.auth, model=model.User)
class Account(View):
@menu("My Account", order=1)
def index(self):
self.meta_tags(title="My Account")
return {}
@menu("Upload Image Demo", order=2)
@route("upload", methods=["GET", "POST"])
def upload(self):
self.meta_tags(title="Upload Demo")
if request.method == "POST":
try:
_file = request.files.get('file')
if _file:
my_object = storage.upload(_file,
prefix="demo/",
public=True,
allowed_extensions=["gif", "jpg", "jpeg", "png"])
if my_object:
return redirect(url_for("Account:upload", object_name=my_object.name))
except Exception as e:
flash_error(e.message)
return redirect(url_for("Account:upload"))
my_object = None
object_name = request.args.get("object_name")
if object_name:
my_object = storage.get(object_name=object_name)
return {"my_object": my_object}
@menu("No Login", order=3)
@no_login_required
def no_login(self):
return {}
# ------------------------------------------------------------------------------
# /blog
# Using the publisher.page, we created a blog
#
@plugin(publisher.page,
model=model.Publisher,
query={"types": ["blog"]},
slug="slug",
title="Blog",
endpoint="",
menu={"name": "Blog"})
class Blog(View):
pass
| mit | -4,195,237,211,000,279,000 | 30.192308 | 96 | 0.527744 | false | 4.268421 | false | false | false |
Lugoues/beets-lyrics | beetsplug/lyrics/engines/engine_cdmi.py | 1 | 1070 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Filename: engine_cdmi.py
import engine,urllib2,urllib,re
class cdmi(engine.engine):
def __init__(self,proxy,locale=None):
engine.engine.__init__(self,proxy,locale)
self.netEncoder='utf8'
def request(self,artist,title):
url = '%s - %s' % (str(unicode(artist,self.locale).encode('utf8')),(unicode(title,self.locale).encode('utf8')))
url = urllib.quote(url)
url = 'http://www.cdmi.net/LRC/cgi-bin/lrc_search.cgi?lrc_id=%s' % url
try:
#print url
opener = urllib2.build_opener(urllib2.HTTPRedirectHandler,urllib2.ProxyHandler(self.proxy))
file=opener.open(url)
originalLrc=file.read()
file.close()
except IOError:
return (None,True)
else:
if(originalLrc.startswith('[ti:No Lyrics]')):
return (None,False)
else:
value = re.findall(r"\[ar:(.*?)\]",originalLrc)
if value:
artist = value[0]
value = re.findall(r"\[ti:(.*?)\]",originalLrc)
if value:
title = value[0]
return ([[artist,title,originalLrc]],False)
#def downIt(self,url):
#return url
| mit | -794,352,447,713,741,300 | 27.157895 | 113 | 0.652336 | false | 2.729592 | false | false | false |
IRIM-Technology-Transition-Lab/ur_cb2 | ur_cb2/receive/cb2_receive.py | 1 | 19023 | """A module to receive data from UR CB2 robots."""
# The MIT License (MIT)
#
# Copyright (c) 2016 GTRC.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import socket
import struct
import array
import threading
class URReceiver(object):
"""A class to receive and process data from a UR Robot
The receiving and processing can be run in a separate thread by calling
start(). The stop() command must be called before exiting to halt the
additional thread. Alternatively, receive(), decode(), and
print_parsed_data() can be called in sequence in order to receive,
decode, and print data. One should not call receive(), decode(), or any
of the print methods, if a separate thread is being used. You should
never write to any of the data fields externally, however you can read
from them. Python's atomic read/write architecture should prevent you
from getting any half baked results from basic types, for all lists and
tuples, you must lock using lock (recommend that you use `with lock:`
paradigm.
Attributes:
clean_data: Double array of length 101 for all of the data returned by
the robot
raw_data: String of complete raw data packet
__socket: The socket for communications
clean_packets: The Integer number of packets which have been received
cleanly
stub_packets: The Integer number of packets which have been received
as stubs
received: The total Integer number of complete data sets which have
been received
waiting_data: String to hold incomplete data sets
new_data: Boolean whether new data is available for processing
time: Double of time elapsed since the controller was started
target_joint_positions: 6 member Double list of target joint positions
target_joint_velocities: 6 member Double list of target joint velocities
target_joint_accelerations: 6 member Double list of target joint
accelerations
target_joint_currents: 6 member Double list of target joint currents
target_joint_moments: 6 member Double list of target joint moments as
torques
actual_joint_positions: 6 member Double list of actual joint positions
actual_joint_velocities: 6 member Double list of actual joint velocities
actual_joint_currents: 6 member Double list of actual joint currents
        tool_accelerometer: 3 member Double list of tool x, y and z accelerometer
values (software version 1.7)
force_tcp: 6 member Double list of generalised forces in the TCP
position: 6 member Double list of cartesian coordinates of the tool:
(x,y,z,rx,ry,rz), where rx, ry and rz is a rotation vector
representation of the tool orientation
tool_speed: 6 member Double list of speed of the tool given in cartesian
coordinates
digital_inputs: Current state of the digital inputs. NOTE: these are
bits encoded as int64_t, e.g. a value of 5 corresponds to bit 0 and
bit 2 set high
joint_temperature: 6 member Double list of temperature of each joint in
degrees celsius
controller_period: Double of controller real time thread execution time
robot_control_mode: Double of robot control mode (see
PolyScopeProgramServer on the "How to" page
joint_control_modes: 6 member Double list of joint control modes (see
PolyScopeProgramServer on the "How to" page) (only from software
version 1.8 and on)
run: Boolean on whether to run or not
__receiving_thread: Thread object for running the receiving and parsing
loops
verbose: Boolean defining whether or not to print data
lock: A threading lock which is used to protect data from race
conditions
_is_stopped: A boolean specifying whether the robot is stopped
"""
# Format specifier:
# ! : network (big endian)
# I : unsigned int, message size
# 85d : 85 doubles
# q : int64_t for digital inputs
# 15d : 15 doubles
#: Format spec for complete data packet
format = struct.Struct('! I 85d q 15d')
#: The format spec for the packet length field
formatLength = struct.Struct('! I')
#: The width to be given to name items when printing out
name_width = 30
#: The precision for printing data
precision = 7
double_format_string = "{:+0"+str(precision+4)+"."+str(precision)+"f}"
def __init__(self, open_socket, verbose=False):
"""Construct a UR Robot connection given connection parameters
Args:
open_socket (socket.socket): The socket to use for communications.
verbose (bool): Whether to print received data in main loop
"""
self.clean_data = array.array('d', [0] * 101)
self.raw_data = ''
self.__socket = open_socket
self.clean_packets = 0
self.stub_packets = 0
self.received = 0
self.waiting_data = ''
self.new_data = False
self.time = 0.0
self.target_joint_positions = [0.0]*6
self.target_joint_velocities = [0.0]*6
self.target_joint_accelerations = [0.0]*6
self.target_joint_currents = [0.0]*6
self.target_joint_moments = [0.0]*6
self.actual_joint_positions = [0.0]*6
self.actual_joint_velocities = [0.0]*6
self.actual_joint_currents = [0.0]*6
self.tool_accelerometer = [0.0]*3
self.force_tcp = [0.0]*6
self.position = [0.0]*6
self.tool_speed = [0.0]*6
self.digital_inputs = 0
self.joint_temperature = [0.0]*6
self.controller_period = 0.0
self.robot_control_mode = 0.0
self.joint_control_modes = [0.0]*6
self.run = False
self.__receiving_thread = None
self.verbose = verbose
self.lock = threading.Lock()
self._is_stopped = False
if verbose:
print "\033[2J" # Clear screen
def __del__(self):
"""Shutdown side thread and print aggregated connection stats"""
self.stop()
print "Received: "+str(self.received) + " data sets"
print "Received: "+str(self.clean_packets) + " clean packets"
print "Received: "+str(self.stub_packets) + " stub packets"
def decode(self):
"""Decode the data stored in the class's rawData field.
Only process the data if there is new data available. Unset the
self.newData flag upon completion. Note, this will lock the data set
and block execution in a number of other functions
"""
with self.lock:
if self.new_data:
self.clean_data = self.format.unpack(self.raw_data)
self.time = self.clean_data[1]
self.target_joint_positions = self.clean_data[2:8]
self.target_joint_velocities = self.clean_data[8:14]
self.target_joint_accelerations = self.clean_data[14:20]
self.target_joint_currents = self.clean_data[20:26]
self.target_joint_moments = self.clean_data[26:32]
self.actual_joint_positions = self.clean_data[32:38]
self.actual_joint_velocities = self.clean_data[38:44]
self.actual_joint_currents = self.clean_data[44:50]
self.tool_accelerometer = self.clean_data[50:53]
# unused = self.clean_data[53:68]
self.force_tcp = self.clean_data[68:74]
self.position = self.clean_data[74:80]
self.tool_speed = self.clean_data[80:86]
self.digital_inputs = self.clean_data[86]
self.joint_temperature = self.clean_data[87:93]
self.controller_period = self.clean_data[93]
# test value = self.clean_data[94]
self.robot_control_mode = self.clean_data[95]
self.joint_control_modes = self.clean_data[96:102]
self.new_data = False
self._is_stopped = self.is_stopped()
def receive(self):
"""Receive data from the UR Robot.
If an entire data set is not received, then store the data in a
temporary location (self.waitingData). Once a complete packet is
received, place the complete packet into self.rawData and set the
newData flag. Note, this will lock the data set and block execution in a
number of other functions once a full data set is built.
"""
incoming_data = self.__socket.recv(812) # expect to get 812 bytes
if len(incoming_data) == 812:
self.clean_packets += 1
else:
self.stub_packets += 1
if self.formatLength.unpack(incoming_data[0:4])[0] == 812:
self.waiting_data = incoming_data
else:
self.waiting_data += incoming_data
if len(self.waiting_data) == 812:
with self.lock:
self.raw_data = self.waiting_data
self.received += 1
self.new_data = True
def print_raw_data(self):
"""Print the raw data which is stored in self.raw_data.
Note, this will lock the data set and block execution in a number of
other functions
"""
with self.lock:
print "Received (raw): "+self.raw_data + "\n"
def print_data(self):
"""Print the processed data stored in self.clean_data
Note, this will lock the data set and block execution in a number of
other functions
"""
with self.lock:
print "Received (unpacked):\n "
print self.clean_data
print "\n"
def output_data_item(self, name, values):
"""Output item with name and values.
Formatting is specified by self.name_width and self.precision.
Args:
name (str): The name of the value
values (float, int, tuple of float, list of float): The list of
values
"""
to_print = ("%-"+str(self.name_width)+"s") % name
if isinstance(values, (list, tuple)):
to_print += ": [%s]" % ', '.join(self.double_format_string.format(x)
for x in values)
elif isinstance(values, (int, bool)):
to_print += ": [%s]" % str(values)
elif isinstance(values, float):
to_print += ": [%s]" % self.double_format_string.format(values)
else:
print "I don't know that data type: " + str(type(values))
print to_print
def print_parsed_data(self):
"""Print the parsed data
Note, this will lock the data set and block execution in a number of
other functions
"""
with self.lock:
print "\033[H"
self.output_data_item("Time since controller turn on",
self.time)
self.output_data_item("Target joint positions",
self.target_joint_positions)
self.output_data_item("Target joint velocities",
self.target_joint_velocities)
self.output_data_item("Target joint accelerations",
self.target_joint_accelerations)
self.output_data_item("Target joint currents",
self.target_joint_currents)
self.output_data_item("Target joint moments (torque)",
self.target_joint_moments)
self.output_data_item("Actual joint positions",
self.actual_joint_positions)
self.output_data_item("Actual joint velocities",
self.actual_joint_velocities)
self.output_data_item("Actual joint currents",
self.actual_joint_currents)
self.output_data_item("Tool accelerometer values",
self.tool_accelerometer)
self.output_data_item("Generalised forces in the TCP",
self.force_tcp)
self.output_data_item("Cartesian tool position",
self.position)
self.output_data_item("Cartesian tool speed",
self.tool_speed)
self.output_data_item("Joint temperatures (deg C)",
self.joint_temperature)
self.output_data_item("Controller period",
self.controller_period)
self.output_data_item("Robot control mode",
self.robot_control_mode)
self.output_data_item("Joint control modes",
self.joint_control_modes)
print ((("%-"+str(self.name_width)+"s") % "Digital Input Number") +
": " + '|'.join('{:^2d}'.format(x) for x in range(0, 18)))
print ((("%-"+str(self.name_width)+"s") % "Digital Input Value: ") +
": " + '|'.join('{:^2s}'.format(x) for x in '{:018b}'.format(
self.digital_inputs)[::-1]))
self.output_data_item("Is Stopped:",
self._is_stopped)
def start(self):
"""Spawn a new thread for receiving and run it"""
if (self.__receiving_thread is None or
not self.__receiving_thread.is_alive()):
self.run = True
self.__receiving_thread = threading.Thread(group=None,
target=self.loop,
name='receiving_thread',
args=(),
kwargs={})
self.__receiving_thread.start()
def loop(self):
"""The main loop which receives, decodes, and optionally prints data"""
while self.run:
self.receive()
self.decode()
if self.verbose:
self.print_parsed_data()
def stop(self):
"""Stops execution of the auxiliary receiving thread"""
if self.__receiving_thread is not None:
if self.__receiving_thread.is_alive():
self.verbose_print('attempting to shutdown auxiliary thread',
'*')
self.run = False # Python writes like this are atomic
self.__receiving_thread.join()
self.verbose_print('\033[500D')
self.verbose_print('\033[500C')
self.verbose_print('-', '-', 40)
if self.__receiving_thread.is_alive():
self.verbose_print('failed to shutdown auxiliary thread',
'*')
else:
self.verbose_print('shutdown auxiliary thread', '*')
else:
self.verbose_print('auxiliary thread already shutdown', '*')
else:
self.verbose_print('no auxiliary threads exist', '*')
def verbose_print(self, string_input, emphasis='', count=5):
"""Print input if verbose is set
Args:
string_input (str): The input string to be printed.
emphasis (str): Emphasis character to be placed around input.
count (int): Number of emphasis characters to use.
"""
if self.verbose:
if emphasis == '':
print string_input
else:
print (emphasis*count + " " + string_input + " " +
emphasis * count)
def is_stopped(self, error=0.005):
"""Check whether the robot is stopped.
Check whether the joint velocities are all below some error. Note, this
will lock the data set and block execution in a number of other
functions
Args:
error (float): The error range to define "stopped"
Returns: Boolean, whether the robot is stopped.
"""
with self.lock:
to_return = (
all(v == 0 for v in self.target_joint_velocities) and
all(v < error for v in self.actual_joint_velocities))
return to_return
def at_goal(self, goal, cartesian, error=0.005):
"""Check whether the robot is at a goal point.
Check whether the differences between the joint or cartesian
coordinates are all below some error. This can be used to
determine if a move has been completed. It can also be used to
create blends by beginning the next move prior to the current one
reaching its goal. Note, this will lock the data set and block execution
in a number of other functions.
Args:
goal (6 member tuple or list of floats): The goal to check against
cartesian (bool): Whether the goal is in cartesian coordinates or
not (in which case joint coordinates)
error (float): The error range in which to consider an object at
its goal, in meters for cartesian space and radians for axis
space.
Returns: Boolean, whether the current position is within the error
range of the goal.
"""
with self.lock:
to_return = (
all(abs(g-a) < error for g, a in zip(self.position, goal))
if cartesian else
all(abs(g-a) < error for g, a in
zip(self.actual_joint_positions, goal)))
return to_return
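    # Illustrative use of the two checks above (a rough sketch only; the
    # receiver instance name and the goal values are made up, not part of
    # this module):
    #   >>> while not receiver.at_goal(goal_pose, cartesian=True):
    #   ...     time.sleep(0.01)
    #   >>> receiver.is_stopped()
    #   True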
def __enter__(self):
"""Enters the URRobot receiver from a with statement"""
return self
def __exit__(self, *_):
"""Exits at the end of a context manager statement by destructing."""
self.stop()
| mit | -9,197,845,572,874,843,000 | 43.76 | 80 | 0.585449 | false | 4.433232 | false | false | false |
hilgroth/fiware-IoTAgent-Cplusplus | tests/e2e_tests/common/user_steps.py | 1 | 17001 | from iotqautils.gtwRest import Rest_Utils_SBC
from common.gw_configuration import CBROKER_URL,CBROKER_HEADER,CBROKER_PATH_HEADER,IOT_SERVER_ROOT,DEF_ENTITY_TYPE,MANAGER_SERVER_ROOT
from lettuce import world
iotagent = Rest_Utils_SBC(server_root=IOT_SERVER_ROOT+'/iot')
iota_manager = Rest_Utils_SBC(server_root=MANAGER_SERVER_ROOT+'/iot')
URLTypes = {
"IoTUL2": "/iot/d",
"IoTRepsol": "/iot/repsol",
"IoTEvadts": "/iot/evadts",
"IoTTT": "/iot/tt",
"IoTMqtt": "/iot/mqtt"
}
ProtocolTypes = {
"IoTUL2": "PDI-IoTA-UltraLight",
"IoTTT": "PDI-IoTA-ThinkingThings",
"IoTMqtt": "PDI-IoTA-MQTT-UltraLight"
}
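# For reference, the two lookup tables above map a protocol label to its HTTP
# resource and to the protocol name used by the manager API, e.g.:
#   URLTypes.get("IoTUL2")      -> "/iot/d"
#   ProtocolTypes.get("IoTUL2") -> "PDI-IoTA-UltraLight"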
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
class UserSteps(object):
world.service_exists = False
world.service_path_exists = False
world.device_exists = False
def service_created(self, service_name, service_path={}, resource={}):
headers = {}
params = {}
headers[CBROKER_HEADER] = str(service_name)
if service_path:
if not service_path == 'void':
headers[CBROKER_PATH_HEADER] = str(service_path)
else:
headers[CBROKER_PATH_HEADER] = '/path_' + str(service_name)
if resource:
params['resource']= resource
service = iotagent.get_service('', headers, params)
if service.status_code == 200:
serv = service.json()
if serv['count'] == 1:
world.service_exists = True
if resource:
world.service_path_exists = True
return True
else:
return False
else:
return False
def device_created(self, service_name, device_name, service_path={}):
headers = {}
headers[CBROKER_HEADER] = str(service_name)
if service_path:
if not service_path=='void':
headers[CBROKER_PATH_HEADER] = str(service_path)
else:
headers[CBROKER_PATH_HEADER] = '/path_' + str(service_name)
device = iotagent.get_device(device_name, headers)
if device.status_code == 200:
world.device_exists=True
return True
else:
return False
def create_device(self, service_name, device_name, service_path={}, endpoint={}, commands={}, entity_name={}, entity_type={}, attributes={}, static_attributes={}, protocol={}, manager={}):
headers = {}
if not service_name=='void':
headers[CBROKER_HEADER] = str(service_name)
if service_path:
if not service_path=='void':
headers[CBROKER_PATH_HEADER] = str(service_path)
else:
headers[CBROKER_PATH_HEADER] = '/path_' + str(service_name)
device={
"devices":[
{
# "device_id": device_name
}
]
}
if device_name:
if device_name=='void':
device_name=""
device['devices'][0]['device_id'] = device_name
if commands:
device['devices'][0]['commands'] = commands
if endpoint:
device['devices'][0]['endpoint'] = endpoint
if entity_type:
device['devices'][0]['entity_type'] = entity_type
# else:
# device['devices'][0]['entity_type'] = DEF_ENTITY_TYPE
if entity_name:
device['devices'][0]['entity_name'] = entity_name
if attributes:
device['devices'][0]['attributes'] = attributes
if static_attributes:
device['devices'][0]['static_attributes'] = static_attributes
if protocol:
if protocol=="void":
protocol=""
device['devices'][0]['protocol'] = protocol
if manager:
req = iota_manager.post_device(device,headers)
else:
req = iotagent.post_device(device,headers)
# assert req.status_code == 201, 'ERROR: ' + req.text + "El device {} no se ha creado correctamente".format(device_name)
return req
def create_service(self, service_name, protocol, attributes={}, static_attributes={}):
headers = {}
headers[CBROKER_HEADER] = service_name
headers[CBROKER_PATH_HEADER] = '/path_' + str(service_name)
resource = URLTypes.get(protocol)
        if (protocol == 'IoTTT') or (protocol == 'IoTRepsol'):
apikey=''
else:
apikey='apikey_' + str(service_name)
service={
"services":[
{
"apikey": apikey,
"entity_type": DEF_ENTITY_TYPE,
"cbroker": CBROKER_URL,
"resource": resource
}
]
}
if attributes:
service['services'][0]['attributes'] = attributes
if static_attributes:
service['services'][0]['static_attributes'] = static_attributes
req = iotagent.post_service(service, headers)
        assert req.status_code == 201, 'ERROR: ' + req.text + "Service {} was not created correctly".format(service_name)
world.service_exists = True
return req
def create_service_with_params(self, service_name, service_path, resource={}, apikey={}, cbroker={}, entity_type={}, token={}, attributes={}, static_attributes={}, protocol={}):
world.protocol={}
headers = {}
if not service_name == 'void':
headers[CBROKER_HEADER] = service_name
if not service_path == 'void':
headers[CBROKER_PATH_HEADER] = str(service_path)
service={
"services":[
{
# "resource": resource
}
]
}
if resource:
if not resource == 'void':
if not resource == 'null':
service['services'][0]['resource'] = resource
else:
service['services'][0]['resource'] = ""
# if not apikey == 'void':
if apikey:
if not apikey == 'null':
service['services'][0]['apikey'] = apikey
else:
service['services'][0]['apikey'] = ""
if cbroker:
if not cbroker == 'null':
service['services'][0]['cbroker'] = cbroker
else:
service['services'][0]['cbroker'] = ""
if entity_type:
service['services'][0]['entity_type'] = entity_type
if token:
service['services'][0]['token'] = token
if attributes:
service['services'][0]['attributes'] = attributes
if static_attributes:
service['services'][0]['static_attributes'] = static_attributes
if protocol:
if not protocol == 'void':
if not protocol == 'null':
resource = URLTypes.get(protocol)
prot = ProtocolTypes.get(protocol)
if not prot:
prot = protocol
service['services'][0]['protocol']= [prot]
else:
resource = protocol
service['services'][0]['protocol'] = []
req = iota_manager.post_service(service, headers)
else:
req = iotagent.post_service(service, headers)
if req.status_code == 201 or req.status_code == 409:
world.remember.setdefault(service_name, {})
if service_path == 'void':
service_path='/'
# world.remember[service_name].setdefault('path', set())
# world.remember[service_name]['path'].add(service_path)
# world.remember[service_name]['path'][service_path].setdefault('resource', set())
# world.remember[service_name]['path'][service_path]['resource'].add(service_path)
world.remember[service_name].setdefault(service_path, {})
world.remember[service_name][service_path].setdefault('resource', {})
world.remember[service_name][service_path]['resource'].setdefault(resource, {})
if not apikey:
apikey = ""
world.remember[service_name][service_path]['resource'][resource].setdefault(apikey)
# print world.remember
world.service_exists = True
world.service_path_exists = True
return req
def delete_device(self, device_name, service_name, service_path={}):
headers = {}
headers[CBROKER_HEADER] = service_name
if service_path:
headers[CBROKER_PATH_HEADER] = str(service_path)
else:
headers[CBROKER_PATH_HEADER] = '/path_' + str(service_name)
req = iotagent.delete_device(device_name,headers)
        assert req.status_code == 204, 'ERROR: ' + req.text + "Device {} was not deleted correctly".format(device_name)
return req
def delete_service(self, service_name, service_path={}, resource={}, apikey={}):
params={}
headers = {}
headers[CBROKER_HEADER] = service_name
if world.protocol:
resource2 = URLTypes.get(world.protocol)
            if (world.protocol == 'IoTTT') or (world.protocol == 'IoTRepsol'):
apikey=''
else:
apikey='apikey_' + str(service_name)
params = {"resource": resource2,
"apikey": apikey}
if resource:
if apikey:
params = {"resource": resource,
"apikey": apikey
}
else:
params = {"resource": resource}
if service_path:
headers[CBROKER_PATH_HEADER] = str(service_path)
else:
headers[CBROKER_PATH_HEADER] = '/path_' + str(service_name)
print params
req = iotagent.delete_service('', headers, params)
        assert req.status_code == 204, 'ERROR: ' + req.text + "Service {} was not deleted correctly".format(service_name)
return req
def service_precond(self, service_name, protocol, attributes={}, static_attributes={}):
world.service_name = service_name
if not self.service_created(service_name):
service = self.create_service(service_name, protocol, attributes, static_attributes)
            assert service.status_code == 201, 'Error creating service {} '.format(service_name)
            print 'Service {} created '.format(service_name)
        else:
            print 'Service {} already exists '.format(service_name)
world.protocol=protocol
world.remember.setdefault(service_name, {})
world.service_exists = True
def service_with_params_precond(self, service_name, service_path, resource, apikey, cbroker={}, entity_type={}, token={}, attributes={}, static_attributes={}):
world.protocol={}
world.service_name = service_name
if not self.service_created(service_name, service_path, resource):
service = self.create_service_with_params(service_name, service_path, resource, apikey, cbroker, entity_type, token, attributes, static_attributes)
            assert service.status_code == 201, 'Error creating service {} '.format(service_name)
            print 'Service {} created '.format(service_name)
        else:
            print 'Service {} already exists '.format(service_name)
world.remember.setdefault(service_name, {})
if service_path == 'void':
service_path='/'
world.remember[service_name].setdefault(service_path, {})
world.remember[service_name][service_path].setdefault('resource', {})
world.remember[service_name][service_path]['resource'].setdefault(resource, {})
if not apikey:
apikey = ""
world.remember[service_name][service_path]['resource'][resource].setdefault(apikey)
world.service_exists = True
world.service_path_exists = True
def device_precond(self, device_id, endpoint={}, protocol={}, commands={}, entity_name={}, entity_type={}, attributes={}, static_attributes={}):
world.device_id = device_id
if not self.device_created(world.service_name, device_id):
prot = ProtocolTypes.get(protocol)
device = self.create_device(world.service_name, device_id, {}, endpoint, commands, entity_name, entity_type, attributes, static_attributes, prot)
            assert device.status_code == 201, 'Error creating device {} '.format(device_id)
            print 'Device {} created '.format(device_id)
        else:
            print 'Device {} already exists '.format(device_id)
world.remember[world.service_name].setdefault('device', set())
world.remember[world.service_name]['device'].add(device_id)
world.device_exists = True
def device_of_service_precond(self, service_name, service_path, device_id, endpoint={}, commands={}, entity_name={}, entity_type={}, attributes={}, static_attributes={}, protocol={}, manager={}):
world.device_id = device_id
if not self.device_created(service_name, device_id, service_path):
prot = ProtocolTypes.get(protocol)
device = self.create_device(world.service_name, device_id, service_path, endpoint, commands, entity_name, entity_type, attributes, static_attributes, prot, manager)
            assert device.status_code == 201, 'Error creating device {} '.format(device_id)
            print 'Device {} with path {} created '.format(device_id, service_path)
        else:
            print 'Device {} already exists '.format(device_id)
if service_path=='void':
service_path2='/'
else:
service_path2=service_path
world.remember[service_name][service_path2].setdefault('device', set())
world.remember[service_name][service_path2]['device'].add(device_id)
world.device_exists = True
def clean(self,dirty):
if world.service_exists:
for srv in dirty.keys():
if world.device_exists:
if world.service_path_exists:
for path in dirty[srv]:
if dirty[srv][path].__contains__('device'):
for device in dirty[srv][path]['device']:
req_device = self.delete_device(device,srv,path)
if req_device.status_code == 204:
                                        print 'Deleted device:{} from service:{} and path:{}'.format(device,srv,path)
                                    else:
                                        print 'Could not delete device:{} from service:{} and path:{}'.format(device,srv,path)
else:
if dirty[srv].__contains__('device'):
for device in dirty[srv]['device']:
req_device = self.delete_device(device,srv)
if req_device.status_code == 204:
                                    print 'Deleted device ' + str(device) + ' from service ' + str(srv)
                                else:
                                    print 'Could not delete device ' + str(device) + ' from service ' + str(srv)
if world.service_path_exists:
for path in dirty[srv]:
if dirty[srv][path].__contains__('resource'):
for resource in dirty[srv][path]['resource']:
for apikey in dirty[srv][path]['resource'][resource]:
req_service = self.delete_service(srv, path, resource, apikey)
if req_service.status_code == 204:
                                        print 'Deleted service:{} path:{} resource:{} and apikey:{}'.format(srv,path,resource,apikey)
                                    else:
                                        print 'Could not delete service:{} path:{} resource:{} and apikey:{}'.format(srv,path,resource,apikey)
else:
req_service = self.delete_service(srv)
if req_service.status_code == 204:
                    print 'Deleted service ' + srv
                else:
                    print 'Could not delete service ' + srv
thanhphat11/android_kernel_xiaomi_msm8996 | toolchains/aarch64-linux-gnu/libc/lib/libstdc++.so.6.0.20-gdb.py | 6 | 2599 | # -*- python -*-
# Copyright (C) 2009-2014 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/home/tcwg-buildslave/workspace/tcwg-make-release/label/tcwg-x86_64-ex40/target/aarch64-linux-gnu/_build/builds/destdir/x86_64-unknown-linux-gnu/share/gcc-4.9.4/python'
libdir = '/home/tcwg-buildslave/workspace/tcwg-make-release/label/tcwg-x86_64-ex40/target/aarch64-linux-gnu/_build/builds/destdir/x86_64-unknown-linux-gnu/aarch64-linux-gnu/lib/../lib64'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
objfile = gdb.current_objfile ().filename
dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir_ in sys.path:
sys.path.insert(0, dir_)
# Load the pretty-printers.
from libstdcxx.v6.printers import register_libstdcxx_printers
register_libstdcxx_printers (gdb.current_objfile ())
| gpl-2.0 | 6,082,350,582,808,913,000 | 42.316667 | 186 | 0.725279 | false | 3.53125 | false | false | false |
riggsd/davies | examples/compass_cumulative_mileage.py | 1 | 1086 | #!/usr/bin/env python
"""
Print cumulative survey footage over a project's history
"""
import sys
import os.path
from collections import Counter
from davies.compass import *
def print_cumulative_footage(datfiles):
total = 0.0 # miles
monthly_stats = Counter() # feet per month
cumulative_stats = {} # cumulative miles by month
for datfilename in datfiles:
print datfilename, '...'
datfile = DatFile.read(datfilename)
for survey in datfile:
month = survey.date.strftime('%Y-%m')
monthly_stats[month] += survey.included_length
print 'MONTH\tFEET\tTOTAL MILES'
for month in sorted(monthly_stats.keys()):
total += monthly_stats[month] / 5280.0
cumulative_stats[month] = total
print '%s\t%5d\t%5.1f' % (month, monthly_stats[month], total)
if __name__ == '__main__':
if len(sys.argv) < 2:
print >> sys.stderr, 'usage: %s DATFILE...' % os.path.basename(sys.argv[0])
sys.exit(2)
datfiles = sys.argv[1:]
print_cumulative_footage(datfiles)
| mit | 1,601,293,054,113,382,000 | 29.166667 | 83 | 0.619705 | false | 3.415094 | false | false | false |
Rob-van-B/CarbonFoodweb | OwnRothC/PLOT/plot.py | 1 | 3692 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ploty.py
#
# Copyright 2016 rob <rob@Novu>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import numpy as np
import matplotlib
from datetime import datetime
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from pylab import text
import wikipedia
from pylab import savefig, rcParams
'''some plot globals'''
bbox = dict(boxstyle="round,pad=0.3", fc="white", ec="k", lw=0.1)
matplotlib.style.use("ggplot")
rcParams.update({'figure.autolayout': True})
matplotlib.rc('font', family='DejaVu Sans')
def main(data_list):
"""WorldClim.org now with 20km resolution / 1 km would be possible """
fig = plt.figure(figsize = (15,7), facecolor="0.92", edgecolor='k')
#ax = fig.add_subplot(111)
font = 10
Title = ("Improved RothC SOC Turnover Model // Baseline // Ferralsol Novo Progresseo")
fig.suptitle(Title, fontsize=11, y=1 )
ax1 = plt.subplot2grid((2,4), (0, 0), rowspan=1, colspan=1)
ax2 = plt.subplot2grid((2,4), (1, 0), rowspan=1, colspan=1)
ax3 = plt.subplot2grid((2,4), (0, 1), rowspan=1, colspan=1)
ax4 = plt.subplot2grid((2,4), (1, 1), rowspan=1, colspan=1)
ax5 = plt.subplot2grid((2,4), (0, 2), rowspan=1, colspan=2)
ax6 = plt.subplot2grid((2,4), (1, 2), rowspan=1, colspan=2)
plt.subplots_adjust(bottom=0.85)
#make empty links to append the y data
rpm_list = []
dpm_list = []
bio_list = []
hum_list = []
co2_list = []
x_list = []
# walk through the data and append the data
value = "Y_"
for i in data_list:
if i["pool"] == "rpm":
rpm_list.append(i[value])
if i["pool"] == "dpm":
dpm_list.append(i[value])
if i["pool"] == "bio":
bio_list.append(i[value])
if i["pool"] == "hum":
hum_list.append(i[value])
if i["pool"] == "co2":
co2_list.append(i[value])
x_list.append(np.datetime64(i["datetime"]).astype(datetime))
# sump up all lists to get total SOC
full_list = [sum(x) for x in zip(rpm_list, dpm_list, hum_list, bio_list)]
ax1.plot(x_list, rpm_list, "#616161")
ax1.set_ylabel((r'Mg SOC ha$^-$' + r'$^1$' ))
ax1.set_title("RPM pool", fontsize = font)
ax2.plot(x_list , dpm_list, "#424242")
ax2.set_title("DPM pool", fontsize = font)
ax2.set_ylabel((r'Mg SOC ha$^-$' + r'$^1$' ))
ax2.set_xlabel("year")
ax3.plot(x_list, hum_list, "#424242")
ax3.set_title("HUM pool", fontsize = font)
ax4.plot(x_list, bio_list, "#616161")
ax4.set_title("BIO pool", fontsize = font)
ax4.set_xlabel("year")
ax5.plot(x_list, full_list, "#616161")
ax5.set_title("SOC combined", fontsize = font)
ax6.plot(x_list, co2_list, "#424242")
ax6.set_title("C02", fontsize = font)
ax6.set_xlabel("year")
plt.show()
savefig("fig.png", bbox_inches="tight")
return 0
if __name__ == '__main__':
main()
| gpl-2.0 | -1,941,707,459,599,803,100 | 30.288136 | 94 | 0.616197 | false | 2.999188 | false | false | false |
ifduyue/sentry | src/sentry/api/endpoints/organization_release_file_details.py | 4 | 5867 | from __future__ import absolute_import
import posixpath
from rest_framework import serializers
from rest_framework.exceptions import PermissionDenied
from rest_framework.response import Response
from sentry.api.base import DocSection
from sentry.api.bases.organization import OrganizationReleasesBaseEndpoint
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.serializers import serialize
from sentry.models import Release, ReleaseFile
try:
from django.http import (CompatibleStreamingHttpResponse as StreamingHttpResponse)
except ImportError:
from django.http import StreamingHttpResponse
class ReleaseFileSerializer(serializers.Serializer):
name = serializers.CharField(max_length=200, required=True)
class OrganizationReleaseFileDetailsEndpoint(OrganizationReleasesBaseEndpoint):
doc_section = DocSection.RELEASES
def download(self, releasefile):
file = releasefile.file
fp = file.getfile()
response = StreamingHttpResponse(
iter(lambda: fp.read(4096), b''),
content_type=file.headers.get('content-type', 'application/octet-stream'),
)
response['Content-Length'] = file.size
response['Content-Disposition'] = 'attachment; filename="%s"' % posixpath.basename(
" ".join(releasefile.name.split())
)
return response
def get(self, request, organization, version, file_id):
"""
Retrieve an Organization Release's File
```````````````````````````````````````
Return details on an individual file within a release. This does
not actually return the contents of the file, just the associated
metadata.
:pparam string organization_slug: the slug of the organization the
release belongs to.
:pparam string version: the version identifier of the release.
:pparam string file_id: the ID of the file to retrieve.
:auth: required
"""
try:
release = Release.objects.get(
organization_id=organization.id,
version=version,
)
except Release.DoesNotExist:
raise ResourceDoesNotExist
if not self.has_release_permission(request, organization, release):
raise PermissionDenied
try:
releasefile = ReleaseFile.objects.get(
release=release,
id=file_id,
)
except ReleaseFile.DoesNotExist:
raise ResourceDoesNotExist
download_requested = request.GET.get('download') is not None
if download_requested and (request.access.has_scope('project:write')):
return self.download(releasefile)
elif download_requested:
return Response(status=403)
return Response(serialize(releasefile, request.user))
def put(self, request, organization, version, file_id):
"""
Update an Organization Release's File
`````````````````````````````````````
Update metadata of an existing file. Currently only the name of
the file can be changed.
:pparam string organization_slug: the slug of the organization the
release belongs to.
:pparam string version: the version identifier of the release.
:pparam string file_id: the ID of the file to update.
:param string name: the new name of the file.
:param string dist: the name of the dist.
:auth: required
"""
try:
release = Release.objects.get(
organization_id=organization.id,
version=version,
)
except Release.DoesNotExist:
raise ResourceDoesNotExist
if not self.has_release_permission(request, organization, release):
raise PermissionDenied
try:
releasefile = ReleaseFile.objects.get(
release=release,
id=file_id,
)
except ReleaseFile.DoesNotExist:
raise ResourceDoesNotExist
serializer = ReleaseFileSerializer(data=request.DATA)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
result = serializer.object
releasefile.update(
name=result['name'],
)
return Response(serialize(releasefile, request.user))
def delete(self, request, organization, version, file_id):
"""
Delete an Organization Release's File
`````````````````````````````````````
Permanently remove a file from a release.
This will also remove the physical file from storage.
:pparam string organization_slug: the slug of the organization the
release belongs to.
:pparam string version: the version identifier of the release.
:pparam string file_id: the ID of the file to delete.
:auth: required
"""
try:
release = Release.objects.get(
organization_id=organization.id,
version=version,
)
except Release.DoesNotExist:
raise ResourceDoesNotExist
if not self.has_release_permission(request, organization, release):
raise PermissionDenied
try:
releasefile = ReleaseFile.objects.get(
release=release,
id=file_id,
)
except ReleaseFile.DoesNotExist:
raise ResourceDoesNotExist
file = releasefile.file
# TODO(dcramer): this doesnt handle a failure from file.deletefile() to
# the actual deletion of the db row
releasefile.delete()
file.delete()
return Response(status=204)
| bsd-3-clause | 8,733,847,621,237,979,000 | 33.715976 | 91 | 0.613261 | false | 5.070873 | false | false | false |
seecr/meresco-components | test/log/logfileservertest.py | 1 | 4301 | # -*- coding: utf-8 -*-
## begin license ##
#
# "Meresco Components" are components to build searchengines, repositories
# and archives, based on "Meresco Core".
#
# Copyright (C) 2006-2011 Seek You Too (CQ2) http://www.cq2.nl
# Copyright (C) 2006-2011, 2020 Stichting Kennisnet https://www.kennisnet.nl
# Copyright (C) 2012, 2020 Seecr (Seek You Too B.V.) https://seecr.nl
# Copyright (C) 2020 Data Archiving and Network Services https://dans.knaw.nl
# Copyright (C) 2020 SURF https://www.surf.nl
# Copyright (C) 2020 The Netherlands Institute for Sound and Vision https://beeldengeluid.nl
#
# This file is part of "Meresco Components"
#
# "Meresco Components" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Meresco Components" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Meresco Components"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
from seecr.test import SeecrTestCase, CallTrace
from os.path import join
from os import mkdir, listdir
from meresco.components.http import utils as httputils
from meresco.components.http.utils import CRLF, notFoundHtml
from meresco.components.log import LogFileServer, DirectoryLog
class LogFileServerTest(SeecrTestCase):
def setUp(self):
SeecrTestCase.setUp(self)
self.logDir = join(self.tempdir, 'log')
directoryLog = DirectoryLog(self.logDir)
self.qlfs = LogFileServer("Fancy <name>", directoryLog, basepath='/log')
def testGenerateEmptyHtmlFileLinkListing(self):
headers, body = "".join(self.qlfs.handleRequest(path="/log")).split(CRLF+CRLF)
self.assertEqual('HTTP/1.0 200 OK\r\nContent-Type: text/html; charset=utf-8', headers)
self.assertTrue(body.startswith('<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">\n<html>'), body)
self.assertTrue(body.rfind('</body>\n</html>') != -1, body)
self.assertTrue('<title>"Fancy <name>" Logging</title>' in body, body)
self.assertTrue('Logging - logfile listing' in body, body)
def testEmptyDirectoryEmptyHtmlResult(self):
headers, body = "".join(self.qlfs.handleRequest(path="/")).split(CRLF+CRLF)
self.assertFalse('<li>' in body)
def testDirectoryHtmlResult(self):
filename = '2009-11-10-afile.1'
open(join(self.logDir, filename), 'w').close()
headers, body = "".join(self.qlfs.handleRequest(path="/log")).split(CRLF+CRLF)
self.assertTrue('<li>' in body)
self.assertTrue('<a href="/log/%s"' % filename in body, body)
filename2 = '2009-11-22-yet_another_file.txt'
open(join(self.logDir, filename2), 'w').close()
headers, body = "".join(self.qlfs.handleRequest(path="/log/")).split(CRLF+CRLF)
self.assertTrue('<a href="/log/%s"' % filename in body, body)
self.assertTrue('<a href="/log/%s"' % filename2 in body, body)
self.assertTrue(body.index(filename) > body.index(filename2), 'The files should be sorted.')
def testPathNotSpecifiedAsIndexEffectivelyUsesMerescoFileServer(self):
headers, body = "".join(self.qlfs.handleRequest(path="/thisIsWrongMister")).split(CRLF+CRLF)
self.assertTrue("HTTP/1.0 404 Not Found" in headers, headers)
def testPathIsASubDir(self):
aSubDirCalled = "subdir"
mkdir(join(self.logDir, aSubDirCalled))
headers, body = "".join(self.qlfs.handleRequest(path="/%s" % aSubDirCalled)).split(CRLF+CRLF)
self.assertTrue("HTTP/1.0 404 Not Found" in headers, headers)
def testGetNonExistingLogFile(self):
headers, body = "".join(self.qlfs.handleRequest(path="/log/thisIsWrongMister")).split(CRLF+CRLF)
self.assertTrue("HTTP/1.0 404 Not Found" in headers, headers)
| gpl-2.0 | -4,408,019,703,212,857,000 | 45.247312 | 108 | 0.683562 | false | 3.513889 | true | false | false |
selentd/pythontools | pytools/src/IndexEval/evalresult.py | 1 | 8460 | '''
Created on 21.10.2015
@author: selen00r
'''
class ExcludeTransaction:
'''
Base class for strategies to exclude transactions while calculating results from the transaction history
'''
def exclude(self, idxData):
return False
class ExcludeAvg200Low(ExcludeTransaction):
def __init__(self, offset = 0.0):
self.offset = offset
def exclude(self, transactionResult):
if transactionResult.indexBuy.mean200 > 0:
checkValue = transactionResult.indexBuy.mean200 + (transactionResult.indexBuy.mean200 * self.offset)
return (transactionResult.indexBuy.close < checkValue)
else:
return True
class TransactionResultPrinter:
'''
Base class for printing of transactions
'''
def printResult(self, transactionResult, result, resultEuro, hasResult = False ):
pass
class ResultCalculator:
'''
Base class to calculate a transaction result
'''
def __init__(self):
self.total = 0.0
def calcResult(self, buy, sell):
result = (float(sell)/float(buy))-1.0
self.total += result
return result
def reset(self):
self.total = 0
def getTotal(self):
return self.total
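# A quick worked example of the base calculator (values are illustrative):
#   >>> calc = ResultCalculator()
#   >>> round(calc.calcResult(100.0, 110.0), 4)   # buy at 100, sell at 110
#   0.1
#   >>> round(calc.getTotal(), 4)
#   0.1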
class ResultCalculatorPut(ResultCalculator):
def __init__(self):
ResultCalculator.__init__(self)
def calcResult(self, buy, sell):
return ResultCalculator.calcResult(self, buy, sell) * (-1.0)
class ResultCalculatorEuro(ResultCalculator):
'''
Base class to calculate a transaction result in Euros
'''
def __init__(self, invest, fixInvest = True, maxInvest = 0.0):
ResultCalculator.__init__(self)
self.invest = invest
self.total = invest
self.totalInvest = invest
self.fixInvest = fixInvest
self.maxInvest = maxInvest
def _checkTotal(self):
if self.total < 0:
self.total = 0
if self.total < self.invest:
self.totalInvest = self.totalInvest +(self.invest - self.total)
self.total = self.invest
def calcResult(self, buy, sell):
result = ResultCalculator().calcResult(buy, sell)
if self.fixInvest:
result *= self.invest
else:
result *= self.total
self.total += result
self._checkTotal()
return result
def reset(self):
self.total = self.invest
self.totalInvest = self.invest
def getTotalInvest(self):
return self.totalInvest
class ResultCalculatorEuroPut(ResultCalculatorEuro):
def __init__(self, invest, fixInvest = True, maxInvest = 0.0):
ResultCalculatorEuro.__init__(self, invest, fixInvest, maxInvest)
def calcResult(self, buy, sell):
        return ResultCalculatorEuro.calcResult(self, buy, sell) * (-1.0)
class ResultCalculatorEuroLeverage(ResultCalculatorEuro):
def __init__(self, distance, invest, fixInvest = True, maxInvest = 0.0):
ResultCalculatorEuro.__init__(self, invest, fixInvest, maxInvest)
self.distance = distance
self.k = 1.1302864364
self.d = 0.2029128054
def calcResult(self, buy, sell):
result = ResultCalculator().calcResult(buy, sell)
startCalc = (self.k * (self.distance)) + self.d
actCalc = (self.k * ((self.distance) + (result*100.0))) + self.d
percCalc = (actCalc / startCalc)-1
if self.fixInvest:
result = self.invest * percCalc
else:
newInvest = self.total
if newInvest < self.invest:
newInvest = self.invest
if (self.maxInvest > 0.0) and (newInvest > self.maxInvest):
result = (self.maxInvest) * percCalc
else:
result = newInvest * percCalc
self.total += result
self._checkTotal()
return result
class ResultCalculatorEuroLeveragePut(ResultCalculatorEuroLeverage):
def __init__(self, distance, invest, fixInvest = True, maxInvest = 0.0):
ResultCalculatorEuroLeverage.__init__(self, distance, invest, fixInvest, maxInvest)
def calcResult(self, buy, sell):
return ResultCalculatorEuroLeverage.calcResult(self, buy, sell) * (-1.0)
class EvalResult:
'''
Base class for transaction result evaluation
'''
def __init__(self, name, invest, fixInvest = True):
self.name = name
self.winCount = 0
self.lossCount = 0
self.knockOutCount = 0
self.maxWin = 0.0
self.maxLoss = 0.0
self.sumWin = 0.0
self.sumLoss = 0.0
self.maxWinEuro = 0.0
self.maxLossEuro = 0.0
self.sumWinEuro = 0.0
self.sumLossEuro = 0.0
self.invest = invest
self.fixInvest = fixInvest
self.checkExclude = ExcludeTransaction()
self.resultCalculator = ResultCalculator()
self.resultCalculatorEuro = ResultCalculatorEuro( self.invest, self.fixInvest )
def _updateWin(self, result, resultEuro):
self.winCount += 1
self.sumWin += result
if (self.maxWin < result):
self.maxWin = result
if (self.maxWinEuro < resultEuro):
self.maxWinEuro = resultEuro
self.sumWinEuro += resultEuro
def _updateLoss(self, result, resultEuro):
self.lossCount += 1
self.sumLoss += result
if (self.maxLoss > result):
self.maxLoss = result
if (self.maxLossEuro > resultEuro):
self.maxLossEuro = resultEuro
self.sumLossEuro += resultEuro
def setExcludeChecker(self, checkExclude):
self.checkExclude = checkExclude
def setResultCalculator(self, calculator):
self.resultCalculator = calculator
def setResultCalculatorEuro(self, calculator):
self.resultCalculatorEuro = calculator
def getTotalCount(self):
return (self.winCount + self.lossCount)
def getTotalResult(self):
return self.resultCalculator.getTotal()
def getTotalResultEuro(self):
return self.resultCalculatorEuro.getTotal()
def getTotalInvestEuro(self):
return self.resultCalculatorEuro.getTotalInvest()
def getWinRatio(self):
if (self.getTotalCount() > 0):
return (float(self.winCount)/float(self.getTotalCount()))
else:
return 0.0
def getMeanWin(self):
if (self.winCount > 0):
return (self.sumWin / float(self.winCount))
else:
return 0
def getMeanLoss(self):
if (self.lossCount > 0):
return (self.sumLoss / float(self.lossCount))
else:
return 0
def getWinLoss(self, buy, sell):
return self.resultCalculator.calcResult(buy, sell)
def getWinLossEuro(self, buy, sell):
return self.resultCalculatorEuro.calcResult(buy, sell)
def evaluateIndex(self, transactionResultHistory, resultPrinter = None ):
for transactionResult in transactionResultHistory:
self.evaluate( transactionResult, resultPrinter )
def _updateResult(self, transactionResult, result, resultEuro ):
if result < 0.0:
self._updateLoss(result, resultEuro)
else:
self._updateWin(result, resultEuro)
def evaluate(self, transactionResult, resultPrinter = None):
hasResult = False
result = 0.0
resultEuro = 0.0
if not (self.checkExclude.exclude(transactionResult)):
indexSell = transactionResult.indexSell.close
if transactionResult.knockOut != 0.0:
indexKnockOut = transactionResult.indexBuy.close + (transactionResult.indexBuy.close * transactionResult.knockOut)
if transactionResult.knockOut < 0:
if transactionResult.getLowValue() < indexKnockOut:
indexSell = indexKnockOut
else:
if transactionResult.getHighValue() > indexKnockOut:
indexSell = indexKnockOut
result = self.getWinLoss( transactionResult.indexBuy.close, indexSell)
resultEuro = self.getWinLossEuro(transactionResult.indexBuy.close, indexSell)
self._updateResult( transactionResult, result, resultEuro)
hasResult = True
if resultPrinter:
resultPrinter.printResult( transactionResult, result, resultEuro, hasResult )
| apache-2.0 | 7,615,614,308,887,554,000 | 30.449814 | 130 | 0.623759 | false | 3.900415 | false | false | false |
ciarams87/CloudTools | CloudTools.py | 1 | 2133 | #!/usr/bin/env python
"""
This is a console-driven menu program for management of some AWS and OpenStack cloud services
"""
import time
from Menu_Option_1 import menu_1
from Menu_Option_2 import menu_2
from Menu_Option_3 import menu_3
from Menu_Option_4 import menu_4
from Menu_Option_5 import menu_5
def main():
menu1 = menu_1()
menu2 = menu_2()
menu3 = menu_3()
menu4 = menu_4()
menu5 = menu_5()
# loop_lvl_1 displays the initial menu options
loop_lvl_1 = 1
while loop_lvl_1 == 1:
# print initial menu options
print "\nWelcome to CloudTools. \nPlease make a selection by " \
"entering the number of your chosen menu item below.\n" \
"Your options are:\n" \
"1) Compute \n2) Storage \n3) CloudWatch Monitoring \n" \
"4) AutoScaling \n5) CloudTrail \n6) Quit CloudTools"
choice_lvl_1 = input("Choose your option: \n")
if choice_lvl_1 == 1:
menu1.main_menu()
elif choice_lvl_1 == 2:
menu2.main_menu()
elif choice_lvl_1 == 3:
menu3.main_menu()
elif choice_lvl_1 == 4:
menu4.main_menu()
elif choice_lvl_1 == 5:
menu5.main_menu()
elif choice_lvl_1 == 6:
loop_lvl_1 = 0
else:
print "Please enter number between 1 and 6 only"
time.sleep(2)
print "Thank you for using CloudTools, Goodbye."
main()
"""
lines 6 - 11: Import all required modules
line 14: Create the main method, i.e. the application
lines 16 - 20: instantiate the menu option objects
line 23: create loop variable
lines 24 - 31: while the loop is set to 1; print the menu to screen
lines 32 - 46: Reads in user input and calls relevant menu object
lines 48 - 49: If user hits 6, exit the loop and end the application
lines 51 - 53: If user enters a character not stated in the menu, print
error message
line 55: prints goodbye message when application is ended
line 58: calls the main method
"""
| gpl-3.0 | -3,578,840,255,257,954,000 | 26.065789 | 93 | 0.596812 | false | 3.652397 | false | false | false |
itfoundry/hindkit | lib/hindkit/__init__.py | 1 | 2210 | import os, sys, functools, shutil, errno
def relative_to_interpreter(path):
return os.path.join(os.path.dirname(sys.executable), path)
def relative_to_package(path):
return os.path.join(__path__[0], path)
def relative_to_cwd(path):
return os.path.join(os.getcwd(), path)
def memoize(obj):
memoized = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
k = str(args) + str(kwargs)
if k not in memoized:
memoized[k] = obj(*args, **kwargs)
return memoized[k]
return memoizer
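# Small illustration (hypothetical function, not part of this module): the
# second call below is answered from the cache instead of re-running the body.
#   >>> @memoize
#   ... def slow_join(*parts):
#   ...     return os.path.join(*parts)
#   >>> slow_join('a', 'b') == slow_join('a', 'b')
#   True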
def remove(path):
try:
os.remove(path)
except OSError:
if not os.path.exists(path):
pass
elif os.path.isdir(path):
try:
shutil.rmtree(path)
except OSError as e:
if e.errno == errno.ENOTEMPTY:
shutil.rmtree(path)
else:
raise
else:
raise
def makedirs(path):
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
def copy(src, dst):
remove(dst)
if os.path.isdir(src):
shutil.copytree(src, dst)
else:
shutil.copy(src, dst)
def fallback(*candidates):
for i in candidates:
if i is not None:
return i
def remove_illegal_chars_for_postscript_name_part(name):
"""
    Conforms to The Compact Font Format Specification (version 1.0), section 7 "Name INDEX".
    Also removes "-".
"""
return name.translate({
ord(i): None for i in "[](){}<>/%\u0000\u0020\u0009\u000D\u000A\u000C-"
})
from hindkit import constants
from hindkit import filters
from hindkit.objects.base import BaseFile
from hindkit.objects.family import Family, DesignSpace, Fmndb
from hindkit.objects.font import Master, Style, Product
from hindkit.objects.glyphdata import GlyphData, Goadb
from hindkit.objects.client import Client
from hindkit.objects.feature import FeatureClasses, FeatureTables, FeatureLanguagesystems, FeatureGSUB, FeatureGPOS, FeatureKern, FeatureMark, FeatureOS2Extension, FeatureNameExtension, FeatureMatches, FeatureReferences
from hindkit.objects.project import Project
| mit | -2,916,077,420,419,986,000 | 28.078947 | 219 | 0.639819 | false | 3.646865 | false | false | false |
mikefullerton/Piecemeal-Scripts | Scripts/utils/repo.py | 1 | 2971 | #!/usr/bin/python
# begin boilerplate
import sys
import os
scriptName = os.path.basename(sys.argv[0])
scriptPath = os.path.dirname(sys.argv[0])
sharedPath = os.path.join(scriptPath, "../shared/")
sys.path.append(os.path.abspath(sharedPath))
import Pieces
import GitHelper
import Scripts
import Utils
#end boilerplat
class Script(Scripts.Script):
def helpString(self):
return "fishlamp repo <init or update> <repo name if init>\nIf current repo is not a FishLamp repo, this inits the current repo to be a FishLamp project. Otherwise it just updates the submodules\nAlso and adds fishlamp-core submodule if it's not added"
def update(self) :
GitHelper.confirmGitRoot();
Pieces.createFishLampFolderIfNeeded();
# GitHelper.addSubmodules(Pieces.defaultPieces(), Pieces.folderName());
def init(self) :
parm = self.parameterAtIndex(2, "Expecting name of path as first argument");
        if os.path.exists(parm):
Utils.printError(parm + " already exists");
sys.exit(1);
        os.makedirs(parm)
        os.chdir(parm)
if GitHelper.isGitRepo() :
Utils.printError("git repo already exists");
sys.exit(1);
GitHelper.init();
self.update();
def run(self):
mode = self.parameterAtIndex(1, "Expecting either update or init as first parameter");
if mode == "update":
self.update();
elif mode == "init":
self.init();
else:
Utils.printError("expecting mode or update");
Script().run();
"""
init repo
#!/bin/bash
function usage() {
echo "if current repo is not a FishLamp repo, this inits the current repo to be a FishLamp project. Otherwise it just updates the submodules".
echo "Also and adds fishlamp-core submodule if it's not added"
}
if [ "$1" == "--help" ]; then
usage
exit 0;
fi
set -e
MY_PATH="`dirname \"$0\"`"
MY_PATH="`( cd \"$MY_PATH\" && pwd )`"
fishlamp="FishLamp"
fishlamp_dir="`pwd`/$fishlamp"
if [ ! -d ".git" -a ! -f ".git" ]; then
echo "# git folder not found - please run in root of your repository."
exit 1
fi
if [ ! -d "$fishlamp_dir" ]; then
mkdir "$fishlamp_dir"
fi
declare -a repo_list=( "fishlamp-core" )
git submodule update --init --recursive
for submodule in "${repo_list[@]}"; do
fishlamp update-submodule "$submodule"
done
"""
"""
#!/bin/sh
# fishlamp-new-repo.sh
# fishlamp-install
#
# Created by Mike Fullerton on 7/27/13.
#
function usage() {
echo "creates a new folder for the repo and then inits the new repo to be a FishLamp project."
}
if [ "$1" == "--help" ]; then
usage
exit 0;
fi
set -e
new_repo="$1"
if [ -d "$new_repo" ]; then
echo "$new_repo already exists"
exit 1;
fi
mkdir -p "$new_repo"
cd "$new_repo"
if [ -d ".git" -o -f ".git" ]; then
echo "# git repo already created"
exit 1
fi
git init
fishlamp init-repo
""" | mit | -900,394,647,457,741,800 | 19.783217 | 260 | 0.628745 | false | 3.180942 | false | false | false |
rcatwood/Savu | savu/plugins/filters/raven_filter.py | 2 | 3483 | # Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: raven_filter
:platform: Unix
:synopsis: A plugin remove ring artefacts
.. moduleauthor:: Nicola Wadeson <[email protected]>
"""
import logging
import numpy as np
import pyfftw
from savu.plugins.base_filter import BaseFilter
from savu.plugins.driver.cpu_plugin import CpuPlugin
from savu.plugins.utils import register_plugin
@register_plugin
class RavenFilter(BaseFilter, CpuPlugin):
"""
Ring artefact removal method
:param uvalue: To define the shape of filter. Default: 30.
:param vvalue: How many rows to be applied the filter. Default: 1.
:param nvalue: To define the shape of filter. Default: 8.
:param padFT: Padding for Fourier transform. Default: 20.
"""
def __init__(self):
logging.debug("Starting Raven Filter")
super(RavenFilter, self).__init__("RavenFilter")
self.count=0
def set_filter_padding(self, in_data, out_data):
self.pad = self.parameters['padFT']
# don't currently have functionality to pad top/bottom but not
# right/left so padding everywhere for now
in_data[0].padding = {'pad_frame_edges': self.pad}
out_data[0].padding = {'pad_frame_edges': self.pad}
def pre_process(self):
in_pData = self.get_plugin_in_datasets()[0]
sino_shape = in_pData.get_shape()
width1 = sino_shape[1] + 2*self.pad
height1 = sino_shape[0] + 2*self.pad
v0 = np.abs(self.parameters['vvalue'])
u0 = np.abs(self.parameters['uvalue'])
n = np.abs(self.parameters['nvalue'])
# Create filter
centerx = np.ceil(width1/2.0)-1.0
centery = np.int16(np.ceil(height1/2.0)-1)
self.row1 = centery - v0
self.row2 = centery + v0+1
listx = np.arange(width1)-centerx
filtershape = 1.0/(1.0 + np.power(listx/u0, 2*n))
filtershapepad2d = np.zeros((self.row2 - self.row1, filtershape.size))
filtershapepad2d[:] = np.float64(filtershape)
self.filtercomplex = filtershapepad2d + filtershapepad2d*1j
a = pyfftw.n_byte_align_empty((height1, width1), 16, 'complex128')
self.fft_object = pyfftw.FFTW(a, a, axes=(0, 1))
self.ifft_object = pyfftw.FFTW(a, a, axes=(0, 1),
direction='FFTW_BACKWARD')
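    # In effect: for the rows [row1, row2) around the centre of the shifted
    # spectrum (near-zero frequency along the projection-angle axis) the
    # profile 1 / (1 + (u/u0)^(2n)) keeps |u| well below u0 and suppresses
    # larger |u| along the horizontal frequency axis, removing the stripe
    # energy that turns into ring artefacts after reconstruction;
    # filter_frames() below applies this per sinogram.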
def filter_frames(self, data):
if(self.count%25==0):
logging.debug( "raven...%i"%self.count)
data2d = data[0]
sino2 = np.fft.fftshift(self.fft_object(data2d))
sino2[self.row1:self.row2] = \
sino2[self.row1:self.row2] * self.filtercomplex
sino3 = np.fft.ifftshift(sino2)
sino4 = self.ifft_object(sino3).real
sino4 = sino4[:, np.newaxis, :]
self.count+=1
return sino4
def get_plugin_pattern(self):
return 'SINOGRAM'
def get_max_frames(self):
return 1
| gpl-3.0 | 6,595,525,374,976,213,000 | 34.540816 | 78 | 0.645134 | false | 3.298295 | false | false | false |
manhtuhtk/mlpy | mlpy/dimred.py | 1 | 24673 | ## This code is written by Davide Albanese, <[email protected]>.
## (C) 2011 mlpy Developers.
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import scipy.linalg as spla
from ridge import ridge_base
from ols import ols_base
from kernel_class import *
import sys
if sys.version >= '3':
from . import kernel
else:
import kernel
__all__ = ['LDA', 'SRDA', 'KFDA', 'PCA', 'PCAFast', 'KPCA']
def proj(u, v):
"""(<v, u> / <u, u>) u
"""
return (np.dot(v, u) / np.dot(u, u)) * u
def gso(v, norm=False):
"""Gram-Schmidt orthogonalization.
Vectors v_1, ..., v_k are stored by rows.
"""
for j in range(v.shape[0]):
for i in range(j):
v[j] = v[j] - proj(v[i], v[j])
if norm:
v[j] /= np.linalg.norm(v[j])
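# Tiny illustration of gso() (values chosen ad hoc): after the call the rows
# are mutually orthogonal (and unit-length when norm=True).
#   >>> v = np.array([[1., 1.], [1., 0.]])
#   >>> gso(v, norm=True)
#   >>> round(np.dot(v[0], v[1]), 12)
#   0.0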
def lda(xarr, yarr):
"""Linear Discriminant Analysis.
Returns the transformation matrix `coeff` (P, C-1),
where `x` is a matrix (N,P) and C is the number of
classes. Each column of `x` represents a variable,
while the rows contain observations. Each column of
`coeff` contains coefficients for one transformation
vector.
Sample(s) can be embedded into the C-1 dimensional space
by z = x coeff (z = np.dot(x, coeff)).
:Parameters:
x : 2d array_like object (N, P)
data matrix
y : 1d array_like object integer (N)
class labels
:Returns:
       coeff: 2d numpy array (P, C-1)
transformation matrix.
"""
n, p = xarr.shape[0], xarr.shape[1]
labels = np.unique(yarr)
sw = np.zeros((p, p), dtype=np.float)
for i in labels:
idx = np.where(yarr==i)[0]
sw += np.cov(xarr[idx], rowvar=0) * \
(idx.shape[0] - 1)
st = np.cov(xarr, rowvar=0) * (n - 1)
sb = st - sw
evals, evecs = spla.eig(sb, sw, overwrite_a=True,
overwrite_b=True)
idx = np.argsort(evals)[::-1]
evecs = evecs[:, idx]
evecs = evecs[:, :labels.shape[0]-1]
return evecs
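# Example embedding with the function above (x, y are any training matrix and
# integer labels; their shapes are the only assumption here):
#   >>> coeff = lda(x, y)                 # (P, C-1)
#   >>> z = np.dot(x, coeff)              # samples embedded in C-1 dimensions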
def srda(xarr, yarr, alpha):
"""Spectral Regression Discriminant Analysis.
Returns the (P, C-1) transformation matrix, where
`x` is a matrix (N,P) and C is the number of classes.
Each column of `x` represents a variable, while the
rows contain observations. `x` must be centered
(subtracting the empirical mean vector from each column
of`x`).
Sample(s) can be embedded into the C-1 dimensional space
by z = x coeff (z = np.dot(x, coeff)).
:Parameters:
x : 2d array_like object
training data (N, P)
y : 1d array_like object integer
target values (N)
alpha : float (>=0)
regularization parameter
:Returns:
coeff : 2d numpy array (P, C-1)
tranformation matrix
"""
# Point 1 in section 4.2
yu = np.unique(yarr)
yk = np.zeros((yu.shape[0]+1, yarr.shape[0]), dtype=np.float)
yk[0] = 1.
for i in range(1, yk.shape[0]):
yk[i][yarr==yu[i-1]] = 1.
gso(yk, norm=False) # orthogonalize yk
yk = yk[1:-1]
# Point 2 in section 4.2
ak = np.empty((yk.shape[0], xarr.shape[1]), dtype=np.float)
for i in range(yk.shape[0]):
ak[i] = ridge_base(xarr, yk[i], alpha)
return ak.T
def pca(xarr, method='svd'):
"""Principal Component Analysis.
Returns the principal component coefficients `coeff`(K,K)
and the corresponding eigenvalues (K) of the covariance
matrix of `x` (N,P) sorted by decreasing eigenvalue, where
K=min(N,P). Each column of `x` represents a variable,
while the rows contain observations. Each column of `coeff`
contains coefficients for one principal component.
Sample(s) can be embedded into the M (<=K) dimensional
space by z = x coeff_M (z = np.dot(x, coeff[:, :M])).
:Parameters:
x : 2d numpy array (N, P)
data matrix
method : str
'svd' or 'cov'
:Returns:
coeff, evals : 2d numpy array (K, K), 1d numpy array (K)
principal component coefficients (eigenvectors of
the covariance matrix of x) and eigenvalues sorted by
decreasing eigenvalue.
"""
n, p = xarr.shape
if method == 'svd':
x_h = (xarr - np.mean(xarr, axis=0)) / np.sqrt(n - 1)
u, s, v = np.linalg.svd(x_h.T, full_matrices=False)
evecs = u
evals = s**2
elif method == 'cov':
k = np.min((n, p))
C = np.cov(xarr, rowvar=0)
evals, evecs = np.linalg.eigh(C)
idx = np.argsort(evals)[::-1]
evecs = evecs[:, idx]
evals = evals[idx]
evecs = evecs[:, :k]
evals = evals[:k]
else:
raise ValueError("method must be 'svd' or 'cov'")
return evecs, evals
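# Example projection onto the first two principal axes (x is any (N, P)
# matrix; following the embedding convention given in the docstring above):
#   >>> coeff, evals = pca(x, method='svd')
#   >>> z = np.dot(x, coeff[:, :2])       # (N, 2) scores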
def pca_fast(xarr, m, eps):
"""Fast principal component analysis using the fixed-point
algorithm.
Returns the first `m` principal component coefficients
`coeff` (P, M). Each column of `x` represents a variable,
while the rows contain observations. Each column of `coeff`
contains coefficients for one principal component.
Sample(s) can be embedded into the m (<=P) dimensional space
by z = x coeff (z = np.dot(X, coeff)).
:Parameters:
x : 2d numpy array (N, P)
data matrix
m : integer (0 < m <= P)
the number of principal axes or eigenvectors required
eps : float (> 0)
tolerance error
:Returns:
       coeff : 2d numpy array (P, M)
principal component coefficients
"""
m = int(m)
np.random.seed(0)
evecs = np.random.rand(m, xarr.shape[1])
C = np.cov(xarr, rowvar=0)
for i in range(0, m):
while True:
evecs_old = np.copy(evecs[i])
evecs[i] = np.dot(C, evecs[i])
# Gram-Schmidt orthogonalization
a = np.dot(evecs[i], evecs[:i].T).reshape(-1, 1)
b = a * evecs[:i]
evecs[i] -= np.sum(b, axis=0) # if i=0 sum is 0
# Normalization
evecs[i] = evecs[i] / np.linalg.norm(evecs[i])
# convergence criteria
if np.abs(np.dot(evecs[i], evecs_old) - 1) < eps:
break
return evecs.T
def lda_fast(xarr, yarr):
"""Fast implementation of Linear Discriminant Analysis.
Returns the (P, C-1) transformation matrix, where
`x` is a centered matrix (N,P) and C is the number of classes.
Each column of `x` represents a variable, while the
rows contain observations. `x` must be centered
(subtracting the empirical mean vector from each column
of`x`).
:Parameters:
x : 2d array_like object
training data (N, P)
y : 1d array_like object integer
target values (N)
:Returns:
A : 2d numpy array (P, C-1)
tranformation matrix
"""
yu = np.unique(yarr)
yk = np.zeros((yu.shape[0]+1, yarr.shape[0]), dtype=np.float)
yk[0] = 1.
for i in range(1, yk.shape[0]):
yk[i][yarr==yu[i-1]] = 1.
gso(yk, norm=False) # orthogonalize yk
yk = yk[1:-1]
ak = np.empty((yk.shape[0], xarr.shape[1]), dtype=np.float)
for i in range(yk.shape[0]):
ak[i], _ = ols_base(xarr, yk[i], -1)
return ak.T
def kpca(K):
"""Kernel Principal Component Analysis, PCA in
a kernel-defined feature space making use of the
dual representation.
Returns the kernel principal component coefficients
`coeff` (N, N) computed as :math:`\lambda^{-1/2} \mathbf{v}_j`
where :math:`\lambda` and :math:`\mathbf{v}` are the ordered
eigenvalues and the corresponding eigenvector of the centered
kernel matrix K.
Sample(s) can be embedded into the G (<=N) dimensional space
by z = K coeff_G (z = np.dot(K, coeff[:, :G])).
:Parameters:
K: 2d array_like object (N,N)
precomputed centered kernel matrix
:Returns:
coeff, evals: 2d numpy array (N,N), 1d numpy array (N)
kernel principal component coefficients, eigenvalues
sorted by decreasing eigenvalue.
"""
evals, evecs = np.linalg.eigh(K)
idx = np.argsort(evals)
idx = idx[::-1]
evecs = evecs[:, idx]
evals = evals[idx]
for i in range(len(evals)):
evecs[:, i] /= np.sqrt(evals[i])
return evecs, evals
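# kpca() expects an already centred kernel matrix; a standard way to centre a
# precomputed K (an illustrative sketch, not the only option):
#   >>> n = K.shape[0]
#   >>> J = np.eye(n) - np.ones((n, n)) / n
#   >>> Kc = np.dot(np.dot(J, K), J)
#   >>> coeff, evals = kpca(Kc)
#   >>> z = np.dot(Kc, coeff[:, :2])      # 2-dimensional embedding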
def kfda(Karr, yarr, lmb=0.001):
"""Kernel Fisher Discriminant Analysis.
Returns the transformation matrix `coeff` (N,1),
    where `K` is the kernel matrix (N,N) and y
    is the class labels (the algorithm works only with 2
    classes).
:Parameters:
K: 2d array_like object (N, N)
precomputed kernel matrix
y : 1d array_like object integer (N)
class labels
lmb : float (>= 0.0)
regularization parameter
:Returns:
coeff: 2d numpy array (N,1)
kernel fisher coefficients.
"""
labels = np.unique(yarr)
n = yarr.shape[0]
idx1 = np.where(yarr==labels[0])[0]
idx2 = np.where(yarr==labels[1])[0]
n1 = idx1.shape[0]
n2 = idx2.shape[0]
K1, K2 = Karr[:, idx1], Karr[:, idx2]
N1 = np.dot(np.dot(K1, np.eye(n1) - (1 / float(n1))), K1.T)
N2 = np.dot(np.dot(K2, np.eye(n2) - (1 / float(n2))), K2.T)
N = N1 + N2 + np.diag(np.repeat(lmb, n))
M1 = np.sum(K1, axis=1) / float(n1)
M2 = np.sum(K2, axis=1) / float(n2)
M = M1 - M2
coeff = np.linalg.solve(N, M).reshape(-1, 1)
return coeff
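# Example with a precomputed linear kernel (two-class labels only, as noted in
# the docstring; the data names are placeholders):
#   >>> K = np.dot(x, x.T)
#   >>> coeff = kfda(K, y, lmb=0.001)     # (N, 1)
#   >>> z = np.dot(K, coeff)              # 1-d discriminant scores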
class LDA:
"""Linear Discriminant Analysis.
"""
def __init__(self, method='cov'):
"""Initialization.
:Parameters:
method : str
'cov' or 'fast'
"""
self._coeff = None
self._mean = None
if method not in ['cov', 'fast']:
raise ValueError("method must be 'cov' or 'fast'")
self._method = method
def learn(self, x, y):
"""Computes the transformation matrix.
`x` is a matrix (N,P) and `y` is a vector containing
the class labels. Each column of `x` represents a
variable, while the rows contain observations.
"""
xarr = np.asarray(x, dtype=np.float)
yarr = np.asarray(y, dtype=np.int)
if xarr.ndim != 2:
raise ValueError("x must be a 2d array_like object")
if yarr.ndim != 1:
raise ValueError("y must be an 1d array_like object")
if xarr.shape[0] != yarr.shape[0]:
raise ValueError("x, y shape mismatch")
self._mean = np.mean(xarr, axis=0)
if self._method == 'cov':
self._coeff = lda(xarr, yarr)
elif self._method == 'fast':
self._coeff = lda_fast(xarr-self._mean, yarr)
def transform(self, t):
"""Embed `t` (M,P) into the C-1 dimensional space.
Returns a (M,C-1) matrix.
"""
if self._coeff is None:
raise ValueError("no model computed")
tarr = np.asarray(t, dtype=np.float)
try:
return np.dot(tarr-self._mean, self._coeff)
except:
ValueError("t, coeff: shape mismatch")
def coeff(self):
"""Returns the tranformation matrix (P,C-1), where
C is the number of classes. Each column contains
coefficients for one transformation vector.
"""
return self._coeff
class SRDA:
"""Spectral Regression Discriminant Analysis.
"""
def __init__(self, alpha=0.001):
"""Initialization.
:Parameters:
alpha : float (>=0)
regularization parameter
"""
self._coeff = None
self._mean = None
self._alpha = alpha
def learn(self, x, y):
"""Computes the transformation matrix.
`x` is a matrix (N,P) and `y` is a vector containing
the class labels. Each column of `x` represents a
variable, while the rows contain observations.
"""
xarr = np.asarray(x, dtype=np.float)
yarr = np.asarray(y, dtype=np.int)
if xarr.ndim != 2:
raise ValueError("x must be a 2d array_like object")
if yarr.ndim != 1:
raise ValueError("y must be an 1d array_like object")
if xarr.shape[0] != yarr.shape[0]:
raise ValueError("x, y shape mismatch")
self._mean = np.mean(xarr, axis=0)
self._coeff = srda(xarr-self._mean, yarr, self._alpha)
def transform(self, t):
"""Embed t (M,P) into the C-1 dimensional space.
Returns a (M,C-1) matrix.
"""
if self._coeff is None:
raise ValueError("no model computed")
tarr = np.asarray(t, dtype=np.float)
try:
return np.dot(tarr-self._mean, self._coeff)
except:
ValueError("t, coeff: shape mismatch")
def coeff(self):
"""Returns the tranformation matrix (P,C-1), where
C is the number of classes. Each column contains
coefficients for one transformation vector.
"""
return self._coeff
class KFDA:
"""Kernel Fisher Discriminant Analysis.
"""
def __init__(self, lmb=0.001, kernel=None):
"""Initialization.
:Parameters:
lmb : float (>= 0.0)
regularization parameter
kernel : None or mlpy.Kernel object.
if kernel is None, K and Kt in .learn()
and in .transform() methods must be precomputed kernel
matricies, else K and Kt must be training (resp.
test) data in input space.
"""
if kernel is not None:
if not isinstance(kernel, Kernel):
raise ValueError("kernel must be None or a mlpy.Kernel object")
self._kernel = kernel
self._x = None
self._coeff = None
self._lmb = lmb
def learn(self, K, y):
"""Computes the transformation vector.
:Parameters:
K: 2d array_like object
precomputed training kernel matrix (if kernel=None);
training data in input space (if kernel is a Kernel object)
y : 1d array_like object integer (N)
class labels (only two classes)
"""
Karr = np.array(K, dtype=np.float)
yarr = np.asarray(y, dtype=np.int)
if yarr.ndim != 1:
raise ValueError("y must be an 1d array_like object")
if self._kernel is None:
if Karr.shape[0] != Karr.shape[1]:
raise ValueError("K must be a square matrix")
else:
self._x = Karr.copy()
Karr = self._kernel.kernel(Karr, Karr)
labels = np.unique(yarr)
if labels.shape[0] != 2:
raise ValueError("number of classes must be = 2")
self._coeff = kfda(Karr, yarr, self._lmb)
def transform(self, Kt):
"""Embed Kt into the 1d kernel fisher space.
:Parameters:
Kt : 1d or 2d array_like object
precomputed test kernel matrix. (if kernel=None);
test data in input space (if kernel is a Kernel object).
"""
if self._coeff is None:
raise ValueError("no model computed")
Ktarr = np.asarray(Kt, dtype=np.float)
if self._kernel is not None:
Ktarr = self._kernel.kernel(Ktarr, self._x)
try:
return np.dot(Ktarr, self._coeff)
except:
ValueError("Kt, coeff: shape mismatch")
def coeff(self):
"""Returns the tranformation vector (N,1).
"""
return self._coeff
class PCA:
"""Principal Component Analysis.
"""
def __init__(self, method='svd', whiten=False):
"""Initialization.
:Parameters:
method : str
method, 'svd' or 'cov'
whiten : bool
whitening. The eigenvectors will be scaled
by eigenvalues**-(1/2)
"""
self._coeff = None
self._coeff_inv = None
self._evals = None
self._mean = None
self._method = method
self._whiten = whiten
def learn(self, x):
"""Compute the principal component coefficients.
`x` is a matrix (N,P). Each column of `x` represents a
variable, while the rows contain observations.
"""
xarr = np.asarray(x, dtype=np.float)
if xarr.ndim != 2:
raise ValueError("x must be a 2d array_like object")
self._mean = np.mean(xarr, axis=0)
self._coeff, self._evals = pca(x, method=self._method)
if self._whiten:
self._coeff_inv = np.empty((self._coeff.shape[1],
self._coeff.shape[0]), dtype=np.float)
for i in range(len(self._evals)):
eval_sqrt = np.sqrt(self._evals[i])
self._coeff_inv[i] = self._coeff[:, i] * \
eval_sqrt
self._coeff[:, i] /= eval_sqrt
else:
self._coeff_inv = self._coeff.T
def transform(self, t, k=None):
"""Embed `t` (M,P) into the k dimensional subspace.
Returns a (M,K) matrix. If `k` =None will be set to
min(N,P)
"""
if self._coeff is None:
raise ValueError("no PCA computed")
if k == None:
k = self._coeff.shape[1]
if k < 1 or k > self._coeff.shape[1]:
raise ValueError("k must be in [1, %d] or None" % \
self._coeff.shape[1])
tarr = np.asarray(t, dtype=np.float)
try:
return np.dot(tarr-self._mean, self._coeff[:, :k])
except:
raise ValueError("t, coeff: shape mismatch")
def transform_inv(self, z):
"""Transform data back to its original space,
where `z` is a (M,K) matrix. Returns a (M,P) matrix.
"""
if self._coeff is None:
raise ValueError("no PCA computed")
zarr = np.asarray(z, dtype=np.float)
return np.dot(zarr, self._coeff_inv[:zarr.shape[1]]) +\
self._mean
def coeff(self):
"""Returns the tranformation matrix (P,L), where
L=min(N,P), sorted by decreasing eigenvalue.
Each column contains coefficients for one principal
component.
"""
return self._coeff
def coeff_inv(self):
"""Returns the inverse of tranformation matrix (L,P),
where L=min(N,P), sorted by decreasing eigenvalue.
"""
return self._coeff_inv
def evals(self):
"""Returns sorted eigenvalues (L), where L=min(N,P).
"""
return self._evals
class PCAFast:
"""Fast Principal Component Analysis.
"""
def __init__(self, k=2, eps=0.01):
"""Initialization.
:Parameters:
k : integer
the number of principal axes or eigenvectors required
eps : float (> 0)
tolerance error
"""
self._coeff = None
self._coeff_inv = None
self._mean = None
self._k = k
self._eps = eps
def learn(self, x):
"""Compute the firsts `k` principal component coefficients.
`x` is a matrix (N,P). Each column of `x` represents a
variable, while the rows contain observations.
"""
xarr = np.asarray(x, dtype=np.float)
if xarr.ndim != 2:
raise ValueError("x must be a 2d array_like object")
self._mean = np.mean(xarr, axis=0)
self._coeff = pca_fast(xarr, m=self._k, eps=self._eps)
self._coeff_inv = self._coeff.T
def transform(self, t):
"""Embed t (M,P) into the `k` dimensional subspace.
Returns a (M,K) matrix.
"""
if self._coeff is None:
raise ValueError("no PCA computed")
tarr = np.asarray(t, dtype=np.float)
try:
return np.dot(tarr-self._mean, self._coeff)
except:
raise ValueError("t, coeff: shape mismatch")
def transform_inv(self, z):
"""Transform data back to its original space,
where `z` is a (M,K) matrix. Returns a (M,P) matrix.
"""
if self._coeff is None:
raise ValueError("no PCA computed")
zarr = np.asarray(z, dtype=np.float)
return np.dot(zarr, self._coeff_inv) + self._mean
def coeff(self):
"""Returns the tranformation matrix (P,K) sorted by
decreasing eigenvalue.
Each column contains coefficients for one principal
component.
"""
return self._coeff
def coeff_inv(self):
"""Returns the inverse of tranformation matrix (K,P),
sorted by decreasing eigenvalue.
"""
return self._coeff_inv
class KPCA:
"""Kernel Principal Component Analysis.
"""
def __init__(self, kernel=None):
"""Initialization.
:Parameters:
kernel : None or mlpy.Kernel object.
if kernel is None, K and Kt in .learn()
and in .transform() methods must be precomputed kernel
matricies, else K and Kt must be training (resp.
test) data in input space.
"""
if kernel is not None:
if not isinstance(kernel, Kernel):
raise ValueError("kernel must be None or a mlpy.Kernel object")
self._coeff = None
self._evals = None
self._K = None
self._kernel = kernel
self._x = None
def learn(self, K):
"""Compute the kernel principal component coefficients.
:Parameters:
K: 2d array_like object
precomputed training kernel matrix (if kernel=None);
training data in input space (if kernel is a Kernel object)
"""
Karr = np.asarray(K, dtype=np.float)
if Karr.ndim != 2:
raise ValueError("K must be a 2d array_like object")
if self._kernel is None:
if Karr.shape[0] != Karr.shape[1]:
raise ValueError("K must be a square matrix")
else:
self._x = Karr.copy()
Karr = self._kernel.kernel(Karr, Karr)
self._K = Karr.copy()
Karr = kernel.kernel_center(Karr, Karr)
self._coeff, self._evals = kpca(Karr)
def transform(self, Kt, k=None):
"""Embed Kt into the `k` dimensional subspace.
:Parameters:
Kt : 1d or 2d array_like object
precomputed test kernel matrix. (if kernel=None);
test data in input space (if kernel is a Kernel object).
"""
if self._coeff is None:
raise ValueError("no KPCA computed")
if k == None:
k = self._coeff.shape[1]
if k < 1 or k > self._coeff.shape[1]:
raise ValueError("k must be in [1, %d] or None" % \
self._coeff.shape[1])
Ktarr = np.asarray(Kt, dtype=np.float)
if self._kernel is not None:
Ktarr = self._kernel.kernel(Ktarr, self._x)
Ktarr = kernel.kernel_center(Ktarr, self._K)
try:
return np.dot(Ktarr, self._coeff[:, :k])
except:
raise ValueError("Kt, coeff: shape mismatch")
def coeff(self):
"""Returns the tranformation matrix (N,N) sorted by
decreasing eigenvalue.
"""
return self._coeff
def evals(self):
"""Returns sorted eigenvalues (N).
"""
return self._evals
| unlicense | -3,130,848,422,209,936,400 | 28.233412 | 79 | 0.545657 | false | 3.715813 | false | false | false |
ccapudev/django-padlock | padlock/middleware.py | 1 | 4658 | # -*- coding: utf-8 -*-
from uuid import uuid4
from django.conf import settings
from django import http
from django.contrib.auth.views import logout_then_login
from django.core.urlresolvers import reverse, reverse_lazy
from django.core.exceptions import ImproperlyConfigured
try:
from django.utils.deprecation import MiddlewareMixin
except Exception as e:
MiddlewareMixin = object
import logging
logger = logging.getLogger('django')
def setAndWarning(attrib,value):
defined = getattr(settings,attrib,None)
if defined is not None:
if type(defined) != type(value):
logger.warning("{0} in settings is not valid type.. set to {1}".format(attrib,value))
return value
else:
return defined
logger.warning("{0} no defined on settings... set to {1}".format(attrib,value))
return value
# Lista los paths a omitir, ['*'] omite todos los paths
PADLOCK_SKIP_PATHS = setAndWarning("PADLOCK_SKIP_PATHS",[])
# Establece el tiempo de vida de la cookies (el valor recomendable es >= 120)
PADLOCK_COOKIE_AGE = setAndWarning("PADLOCK_COOKIE_AGE",3600*12)
# Boolean que indica cerrar sesión en caso de intento de romper seguridad
PADLOCK_AUTHBROKEN_LOGOUT = setAndWarning("PADLOCK_AUTHBROKEN_LOGOUT",False)
# URL de redirecionamiento luego de cerrar sesión
# Note que si empieza con '/' o ' http' se usa como string
# de caso contrario se usa como un pattern name
PADLOCK_LOGOUT_REDIRECT_TO = setAndWarning("PADLOCK_LOGOUT_REDIRECT_TO",'/')
# Define el nombre de las cockies
PADLOCK_PREFIX = setAndWarning("PADLOCK_PREFIX",'padlock')
fake_key_pieces = str(uuid4()).split('-')
def getURIRedirect():
if PADLOCK_LOGOUT_REDIRECT_TO.startswith('/'):
return PADLOCK_LOGOUT_REDIRECT_TO
if PADLOCK_LOGOUT_REDIRECT_TO.startswith('?'):
return PADLOCK_LOGOUT_REDIRECT_TO
elif PADLOCK_LOGOUT_REDIRECT_TO.startswith('http'):
return PADLOCK_LOGOUT_REDIRECT_TO
else:
return reverse(PADLOCK_LOGOUT_REDIRECT_TO)
def authFailAction(request):
if PADLOCK_AUTHBROKEN_LOGOUT:
if request.is_authenticated:
return logout_then_login(request, getURIRedirect())
response = http.HttpResponseForbidden()
for keynum,row in enumerate(fake_key_pieces):
response.delete_cookie(PADLOCK_PREFIX+'_id_%d' % keynum)
response.delete_cookie('sessionid')
return response
# PADLOCK_SKIP_PATHS = ['/es/','/auth/ingresar/'] or ['*'] for all pages
def locksmith_build_job(response,PadLockKey):
pieces = PadLockKey.split('-')
for keynum,row in enumerate(pieces):
response.set_cookie(PADLOCK_PREFIX+'_id_%d' % keynum,row,max_age=PADLOCK_COOKIE_AGE)
return response
def locksmith_restore_job(request):
pieces = []
for keynum,row in enumerate(fake_key_pieces):
piece = request.COOKIES.get(PADLOCK_PREFIX+'_id_%d' % keynum,None)
if piece is None:
return False
else:
pieces.append(piece)
return '-'.join(pieces,)
class PadLockMiddleware(MiddlewareMixin):
def process_request(self, request):
if getattr(request,"user",None) is None:
return None
if '*' in PADLOCK_SKIP_PATHS or request.path in PADLOCK_SKIP_PATHS:
return None
if request.user.is_authenticated:
padlock_id = locksmith_restore_job(request)
if not padlock_id:
return authFailAction(request)
if padlock_id != request.session.get(PADLOCK_PREFIX,None):
return authFailAction(request)
if request.method == 'POST':
padlock_id = locksmith_restore_job(request)
if not padlock_id:
return authFailAction(request)
if padlock_id != request.session.get(PADLOCK_PREFIX,None):
return authFailAction(request)
return None
def process_response(self, request, response):
if getattr(request,"user",None) is None:
return response
if '*' in PADLOCK_SKIP_PATHS or request.path in PADLOCK_SKIP_PATHS:
return response
if PADLOCK_PREFIX+'_id_0' in request.COOKIES:
# print("hay un PadLock existente")
if request.user.is_authenticated:
if locksmith_restore_job(request) != request.session.get(PADLOCK_PREFIX,False):
return authFailAction(request)
# return http.HttpResponseForbidden()
else:
if locksmith_restore_job(request) and request.session.get(PADLOCK_PREFIX,None) is None:
# print("Seteando nuevo PadLock habiendo un padlock Cookie")
padlock_id = str(uuid4())
request.session[PADLOCK_PREFIX] = padlock_id
response = locksmith_build_job(response,padlock_id)
else:
if request.method != 'POST':
# print("Seteando nuevo PadLock")
padlock_id = str(uuid4())
request.session[PADLOCK_PREFIX] = padlock_id
response = locksmith_build_job(response,padlock_id)
else:
# print("No se ha Seteando un nuevo PadLock por ser un post directo")
pass
return response
| gpl-3.0 | -8,411,344,404,613,324,000 | 30.673469 | 91 | 0.734751 | false | 3.087533 | false | false | false |
osm-fr/osmose-backend | plugins/TagFix_MultipleTag_Lang_es.py | 4 | 3001 | #-*- coding: utf-8 -*-
###########################################################################
## ##
## Copyrights Frédéric Rodrigo 2014 ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################
from modules.OsmoseTranslation import T_
from plugins.Plugin import Plugin
class TagFix_MultipleTag_Lang_es(Plugin):
only_for = ["es"]
def init(self, logger):
Plugin.init(self, logger)
self.errors[30326] = self.def_class(item = 3032, level = 1, tags = ['tag', 'fix:chair'],
title = T_('Watch multiple tags'))
import re
self.Panaderia = re.compile(u"panader.a (.*)", re.IGNORECASE)
def node(self, data, tags):
err = []
if not "name" in tags:
return err
if not "shop" in tags:
panaderia = self.Panaderia.match(tags["name"])
if panaderia:
err.append({"class": 30326, "subclass": 0, "fix": {"+": {"shop": "bakery"}, "~": {"name": panaderia.group(1)} }})
return err
def way(self, data, tags, nds):
return self.node(data, tags)
def relation(self, data, tags, members):
return self.node(data, tags)
###########################################################################
from plugins.Plugin import TestPluginCommon
class Test(TestPluginCommon):
def test(self):
a = TagFix_MultipleTag_Lang_es(None)
class _config:
options = {"language": "es"}
class father:
config = _config()
a.father = father()
a.init(None)
for t in [{"name": u"Panadería Doña Neli"},
]:
self.check_err(a.node(None, t), t)
for t in [{"name": u"Panadería Doña Neli", "shop": "b"},
]:
assert not a.way(None, t, None), t
| gpl-3.0 | -7,155,217,066,608,390,000 | 38.933333 | 129 | 0.451753 | false | 4.470149 | false | false | false |
tropo/tropo-webapi-python | samples/gh-25.conference.py | 1 | 1720 | #!/usr/bin/env python
"""
Hello world script for Session API ( https://www.tropo.com/docs/webapi/sessionapi.htm )
Upon launch, it will trigger a message to be sent via Jabber to the addess specified in
'number'.
"""
# Sample application using the itty-bitty python web framework from:
# http://github.com/toastdriven/itty
from itty import *
from tropo import Tropo, Session, JoinPrompt, LeavePrompt
from urllib import urlencode
from urllib2 import urlopen
@post('/index.json')
def index(request):
session = Session(request.body)
t = Tropo()
#jj = JoinPrompt(value = "who are you who let you come in")
jj = JoinPrompt("who are you who let you come in")
#ll = LeavePrompt(value = "byebye samsung")
ll = LeavePrompt("byebye samsung")
t.call(to=session.parameters['callToNumber'], network='SIP')
t.conference(id='yuxiangj', joinPrompt=jj.json, leavePrompt=ll.json)
t.say(session.parameters['message'])
return t.RenderJsonSDK()
#base_url = 'http://api.tropo.com/1.0/sessions'
base_url = 'http://192.168.26.21:8080/gateway/sessions'
token = '7776687947547a6261677359524e665670427145574f544e44616b5a64456d6c526b576265647448516e796c' # Insert your token here
action = 'create'
#number = 'sip:[email protected]:5678' # change to the Jabber ID to which you want to send the message
number = 'sip:[email protected]:5678' # change to the Jabber ID to which you want to send the message
message = 'hello from the session API!'
params = urlencode([('action', action), ('token', token), ('callToNumber', number), ('message', message)])
data = urlopen('%s?%s' % (base_url, params)).read()
#run_itty(server='wsgiref', host='0.0.0.0', port=8888)
run_itty(config='sample_conf')
| mit | 4,612,223,179,358,499,000 | 38.090909 | 124 | 0.719186 | false | 3.017544 | false | false | false |
napalm-automation/napalm-yang | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state/__init__.py | 1 | 26641 | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/extended-ipv4-reachability/prefixes/prefix/undefined-subtlvs/undefined-subtlv/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State parameters of the undefined sub-TLV.
"""
__slots__ = ("_path_helper", "_extmethods", "__type", "__length", "__value")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
self.__length = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="length",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
self.__value = YANGDynClass(
base=bitarray,
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="binary",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"extended-ipv4-reachability",
"prefixes",
"prefix",
"undefined-subtlvs",
"undefined-subtlv",
"state",
]
def _get_type(self):
"""
Getter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state/type (uint8)
YANG Description: TLV Type.
"""
return self.__type
def _set_type(self, v, load=False):
"""
Setter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state/type (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_type() directly.
YANG Description: TLV Type.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """type must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__type = t
if hasattr(self, "_set"):
self._set()
def _unset_type(self):
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
def _get_length(self):
"""
Getter method for length, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state/length (uint8)
YANG Description: TLV length.
"""
return self.__length
def _set_length(self, v, load=False):
"""
Setter method for length, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state/length (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_length is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_length() directly.
YANG Description: TLV length.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="length",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """length must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="length", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__length = t
if hasattr(self, "_set"):
self._set()
def _unset_length(self):
self.__length = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="length",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
def _get_value(self):
"""
Getter method for value, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state/value (binary)
YANG Description: TLV value.
"""
return self.__value
def _set_value(self, v, load=False):
"""
Setter method for value, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state/value (binary)
If this variable is read-only (config: false) in the
source YANG file, then _set_value is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_value() directly.
YANG Description: TLV value.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=bitarray,
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="binary",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """value must be of a type compatible with binary""",
"defined-type": "binary",
"generated-type": """YANGDynClass(base=bitarray, is_leaf=True, yang_name="value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='binary', is_config=False)""",
}
)
self.__value = t
if hasattr(self, "_set"):
self._set()
def _unset_value(self):
self.__value = YANGDynClass(
base=bitarray,
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="binary",
is_config=False,
)
type = __builtin__.property(_get_type)
length = __builtin__.property(_get_length)
value = __builtin__.property(_get_value)
_pyangbind_elements = OrderedDict(
[("type", type), ("length", length), ("value", value)]
)
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/extended-ipv4-reachability/prefixes/prefix/undefined-subtlvs/undefined-subtlv/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State parameters of the undefined sub-TLV.
"""
__slots__ = ("_path_helper", "_extmethods", "__type", "__length", "__value")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
self.__length = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="length",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
self.__value = YANGDynClass(
base=bitarray,
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="binary",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"extended-ipv4-reachability",
"prefixes",
"prefix",
"undefined-subtlvs",
"undefined-subtlv",
"state",
]
def _get_type(self):
"""
Getter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state/type (uint8)
YANG Description: TLV Type.
"""
return self.__type
def _set_type(self, v, load=False):
"""
Setter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state/type (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_type() directly.
YANG Description: TLV Type.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """type must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__type = t
if hasattr(self, "_set"):
self._set()
def _unset_type(self):
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
def _get_length(self):
"""
Getter method for length, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state/length (uint8)
YANG Description: TLV length.
"""
return self.__length
def _set_length(self, v, load=False):
"""
Setter method for length, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state/length (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_length is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_length() directly.
YANG Description: TLV length.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="length",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """length must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="length", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__length = t
if hasattr(self, "_set"):
self._set()
def _unset_length(self):
self.__length = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="length",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
def _get_value(self):
"""
Getter method for value, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state/value (binary)
YANG Description: TLV value.
"""
return self.__value
def _set_value(self, v, load=False):
"""
Setter method for value, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs/undefined_subtlv/state/value (binary)
If this variable is read-only (config: false) in the
source YANG file, then _set_value is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_value() directly.
YANG Description: TLV value.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=bitarray,
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="binary",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """value must be of a type compatible with binary""",
"defined-type": "binary",
"generated-type": """YANGDynClass(base=bitarray, is_leaf=True, yang_name="value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='binary', is_config=False)""",
}
)
self.__value = t
if hasattr(self, "_set"):
self._set()
def _unset_value(self):
self.__value = YANGDynClass(
base=bitarray,
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="binary",
is_config=False,
)
type = __builtin__.property(_get_type)
length = __builtin__.property(_get_length)
value = __builtin__.property(_get_value)
_pyangbind_elements = OrderedDict(
[("type", type), ("length", length), ("value", value)]
)
| apache-2.0 | -742,716,316,089,213,000 | 39.860429 | 418 | 0.5719 | false | 4.205367 | true | false | false |
googleapis/googleapis-gen | google/cloud/dialogflow/v2/dialogflow-v2-py/tests/unit/gapic/dialogflow_v2/test_conversation_profiles.py | 1 | 98122 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dialogflow_v2.services.conversation_profiles import ConversationProfilesAsyncClient
from google.cloud.dialogflow_v2.services.conversation_profiles import ConversationProfilesClient
from google.cloud.dialogflow_v2.services.conversation_profiles import pagers
from google.cloud.dialogflow_v2.services.conversation_profiles import transports
from google.cloud.dialogflow_v2.services.conversation_profiles.transports.base import _GOOGLE_AUTH_VERSION
from google.cloud.dialogflow_v2.types import audio_config
from google.cloud.dialogflow_v2.types import conversation_profile
from google.cloud.dialogflow_v2.types import conversation_profile as gcd_conversation_profile
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
# through google-api-core:
# - Delete the auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
reason="This test requires google-auth >= 1.25.0",
)
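# These markers are meant to decorate version-specific auth tests, e.g.
# (illustrative sketch only; the test name below is hypothetical):
#
#   @requires_google_auth_gte_1_25_0
#   def test_conversation_profiles_auth_adc():
#       ...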
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert ConversationProfilesClient._get_default_mtls_endpoint(None) is None
assert ConversationProfilesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert ConversationProfilesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
assert ConversationProfilesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
assert ConversationProfilesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
assert ConversationProfilesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [
ConversationProfilesClient,
ConversationProfilesAsyncClient,
])
def test_conversation_profiles_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == 'dialogflow.googleapis.com:443'
@pytest.mark.parametrize("client_class", [
ConversationProfilesClient,
ConversationProfilesAsyncClient,
])
def test_conversation_profiles_client_service_account_always_use_jwt(client_class):
with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
creds = service_account.Credentials(None, None, None)
client = client_class(credentials=creds)
use_jwt.assert_not_called()
@pytest.mark.parametrize("transport_class,transport_name", [
(transports.ConversationProfilesGrpcTransport, "grpc"),
(transports.ConversationProfilesGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_conversation_profiles_client_service_account_always_use_jwt_true(transport_class, transport_name):
with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
@pytest.mark.parametrize("client_class", [
ConversationProfilesClient,
ConversationProfilesAsyncClient,
])
def test_conversation_profiles_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == 'dialogflow.googleapis.com:443'
def test_conversation_profiles_client_get_transport_class():
transport = ConversationProfilesClient.get_transport_class()
available_transports = [
transports.ConversationProfilesGrpcTransport,
]
assert transport in available_transports
transport = ConversationProfilesClient.get_transport_class("grpc")
assert transport == transports.ConversationProfilesGrpcTransport
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(ConversationProfilesClient, transports.ConversationProfilesGrpcTransport, "grpc"),
(ConversationProfilesAsyncClient, transports.ConversationProfilesGrpcAsyncIOTransport, "grpc_asyncio"),
])
@mock.patch.object(ConversationProfilesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ConversationProfilesClient))
@mock.patch.object(ConversationProfilesAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ConversationProfilesAsyncClient))
def test_conversation_profiles_client_client_options(client_class, transport_class, transport_name):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(ConversationProfilesClient, 'get_transport_class') as gtc:
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials()
)
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(ConversationProfilesClient, 'get_transport_class') as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [
(ConversationProfilesClient, transports.ConversationProfilesGrpcTransport, "grpc", "true"),
(ConversationProfilesAsyncClient, transports.ConversationProfilesGrpcAsyncIOTransport, "grpc_asyncio", "true"),
(ConversationProfilesClient, transports.ConversationProfilesGrpcTransport, "grpc", "false"),
(ConversationProfilesAsyncClient, transports.ConversationProfilesGrpcAsyncIOTransport, "grpc_asyncio", "false"),
])
@mock.patch.object(ConversationProfilesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ConversationProfilesClient))
@mock.patch.object(ConversationProfilesAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ConversationProfilesAsyncClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_conversation_profiles_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch.object(transport_class, '__init__') as patched:
with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch.object(transport_class, '__init__') as patched:
with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(ConversationProfilesClient, transports.ConversationProfilesGrpcTransport, "grpc"),
(ConversationProfilesAsyncClient, transports.ConversationProfilesGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_conversation_profiles_client_client_options_scopes(client_class, transport_class, transport_name):
# Check the case scopes are provided.
options = client_options.ClientOptions(
scopes=["1", "2"],
)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(ConversationProfilesClient, transports.ConversationProfilesGrpcTransport, "grpc"),
(ConversationProfilesAsyncClient, transports.ConversationProfilesGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_conversation_profiles_client_client_options_credentials_file(client_class, transport_class, transport_name):
# Check the case credentials file is provided.
options = client_options.ClientOptions(
credentials_file="credentials.json"
)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_conversation_profiles_client_client_options_from_dict():
with mock.patch('google.cloud.dialogflow_v2.services.conversation_profiles.transports.ConversationProfilesGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = ConversationProfilesClient(
client_options={'api_endpoint': 'squid.clam.whelk'}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
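# The RPC tests below share a common pattern: patch the transport stub for a
# single method, set a canned return value (or an awaitable fake for the async
# client), invoke the client method, then assert on the captured request and
# on the type and fields of the response.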
def test_list_conversation_profiles(transport: str = 'grpc', request_type=conversation_profile.ListConversationProfilesRequest):
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_conversation_profiles),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = conversation_profile.ListConversationProfilesResponse(
next_page_token='next_page_token_value',
)
response = client.list_conversation_profiles(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == conversation_profile.ListConversationProfilesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListConversationProfilesPager)
assert response.next_page_token == 'next_page_token_value'
def test_list_conversation_profiles_from_dict():
test_list_conversation_profiles(request_type=dict)
def test_list_conversation_profiles_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_conversation_profiles),
'__call__') as call:
client.list_conversation_profiles()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == conversation_profile.ListConversationProfilesRequest()
@pytest.mark.asyncio
async def test_list_conversation_profiles_async(transport: str = 'grpc_asyncio', request_type=conversation_profile.ListConversationProfilesRequest):
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_conversation_profiles),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(conversation_profile.ListConversationProfilesResponse(
next_page_token='next_page_token_value',
))
response = await client.list_conversation_profiles(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == conversation_profile.ListConversationProfilesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListConversationProfilesAsyncPager)
assert response.next_page_token == 'next_page_token_value'
@pytest.mark.asyncio
async def test_list_conversation_profiles_async_from_dict():
await test_list_conversation_profiles_async(request_type=dict)
def test_list_conversation_profiles_field_headers():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = conversation_profile.ListConversationProfilesRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_conversation_profiles),
'__call__') as call:
call.return_value = conversation_profile.ListConversationProfilesResponse()
client.list_conversation_profiles(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_list_conversation_profiles_field_headers_async():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = conversation_profile.ListConversationProfilesRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_conversation_profiles),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(conversation_profile.ListConversationProfilesResponse())
await client.list_conversation_profiles(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_list_conversation_profiles_flattened():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_conversation_profiles),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = conversation_profile.ListConversationProfilesResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_conversation_profiles(
parent='parent_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
def test_list_conversation_profiles_flattened_error():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_conversation_profiles(
conversation_profile.ListConversationProfilesRequest(),
parent='parent_value',
)
@pytest.mark.asyncio
async def test_list_conversation_profiles_flattened_async():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_conversation_profiles),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(conversation_profile.ListConversationProfilesResponse())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_conversation_profiles(
parent='parent_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
@pytest.mark.asyncio
async def test_list_conversation_profiles_flattened_error_async():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_conversation_profiles(
conversation_profile.ListConversationProfilesRequest(),
parent='parent_value',
)
def test_list_conversation_profiles_pager():
client = ConversationProfilesClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_conversation_profiles),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[
conversation_profile.ConversationProfile(),
conversation_profile.ConversationProfile(),
conversation_profile.ConversationProfile(),
],
next_page_token='abc',
),
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[],
next_page_token='def',
),
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[
conversation_profile.ConversationProfile(),
],
next_page_token='ghi',
),
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[
conversation_profile.ConversationProfile(),
conversation_profile.ConversationProfile(),
],
),
RuntimeError,
)
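        # The pager should carry the routing metadata built from the request's
        # (empty) ``parent`` field and reuse it for every page fetch.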
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('parent', ''),
)),
)
pager = client.list_conversation_profiles(request={})
assert pager._metadata == metadata
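        # Iterating the pager should transparently walk all pages and yield
        # every ConversationProfile (3 + 0 + 1 + 2 = 6 results).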
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, conversation_profile.ConversationProfile)
for i in results)
def test_list_conversation_profiles_pages():
client = ConversationProfilesClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_conversation_profiles),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[
conversation_profile.ConversationProfile(),
conversation_profile.ConversationProfile(),
conversation_profile.ConversationProfile(),
],
next_page_token='abc',
),
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[],
next_page_token='def',
),
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[
conversation_profile.ConversationProfile(),
],
next_page_token='ghi',
),
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[
conversation_profile.ConversationProfile(),
conversation_profile.ConversationProfile(),
],
),
RuntimeError,
)
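        # Each element of ``.pages`` exposes the raw response, so the page
        # tokens should appear in the same order as the mocked responses.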
pages = list(client.list_conversation_profiles(request={}).pages)
    for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_conversation_profiles_async_pager():
client = ConversationProfilesAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_conversation_profiles),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[
conversation_profile.ConversationProfile(),
conversation_profile.ConversationProfile(),
conversation_profile.ConversationProfile(),
],
next_page_token='abc',
),
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[],
next_page_token='def',
),
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[
conversation_profile.ConversationProfile(),
],
next_page_token='ghi',
),
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[
conversation_profile.ConversationProfile(),
conversation_profile.ConversationProfile(),
],
),
RuntimeError,
)
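        # Awaiting the call returns an async pager positioned on the first
        # page; ``async for`` then walks the remaining pages lazily.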
async_pager = await client.list_conversation_profiles(request={},)
assert async_pager.next_page_token == 'abc'
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, conversation_profile.ConversationProfile)
for i in responses)
@pytest.mark.asyncio
async def test_list_conversation_profiles_async_pages():
client = ConversationProfilesAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_conversation_profiles),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[
conversation_profile.ConversationProfile(),
conversation_profile.ConversationProfile(),
conversation_profile.ConversationProfile(),
],
next_page_token='abc',
),
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[],
next_page_token='def',
),
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[
conversation_profile.ConversationProfile(),
],
next_page_token='ghi',
),
conversation_profile.ListConversationProfilesResponse(
conversation_profiles=[
conversation_profile.ConversationProfile(),
conversation_profile.ConversationProfile(),
],
),
RuntimeError,
)
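        # The async ``.pages`` iterator must be awaited once to start, then
        # consumed with ``async for`` to collect each raw page.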
pages = []
async for page_ in (await client.list_conversation_profiles(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
def test_get_conversation_profile(transport: str = 'grpc', request_type=conversation_profile.GetConversationProfileRequest):
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = conversation_profile.ConversationProfile(
name='name_value',
display_name='display_name_value',
language_code='language_code_value',
)
response = client.get_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == conversation_profile.GetConversationProfileRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, conversation_profile.ConversationProfile)
assert response.name == 'name_value'
assert response.display_name == 'display_name_value'
assert response.language_code == 'language_code_value'
def test_get_conversation_profile_from_dict():
test_get_conversation_profile(request_type=dict)
def test_get_conversation_profile_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_conversation_profile),
'__call__') as call:
client.get_conversation_profile()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == conversation_profile.GetConversationProfileRequest()
@pytest.mark.asyncio
async def test_get_conversation_profile_async(transport: str = 'grpc_asyncio', request_type=conversation_profile.GetConversationProfileRequest):
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(conversation_profile.ConversationProfile(
name='name_value',
display_name='display_name_value',
language_code='language_code_value',
))
response = await client.get_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == conversation_profile.GetConversationProfileRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, conversation_profile.ConversationProfile)
assert response.name == 'name_value'
assert response.display_name == 'display_name_value'
assert response.language_code == 'language_code_value'
@pytest.mark.asyncio
async def test_get_conversation_profile_async_from_dict():
await test_get_conversation_profile_async(request_type=dict)
def test_get_conversation_profile_field_headers():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = conversation_profile.GetConversationProfileRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_conversation_profile),
'__call__') as call:
call.return_value = conversation_profile.ConversationProfile()
client.get_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_get_conversation_profile_field_headers_async():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = conversation_profile.GetConversationProfileRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_conversation_profile),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(conversation_profile.ConversationProfile())
await client.get_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_get_conversation_profile_flattened():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = conversation_profile.ConversationProfile()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_conversation_profile(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
def test_get_conversation_profile_flattened_error():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_conversation_profile(
conversation_profile.GetConversationProfileRequest(),
name='name_value',
)
@pytest.mark.asyncio
async def test_get_conversation_profile_flattened_async():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(conversation_profile.ConversationProfile())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_conversation_profile(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_get_conversation_profile_flattened_error_async():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_conversation_profile(
conversation_profile.GetConversationProfileRequest(),
name='name_value',
)
def test_create_conversation_profile(transport: str = 'grpc', request_type=gcd_conversation_profile.CreateConversationProfileRequest):
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_conversation_profile.ConversationProfile(
name='name_value',
display_name='display_name_value',
language_code='language_code_value',
)
response = client.create_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_conversation_profile.CreateConversationProfileRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_conversation_profile.ConversationProfile)
assert response.name == 'name_value'
assert response.display_name == 'display_name_value'
assert response.language_code == 'language_code_value'
def test_create_conversation_profile_from_dict():
test_create_conversation_profile(request_type=dict)
def test_create_conversation_profile_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_conversation_profile),
'__call__') as call:
client.create_conversation_profile()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_conversation_profile.CreateConversationProfileRequest()
@pytest.mark.asyncio
async def test_create_conversation_profile_async(transport: str = 'grpc_asyncio', request_type=gcd_conversation_profile.CreateConversationProfileRequest):
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_conversation_profile.ConversationProfile(
name='name_value',
display_name='display_name_value',
language_code='language_code_value',
))
response = await client.create_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_conversation_profile.CreateConversationProfileRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_conversation_profile.ConversationProfile)
assert response.name == 'name_value'
assert response.display_name == 'display_name_value'
assert response.language_code == 'language_code_value'
@pytest.mark.asyncio
async def test_create_conversation_profile_async_from_dict():
await test_create_conversation_profile_async(request_type=dict)
def test_create_conversation_profile_field_headers():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_conversation_profile.CreateConversationProfileRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_conversation_profile),
'__call__') as call:
call.return_value = gcd_conversation_profile.ConversationProfile()
client.create_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_create_conversation_profile_field_headers_async():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_conversation_profile.CreateConversationProfileRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_conversation_profile),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_conversation_profile.ConversationProfile())
await client.create_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_create_conversation_profile_flattened():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_conversation_profile.ConversationProfile()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_conversation_profile(
parent='parent_value',
conversation_profile=gcd_conversation_profile.ConversationProfile(name='name_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
assert args[0].conversation_profile == gcd_conversation_profile.ConversationProfile(name='name_value')
def test_create_conversation_profile_flattened_error():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_conversation_profile(
gcd_conversation_profile.CreateConversationProfileRequest(),
parent='parent_value',
conversation_profile=gcd_conversation_profile.ConversationProfile(name='name_value'),
)
@pytest.mark.asyncio
async def test_create_conversation_profile_flattened_async():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_conversation_profile.ConversationProfile())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_conversation_profile(
parent='parent_value',
conversation_profile=gcd_conversation_profile.ConversationProfile(name='name_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
assert args[0].conversation_profile == gcd_conversation_profile.ConversationProfile(name='name_value')
@pytest.mark.asyncio
async def test_create_conversation_profile_flattened_error_async():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_conversation_profile(
gcd_conversation_profile.CreateConversationProfileRequest(),
parent='parent_value',
conversation_profile=gcd_conversation_profile.ConversationProfile(name='name_value'),
)
def test_update_conversation_profile(transport: str = 'grpc', request_type=gcd_conversation_profile.UpdateConversationProfileRequest):
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_conversation_profile.ConversationProfile(
name='name_value',
display_name='display_name_value',
language_code='language_code_value',
)
response = client.update_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_conversation_profile.UpdateConversationProfileRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_conversation_profile.ConversationProfile)
assert response.name == 'name_value'
assert response.display_name == 'display_name_value'
assert response.language_code == 'language_code_value'
def test_update_conversation_profile_from_dict():
test_update_conversation_profile(request_type=dict)
def test_update_conversation_profile_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_conversation_profile),
'__call__') as call:
client.update_conversation_profile()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_conversation_profile.UpdateConversationProfileRequest()
@pytest.mark.asyncio
async def test_update_conversation_profile_async(transport: str = 'grpc_asyncio', request_type=gcd_conversation_profile.UpdateConversationProfileRequest):
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_conversation_profile.ConversationProfile(
name='name_value',
display_name='display_name_value',
language_code='language_code_value',
))
response = await client.update_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_conversation_profile.UpdateConversationProfileRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_conversation_profile.ConversationProfile)
assert response.name == 'name_value'
assert response.display_name == 'display_name_value'
assert response.language_code == 'language_code_value'
@pytest.mark.asyncio
async def test_update_conversation_profile_async_from_dict():
await test_update_conversation_profile_async(request_type=dict)
def test_update_conversation_profile_field_headers():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_conversation_profile.UpdateConversationProfileRequest()
request.conversation_profile.name = 'conversation_profile.name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_conversation_profile),
'__call__') as call:
call.return_value = gcd_conversation_profile.ConversationProfile()
client.update_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'conversation_profile.name=conversation_profile.name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_update_conversation_profile_field_headers_async():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_conversation_profile.UpdateConversationProfileRequest()
request.conversation_profile.name = 'conversation_profile.name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_conversation_profile),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_conversation_profile.ConversationProfile())
await client.update_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'conversation_profile.name=conversation_profile.name/value',
) in kw['metadata']
def test_update_conversation_profile_flattened():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_conversation_profile.ConversationProfile()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_conversation_profile(
conversation_profile=gcd_conversation_profile.ConversationProfile(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].conversation_profile == gcd_conversation_profile.ConversationProfile(name='name_value')
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
def test_update_conversation_profile_flattened_error():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_conversation_profile(
gcd_conversation_profile.UpdateConversationProfileRequest(),
conversation_profile=gcd_conversation_profile.ConversationProfile(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
@pytest.mark.asyncio
async def test_update_conversation_profile_flattened_async():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_conversation_profile.ConversationProfile())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_conversation_profile(
conversation_profile=gcd_conversation_profile.ConversationProfile(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].conversation_profile == gcd_conversation_profile.ConversationProfile(name='name_value')
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
@pytest.mark.asyncio
async def test_update_conversation_profile_flattened_error_async():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_conversation_profile(
gcd_conversation_profile.UpdateConversationProfileRequest(),
conversation_profile=gcd_conversation_profile.ConversationProfile(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
def test_delete_conversation_profile(transport: str = 'grpc', request_type=conversation_profile.DeleteConversationProfileRequest):
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == conversation_profile.DeleteConversationProfileRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_conversation_profile_from_dict():
test_delete_conversation_profile(request_type=dict)
def test_delete_conversation_profile_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_conversation_profile),
'__call__') as call:
client.delete_conversation_profile()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == conversation_profile.DeleteConversationProfileRequest()
@pytest.mark.asyncio
async def test_delete_conversation_profile_async(transport: str = 'grpc_asyncio', request_type=conversation_profile.DeleteConversationProfileRequest):
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == conversation_profile.DeleteConversationProfileRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_conversation_profile_async_from_dict():
await test_delete_conversation_profile_async(request_type=dict)
def test_delete_conversation_profile_field_headers():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = conversation_profile.DeleteConversationProfileRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_conversation_profile),
'__call__') as call:
call.return_value = None
client.delete_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_delete_conversation_profile_field_headers_async():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = conversation_profile.DeleteConversationProfileRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_conversation_profile),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_conversation_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_delete_conversation_profile_flattened():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_conversation_profile(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
def test_delete_conversation_profile_flattened_error():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_conversation_profile(
conversation_profile.DeleteConversationProfileRequest(),
name='name_value',
)
@pytest.mark.asyncio
async def test_delete_conversation_profile_flattened_async():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_conversation_profile),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_conversation_profile(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_delete_conversation_profile_flattened_error_async():
client = ConversationProfilesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_conversation_profile(
conversation_profile.DeleteConversationProfileRequest(),
name='name_value',
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.ConversationProfilesGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.ConversationProfilesGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ConversationProfilesClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.ConversationProfilesGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ConversationProfilesClient(
client_options={"scopes": ["1", "2"]},
transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.ConversationProfilesGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = ConversationProfilesClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.ConversationProfilesGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
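    # The concrete transport should expose its underlying gRPC channel.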
channel = transport.grpc_channel
assert channel
transport = transports.ConversationProfilesGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize("transport_class", [
transports.ConversationProfilesGrpcTransport,
transports.ConversationProfilesGrpcAsyncIOTransport,
])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport,
transports.ConversationProfilesGrpcTransport,
)
def test_conversation_profiles_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.ConversationProfilesTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json"
)
def test_conversation_profiles_base_transport():
# Instantiate the base transport.
with mock.patch('google.cloud.dialogflow_v2.services.conversation_profiles.transports.ConversationProfilesTransport.__init__') as Transport:
Transport.return_value = None
transport = transports.ConversationProfilesTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
'list_conversation_profiles',
'get_conversation_profile',
'create_conversation_profile',
'update_conversation_profile',
'delete_conversation_profile',
)
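    # Every RPC method on the abstract base transport should raise
    # NotImplementedError until a concrete transport overrides it.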
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
@requires_google_auth_gte_1_25_0
def test_conversation_profiles_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dialogflow_v2.services.conversation_profiles.transports.ConversationProfilesTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.ConversationProfilesTransport(
credentials_file="credentials.json",
quota_project_id="octopus",
)
load_creds.assert_called_once_with("credentials.json",
scopes=None,
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/dialogflow',
),
quota_project_id="octopus",
)
@requires_google_auth_lt_1_25_0
def test_conversation_profiles_base_transport_with_credentials_file_old_google_auth():
# Instantiate the base transport with a credentials file
with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dialogflow_v2.services.conversation_profiles.transports.ConversationProfilesTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.ConversationProfilesTransport(
credentials_file="credentials.json",
quota_project_id="octopus",
)
load_creds.assert_called_once_with("credentials.json", scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/dialogflow',
),
quota_project_id="octopus",
)
def test_conversation_profiles_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.dialogflow_v2.services.conversation_profiles.transports.ConversationProfilesTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.ConversationProfilesTransport()
adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_conversation_profiles_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
ConversationProfilesClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/dialogflow',
),
quota_project_id=None,
)
@requires_google_auth_lt_1_25_0
def test_conversation_profiles_auth_adc_old_google_auth():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
ConversationProfilesClient()
adc.assert_called_once_with(
scopes=( 'https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/dialogflow',),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.ConversationProfilesGrpcTransport,
transports.ConversationProfilesGrpcAsyncIOTransport,
],
)
@requires_google_auth_gte_1_25_0
def test_conversation_profiles_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=( 'https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/dialogflow',),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class",
[
transports.ConversationProfilesGrpcTransport,
transports.ConversationProfilesGrpcAsyncIOTransport,
],
)
@requires_google_auth_lt_1_25_0
def test_conversation_profiles_transport_auth_adc_old_google_auth(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus")
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/dialogflow',
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.ConversationProfilesGrpcTransport, grpc_helpers),
(transports.ConversationProfilesGrpcAsyncIOTransport, grpc_helpers_async)
],
)
def test_conversation_profiles_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(
quota_project_id="octopus",
scopes=["1", "2"]
)
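        # The channel should target the production Dialogflow endpoint and
        # combine the user-supplied scopes with the service's default scopes.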
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/dialogflow',
),
scopes=["1", "2"],
default_host="dialogflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize("transport_class", [transports.ConversationProfilesGrpcTransport, transports.ConversationProfilesGrpcAsyncIOTransport])
def test_conversation_profiles_grpc_transport_client_cert_source_for_mtls(
transport_class
):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback
)
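            # The cert/key pair produced by the callback should be used to
            # build the SSL channel credentials.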
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert,
private_key=expected_key
)
def test_conversation_profiles_host_no_port():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='dialogflow.googleapis.com'),
)
assert client.transport._host == 'dialogflow.googleapis.com:443'
def test_conversation_profiles_host_with_port():
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='dialogflow.googleapis.com:8000'),
)
assert client.transport._host == 'dialogflow.googleapis.com:8000'
def test_conversation_profiles_grpc_transport_channel():
channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.ConversationProfilesGrpcTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_conversation_profiles_grpc_asyncio_transport_channel():
channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.ConversationProfilesGrpcAsyncIOTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.ConversationProfilesGrpcTransport, transports.ConversationProfilesGrpcAsyncIOTransport])
def test_conversation_profiles_transport_channel_mtls_with_client_cert_source(
transport_class
):
with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.ConversationProfilesGrpcTransport, transports.ConversationProfilesGrpcAsyncIOTransport])
def test_conversation_profiles_transport_channel_mtls_with_adc(
transport_class
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_agent_path():
project = "squid"
expected = "projects/{project}/agent".format(project=project, )
actual = ConversationProfilesClient.agent_path(project)
assert expected == actual
def test_parse_agent_path():
expected = {
"project": "clam",
}
path = ConversationProfilesClient.agent_path(**expected)
# Check that the path construction is reversible.
actual = ConversationProfilesClient.parse_agent_path(path)
assert expected == actual
def test_conversation_model_path():
project = "whelk"
location = "octopus"
conversation_model = "oyster"
expected = "projects/{project}/locations/{location}/conversationModels/{conversation_model}".format(project=project, location=location, conversation_model=conversation_model, )
actual = ConversationProfilesClient.conversation_model_path(project, location, conversation_model)
assert expected == actual
def test_parse_conversation_model_path():
expected = {
"project": "nudibranch",
"location": "cuttlefish",
"conversation_model": "mussel",
}
path = ConversationProfilesClient.conversation_model_path(**expected)
# Check that the path construction is reversible.
actual = ConversationProfilesClient.parse_conversation_model_path(path)
assert expected == actual
def test_conversation_profile_path():
project = "winkle"
conversation_profile = "nautilus"
expected = "projects/{project}/conversationProfiles/{conversation_profile}".format(project=project, conversation_profile=conversation_profile, )
actual = ConversationProfilesClient.conversation_profile_path(project, conversation_profile)
assert expected == actual
def test_parse_conversation_profile_path():
expected = {
"project": "scallop",
"conversation_profile": "abalone",
}
path = ConversationProfilesClient.conversation_profile_path(**expected)
# Check that the path construction is reversible.
actual = ConversationProfilesClient.parse_conversation_profile_path(path)
assert expected == actual
def test_document_path():
project = "squid"
knowledge_base = "clam"
document = "whelk"
expected = "projects/{project}/knowledgeBases/{knowledge_base}/documents/{document}".format(project=project, knowledge_base=knowledge_base, document=document, )
actual = ConversationProfilesClient.document_path(project, knowledge_base, document)
assert expected == actual
def test_parse_document_path():
expected = {
"project": "octopus",
"knowledge_base": "oyster",
"document": "nudibranch",
}
path = ConversationProfilesClient.document_path(**expected)
# Check that the path construction is reversible.
actual = ConversationProfilesClient.parse_document_path(path)
assert expected == actual
def test_knowledge_base_path():
project = "cuttlefish"
knowledge_base = "mussel"
expected = "projects/{project}/knowledgeBases/{knowledge_base}".format(project=project, knowledge_base=knowledge_base, )
actual = ConversationProfilesClient.knowledge_base_path(project, knowledge_base)
assert expected == actual
def test_parse_knowledge_base_path():
expected = {
"project": "winkle",
"knowledge_base": "nautilus",
}
path = ConversationProfilesClient.knowledge_base_path(**expected)
# Check that the path construction is reversible.
actual = ConversationProfilesClient.parse_knowledge_base_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "scallop"
expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
actual = ConversationProfilesClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "abalone",
}
path = ConversationProfilesClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = ConversationProfilesClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "squid"
expected = "folders/{folder}".format(folder=folder, )
actual = ConversationProfilesClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "clam",
}
path = ConversationProfilesClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = ConversationProfilesClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "whelk"
expected = "organizations/{organization}".format(organization=organization, )
actual = ConversationProfilesClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "octopus",
}
path = ConversationProfilesClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = ConversationProfilesClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "oyster"
expected = "projects/{project}".format(project=project, )
actual = ConversationProfilesClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "nudibranch",
}
path = ConversationProfilesClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = ConversationProfilesClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "cuttlefish"
location = "mussel"
expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
actual = ConversationProfilesClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "winkle",
"location": "nautilus",
}
path = ConversationProfilesClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = ConversationProfilesClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(transports.ConversationProfilesTransport, '_prep_wrapped_messages') as prep:
client = ConversationProfilesClient(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(transports.ConversationProfilesTransport, '_prep_wrapped_messages') as prep:
transport_class = ConversationProfilesClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
| apache-2.0 | 4,401,206,355,911,624,000 | 40.020903 | 249 | 0.675027 | false | 4.193069 | true | false | false |
alphagov/notifications-utils | notifications_utils/template.py | 1 | 28788 | import math
from abc import ABC, abstractmethod
from datetime import datetime
from functools import lru_cache
from html import unescape
from os import path
from jinja2 import Environment, FileSystemLoader
from markupsafe import Markup
from notifications_utils import LETTER_MAX_PAGE_COUNT, SMS_CHAR_COUNT_LIMIT
from notifications_utils.columns import Columns
from notifications_utils.countries.data import Postage
from notifications_utils.field import Field, PlainTextField
from notifications_utils.formatters import (
MAGIC_SEQUENCE,
add_prefix,
add_trailing_newline,
autolink_sms,
escape_html,
formatted_list,
make_quotes_smart,
nl2br,
normalise_multiple_newlines,
normalise_whitespace,
normalise_whitespace_and_newlines,
notify_email_markdown,
notify_email_preheader_markdown,
notify_letter_preview_markdown,
notify_plain_text_email_markdown,
remove_smart_quotes_from_email_addresses,
remove_whitespace_before_punctuation,
replace_hyphens_with_en_dashes,
replace_hyphens_with_non_breaking_hyphens,
sms_encode,
strip_leading_whitespace,
strip_unsupported_characters,
unlink_govuk_escaped,
)
from notifications_utils.postal_address import (
PostalAddress,
address_lines_1_to_7_keys,
)
from notifications_utils.sanitise_text import SanitiseSMS
from notifications_utils.take import Take
from notifications_utils.template_change import TemplateChange
template_env = Environment(loader=FileSystemLoader(
path.join(
path.dirname(path.abspath(__file__)),
'jinja_templates',
)
))
class Template(ABC):
encoding = "utf-8"
def __init__(
self,
template,
values=None,
redact_missing_personalisation=False,
):
if not isinstance(template, dict):
raise TypeError('Template must be a dict')
if values is not None and not isinstance(values, dict):
raise TypeError('Values must be a dict')
if template.get('template_type') != self.template_type:
raise TypeError(
f'Cannot initialise {self.__class__.__name__} '
f'with {template.get("template_type")} template_type'
)
self.id = template.get("id", None)
self.name = template.get("name", None)
self.content = template["content"]
self.values = values
self._template = template
self.redact_missing_personalisation = redact_missing_personalisation
def __repr__(self):
return "{}(\"{}\", {})".format(self.__class__.__name__, self.content, self.values)
@abstractmethod
def __str__(self):
pass
@property
def content_with_placeholders_filled_in(self):
return str(Field(
self.content,
self.values,
html='passthrough',
redact_missing_personalisation=self.redact_missing_personalisation,
markdown_lists=True,
)).strip()
@property
def values(self):
if hasattr(self, '_values'):
return self._values
return {}
@values.setter
def values(self, value):
if not value:
self._values = {}
else:
placeholders = Columns.from_keys(self.placeholders)
self._values = Columns(value).as_dict_with_keys(
self.placeholders | set(
key for key in value.keys()
if Columns.make_key(key) not in placeholders.keys()
)
)
@property
def placeholders(self):
return get_placeholders(self.content)
@property
def missing_data(self):
return list(
placeholder for placeholder in self.placeholders
if self.values.get(placeholder) is None
)
@property
def additional_data(self):
return self.values.keys() - self.placeholders
def get_raw(self, key, default=None):
return self._template.get(key, default)
def compare_to(self, new):
return TemplateChange(self, new)
@property
def content_count(self):
return len(self.content_with_placeholders_filled_in)
def is_message_empty(self):
if not self.content:
return True
if not self.content.startswith('((') or not self.content.endswith('))'):
# If the content doesn’t start or end with a placeholder we
# can guarantee it’s not empty, no matter what
# personalisation has been provided.
return False
return self.content_count == 0
def is_message_too_long(self):
return False
class BaseSMSTemplate(Template):
template_type = 'sms'
def __init__(
self,
template,
values=None,
prefix=None,
show_prefix=True,
sender=None,
):
self.prefix = prefix
self.show_prefix = show_prefix
self.sender = sender
self._content_count = None
super().__init__(template, values)
@property
def values(self):
return super().values
@values.setter
def values(self, value):
# If we change the values of the template it’s possible the
# content count will have changed, so we need to reset the
# cached count.
if self._content_count is not None:
self._content_count = None
# Assigning to super().values doesn’t work here. We need to get
# the property object instead, which has the special method
        # fset, which invokes the setter as if we were
# assigning to it outside this class.
super(BaseSMSTemplate, type(self)).values.fset(self, value)
@property
def content_with_placeholders_filled_in(self):
# We always call SMSMessageTemplate.__str__ regardless of
# subclass, to avoid any HTML formatting. SMS templates differ
# in that the content can include the service name as a prefix.
# So historically we’ve returned the fully-formatted message,
        # rather than some plain-text representation of the content. To
# preserve compatibility for consumers of the API we maintain
# that behaviour by overriding this method here.
return SMSMessageTemplate.__str__(self)
@property
def prefix(self):
return self._prefix if self.show_prefix else None
@prefix.setter
def prefix(self, value):
self._prefix = value
@property
def content_count(self):
"""
Return the number of characters in the message. Note that we don't distinguish between GSM and non-GSM
characters at this point, as `get_sms_fragment_count` handles that separately.
        Also note that if values aren't provided, this will calculate the raw length of the unsubstituted placeholders,
as in the message `foo ((placeholder))` has a length of 19.
"""
if self._content_count is None:
self._content_count = len(self._get_unsanitised_content())
return self._content_count
@property
def content_count_without_prefix(self):
# subtract 2 extra characters to account for the colon and the space,
        # and clamp at zero with max() in case the content is empty (__str__ strips the whitespace).
if self.prefix:
return max((self.content_count - len(self.prefix) - 2), 0)
else:
return self.content_count
@property
def fragment_count(self):
content_with_placeholders = str(self)
# Extended GSM characters count as 2 characters
character_count = self.content_count + count_extended_gsm_chars(content_with_placeholders)
return get_sms_fragment_count(character_count, non_gsm_characters(content_with_placeholders))
def is_message_too_long(self):
"""
        Message is validated without the prefix.
We have decided to be lenient and let the message go over the character limit. The SMS provider will
send messages well over our limit. There were some inconsistencies with how we were validating the
length of a message. This should be the method used anytime we want to reject a message for being too long.
"""
return self.content_count_without_prefix > SMS_CHAR_COUNT_LIMIT
def is_message_empty(self):
return self.content_count_without_prefix == 0
def _get_unsanitised_content(self):
# This is faster to call than SMSMessageTemplate.__str__ if all
# you need to know is how many characters are in the message
if self.values:
values = self.values
else:
values = {
key: MAGIC_SEQUENCE for key in self.placeholders
}
return Take(PlainTextField(
self.content, values, html='passthrough'
)).then(
add_prefix, self.prefix
).then(
remove_whitespace_before_punctuation
).then(
normalise_whitespace_and_newlines
).then(
normalise_multiple_newlines
).then(
str.strip
).then(
str.replace, MAGIC_SEQUENCE, ''
)
class SMSMessageTemplate(BaseSMSTemplate):
def __str__(self):
return sms_encode(self._get_unsanitised_content())
class SMSBodyPreviewTemplate(BaseSMSTemplate):
def __init__(
self,
template,
values=None,
):
super().__init__(template, values, show_prefix=False)
def __str__(self):
return Markup(Take(Field(
self.content,
self.values,
html='escape',
redact_missing_personalisation=True,
)).then(
sms_encode
).then(
remove_whitespace_before_punctuation
).then(
normalise_whitespace_and_newlines
).then(
normalise_multiple_newlines
).then(
str.strip
))
class SMSPreviewTemplate(BaseSMSTemplate):
jinja_template = template_env.get_template('sms_preview_template.jinja2')
def __init__(
self,
template,
values=None,
prefix=None,
show_prefix=True,
sender=None,
show_recipient=False,
show_sender=False,
downgrade_non_sms_characters=True,
redact_missing_personalisation=False,
):
self.show_recipient = show_recipient
self.show_sender = show_sender
self.downgrade_non_sms_characters = downgrade_non_sms_characters
super().__init__(template, values, prefix, show_prefix, sender)
self.redact_missing_personalisation = redact_missing_personalisation
def __str__(self):
return Markup(self.jinja_template.render({
'sender': self.sender,
'show_sender': self.show_sender,
'recipient': Field('((phone number))', self.values, with_brackets=False, html='escape'),
'show_recipient': self.show_recipient,
'body': Take(Field(
self.content,
self.values,
html='escape',
redact_missing_personalisation=self.redact_missing_personalisation,
)).then(
add_prefix, (escape_html(self.prefix) or None) if self.show_prefix else None
).then(
sms_encode if self.downgrade_non_sms_characters else str
).then(
remove_whitespace_before_punctuation
).then(
normalise_whitespace_and_newlines
).then(
normalise_multiple_newlines
).then(
nl2br
).then(
autolink_sms
)
}))
class BaseBroadcastTemplate(BaseSMSTemplate):
template_type = 'broadcast'
MAX_CONTENT_COUNT_GSM = 1_395
MAX_CONTENT_COUNT_UCS2 = 615
@property
def encoded_content_count(self):
if self.non_gsm_characters:
return self.content_count
return self.content_count + count_extended_gsm_chars(
self.content_with_placeholders_filled_in
)
@property
def non_gsm_characters(self):
return non_gsm_characters(self.content)
@property
def max_content_count(self):
if self.non_gsm_characters:
return self.MAX_CONTENT_COUNT_UCS2
return self.MAX_CONTENT_COUNT_GSM
@property
def content_too_long(self):
return self.encoded_content_count > self.max_content_count
class BroadcastPreviewTemplate(BaseBroadcastTemplate, SMSPreviewTemplate):
jinja_template = template_env.get_template('broadcast_preview_template.jinja2')
class BroadcastMessageTemplate(BaseBroadcastTemplate, SMSMessageTemplate):
@classmethod
def from_content(cls, content):
return cls(
template={
'template_type': cls.template_type,
'content': content,
},
values=None, # events have already done interpolation of any personalisation
)
@classmethod
def from_event(cls, broadcast_event):
"""
should be directly callable with the results of the BroadcastEvent.serialize() function from api/models.py
"""
return cls.from_content(
broadcast_event['transmitted_content']['body']
)
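    # Illustrative usage sketch, not part of the original library; the event
    # dict below is a hypothetical example of the serialised structure that
    # from_event() expects:
    #
    #   template = BroadcastMessageTemplate.from_event(
    #       {'transmitted_content': {'body': 'Flooding is expected in your area'}}
    #   )
    #   str(template)  # GSM-encoded, whitespace-normalised broadcast body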
def __str__(self):
return Take(Field(
self.content.strip(),
self.values,
html='escape',
)).then(
sms_encode
).then(
remove_whitespace_before_punctuation
).then(
normalise_whitespace_and_newlines
).then(
normalise_multiple_newlines
)
class SubjectMixin():
def __init__(
self,
template,
values=None,
**kwargs
):
self._subject = template['subject']
super().__init__(template, values, **kwargs)
@property
def subject(self):
return Markup(Take(Field(
self._subject,
self.values,
html='escape',
redact_missing_personalisation=self.redact_missing_personalisation,
)).then(
do_nice_typography
).then(
normalise_whitespace
))
@property
def placeholders(self):
return get_placeholders(self._subject) | super().placeholders
class BaseEmailTemplate(SubjectMixin, Template):
template_type = 'email'
@property
def html_body(self):
return Take(Field(
self.content,
self.values,
html='escape',
markdown_lists=True,
redact_missing_personalisation=self.redact_missing_personalisation,
)).then(
unlink_govuk_escaped
).then(
strip_unsupported_characters
).then(
add_trailing_newline
).then(
notify_email_markdown
).then(
do_nice_typography
)
@property
def content_size_in_bytes(self):
return len(self.content_with_placeholders_filled_in.encode("utf8"))
def is_message_too_long(self):
"""
SES rejects email messages bigger than 10485760 bytes (just over 10 MB per message (after base64 encoding)):
https://docs.aws.amazon.com/ses/latest/DeveloperGuide/quotas.html#limits-message
Base64 is apparently wasteful because we use just 64 different values per byte, whereas a byte can represent
256 different characters. That is, we use bytes (which are 8-bit words) as 6-bit words. There is
a waste of 2 bits for each 8 bits of transmission data. To send three bytes of information
(3 times 8 is 24 bits), you need to use four bytes (4 times 6 is again 24 bits). Thus the base64 version
of a file is 4/3 larger than it might be. So we use 33% more storage than we could.
https://lemire.me/blog/2019/01/30/what-is-the-space-overhead-of-base64-encoding/
That brings down our max safe size to 7.5 MB == 7500000 bytes before base64 encoding
But this is not the end! The message we send to SES is structured as follows:
"Message": {
'Subject': {
'Data': subject,
},
'Body': {'Text': {'Data': body}, 'Html': {'Data': html_body}}
},
Which means that we are sending the contents of email message twice in one request: once in plain text
and once with html tags. That means our plain text content needs to be much shorter to make sure we
fit within the limit, especially since HTML body can be much byte-heavier than plain text body.
        Hence, we decided to put the limit at 1MB, which is equivalent to between 250 and 500 pages of text.
        That's still an extremely long email, and should be sufficient for all normal use, while at the same
        time giving us a safe margin while sending the emails through Amazon SES.
EDIT: putting size up to 2MB as GOV.UK email digests are hitting the limit.
"""
return self.content_size_in_bytes > 2000000
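    # Worked size arithmetic, added here for illustration only (not part of the
    # original library): a 7,500,000-byte body grows to 7,500,000 * 4 / 3 ==
    # 10,000,000 bytes after base64 encoding, which stays under SES's
    # 10,485,760-byte cap. Because the body is sent twice (plain text + HTML),
    # the plain-text limit enforced above is the far more conservative
    # 2,000,000 bytes.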
class PlainTextEmailTemplate(BaseEmailTemplate):
def __str__(self):
return Take(Field(
self.content, self.values, html='passthrough', markdown_lists=True
)).then(
unlink_govuk_escaped
).then(
strip_unsupported_characters
).then(
add_trailing_newline
).then(
notify_plain_text_email_markdown
).then(
do_nice_typography
).then(
unescape
).then(
strip_leading_whitespace
).then(
add_trailing_newline
)
@property
def subject(self):
return Markup(Take(Field(
self._subject,
self.values,
html='passthrough',
redact_missing_personalisation=self.redact_missing_personalisation
)).then(
do_nice_typography
).then(
normalise_whitespace
))
class HTMLEmailTemplate(BaseEmailTemplate):
jinja_template = template_env.get_template('email_template.jinja2')
PREHEADER_LENGTH_IN_CHARACTERS = 256
def __init__(
self,
template,
values=None,
govuk_banner=True,
complete_html=True,
brand_logo=None,
brand_text=None,
brand_colour=None,
brand_banner=False,
brand_name=None
):
super().__init__(template, values)
self.govuk_banner = govuk_banner
self.complete_html = complete_html
self.brand_logo = brand_logo
self.brand_text = brand_text
self.brand_colour = brand_colour
self.brand_banner = brand_banner
self.brand_name = brand_name
@property
def preheader(self):
return " ".join(Take(Field(
self.content,
self.values,
html='escape',
markdown_lists=True,
)).then(
unlink_govuk_escaped
).then(
strip_unsupported_characters
).then(
add_trailing_newline
).then(
notify_email_preheader_markdown
).then(
do_nice_typography
).split())[:self.PREHEADER_LENGTH_IN_CHARACTERS].strip()
def __str__(self):
return self.jinja_template.render({
'subject': self.subject,
'body': self.html_body,
'preheader': self.preheader,
'govuk_banner': self.govuk_banner,
'complete_html': self.complete_html,
'brand_logo': self.brand_logo,
'brand_text': self.brand_text,
'brand_colour': self.brand_colour,
'brand_banner': self.brand_banner,
'brand_name': self.brand_name
})
class EmailPreviewTemplate(BaseEmailTemplate):
jinja_template = template_env.get_template('email_preview_template.jinja2')
def __init__(
self,
template,
values=None,
from_name=None,
from_address=None,
reply_to=None,
show_recipient=True,
redact_missing_personalisation=False,
):
super().__init__(template, values, redact_missing_personalisation=redact_missing_personalisation)
self.from_name = from_name
self.from_address = from_address
self.reply_to = reply_to
self.show_recipient = show_recipient
def __str__(self):
return Markup(self.jinja_template.render({
'body': self.html_body,
'subject': self.subject,
'from_name': escape_html(self.from_name),
'from_address': self.from_address,
'reply_to': self.reply_to,
'recipient': Field("((email address))", self.values, with_brackets=False),
'show_recipient': self.show_recipient
}))
@property
def subject(self):
return Take(Field(
self._subject,
self.values,
html='escape',
redact_missing_personalisation=self.redact_missing_personalisation
)).then(
do_nice_typography
).then(
normalise_whitespace
)
class BaseLetterTemplate(SubjectMixin, Template):
template_type = 'letter'
address_block = '\n'.join(
f'(({line.replace("_", " ")}))' for line in address_lines_1_to_7_keys
)
def __init__(
self,
template,
values=None,
contact_block=None,
admin_base_url='http://localhost:6012',
logo_file_name=None,
redact_missing_personalisation=False,
date=None,
):
self.contact_block = (contact_block or '').strip()
super().__init__(template, values, redact_missing_personalisation=redact_missing_personalisation)
self.admin_base_url = admin_base_url
self.logo_file_name = logo_file_name
self.date = date or datetime.utcnow()
@property
def subject(self):
return Take(Field(
self._subject,
self.values,
redact_missing_personalisation=self.redact_missing_personalisation,
html='escape',
)).then(
do_nice_typography
).then(
normalise_whitespace
)
@property
def placeholders(self):
return get_placeholders(self.contact_block) | super().placeholders
@property
def postal_address(self):
return PostalAddress.from_personalisation(Columns(self.values))
@property
def _address_block(self):
if self.postal_address.has_enough_lines and not self.postal_address.has_too_many_lines:
return self.postal_address.normalised_lines
if 'address line 7' not in self.values and 'postcode' in self.values:
self.values['address line 7'] = self.values['postcode']
return Field(
self.address_block,
self.values,
html='escape',
with_brackets=False,
).splitlines()
@property
def _contact_block(self):
return Take(Field(
'\n'.join(
line.strip()
for line in self.contact_block.split('\n')
),
self.values,
redact_missing_personalisation=self.redact_missing_personalisation,
html='escape',
)).then(
remove_whitespace_before_punctuation
).then(
nl2br
)
@property
def _date(self):
return self.date.strftime('%-d %B %Y')
@property
def _message(self):
return Take(Field(
self.content,
self.values,
html='escape',
markdown_lists=True,
redact_missing_personalisation=self.redact_missing_personalisation,
)).then(
add_trailing_newline
).then(
notify_letter_preview_markdown
).then(
do_nice_typography
).then(
replace_hyphens_with_non_breaking_hyphens
)
class LetterPreviewTemplate(BaseLetterTemplate):
jinja_template = template_env.get_template('letter_pdf/preview.jinja2')
def __str__(self):
return Markup(self.jinja_template.render({
'admin_base_url': self.admin_base_url,
'logo_file_name': self.logo_file_name,
# logo_class should only ever be None, svg or png
'logo_class': self.logo_file_name.lower()[-3:] if self.logo_file_name else None,
'subject': self.subject,
'message': self._message,
'address': self._address_block,
'contact_block': self._contact_block,
'date': self._date,
}))
class LetterPrintTemplate(LetterPreviewTemplate):
jinja_template = template_env.get_template('letter_pdf/print.jinja2')
class LetterImageTemplate(BaseLetterTemplate):
jinja_template = template_env.get_template('letter_image_template.jinja2')
first_page_number = 1
allowed_postage_types = (
Postage.FIRST,
Postage.SECOND,
Postage.EUROPE,
Postage.REST_OF_WORLD,
)
def __init__(
self,
template,
values=None,
image_url=None,
page_count=None,
contact_block=None,
postage=None,
):
super().__init__(template, values, contact_block=contact_block)
if not image_url:
raise TypeError('image_url is required')
if not page_count:
raise TypeError('page_count is required')
if postage not in [None] + list(self.allowed_postage_types):
raise TypeError('postage must be None, {}'.format(formatted_list(
self.allowed_postage_types,
conjunction='or',
before_each='\'',
after_each='\'',
)))
self.image_url = image_url
self.page_count = int(page_count)
self._postage = postage
@property
def postage(self):
if self.postal_address.international:
return self.postal_address.postage
return self._postage
@property
def last_page_number(self):
return min(self.page_count, LETTER_MAX_PAGE_COUNT) + self.first_page_number
@property
def page_numbers(self):
return list(range(self.first_page_number, self.last_page_number))
@property
def postage_description(self):
return {
Postage.FIRST: 'first class',
Postage.SECOND: 'second class',
Postage.EUROPE: 'international',
Postage.REST_OF_WORLD: 'international',
}.get(self.postage)
@property
def postage_class_value(self):
return {
Postage.FIRST: 'letter-postage-first',
Postage.SECOND: 'letter-postage-second',
Postage.EUROPE: 'letter-postage-international',
Postage.REST_OF_WORLD: 'letter-postage-international',
}.get(self.postage)
def __str__(self):
return Markup(self.jinja_template.render({
'image_url': self.image_url,
'page_numbers': self.page_numbers,
'address': self._address_block,
'contact_block': self._contact_block,
'date': self._date,
'subject': self.subject,
'message': self._message,
'show_postage': bool(self.postage),
'postage_description': self.postage_description,
'postage_class_value': self.postage_class_value,
}))
def get_sms_fragment_count(character_count, non_gsm_characters):
if non_gsm_characters:
return 1 if character_count <= 70 else math.ceil(float(character_count) / 67)
else:
return 1 if character_count <= 160 else math.ceil(float(character_count) / 153)
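# Illustrative fragment counts, not part of the original module (the values
# follow directly from the thresholds above):
#   get_sms_fragment_count(160, set()) == 1    # fits in a single GSM fragment
#   get_sms_fragment_count(161, set()) == 2    # math.ceil(161 / 153)
#   get_sms_fragment_count(70, {u'\u0175'}) == 1   # UCS-2, single fragment
#   get_sms_fragment_count(71, {u'\u0175'}) == 2   # math.ceil(71 / 67)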
def non_gsm_characters(content):
"""
    Returns a set of all the non-GSM characters in a text. This doesn't include characters that we will downgrade (e.g.
    emoji, ellipsis, ñ, etc.). It only includes Welsh non-GSM characters that will force the entire SMS to be encoded
    with UCS-2.
"""
return set(content) & set(SanitiseSMS.WELSH_NON_GSM_CHARACTERS)
def count_extended_gsm_chars(content):
return sum(
map(content.count, SanitiseSMS.EXTENDED_GSM_CHARACTERS)
)
def do_nice_typography(value):
return Take(
value
).then(
remove_whitespace_before_punctuation
).then(
make_quotes_smart
).then(
remove_smart_quotes_from_email_addresses
).then(
replace_hyphens_with_en_dashes
)
@lru_cache(maxsize=1024)
def get_placeholders(content):
return Field(content).placeholders
| mit | -2,500,035,368,561,753,600 | 30.313384 | 120 | 0.59975 | false | 4.16575 | false | false | false |
maciekswat/Twedit | Plugins/CC3DMLHelper/adhesionflexdlg.py | 1 | 2715 | import re
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import PyQt4.QtCore as QtCore
import ui_adhesionflexdlg
import sys
import string
MAC = "qt_mac_set_native_menubar" in dir()
class AdhesionFlexDlg(QDialog,ui_adhesionflexdlg.Ui_AdhesionFlexDlg):
#signals
# gotolineSignal = QtCore.pyqtSignal( ('int',))
def __init__(self,_currentEditor=None,parent=None):
super(AdhesionFlexDlg, self).__init__(parent)
self.editorWindow=parent
self.setupUi(self)
if not MAC:
self.cancelPB.setFocusPolicy(Qt.NoFocus)
self.updateUi()
def keyPressEvent(self, event):
molecule=str(self.afMoleculeLE.text())
molecule=string.rstrip(molecule)
if event.key()==Qt.Key_Return :
if molecule!="":
self.on_afMoleculeAddPB_clicked()
event.accept()
@pyqtSignature("") # signature of the signal emited by the button
def on_afMoleculeAddPB_clicked(self):
molecule=str(self.afMoleculeLE.text())
molecule=string.rstrip(molecule)
rows=self.afTable.rowCount()
if molecule =="":
return
        # check if a molecule with this name already exists
moleculeAlreadyExists=False
for rowId in range(rows):
name=str(self.afTable.item(rowId,0).text())
name=string.rstrip(name)
if name==molecule:
moleculeAlreadyExists=True
break
if moleculeAlreadyExists:
            QMessageBox.warning(self,"Molecule Name Already Exists","Molecule name already exists. Please choose a different name",QMessageBox.Ok)
return
self.afTable.insertRow(rows)
moleculeItem=QTableWidgetItem(molecule)
self.afTable.setItem (rows,0, moleculeItem)
# reset molecule entry line
self.afMoleculeLE.setText("")
return
@pyqtSignature("") # signature of the signal emited by the button
def on_clearAFTablePB_clicked(self):
rows=self.afTable.rowCount()
for i in range (rows-1,-1,-1):
self.afTable.removeRow(i)
def extractInformation(self):
adhDict={}
for row in range(self.afTable.rowCount()):
molecule=str(self.afTable.item(row,0).text())
adhDict[row]=molecule
return adhDict,str(self.bindingFormulaLE.text())
def updateUi(self):
self.afTable.horizontalHeader().setStretchLastSection(True)
| gpl-3.0 | 7,394,871,278,212,216,000 | 31.518519 | 143 | 0.579742 | false | 4.151376 | false | false | false |
rochefort-lab/fissa | fissa/roitools.py | 1 | 14357 | """
Functions used for ROI manipulation.
Authors:
- Sander W Keemink <[email protected]>
"""
from __future__ import division
import numpy as np
from past.builtins import basestring
from skimage.measure import find_contours
try:
from collections import abc
except ImportError:
import collections as abc
from .polygons import poly2mask
from .readimagejrois import read_imagej_roi_zip
def get_mask_com(mask):
"""
Get the center of mass for a boolean mask.
Parameters
----------
mask : :term:`array_like`
A two-dimensional boolean-mask.
Returns
-------
x : float
Center of mass along first dimension.
y : float
Center of mass along second dimension.
"""
# Ensure array_like input is a numpy.ndarray
mask = np.asarray(mask)
if mask.ndim != 2:
raise ValueError(
"Mask must be two-dimensional. Received input with {} dimensions"
"".format(mask.ndim)
)
# TODO: make this work for non-boolean masks too
x, y = mask.nonzero()
return np.mean(x), np.mean(y)
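# Illustrative example, not part of the original module: for a mask that is
# True only at the four corners of a 3x3 grid, the centre of mass is the
# middle pixel.
#   >>> m = np.zeros((3, 3), dtype=bool)
#   >>> m[[0, 0, 2, 2], [0, 2, 0, 2]] = True
#   >>> get_mask_com(m)
#   (1.0, 1.0)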
def split_npil(mask, centre, num_slices, adaptive_num=False):
"""
Split a mask into approximately equal slices by area around its center.
Parameters
----------
mask : :term:`array_like`
Mask as a 2d boolean array.
centre : tuple
The center co-ordinates around which the mask will be split.
num_slices : int
The number of slices into which the mask will be divided.
adaptive_num : bool, optional
If ``True``, the `num_slices` input is treated as the number of
slices to use if the ROI is surrounded by valid pixels, and
automatically reduces the number of slices if it is on the
boundary of the sampled region.
Returns
-------
masks : list
A list with `num_slices` many masks, each of which is a 2d
boolean numpy array.
"""
# TODO: This should yield an iterable instead.
# Ensure array_like input is a numpy.ndarray
mask = np.asarray(mask)
# Get the (x,y) co-ordinates of the pixels in the mask
x, y = mask.nonzero()
if x.size == 0 or y.size == 0:
raise ValueError("ROI mask must be not be empty")
# Find the angle of the vector from the mask centre to each pixel
theta = np.arctan2(x - centre[0], y - centre[1])
# Find where the mask comes closest to the centre. We will put a
# slice boundary here, to prevent one slice being non-contiguous
# for masks near the image boundary.
# TODO: give it the bins to use
n_bins = 20
n_bins = min(n_bins, len(mask))
bins = np.linspace(-np.pi, np.pi, n_bins + 1)
bin_counts, bins = np.histogram(theta, bins=bins)
bin_min_index = np.argmin(bin_counts)
if adaptive_num:
# Change the number of slices we will used based on the
# proportion of these bins which are empty
num_slices = round(num_slices * sum(bin_counts > 0) / n_bins)
num_slices = max(1, num_slices)
# Ensure num_slices is an integer number
num_slices = int(num_slices)
if num_slices < 1:
raise ValueError("Number of slices must be positive")
# Change theta so it is the angle relative to a new zero-point,
# the middle of the bin which is least populated by mask pixels.
theta_offset = bins[bin_min_index] + np.pi / n_bins
theta = (theta - theta_offset) % (2 * np.pi) - np.pi
# get the boundaries
bounds = [
np.percentile(theta, 100.0 * (i + 1) / num_slices) for i in range(num_slices)
]
# predefine the masks
masks = []
# get the first mask
# empty predefinition
mask = np.zeros(np.shape(mask), dtype=bool)
# set relevant pixels to True
mask[x[theta <= bounds[0]], y[theta <= bounds[0]]] = True
masks.append(mask)
# get the rest of the masks
for i in range(1, num_slices):
# find which pixels are within bounds
truths = (theta > bounds[i - 1]) * (theta <= bounds[i])
# empty predefinition
mask = np.zeros(np.shape(mask), dtype=bool)
# set relevant pixels to True
mask[x[truths], y[truths]] = True
masks.append(mask)
return masks
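# Illustrative example, not part of the original module: splitting a filled
# square mask around its centre of mass gives num_slices non-overlapping masks
# that together cover every pixel of the original.
#   >>> mask = np.ones((8, 8), dtype=bool)
#   >>> slices = split_npil(mask, get_mask_com(mask), num_slices=4)
#   >>> len(slices), sum(s.sum() for s in slices)
#   (4, 64)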
def shift_2d_array(a, shift=1, axis=0):
"""
Shift array values, without wrap around.
Parameters
----------
a : :term:`array_like`
Input array.
shift : int, optional
How much to shift array by. Default is ``1``.
axis : int, optional
The axis along which elements are shifted. Default is ``0``.
Returns
-------
out : numpy.ndarray
Array with the same shape as `a`, but shifted appropriately.
"""
# Ensure array_like input is a numpy.ndarray
a = np.asarray(a)
# do initial shift
out = np.roll(a, shift, axis)
# then fill in refilled parts of the array
if axis == 0:
if shift > 0:
out[:shift] = 0
elif shift < 0:
out[shift:] = 0
elif axis == 1:
if shift > 0:
out[:, :shift] = 0
elif shift < 0:
out[:, shift:] = 0
else:
raise ValueError("Axis must be 0 or 1, but {} was given.".format(axis))
# return shifted array
return out
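# Illustrative example, not part of the original module: shifting down one row
# along axis 0 discards the wrapped-around values instead of rolling them.
#   >>> shift_2d_array(np.array([[1, 2], [3, 4]]), shift=1, axis=0)
#   array([[0, 0],
#          [1, 2]])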
def get_npil_mask(mask, totalexpansion=4):
"""
Given the masks for a ROI, find the surrounding neuropil.
Our implementation is as follows:
- On even iterations (where indexing begins at zero), expand
the mask in each of the 4 cardinal directions.
- On odd numbered iterations, expand the mask in each of the 4
diagonal directions.
This procedure generates a neuropil whose shape is similar to the
shape of the input ROI mask.
Parameters
----------
mask : :term:`array_like`
The reference ROI mask to expand the neuropil from. The array
should contain only boolean values.
totalexpansion : float, optional
How much larger to make the neuropil total area than mask area.
Default is ``4``.
Returns
-------
grown_mask : numpy.ndarray
A boolean numpy.ndarray mask, where the region surrounding
the input is now ``True`` and the region of the input mask is
``False``.
Note
----
For fixed number of `iterations`, squarer input masks will have
larger output neuropil masks.
"""
# Ensure array_like input is a numpy.ndarray
mask = np.asarray(mask)
# Make a copy of original mask which will be grown
grown_mask = np.copy(mask)
area_orig = grown_mask.sum() # original area
area_current = 0 # current size
shpe = np.shape(mask)
area_total = shpe[0] * shpe[1]
count = 0
# for count in range(iterations):
while (
area_current < totalexpansion * area_orig
and area_current < area_total - area_orig
):
# Check which case to use. In current version, we alternate
# between case 0 (cardinals) and case 1 (diagonals).
case = count % 2
# Make a copy of the mask without any new additions. We will
# need to keep using this mask to mark new changes, so we
# don't use a partially updated version.
refmask = np.copy(grown_mask)
if False: # case == 2: # Not currently used
# Move polygon around one pixel in each 8 directions
# N, NE, E, SE, S, SW, W, NW, (the centre is also redone)
for dx in [-1, 0, 1]:
for dy in [-1, 0, 1]:
movedmask = shift_2d_array(refmask, dx, 0)
movedmask = shift_2d_array(movedmask, dy, 1)
grown_mask[movedmask] = True
elif case == 0:
# Move polygon around one pixel in each of the 4 cardinal
# directions: N, E, S, W.
for dx in [-1, 1]:
grown_mask[shift_2d_array(refmask, dx, 0)] = True
for dy in [-1, 1]:
grown_mask[shift_2d_array(refmask, dy, 1)] = True
elif case == 1:
# Move polygon around one pixel in each of the 4 diagonal
# directions: NE, SE, SW, NW
for dx in [-1, 1]:
for dy in [-1, 1]:
movedmask = shift_2d_array(refmask, dx, 0)
movedmask = shift_2d_array(movedmask, dy, 1)
grown_mask[movedmask] = True
# update area
area_current = grown_mask.sum() - area_orig
# iterate counter
count += 1
# Remove original mask from the neuropil mask
grown_mask[mask] = False
# Return the finished neuropil mask
return grown_mask
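# Illustrative usage sketch, not part of the original module; the mask shape
# and expansion factor are hypothetical.
#   >>> cell = np.zeros((50, 50), dtype=bool)
#   >>> cell[20:30, 20:30] = True
#   >>> npil = get_npil_mask(cell, totalexpansion=4)
#   >>> bool(np.any(npil & cell))  # the neuropil never overlaps the ROI itself
#   False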
def getmasks_npil(cellMask, nNpil=4, expansion=1):
"""
Generate neuropil masks using :func:`get_npil_mask` function.
Parameters
----------
cellMask : :term:`array_like`
The cell mask (boolean 2d arrays).
nNpil : int, optional
Number of neuropil subregions. Default is ``4``.
expansion : float, optional
Area of each neuropil region, relative to the area of `cellMask`.
Default is ``1``.
Returns
-------
masks_split : list
Returns a list with soma and neuropil masks (boolean 2d arrays).
"""
# Ensure array_like input is a numpy.ndarray
cellMask = np.asarray(cellMask)
# get the total neuropil for this cell
mask = get_npil_mask(cellMask, totalexpansion=expansion * nNpil)
# get the center of mass for the cell
centre = get_mask_com(cellMask)
# split it up in nNpil neuropils
masks_split = split_npil(mask, centre, nNpil)
return masks_split
def readrois(roiset):
"""
Read ImageJ rois from a roiset zipfile.
We ensure that the third dimension (i.e. frame number) is always zero.
Parameters
----------
roiset : str
Path to a roiset zipfile.
Returns
-------
rois : list
The ROIs (regions of interest) from within roiset, as polygons
describing the outline of each ROI.
"""
# read rois
rois = read_imagej_roi_zip(roiset)
# set frame number to 0 for every roi
for i in range(len(rois)):
if "polygons" in rois[i]:
rois[i] = rois[i]["polygons"][:, :2]
# check if we are looking at an oval roi
elif "mask" in rois[i]:
# this is an oval roi, which gets imported as a 3D mask.
# First get the frame that has the mask in it by finding the
# nonzero frame
mask_frame = np.nonzero(rois[i]["mask"])[0][0]
# get the mask
mask = rois[i]["mask"][mask_frame, :, :]
# finally, get the outline coordinates
rois[i] = find_roi_edge(mask)[0]
else:
raise ValueError(
"ROI #{} contains neither a polygon nor mask representation"
" of the region of interest."
"".format(i)
)
return rois
def getmasks(rois, shpe):
"""
Get the masks for the specified rois.
Parameters
----------
rois : :term:`array_like`
List of roi coordinates. Each roi coordinate should be a 2d-array
or equivalent list. i.e.:
``roi = [[0, 0], [0, 1], [1, 1], [1, 0]]``
or
``roi = np.array([[0, 0], [0, 1], [1, 1], [1, 0]])``
i.e. a n by 2 array, where n is the number of coordinates.
If a 2 by n array is given, this will be transposed.
shpe : :term:`array_like`
Shape of underlying image ``(width, height)``.
Returns
-------
masks : :term:`list` of :class:`numpy.ndarray`
List of masks for each ROI in `rois`.
"""
# get number of rois
nrois = len(rois)
# start empty mask list
masks = []
for i in range(nrois):
# transpose if array of 2 by n
if np.asarray(rois[i]).shape[0] == 2:
rois[i] = np.asarray(rois[i]).T
# transform current roi to mask
mask = poly2mask(rois[i], shpe)
# store in list
masks.append(np.array(mask[0].todense()))
return masks
def find_roi_edge(mask):
"""
Find the outline of a mask.
Uses :func:`skimage.measure.find_contours`.
Parameters
----------
mask : :term:`array_like`
The mask, as a binary array.
Returns
-------
outline : :term:`list` of (n,2)-:class:`~numpy.ndarray`
Array with coordinates of pixels in the outline of the mask.
See Also
--------
skimage.measure.find_contours
"""
# Ensure array_like input is a numpy.ndarray
mask = np.asarray(mask)
# Pad with 0s to make sure that edge ROIs are properly estimated
mask_shape = np.shape(mask)
padded_shape = (mask_shape[0] + 2, mask_shape[1] + 2)
padded_mask = np.zeros(padded_shape)
padded_mask[1:-1, 1:-1] = mask
# detect contours
outline = find_contours(padded_mask, level=0.5)
# update coordinates to take into account padding and set so that the
# coordinates are defined from the corners (as in the mask2poly function
# in SIMA https://github.com/losonczylab/sima/blob/master/sima/ROI.py)
for i in range(len(outline)):
outline[i] -= 0.5
return outline
def rois2masks(rois, shape):
"""
Convert ROIs into a list of binary masks.
Parameters
----------
rois : str or list of array_like
Either a string containing a path to an ImageJ roi zip file,
or a list of arrays encoding polygons, or list of binary arrays
representing masks.
shape : array_like
Image shape as a length 2 vector.
Returns
-------
masks : list of numpy.ndarray
List of binary arrays.
"""
# If it's a string, parse the string
if isinstance(rois, basestring):
rois = readrois(rois)
if not isinstance(rois, abc.Sequence):
raise TypeError(
"Wrong ROIs input format: expected a list or sequence, but got"
" a {}".format(rois.__class__)
)
# If it's a something by 2 array (or vice versa), assume polygons
if np.shape(rois[0])[1] == 2 or np.shape(rois[0])[0] == 2:
return getmasks(rois, shape)
# If it's a list of bigger arrays, assume masks
elif np.shape(rois[0]) == shape:
return rois
raise ValueError("Wrong ROIs input format: unfamiliar shape.")
| gpl-3.0 | -5,895,978,743,753,075,000 | 28.724638 | 85 | 0.594135 | false | 3.746608 | false | false | false |
mefly2012/platform | src/clean_validate/qylogo.py | 1 | 3122 | # -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from common import public
class qylogo():
"""中标"""
need_check_ziduan = [
u'key',
u'_id',
u'data_source',
u'bbd_version',
u'bbd_url',
u'rawdata',
u'bbd_uptime',
u'company_full_name',
u'source',
u'company_short',
u'uuid',
u'retain1',
u'retain2',
u'company_logo',
u'bbd_dotime'
]
def check_key(self, indexstr, ustr):
"""key 清洗验证"""
ret = None
return ret
def check__id(self, indexstr, ustr):
"""_id 清洗验证"""
ret = None
return ret
def check_data_source(self, indexstr, ustr):
"""datasource 清洗验证"""
ret = None
if ustr and len(ustr.strip()):
if ustr not in (u'猎聘', u'拉勾'):
ret = u'不是指定字段'
else:
ret = u'为空'
return ret
def check_bbd_version(self, indexstr, ustr):
"""version 清洗验证"""
ret = None
if ustr and len(ustr.strip()):
if not public.all_num(ustr):
ret = u'不是全数字'
return ret
def check_bbd_url(self, indexstr, ustr):
"""url 清洗验证"""
ret = None
return ret
def check_rawdata(self, indexstr, ustr):
"""rawdata 清洗验证"""
ret = None
return ret
def check_bbd_uptime(self, indexstr, ustr):
"""uptime 清洗验证"""
ret = None
return ret
def check_company_full_name(self, indexstr, ustr):
"""企业全称 清洗验证"""
ret = None
if ustr and len(ustr.strip()):
ret = None
# if not public.has_count_hz(ustr, 2):
# ret = u'没有两个以上汉字'
else:
ret = u'为空'
return ret
def check_source(self, indexstr, ustr):
"""Source 清洗验证"""
ret = None
if ustr and len(ustr.strip()):
if ustr not in (u'猎聘', u'拉勾'):
ret = u'不是指定字段'
else:
ret = u'为空'
return ret
def check_company_short(self, indexstr, ustr):
"""企业简称 清洗验证"""
ret = None
return ret
def check_uuid(self, indexstr, ustr):
"""uuid 清洗验证"""
ret = None
return ret
def check_retain1(self, indexstr, ustr):
"""retain1 清洗验证"""
ret = None
return ret
def check_retain2(self, indexstr, ustr):
"""retain2 清洗验证"""
ret = None
return ret
def check_company_logo(self, indexstr, ustr):
"""企业logo 清洗验证"""
ret = None
return ret
def check_bbd_dotime(self, indexstr, ustr):
"""do_time 清洗验证"""
ret = None
if ustr and len(ustr):
if not public.bbd_dotime_date_format(ustr):
ret = u"不合法日期"
return ret
| apache-2.0 | 3,653,252,823,672,933,400 | 20.25 | 55 | 0.483737 | false | 2.931034 | false | false | false |
Mir4r/ProjectMimir | MTFconfig.py | 1 | 2323 | #Collection of operations on the MTF config
#2015, K Schweiger
import os
import sys
import sharedfunctions
# Returns a dict mapping config keys (e.g. "printspec=") to their parsed values:
# comma-separated entries become lists, plain numeric entries become ints.
def configreader(workdir):
lines = sharedfunctions.readFile(workdir, 'MTF.cfg')
config = {}
for line in lines:
        # Ignore lines beginning with # or empty lines
        # Define keys and separators for config elements
configkeys = {"printspec=" : [",", "str"],
"showspec=" : [",", "str"],
"numtoprint=" : [None, "int"],
"maxnamelen=" : [None, "int"],
"openedacc=" : [None, "str"],
"termwidth=" : [None, "int"],
"termheight=" : [None, "int"],
"nGenre=" : [None, "int"],
"genrePriority=" : [",", "str"],
"invisibleGenre=": [",", "str"]}
if len(line) > 0 and line[0] != "#":
for key in configkeys:
if line.startswith(key):
if configkeys[key][0] is not None:
config.update({key : line[len(key)::].split(configkeys[key][0])})
else:
if configkeys[key][1] is "str":
config.update({key : str(line[len(key)::])})
elif configkeys[key][1] is "int":
config.update({key : int(line[len(key)::])})
return config
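# Illustrative sketch, not part of the original module: a config line such as
# "printspec=name,genre,year" ends up as
#   config["printspec="] == ["name", "genre", "year"]
# while "numtoprint=25" ends up as config["numtoprint="] == 25.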
def getconfigpart(workdir, cfg):
config = configreader(workdir)
if cfg == "SpecsToPrint":
return config["printspec="]
elif cfg == "SpecsToShow":
return config["showspec="]
elif cfg == "NumToPrint":
return config["numtoprint="]
elif cfg == "MaxNameLen":
return config["maxnamelen="]
elif cfg == "DateAcc":
return config["openedacc="]
elif cfg == "GenrePriority":
return config["genrePriority="]
elif cfg == "NumberofGenres":
return config["nGenre="]
elif cfg == "TerminalWidth":
return config["termwidth="]
elif cfg == "TerminalHeight":
return config["termheight="]
elif cfg == "InvisibleGenres":
return config["invisibleGenre="]
| mit | 5,431,476,628,116,495,000 | 37.081967 | 89 | 0.50409 | false | 4.032986 | true | false | false |
PalNilsson/pilot2 | pilot/user/atlas/resource/manytoone.py | 1 | 2447 | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Paul Nilsson, [email protected], 2019
import os
# from pilot.util.container import execute
from pilot.common.errorcodes import ErrorCodes
import logging
logger = logging.getLogger(__name__)
errors = ErrorCodes()
def verify_setup_command(cmd):
"""
Verify the setup command.
:param cmd: command string to be verified (string).
:return: pilot error code (int), diagnostics (string).
"""
ec = 0
diagnostics = ""
return ec, diagnostics
def get_setup_command(job, prepareasetup):
"""
Return the path to asetup command, the asetup command itself and add the options (if desired).
If prepareasetup is False, the function will only return the path to the asetup script. It is then assumed
to be part of the job parameters.
Handle the case where environmental variables are set -
HARVESTER_CONTAINER_RELEASE_SETUP_FILE, HARVESTER_LD_LIBRARY_PATH, HARVESTER_PYTHONPATH
This will create the string need for the pilot to execute to setup the environment.
:param job: job object.
:param prepareasetup: not used.
:return: setup command (string).
"""
cmd = ""
# return immediately if there is no release or if user containers are used
if job.swrelease == 'NULL' or '--containerImage' in job.jobparams:
logger.debug('get_setup_command return value: {}'.format(str(cmd)))
return cmd
# test if environmental variable HARVESTER_CONTAINER_RELEASE_SETUP_FILE is defined
setupfile = os.environ.get('HARVESTER_CONTAINER_RELEASE_SETUP_FILE', '')
if setupfile != "":
cmd = "source {};".format(setupfile)
# test if HARVESTER_LD_LIBRARY_PATH is defined
if os.environ.get('HARVESTER_LD_LIBRARY_PATH', '') != "":
cmd += "export LD_LIBRARY_PATH=$HARVESTER_LD_LIBRARY_PATH:$LD_LIBRARY_PATH;"
# test if HARVESTER_PYTHONPATH is defined
if os.environ.get('HARVESTER_PYTHONPATH', '') != "":
cmd += "export PYTHONPATH=$HARVESTER_PYTHONPATH:$PYTHONPATH;"
#unset FRONTIER_SERVER variable
cmd += "unset FRONTIER_SERVER"
logger.debug('get_setup_command return value: {}'.format(str(cmd)))
return cmd
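# Illustrative sketch, not part of the original module; the path below is
# hypothetical. For a job with a real software release and no container image,
# setting HARVESTER_CONTAINER_RELEASE_SETUP_FILE=/cvmfs/setup.sh (and no other
# HARVESTER_* variables) yields:
#   "source /cvmfs/setup.sh;unset FRONTIER_SERVER"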
| apache-2.0 | 2,694,031,994,876,218,000 | 32.986111 | 110 | 0.682877 | false | 3.701967 | false | false | false |
google/ci_edit | app/log.py | 1 | 5614 | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import inspect
import os
import sys
import time
import traceback
import app.buffer_file
screenLog = [u"--- screen log ---"]
fullLog = [u"--- begin log ---"]
enabledChannels = {
u"meta": True,
#'mouse': True,
u"startup": True,
}
shouldWritePrintLog = False
startTime = time.time()
def get_lines():
return screenLog
def parse_lines(frame, logChannel, *args):
if not len(args):
args = [u""]
msg = str(args[0])
if 1:
msg = u"%s %s %s %s: %s" % (
logChannel,
os.path.split(frame[1])[1],
frame[2],
frame[3],
msg,
)
prior = msg
for i in args[1:]:
if not len(prior) or prior[-1] != u"\n":
msg += u" "
prior = repr(i) # unicode(i)
msg += prior
return msg.split(u"\n")
def channel_enable(logChannel, isEnabled):
global fullLog, shouldWritePrintLog
fullLog += [
u"%10s %10s: %s %r" % (u"logging", u"channel_enable", logChannel, isEnabled)
]
if isEnabled:
enabledChannels[logChannel] = isEnabled
shouldWritePrintLog = True
else:
        enabledChannels.pop(logChannel, None)
def channel(logChannel, *args):
global fullLog, screenLog
if logChannel in enabledChannels:
lines = parse_lines(inspect.stack()[2], logChannel, *args)
screenLog += lines
fullLog += lines
def caller(*args):
global fullLog, screenLog
priorCaller = inspect.stack()[2]
msg = (
u"%s %s %s"
% (os.path.split(priorCaller[1])[1], priorCaller[2], priorCaller[3]),
) + args
lines = parse_lines(inspect.stack()[1], u"caller", *msg)
screenLog += lines
fullLog += lines
def exception(e, *args):
global fullLog
lines = parse_lines(inspect.stack()[1], u"except", *args)
fullLog += lines
errorType, value, tracebackInfo = sys.exc_info()
out = traceback.format_exception(errorType, value, tracebackInfo)
for i in out:
error(i[:-1])
def check_failed(prefix, a, op, b):
stack(u"failed %s %r %s %r" % (prefix, a, op, b))
raise Exception("fatal error")
def check_ge(a, b):
if a >= b:
return
check_failed(u"check_ge", a, u">=", b)
def check_gt(a, b):
if a > b:
return
check_failed(u"check_lt", a, u"<", b)
def check_le(a, b):
if a <= b:
return
check_failed(u"check_le", a, u"<=", b)
def check_lt(a, b):
if a < b:
return
check_failed(u"check_lt", a, u"<", b)
def stack(*args):
global fullLog, screenLog
callStack = inspect.stack()[1:]
callStack.reverse()
for i, frame in enumerate(callStack):
line = [
u"stack %2d %14s %4s %s"
% (i, os.path.split(frame[1])[1], frame[2], frame[3])
]
screenLog += line
fullLog += line
if len(args):
screenLog.append(u"stack " + repr(args[0]))
fullLog.append(u"stack " + repr(args[0]))
def info(*args):
channel(u"info", *args)
def meta(*args):
"""Log information related to logging."""
channel(u"meta", *args)
def mouse(*args):
channel(u"mouse", *args)
def parser(*args):
channel(u"parser", *args)
def startup(*args):
channel(u"startup", *args)
def quick(*args):
global fullLog, screenLog
msg = str(args[0])
prior = msg
for i in args[1:]:
if not len(prior) or prior[-1] != u"\n":
msg += u" "
prior = i # unicode(i)
msg += prior
lines = msg.split(u"\n")
screenLog += lines
fullLog += lines
def debug(*args):
global fullLog, screenLog
if u"debug" in enabledChannels:
lines = parse_lines(inspect.stack()[1], u"debug_@@@", *args)
screenLog += lines
fullLog += lines
def detail(*args):
global fullLog
if u"detail" in enabledChannels:
lines = parse_lines(inspect.stack()[1], u"detail", *args)
fullLog += lines
def error(*args):
global fullLog
lines = parse_lines(inspect.stack()[1], u"error", *args)
fullLog += lines
def when(*args):
args = (time.time() - startTime,) + args
channel(u"info", *args)
def wrapper(function, shouldWrite=True):
global shouldWritePrintLog
shouldWritePrintLog = shouldWrite
r = -1
try:
try:
r = function()
except BaseException:
shouldWritePrintLog = True
errorType, value, tracebackInfo = sys.exc_info()
out = traceback.format_exception(errorType, value, tracebackInfo)
for i in out:
error(i[:-1])
finally:
flush()
return r
def write_to_file(path):
fullPath = app.buffer_file.expand_full_path(path)
with io.open(fullPath, "w+", encoding=u"UTF-8") as out:
out.write(u"\n".join(fullLog) + u"\n")
def flush():
if shouldWritePrintLog:
sys.stdout.write(u"\n".join(fullLog) + u"\n")
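if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module; it assumes the
    # module imports cleanly (app.buffer_file is importable). wrapper() runs
    # a callable and flushes the collected log to stdout when it returns.
    def _demo():
        channel_enable(u"debug", True)
        startup(u"demo starting")
        debug(u"the debug channel is now enabled")
        info(u"info lines are dropped unless the 'info' channel is enabled")
    wrapper(_demo, shouldWrite=True)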
| apache-2.0 | 4,760,439,883,177,038,000 | 22.588235 | 84 | 0.59031 | false | 3.386007 | false | false | false |
ackalker/ocrfeeder | src/ocrfeeder/feeder/documentGeneration.py | 1 | 18043 | # -*- coding: utf-8 -*-
###########################################################################
# OCRFeeder - The complete OCR suite
# Copyright (C) 2009 Joaquim Rocha
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###########################################################################
from ocrfeeder.odf.draw import Frame, TextBox, Image
from ocrfeeder.odf.opendocument import OpenDocumentText
from ocrfeeder.odf.style import Style, MasterPage, GraphicProperties, ParagraphProperties, \
TextProperties, PageLayout, PageLayoutProperties
from ocrfeeder.odf.text import P, Page, PageSequence
from pango import WEIGHT_BOLD, WEIGHT_NORMAL, STYLE_ITALIC, STYLE_NORMAL, \
STYLE_OBLIQUE
from ocrfeeder.util import TEXT_TYPE, IMAGE_TYPE, ALIGN_LEFT, ALIGN_RIGHT, ALIGN_CENTER, \
ALIGN_FILL
from ocrfeeder.util.configuration import ConfigurationManager
from ocrfeeder.util.graphics import getImagePrintSize
from ocrfeeder.util.lib import debug
from reportlab.pdfgen import canvas
from reportlab.lib import units
from reportlab.lib.utils import ImageReader, simpleSplit
import math
import os.path
import shutil
import tempfile
class DocumentGeneratorManager(object):
GENERATORS = {}
def __init__(self):
pass
def register(self, id, generator):
self.GENERATORS[id] = generator
def get(self, id):
return self.GENERATORS.get(id)
def getFormats(self):
return self.GENERATORS.keys()
class DocumentGenerator(object):
def __init__(self):
pass
def makeDocument(self):
raise NotImplementedError('Method not defined!')
def addBox(self, data_box):
if data_box.getType() == TEXT_TYPE:
self.addText(data_box)
elif data_box.getType() == IMAGE_TYPE:
self.addImage(data_box)
def addText(self, data_box):
raise NotImplementedError('Method not defined!')
def addImage(self, data_box):
raise NotImplementedError('Method not defined!')
def addBoxes(self, data_boxes):
for data_box in data_boxes:
self.addBox(data_box)
def save(self):
raise NotImplementedError('Method not defined!')
def newPage(self):
raise NotImplementedError('Method not defined!')
def convertFontStyle(self, style):
raise NotImplementedError('Method not defined!')
def convertFontWeight(self, weight):
raise NotImplementedError('Method not defined!')
def convertTextAlign(self, align):
if align == ALIGN_LEFT:
return 'left'
elif align == ALIGN_RIGHT:
return 'right'
elif align == ALIGN_CENTER:
return 'center'
elif align == ALIGN_FILL:
return 'justified'
class HtmlGenerator(DocumentGenerator):
def __init__(self, name):
self.name = name
self.document = ''
self.bodies = []
self.styles = ''
self.style_names = []
self.images = []
def addText(self, data_box):
text_lines = data_box.getText().splitlines()
new_div = '''
<div style="position: absolute; margin-left: %(x)spx; margin-top: %(y)spx;">
<p class="%(class)s">%(text)s</p>
</div>
''' % {'class': self.__handleStyle(data_box.text_data), 'text': '<br/>'.join(text_lines), 'x': data_box.x, 'y': data_box.y}
self.bodies[-1] += new_div
def addImage(self, data_box):
format = 'PNG'
image_file = tempfile.mkstemp(dir = ConfigurationManager.TEMPORARY_FOLDER,
suffix = '.' + format.lower())[1]
data_box.image.save(image_file, format = format)
self.images.append(image_file)
new_div = '''
<div style="position: absolute; margin-left: %(x)spx; margin-top: %(y)spx;">
<img src="images/%(image)s" alt="%(image)s" />
</div>
''' % {'image': os.path.basename(image_file), 'x': data_box.x, 'y': data_box.y}
self.bodies[-1] += new_div
def __handleStyle(self, text_data):
style_name = 'style%s%s%s%s%s%s%s' % (text_data.face, text_data.size, text_data.line_space,
text_data.letter_space, text_data.justification,
text_data.weight, text_data.style)
if not style_name in self.style_names:
self.style_names.append(style_name)
self.styles += '''
.%(style_name)s {
font-family: %(face)s;
font-size: %(size)spt;
font-weight: %(weight)s;
font-style: %(style)s;
text-align: %(align)s;
letter-spacing: %(letter_space)spt;
line-height: %(line_space)spt;
}
''' % {'style_name':style_name, 'face': text_data.face,
'size': text_data.size, 'weight': self.convertFontWeight(text_data.weight),
'align': text_data.justification, 'style': self.convertFontStyle(text_data.style),
'line_space': text_data.line_space, 'letter_space': text_data.letter_space}
return style_name
def convertFontStyle(self, style):
if style == STYLE_OBLIQUE:
return 'oblique'
elif style == STYLE_ITALIC:
return 'italic'
return 'normal'
def convertFontWeight(self, weight):
if weight == WEIGHT_BOLD:
return 'bold'
return 'normal'
def addPage(self, page_data):
self.bodies.append('')
self.current_page_resolution = page_data.resolution
self.addBoxes(page_data.data_boxes)
def save(self):
pages = []
for i in xrange(len(self.bodies)):
previous_page = ''
next_page = ''
if i != 0:
if i - 1 == 0:
previous_page = '<a href="index.html">«</a>'
else:
previous_page = '<a href="page%s.html">«</a>' % (i)
            if i != len(self.bodies) - 1:
next_page = '<a href="page%s.html">»</a>' % (i + 2)
pages.append('''
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>%(title)s</title>
<link rel="stylesheet" type="text/css" href="style.css" />
</head>
<body>
<div style="margin-left: auto; margin-right: auto; width: 800px; overflow: hidden;">
<div style="float: left;">
%(previous_page)s
</div>
<div style="float: right;">
%(next_page)s
</div>
</div>
<hr/>
%(body)s
</body>
</html>
''' % {'title': self.name, 'body': self.bodies[i], 'previous_page': previous_page, 'next_page': next_page}
)
if not os.path.isdir(self.name):
os.mkdir(self.name)
images_folder = os.path.join(self.name, 'images')
if not os.path.exists(images_folder):
os.mkdir(images_folder)
if pages:
file = open(os.path.join(self.name, 'index.html'), 'w')
file.write(pages[0])
file.close()
if len(pages) > 1:
for i in xrange(1, len(pages)):
file = open(os.path.join(self.name, 'page%s.html' % (i + 1)), 'w')
file.write(pages[i])
file.close()
if self.styles:
file = open(os.path.join(self.name, 'style.css'), 'w')
file.write(self.styles)
file.close()
for image in self.images:
shutil.move(image, images_folder)
class OdtGenerator(DocumentGenerator):
def __init__(self, name):
self.name = name
self.document = OpenDocumentText()
self.current_page = None
self.photo_style = Style(name="Photo", family="graphic")
self.document.styles.addElement(self.photo_style)
self.font_styles = []
self.page_layouts = []
self.page_masters = []
self.page_styles = []
self.temp_images = []
frame_style = Style(name='FrameStyle', family = 'graphic')
frame_style.addElement(GraphicProperties(borderlinewidth='none'))
self.document.styles.addElement(frame_style)
frame_style_rotated = Style(name='FrameStyleRotated', family = 'graphic')
frame_style_rotated.addElement(GraphicProperties(fill = 'none', stroke = 'none', verticalpos = 'from-top', verticalrel = 'paragraph'))
self.document.automaticstyles.addElement(frame_style_rotated)
def addText(self, data_box):
text = data_box.getText()
frame_style = Style(name='FrameStyle', family = 'graphic')
debug('Angle: ', data_box.text_data.angle)
angle = data_box.text_data.angle
if angle:
frame_style = Style(name='FrameStyleRotated', family = 'graphic')
x, y, width, height = data_box.getBoundsPrintSize(self.current_page_resolution)
frame = Frame(stylename = frame_style, width = str(width) + 'in', height = str(height) + 'in', x = str(x) + 'in', y = str(y) + 'in', anchortype = 'paragraph')
if angle:
frame.addAttribute('transform', 'rotate (%s) translate (%scm %scm)' % (abs(math.radians(angle)), x, y))
self.current_page.addElement(frame)
textbox = TextBox()
frame.addElement(textbox)
for line in text.split('\n'):
textbox.addElement(P(stylename = self.__handleFrameStyle(data_box.text_data), text = line))
def addImage(self, data_box):
format = 'PNG'
image_file = tempfile.mkstemp(dir = ConfigurationManager.TEMPORARY_FOLDER,
suffix = '.' + format)[1]
data_box.image.save(image_file, format = format)
x, y, width, height = data_box.getBoundsPrintSize(self.current_page_resolution)
photo_frame = Frame(stylename=self.photo_style, x = '%sin' % x, y = '%sin' % y, width = '%sin' % width, height = '%sin' % height, anchortype='paragraph')
self.current_page.addElement(photo_frame)
location = self.document.addPicture(image_file)
photo_frame.addElement(Image(href=location))
self.temp_images.append(image_file)
def newPage(self, page_data):
master_name = self.__handlePageMaster(page_data)
page_style_name = '%sPage' % master_name
if not page_style_name in self.page_styles:
page_style = Style(name = page_style_name, family = 'paragraph', masterpagename = master_name)
page_style.addElement(ParagraphProperties(breakbefore = 'page'))
self.document.automaticstyles.addElement(page_style)
new_page = P(stylename = page_style_name)
self.document.text.addElement(new_page)
return new_page
def addPage(self, page_data):
self.current_page = self.newPage(page_data)
self.current_page_resolution = page_data.resolution
self.addBoxes(page_data.data_boxes)
def save(self):
name = self.name
if not name.lower().endswith('.odt'):
name += '.odt'
self.document.save(name)
for image in self.temp_images:
try:
os.unlink(image)
except:
debug('Error removing image: %s' % image)
def __handlePageMaster(self, page_data):
layout_name = 'Page%s%s' % (page_data.width, page_data.height)
if not layout_name in self.page_layouts:
page_layout = PageLayout(name = layout_name)
page_layout.addElement(PageLayoutProperties(margintop = '0in', marginbottom = '0in', marginleft = '0in', marginright = '0in', pagewidth = '%sin' % page_data.width, pageheight = '%sin' % page_data.height))
self.document.automaticstyles.addElement(page_layout)
self.page_layouts.append(layout_name)
master_name = layout_name + 'Master'
if not master_name in self.page_masters:
master_page = MasterPage(name = master_name, pagelayoutname = layout_name)
self.document.masterstyles.addElement(master_page)
self.page_masters.append(master_name)
return master_name
def __handleFrameStyle(self, text_data):
style_name = 'box%s%s%s%s%s' % (text_data.face, text_data.size, text_data.line_space,
text_data.letter_space, text_data.justification)
if not style_name in self.font_styles:
frame_style = Style(name = style_name, family = 'paragraph')
frame_style.addElement(ParagraphProperties(linespacing = '%spt' % text_data.line_space, textalign = self.convertTextAlign(text_data.justification)))
frame_style.addElement(TextProperties(letterspacing = '%spt' % text_data.letter_space, fontstyle = self.convertFontStyle(text_data.style), fontweight = self.convertFontWeight(text_data.weight), fontsize = '%spt' % text_data.size, fontfamily = str(text_data.face)))
self.document.styles.addElement(frame_style)
self.font_styles.append(style_name)
return style_name
def __handleFrameStyleRotated(self, text_data):
style_name = 'box%s%s%s%s%sRotated' % (text_data.face, text_data.size, text_data.line_space,
text_data.letter_space, text_data.justification)
if not style_name in self.font_styles:
frame_style = Style(name = style_name, family = 'paragraph')
frame_style.addElement(ParagraphProperties(linespacing = '%spt' % text_data.line_space, textalign = self.convertTextAlign(text_data.justification)))
frame_style.addElement(TextProperties(letterspacing = '%spt' % text_data.letter_space, fontstyle = self.convertFontStyle(text_data.style), fontweight = self.convertFontWeight(text_data.weight), fontsize = '%spt' % text_data.size, fontfamily = str(text_data.face)))
self.document.automaticstyles.addElement(frame_style)
self.font_styles.append(style_name)
return style_name
def convertFontStyle(self, style):
if style == STYLE_OBLIQUE:
return 'oblique'
elif style == STYLE_ITALIC:
return 'italic'
return 'normal'
def convertFontWeight(self, weight):
if weight == WEIGHT_BOLD:
return 'bold'
return 'normal'
# Generates a .txt file
class PlaintextGenerator(DocumentGenerator):
def __init__(self, name):
self.name = name
self.text = ''
def addText(self, newText):
self.text += newText
def addPage(self, page):
self.addText(page.getTextFromBoxes())
def save(self):
try:
            # This will create a new file or overwrite an existing file
f = open(self.name, "w")
try:
f.write(self.text) # Write text to file
finally:
f.close() # Close the file
except IOError:
pass
class PdfGenerator(DocumentGenerator):
def __init__(self, name, from_scratch = False):
self.name = name
self._from_scratch = from_scratch
self.canvas = canvas.Canvas(self.name)
self.page_data = None
def addText(self, box):
x, y, width, height = box.getBoundsPrintSize(self.page_data.resolution)
text = self.canvas.beginText()
# Make the text transparent if we are not
# creating a PDF from scratch
if not self._from_scratch:
text.setTextRenderMode(3)
text.setTextOrigin(x * units.inch,
(self.page_data.height - y) * units.inch)
text.setCharSpace(box.text_data.letter_space)
text.setLeading(box.text_data.line_space + box.text_data.size)
text.moveCursor(0, box.text_data.size)
#todo: efficiently add the required font
self.canvas.setFontSize(box.text_data.size)
lines = simpleSplit(box.text,
self.canvas._fontname,
box.text_data.size,
box.width)
text.textLines('\n'.join(lines))
self.canvas.drawText(text)
def addImage(self, box):
# Do nothing as the images will be already
# seen in the PDF
if not self._from_scratch:
return
x, y, width, height = box.getBoundsPrintSize(self.page_data.resolution)
self.canvas.drawInlineImage(box.image,
x * units.inch,
(self.page_data.height - (y + height)) * \
units.inch,
width * units.inch,
height * units.inch)
def addPage(self, page_data):
self.canvas.setPageSize((page_data.width * units.inch,
page_data.height * units.inch))
self.page_data = page_data
# Paste the source image that users will read
# in the PDF
if not self._from_scratch:
image = ImageReader(page_data.image_path)
self.canvas.drawImage(image, 0, 0,
page_data.width * units.inch,
page_data.height * units.inch)
self.addBoxes(page_data.data_boxes)
self.canvas.showPage()
def save(self):
self.canvas.save()
manager = DocumentGeneratorManager()
manager.register('HTML', HtmlGenerator)
manager.register('ODT', OdtGenerator)
manager.register('TXT', PlaintextGenerator)
manager.register('PDF', PdfGenerator)
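if __name__ == '__main__':
    # Hedged usage sketch, not part of the original module: generators are
    # looked up in the DocumentGeneratorManager registry by their format id.
    # Only the plain text generator is exercised here because it needs no
    # page/box data from the OCRFeeder data model; the output path is merely
    # an example value.
    generator_class = DocumentGeneratorManager().get('TXT')
    generator = generator_class('/tmp/ocrfeeder_demo.txt')
    generator.addText(u'hello from the TXT generator\n')
    generator.save()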
| gpl-3.0 | 8,515,736,416,175,934,000 | 39.913832 | 276 | 0.59458 | false | 3.725583 | false | false | false |
goyoregalado/OMSTD | examples/develop/io/001/io-001-s1.py | 2 | 3626 | # -*- coding: utf-8 -*-
"""
Project name: Open Methodology for Security Tool Developers
Project URL: https://github.com/cr0hn/OMSTD
Copyright (c) 2014, cr0hn<-AT->cr0hn.com
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
__author__ = 'cr0hn - cr0hn<-at->cr0hn.com (@ggdaniel)'
# ----------------------------------------------------------------------
class Displayer:
instance = None
def __new__(cls, *args, **kwargs):
if cls.instance is None:
            cls.instance = object.__new__(cls)
cls.__initialized = False
return cls.instance
def config(self, **kwargs):
self.out_file = kwargs.get("out_file", None)
self.out_screen = kwargs.get("out_screen", True)
self.verbosity = kwargs.get("verbosity", 0)
if self.out_file:
self.out_file_handler = open(self.out_file, "w")
def display(self, message):
if self.verbosity > 0:
self.__display(message)
def display_verbosity(self, message):
if self.verbosity > 1:
self.__display(message)
def display_more_verbosity(self, message):
if self.verbosity > 2:
self.__display(message)
def __display(self, message):
if self.out_screen:
print(message)
if self.out_file_handler:
self.out_file_handler.write(message)
def __init__(self):
if not self.__initialized:
self.__initialized = True
self.out_file = None
self.out_file_handler = None
self.out_screen = True
self.verbosity = 0
# ----------------------------------------------------------------------
def hello():
"""Display a hello world text"""
# Use displayer
out = Displayer()
out.display("hello")
out.display_verbosity("hello")
# This will not be displayed by the verbosity level to 1
out.display_more_verbosity("hello")
# ----------------------------------------------------------------------
if __name__ == '__main__':
# Config displayer
d = Displayer()
d.config(out_screen=True,
out_file="~/my_log.txt",
verbosity=1)
# Call function
    hello()
| bsd-2-clause | -69,709,552,672,010,790 | 37.574468 | 755 | 0.636514 | false | 4.487624 | false | false | false |
mattjmorrison/logilab-common-clone | test/unittest_decorators.py | 1 | 4634 | # copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""unit tests for the decorators module
"""
from logilab.common.testlib import TestCase, unittest_main
from logilab.common.decorators import monkeypatch, cached, clear_cache, copy_cache
class DecoratorsTC(TestCase):
def test_monkeypatch_with_same_name(self):
class MyClass: pass
@monkeypatch(MyClass)
def meth1(self):
return 12
self.assertEqual([attr for attr in dir(MyClass) if attr[:2] != '__'],
['meth1'])
inst = MyClass()
self.assertEqual(inst.meth1(), 12)
def test_monkeypatch_with_custom_name(self):
class MyClass: pass
@monkeypatch(MyClass, 'foo')
def meth2(self, param):
return param + 12
self.assertEqual([attr for attr in dir(MyClass) if attr[:2] != '__'],
['foo'])
inst = MyClass()
self.assertEqual(inst.foo(4), 16)
def test_cannot_cache_generator(self):
def foo():
yield 42
self.assertRaises(AssertionError, cached, foo)
def test_cached_preserves_docstrings_and_name(self):
class Foo(object):
@cached
def foo(self):
""" what's up doc ? """
def bar(self, zogzog):
""" what's up doc ? """
bar = cached(bar, 1)
@cached
def quux(self, zogzog):
""" what's up doc ? """
self.assertEqual(Foo.foo.__doc__, """ what's up doc ? """)
self.assertEqual(Foo.foo.__name__, 'foo')
self.assertEqual(Foo.foo.func_name, 'foo')
self.assertEqual(Foo.bar.__doc__, """ what's up doc ? """)
self.assertEqual(Foo.bar.__name__, 'bar')
self.assertEqual(Foo.bar.func_name, 'bar')
self.assertEqual(Foo.quux.__doc__, """ what's up doc ? """)
self.assertEqual(Foo.quux.__name__, 'quux')
self.assertEqual(Foo.quux.func_name, 'quux')
def test_cached_single_cache(self):
class Foo(object):
@cached(cacheattr=u'_foo')
def foo(self):
""" what's up doc ? """
foo = Foo()
foo.foo()
self.assertTrue(hasattr(foo, '_foo'))
clear_cache(foo, 'foo')
self.assertFalse(hasattr(foo, '_foo'))
def test_cached_multi_cache(self):
class Foo(object):
@cached(cacheattr=u'_foo')
def foo(self, args):
""" what's up doc ? """
foo = Foo()
foo.foo(1)
self.assertEqual(foo._foo, {(1,): None})
clear_cache(foo, 'foo')
self.assertFalse(hasattr(foo, '_foo'))
def test_cached_keyarg_cache(self):
class Foo(object):
@cached(cacheattr=u'_foo', keyarg=1)
def foo(self, other, args):
""" what's up doc ? """
foo = Foo()
foo.foo(2, 1)
self.assertEqual(foo._foo, {2: None})
clear_cache(foo, 'foo')
self.assertFalse(hasattr(foo, '_foo'))
def test_cached_property(self):
class Foo(object):
@property
@cached(cacheattr=u'_foo')
def foo(self):
""" what's up doc ? """
foo = Foo()
foo.foo
self.assertEqual(foo._foo, None)
clear_cache(foo, 'foo')
self.assertFalse(hasattr(foo, '_foo'))
def test_copy_cache(self):
class Foo(object):
@cached(cacheattr=u'_foo')
def foo(self, args):
""" what's up doc ? """
foo = Foo()
foo.foo(1)
self.assertEqual(foo._foo, {(1,): None})
foo2 = Foo()
self.assertFalse(hasattr(foo2, '_foo'))
copy_cache(foo2, 'foo', foo)
self.assertEqual(foo2._foo, {(1,): None})
if __name__ == '__main__':
unittest_main()
| gpl-2.0 | -9,145,041,881,857,837,000 | 34.374046 | 82 | 0.56107 | false | 3.801477 | true | false | false |
LTS5/connectomeviewer | cviewer/plugins/codeoracle/actions.py | 1 | 11325 | import logging
from apptools.io.api import File
from pyface.api import FileDialog, OK
from pyface.action.api import Action
from traits.api import Any
from cviewer.plugins.text_editor.editor.text_editor import TextEditor
from cviewer.plugins.ui.preference_manager import preference_manager
# Logging imports
import logging
logger = logging.getLogger('root.'+__name__)
class NetworkVizTubes(Action):
tooltip = "Show 3D Network with Tubes"
description = "Show 3D Network with Tubes and colorcoded Nodes"
# The WorkbenchWindow the action is attached to.
window = Any()
def perform(self, event=None):
from scripts import threedviz2
import tempfile
myf = tempfile.mktemp(suffix='.py', prefix='my')
f=open(myf, 'w')
f.write(threedviz2)
f.close()
self.window.workbench.edit(File(myf), kind=TextEditor,use_existing=False)
class NetworkReport(Action):
tooltip = "Network Report"
description = "Network Report"
# The WorkbenchWindow the action is attached to.
window = Any()
def perform(self, event=None):
from scripts import reportlab
import tempfile
myf = tempfile.mktemp(suffix='.py', prefix='my')
f=open(myf, 'w')
f.write(reportlab)
f.close()
self.window.workbench.edit(File(myf), kind=TextEditor,use_existing=False)
class WriteGEXF(Action):
tooltip = "Write Gephi GEXF file"
description = "Write Gephi GEXF file"
# The WorkbenchWindow the action is attached to.
window = Any()
def perform(self, event=None):
from scripts import writegexf
import tempfile
myf = tempfile.mktemp(suffix='.py', prefix='my')
f=open(myf, 'w')
f.write(writegexf)
f.close()
self.window.workbench.edit(File(myf), kind=TextEditor,use_existing=False)
class CorticoCortico(Action):
tooltip = "Extract cortico-cortico fibers"
description = "Extract cortico-cortico fibers"
# The WorkbenchWindow the action is attached to.
window = Any()
def perform(self, event=None):
from scripts import corticocortico
import tempfile
myf = tempfile.mktemp(suffix='.py', prefix='my')
f=open(myf, 'w')
f.write(corticocortico)
f.close()
self.window.workbench.edit(File(myf), kind=TextEditor,use_existing=False)
class NipypeBet(Action):
tooltip = "Brain extraction using BET"
description = "Brain extraction using BET"
# The WorkbenchWindow the action is attached to.
window = Any()
def perform(self, event=None):
from scripts import nipypebet
import tempfile
myf = tempfile.mktemp(suffix='.py', prefix='my')
f=open(myf, 'w')
f.write(nipypebet)
f.close()
self.window.workbench.edit(File(myf), kind=TextEditor,use_existing=False)
class ShowTracks(Action):
tooltip = "Show tracks between two regions"
description = "Show tracks between two regions"
# The WorkbenchWindow the action is attached to.
window = Any()
def perform(self, event=None):
from scripts import ctrackedge
import tempfile
myf = tempfile.mktemp(suffix='.py', prefix='my')
f=open(myf, 'w')
f.write(ctrackedge)
f.close()
self.window.workbench.edit(File(myf), kind=TextEditor,use_existing=False)
class XNATPushPull(Action):
tooltip = "Push and pull files from and to XNAT Server"
description = "Push and pull files from and to XNAT Server"
# The WorkbenchWindow the action is attached to.
window = Any()
def perform(self, event=None):
from scripts import pushpull
import tempfile
myf = tempfile.mktemp(suffix='.py', prefix='my')
f=open(myf, 'w')
f.write(pushpull)
f.close()
self.window.workbench.edit(File(myf), kind=TextEditor,use_existing=False)
class ComputeNBS(Action):
tooltip = "Compute NBS"
description = "Compute NBS"
# The WorkbenchWindow the action is attached to.
window = Any()
def perform(self, event=None):
# from cnetwork_nbs_action import NBSNetworkParameter, NBSMoreParameter
from scripts import nbsscript
# cfile = self.window.application.get_service('cviewer.plugins.cff2.cfile.CFile')
#
# no = NBSNetworkParameter(cfile)
# no.edit_traits(kind='livemodal')
#
# if (len(no.selected1) == 0 or len(no.selected2) == 0):
# return
#
# mo = NBSMoreParameter(cfile, no.selected1[0], no.selected2[0])
# mo.edit_traits(kind='livemodal')
#
# import datetime as dt
# a=dt.datetime.now()
# ostr = '%s%s%s' % (a.hour, a.minute, a.second)
# if not (len(no.selected1) == 0 or len(no.selected2) == 0):
# # if cancel, not create surface
# # create a temporary file
# import tempfile
# myf = tempfile.mktemp(suffix='.py', prefix='my')
# f=open(myf, 'w')
# f.write(nbsscript % (str(no.selected1),
# mo.first_edge_value,
# str(no.selected2),
# mo.second_edge_value,
# mo.THRES,
# mo.K,
# mo.TAIL,
# ostr))
# f.close()
#
# self.window.workbench.edit(File(myf), kind=TextEditor,use_existing=False)
import tempfile
myf = tempfile.mktemp(suffix='.py', prefix='my')
f=open(myf, 'w')
f.write(nbsscript)
f.close()
self.window.workbench.edit(File(myf), kind=TextEditor,use_existing=False)
class ShowNetworks(Action):
tooltip = "Create a 3D Network"
description = "Create a 3D Network"
# The WorkbenchWindow the action is attached to.
window = Any()
def perform(self, event=None):
from cnetwork_action import NetworkParameter
from scripts import netscript
cfile = self.window.application.get_service('cviewer.plugins.cff2.cfile.CFile')
no = NetworkParameter(cfile)
no.edit_traits(kind='livemodal')
if not no.netw[no.graph]['name'] == "None":
# if cancel, not create surface
# create a temporary file
import tempfile
myf = tempfile.mktemp(suffix='.py', prefix='my')
f=open(myf, 'w')
f.write(netscript % (no.netw[no.graph]['name'],
no.node_position,
no.edge_value,
no.node_label))
f.close()
self.window.workbench.edit(File(myf), kind=TextEditor,use_existing=False)
class ConnectionMatrix(Action):
tooltip = "Show connection matrix"
description = "Show connection matrix"
# The WorkbenchWindow the action is attached to.
window = Any()
def perform(self, event=None):
from cnetwork_action import MatrixNetworkParameter
from scripts import conmatrix
cfile = self.window.application.get_service('cviewer.plugins.cff2.cfile.CFile')
no = MatrixNetworkParameter(cfile)
no.edit_traits(kind='livemodal')
if not no.netw[no.graph]['name'] == "None":
# if cancel, not create surface
# create a temporary file
import tempfile
myf = tempfile.mktemp(suffix='.py', prefix='my')
f=open(myf, 'w')
f.write(conmatrix % (no.netw[no.graph]['name'],
no.node_label))
f.close()
self.window.workbench.edit(File(myf), kind=TextEditor,use_existing=False)
class SimpleConnectionMatrix(Action):
tooltip = "Show simple connection matrix"
description = "Show simple connection matrix"
# The WorkbenchWindow the action is attached to.
window = Any()
def perform(self, event=None):
from cnetwork_action import MatrixEdgeNetworkParameter
from scripts import conmatrixpyplot
cfile = self.window.application.get_service('cviewer.plugins.cff2.cfile.CFile')
no = MatrixEdgeNetworkParameter(cfile)
no.edit_traits(kind='livemodal')
if not no.netw[no.graph]['name'] == "None":
# if cancel, not create surface
# create a temporary file
import tempfile
myf = tempfile.mktemp(suffix='.py', prefix='my')
f=open(myf, 'w')
f.write(conmatrixpyplot % (no.netw[no.graph]['name'],
no.edge_label))
f.close()
self.window.workbench.edit(File(myf), kind=TextEditor,use_existing=False)
class ShowSurfaces(Action):
""" Open a new file in the text editor
"""
tooltip = "Create a surface"
description = "Create a surface"
# The WorkbenchWindow the action is attached to.
window = Any()
def perform(self, event=None):
from csurface_action import SurfaceParameter
from scripts import surfscript
cfile = self.window.application.get_service('cviewer.plugins.cff2.cfile.CFile')
so = SurfaceParameter(cfile)
so.edit_traits(kind='livemodal')
if not so.pointset_da[so.pointset]['name'] == "None":
# if cancel, not create surface
# create a temporary file
import tempfile
myf = tempfile.mktemp(suffix='.py', prefix='my')
f=open(myf, 'w')
if so.labels_da[so.labels].has_key('da_idx'):
labels = so.labels_da[so.labels]['da_idx']
else:
labels = 0
f.write(surfscript % (so.pointset_da[so.pointset]['name'],
so.pointset_da[so.pointset]['da_idx'],
so.faces_da[so.faces]['name'],
so.faces_da[so.faces]['da_idx'],
so.labels_da[so.labels]['name'],
labels))
f.close()
self.window.workbench.edit(File(myf), kind=TextEditor,use_existing=False)
class ShowVolumes(Action):
""" Open a new file in the text editor
"""
tooltip = "Create a volume"
description = "Create a volume"
# The WorkbenchWindow the action is attached to.
window = Any()
def perform(self, event=None):
from cvolume_action import VolumeParameter
from scripts import volslice
cfile = self.window.application.get_service('cviewer.plugins.cff2.cfile.CFile')
so = VolumeParameter(cfile)
so.edit_traits(kind='livemodal')
if True: #not so.pointset_da[so.pointset]['name'] == "None":
# if cancel, not create surface
# create a temporary file
import tempfile
myf = tempfile.mktemp(suffix='.py', prefix='my')
f=open(myf, 'w')
f.write(volslice % so.volumes[so.myvolume]['name'])
f.close()
self.window.workbench.edit(File(myf), kind=TextEditor,use_existing=False)
| bsd-3-clause | 8,568,969,510,888,972,000 | 30.545961 | 88 | 0.586402 | false | 3.857289 | false | false | false |
ninjasftw/libertyproxybeat | vendor/github.com/elastic/beats/packetbeat/tests/system/test_0053_amqp_channel_error.py | 11 | 1528 | from packetbeat import BaseTest
class Test(BaseTest):
def test_amqp_channel_error(self):
self.render_config_template(
amqp_ports=[5672],
)
self.run_packetbeat(pcap="amqp_channel_error.pcap",
debug_selectors=["amqp,tcp,publish"])
objs = self.read_output()
assert all([o["type"] == "amqp" for o in objs])
assert len(objs) == 3
assert objs[0]["method"] == "exchange.declare"
assert objs[0]["status"] == "OK"
assert objs[0]["amqp.exchange"] == "titres"
assert objs[0]["amqp.durable"] == True
assert objs[0]["amqp.exchange-type"] == "fanout"
assert objs[0]["amqp.passive"] == False
assert objs[0]["amqp.no-wait"] == True
assert objs[1]["method"] == "queue.declare"
assert objs[1]["status"] == "OK"
assert objs[1]["amqp.queue"] == "my_queue"
assert objs[1]["amqp.exclusive"] == True
assert objs[1]["amqp.no-wait"] == False
assert objs[1]["amqp.durable"] == False
assert objs[1]["amqp.auto-delete"] == False
assert objs[1]["amqp.passive"] == False
assert objs[2]["method"] == "channel.close"
assert objs[2]["status"] == "Error"
assert objs[2]["amqp.reply-code"] == 404
assert objs[2]["amqp.reply-text"] == "NOT_FOUND - no exchange 'plop' in vhost '/'"
assert objs[2]["amqp.class-id"] == 50
assert objs[2]["amqp.method-id"] == 20
| apache-2.0 | 7,701,928,987,731,056,000 | 37.179487 | 90 | 0.537958 | false | 3.537037 | false | false | false |
janusnic/creator | creator/macro.py | 2 | 20924 | # Copyright (C) 2015 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import creator.utils
import abc
import glob2
import os
import string
import sys
import weakref
class ContextProvider(object, metaclass=abc.ABCMeta):
"""
The *ContextProvider* is the interface class for rendering macros
providing the data necessary, eg. the value of variables. Some macro
functions, like ``$(wildcard ...)`, expect the context does provide
a macro ``$ProjectPath`` which should provide the directory that
contains the project files.
"""
@abc.abstractmethod
def has_macro(self, name):
"""
Args:
name (str): The name of the macro to check for existence.
Returns:
bool: True if the macro exists, False if not.
"""
return False
@abc.abstractmethod
def get_macro(self, name, default=NotImplemented):
"""
Args:
name (str): The name of the macro to retrieve.
default (any): The default value to be returned if the macro
can not be served. The default value is :class:`NotImplemented`
which causes this function to raise a :class:`KeyError` instead.
Returns:
ExpressionNode: The macro associated with the specified *name*.
Raises:
KeyError: If there is no macro with the specified name and the
*default* parameter has the value :class:`NotImplemented`.
"""
if default is NotImplemented:
raise KeyError(name)
return default
@abc.abstractmethod
def get_namespace(self):
"""
Returns:
str: The name of the context that is used to identify it globally.
"""
raise NotImplementedError
class MutableContext(ContextProvider):
"""
This implementation of the :class:`ContextProvider` interface
enables reading and writing macros via the Python ``__getitem__()``
and ``__setitem__()`` interface and stores these internally. If a
string is set with ``__setitem__()``, it will automatically be parsed
into an expression tree.
Attributes:
macros (dict of str -> ExpressionNode): The internal dictionary
mapping the macro names with the actual macro objects.
"""
def __init__(self):
super().__init__()
self.macros = {}
def __setitem__(self, name, value):
if isinstance(value, str):
value = parse(value, self)
elif not isinstance(value, ExpressionNode):
message = 'value must be str or ExpressionNode'
raise TypeError(message, type(value))
# Make sure the macro does not contain a reference to itself.
# It will be resolved by expanding the original value immediately
# in the expression hierarchy.
old_value = self.macros.get(name) or TextNode('')
for ref_name in self.get_aliases(name):
value = value.substitute(ref_name, old_value)
self.macros[name] = value
def __delitem__(self, name):
try:
del self.macros[name]
except KeyError:
pass
def get_aliases(self, name):
"""
This function can be implemented by subclasses to specify under
what aliases the same macro can be found. The default implementation
simply returns *name*.
Args:
name (str): The name that was passed to :meth:`__setitem__`.
Returns:
list of str: A list of aliases.
"""
return [name]
def function(self, func):
"""
Decorator for a Python callable to be wrapped in a :class:`Function`
expression node and assigned to the *MutableContext*.
"""
self.macros[func.__name__] = Function(func)
return self.macros[func.__name__]
def has_macro(self, name):
return name in self.macros
def get_macro(self, name, default=NotImplemented):
if name in self.macros:
return self.macros[name]
elif default is not NotImplemented:
return default
else:
raise KeyError(name)
def get_namespace(self, name):
raise NotImplementedError
class ChainContext(ContextProvider):
"""
This context chains multiple :class:`ContextProvider`s.
"""
def __init__(self, *contexts):
super().__init__()
self.contexts = []
for context in contexts:
if context is not None:
if not isinstance(context, ContextProvider):
raise TypeError('expected ContextProvider', type(context))
self.contexts.append(context)
def has_macro(self, name):
for context in self.contexts:
      if context.has_macro(name):
return True
return False
def get_macro(self, name, default=NotImplemented):
for context in self.contexts:
try:
return context.get_macro(name)
except KeyError:
pass
if default is NotImplemented:
raise KeyError(name)
return default
def get_namespace(self, name):
for context in self.contexts:
try:
return context.get_namespace(name)
except KeyError:
pass
raise KeyError(name)
class StackFrameContext(ContextProvider):
"""
This :class:`ContextProvider` implementation exposes the contents
of a Python stack frame.
Args:
stack_depth (int): The number of stacks to go backwards from the
calling stack frame to reach the frame that is supposed to be
exposed by this context.
"""
def __init__(self, stack_depth=0):
super().__init__()
frame = sys._getframe()
for i in range(stack_depth + 1):
frame = frame.f_back
self.frame = frame
def has_macro(self, name):
try:
self.get_macro(name)
except KeyError:
return False
return True
def get_macro(self, name, default=NotImplemented):
frame = self.frame
if name in frame.f_locals:
value = frame.f_locals[name]
elif name in frame.f_globals:
value = frame.f_globals[name]
elif default is not NotImplemented:
return default
else:
raise KeyError(name)
if isinstance(value, str):
value = creator.macro.TextNode(str(value))
if not isinstance(value, creator.macro.ExpressionNode):
raise KeyError(name)
return value
def get_namespace(self, name):
raise KeyError(name)
class ExpressionNode(object, metaclass=abc.ABCMeta):
"""
Base class for macro expression nodes that can be evaluated with
a :class:`ContextProvider` and rendered to a string.
"""
@abc.abstractmethod
def eval(self, context, args):
"""
Evaluate the expression node given the specified context and
function call arguments into a string.
Args:
context (ContextProvider): The context to evaluate with.
args (list of ExpressionNode): A list of arguments that should
be taken into account for the evaluation.
Returns:
str: The evaluated macro.
"""
raise NotImplementedError
@abc.abstractmethod
def substitute(self, ref_name, node):
"""
This function must be implemented by nodes that expand a variable
name like the :meth:`VarNode` and must replace any occurence that
expands the reference named by *ref_name* with *node*.
Args:
ref_name (str): The name of the variable. May contain a double
colon ``:`` to separate namespace and variable name.
node (ExpressionNode): The node to insert in place.
Returns:
ExpressionNode: *self* or *node*.
"""
return self
@abc.abstractmethod
def copy(self, new_context):
"""
Create a copy of the node and return it. If *new_context* is not
None, its a *ContextProvider* that should be used inside the *VarNode*s
instead of their previous.
Args:
new_context (ContextProvider or None): The new context.
Returns:
ExpressionNode: The copy of the node.
"""
raise NotImplementedError
class TextNode(ExpressionNode):
"""
The *TextNode* simply evaluates into the same text it was initialized
with. It does not the context to evaluate.
Attributes:
text (str): The text of the node.
"""
def __init__(self, text):
if not isinstance(text, str):
raise TypeError('text must be str', type(text))
super().__init__()
self.text = text
def eval(self, context, args):
return self.text
def substitute(self, ref_name, node):
return self
def copy(self, new_context):
return TextNode(self.text)
class ConcatNode(ExpressionNode):
"""
This expression node can contain a number of other nodes which are
simply concatenated on evaluation. It also implements a parse-time
performance improvement when appending raw text to the node as it
will simply update the last :class:`TextNode` (if present) instead
of creating a new node for each chunk.
Attributes:
nodes (list of ExpressionNode): The list of nodes.
"""
def __init__(self, nodes=None):
super().__init__()
self.nodes = [] if nodes is None else nodes
def append(self, node):
"""
Appends a :class:`ExpressionNode` or text to this node.
Args:
node (ExpressionNode or str): The node or text to add.
"""
if type(node) is TextNode:
text = node.text
elif isinstance(node, str):
text = node
node = None
else:
text = None
if text is not None:
if self.nodes and isinstance(self.nodes[-1], TextNode):
self.nodes[-1].text += text
return
if node is None:
node = TextNode(text)
self.nodes.append(node)
def eval(self, context, args):
return ''.join(n.eval(context, args) for n in self.nodes)
def substitute(self, ref_name, node):
for i in range(len(self.nodes)):
self.nodes[i] = self.nodes[i].substitute(ref_name, node)
return self
def copy(self, new_context):
nodes = [n.copy(new_context) for n in self.nodes]
return ConcatNode(nodes)
class VarNode(ExpressionNode):
"""
This expression node implements a variable expansion or function call.
"""
def __init__(self, varname, args, context):
super().__init__()
self.varname = varname
self.args = args
self.context = weakref.ref(context)
def eval(self, context, args):
if self.context:
context = self.context()
# Evaluate the arguments to the function.
sub_args = [TextNode(n.eval(context, args)) for n in self.args]
# Does the identifier access an argument?
arg_index = None
try:
arg_index = int(self.varname)
except ValueError:
pass
if arg_index is not None and arg_index >= 0 and arg_index < len(args):
return args[arg_index].eval(context, sub_args).strip()
# Try to get the macro and evaluate it.
try:
macro = context.get_macro(self.varname)
except KeyError:
return ''
return macro.eval(context, sub_args).strip()
def substitute(self, ref_name, node):
if ref_name == self.varname:
return node
elif self.context():
namespace = self.context().get_namespace()
if ref_name == creator.utils.create_var(namespace, self.varname):
return node
for i in range(len(self.args)):
self.args[i] = self.args[i].substitute(ref_name, node)
return self
def copy(self, new_context):
args = [n.copy(new_context) for n in self.args]
return VarNode(self.varname, args, new_context)
class Function(ExpressionNode):
"""
This class can be used to wrap a Python function to make it a
function that can be called from a macro. The wrapped function
must accept the same arguments as :meth:`eval`.
"""
def __init__(self, func):
super().__init__()
self.func = func
@property
def name(self):
return self.func.__name__
def eval(self, context, args):
return self.func(context, args)
def substitute(self, ref_name, node):
return self
def copy(self, new_context):
return self
class Parser(object):
"""
This class implements the process of parsing a string into an
expression node hierarchy.
"""
CHARS_WHITESPACE = string.whitespace
CHARS_IDENTIFIER = string.ascii_letters + string.digits + '-_.<@:'
CHAR_POPEN = '('
CHAR_PCLOSE = ')'
CHAR_BOPEN = '{'
CHAR_BCLOSE = '}'
CHAR_NAMESPACEACCESS = ':'
CHAR_ARGSEP = ','
def parse(self, text, context):
"""
Args:
text (str): The text to parse into an expression tree.
Returns:
ConcatNode: The root node of the hierarchy.
"""
if context is not None and not isinstance(context, ContextProvider):
raise TypeError('context must be None or ContextProvider', type(context))
scanner = creator.utils.Scanner(text.strip())
return self._parse_arg(scanner, context, closing_at='')
def _parse_arg(self, scanner, context, closing_at):
root = ConcatNode()
char = scanner.char
while scanner and char not in closing_at:
if char == '$':
char = scanner.next()
node = None
if char != '$':
cursor = scanner.state()
node = self._parse_macro(scanner, context)
if not node:
scanner.restore(cursor)
if node:
root.append(node)
char = scanner.char
else:
root.append('$')
char = scanner.next()
elif char == '\\':
char = scanner.next()
if char:
root.append(char)
char = scanner.next()
else:
root.append('\\')
else:
root.append(char)
char = scanner.next()
return root
def _parse_macro(self, scanner, context):
# Check if a shortcut identifier was used.
shortcut = None
if scanner.char in Globals.shortcut_map:
shortcut = Globals.shortcut_map[scanner.char]
scanner.next()
is_call = False
is_braced = False
# Check if we have an opening parenthesis (function call).
if scanner.char == self.CHAR_POPEN:
is_call = True
closing = self.CHAR_PCLOSE
scanner.next()
# Or if we got braces (enclosed variable expansion).
elif scanner.char == self.CHAR_BOPEN:
is_braced = True
closing = self.CHAR_BCLOSE
scanner.next()
scanner.consume_set(self.CHARS_WHITESPACE)
# If a shortcut was used and this is a call, we already know
# the function that is used to call.
if shortcut and is_call:
varname = shortcut
# Read the variable or function name that is referenced
# in this expression.
else:
varname = scanner.consume_set(self.CHARS_IDENTIFIER)
if not varname:
return None
# If its a function call, we need to read in the arguments.
if is_call:
args = []
scanner.consume_set(self.CHARS_WHITESPACE)
closing_at = closing + self.CHAR_ARGSEP
while scanner.char and scanner.char != closing:
node = self._parse_arg(scanner, context, closing_at)
args.append(node)
if scanner.char == self.CHAR_ARGSEP:
scanner.next()
elif scanner.char == closing:
break
# Skip whitespace after the argument separator.
scanner.consume_set(self.CHARS_WHITESPACE)
if scanner.char != closing:
return None
scanner.next()
return VarNode(varname, args, context)
# If its braced, we only need the name of the variable that
# is being referenced.
elif is_braced:
scanner.consume_set(self.CHARS_WHITESPACE)
if scanner.char != closing:
return None
scanner.next()
node = VarNode(varname, [], context)
if shortcut:
node = VarNode(shortcut, [node], context)
return node
parser = Parser()
parse = parser.parse
class Globals:
shortcut_map = {
'"': 'quote',
'!': 'quotesplit',
'*': 'wildcard',
}
@Function
def addprefix(context, args):
if len(args) != 2:
message = 'addprefix requires 2 arguments, got {0}'.format(len(args))
raise TypeError(message)
prefix = args[0].eval(context, [])
items = creator.utils.split(args[1].eval(context, []))
items = [prefix + x for x in items]
return creator.utils.join(items)
@Function
def addsuffix(context, args):
if len(args) != 2:
message = 'addsuffix requires 2 arguments, got {0}'.format(len(args))
raise TypeError(message)
suffix = args[0].eval(context, [])
items = creator.utils.split(args[1].eval(context, []))
items = [x + suffix for x in items]
return creator.utils.join(items)
@Function
def quote(context, args):
items = [n.eval(context, []).strip() for n in args]
items = [creator.utils.quote(x) for x in items]
return ' '.join(items)
@Function
def quoteall(context, args):
items = ';'.join(n.eval(context, []).strip() for n in args)
items = [creator.utils.quote(x) for x in creator.utils.split(items)]
return creator.utils.join(items)
@Function
def quotesplit(context, args):
items = ';'.join(n.eval(context, []).strip() for n in args)
items = creator.utils.split(items)
items = [creator.utils.quote(x) for x in items]
return ' '.join(items)
@Function
def subst(context, args):
if len(args) != 3:
message = 'subst requires 3 arguments, got {0}'.format(len(args))
raise TypeError(message)
subject, replacement, items = [n.eval(context, []).strip() for n in args]
items = creator.utils.split(items)
items = [x.replace(subject, replacement) for x in items]
return creator.utils.join(items)
@Function
def split(context, args):
items = ';'.join(n.eval(context, []).strip() for n in args)
return ' '.join(creator.utils.split(items))
@Function
def wildcard(context, args):
patterns = [n.eval(context, []).strip() for n in args]
items = []
for pattern in patterns:
items.extend(glob2.glob(pattern))
items.sort()
return creator.utils.join(items)
@Function
def suffix(context, args):
if len(args) != 2:
message = 'suffix requires 2 arguments, got {0}'.format(len(args))
raise TypeError(message)
items, suffix = [n.eval(context, []).strip() for n in args]
items = creator.utils.split(items)
items = [creator.utils.set_suffix(x, suffix) for x in items]
return creator.utils.join(items)
@Function
def prefix(context, args):
if len(args) != 2:
message = 'prefix requires 2 arguments, got {0}'.format(len(args))
raise TypeError(message)
items, prefix = [n.eval(context, []).strip() for n in args]
result = []
for item in creator.utils.split(items):
dirname, basename = os.path.split(item)
basename = prefix + basename
result.append(os.path.join(dirname, basename))
return creator.utils.join(result)
@Function
def move(context, args):
if len(args) != 3:
message = 'move requires 3 arguments, got {0}'.format(len(args))
raise TypeError(message)
items, base, new_base = [n.eval(context, []).strip() for n in args]
result = []
for item in creator.utils.split(items):
relpath = os.path.relpath(item, base)
result.append(os.path.join(new_base, relpath))
return creator.utils.join(result)
@Function
def dir(context, args):
items = ';'.join(n.eval(context, []) for n in args)
items = creator.utils.split(items)
return creator.utils.join(os.path.dirname(x) for x in items)
@Function
def normpath(context, args):
items = ';'.join(n.eval(context, []).strip() for n in args)
items = creator.utils.split(items)
return creator.utils.join(creator.utils.normpath(x) for x in items)
@Function
def upper(context, args):
if len(args) != 1:
message = 'upper expects exactly 1 argument, got {0}'.format(len(args))
raise TypeError(message)
return args[0].eval(context, []).upper()
@Function
def lower(context, args):
if len(args) != 1:
message = 'lower expects exactly 1 argument, got {0}'.format(len(args))
raise TypeError(message)
return args[0].eval(context, []).lower()
@Function
def capitalize(context, args):
if len(args) != 1:
message = 'lower expects exactly 1 argument, got {0}'.format(len(args))
raise TypeError(message)
return string.capwords(args[0].eval(context, []))
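if __name__ == '__main__':
  # Hedged usage sketch, not part of the original module. It shows how a
  # MutableContext stores parsed macros, how __setitem__ eagerly expands a
  # self-reference, and how one of the built-in Function objects can be
  # exposed to a context. The macro names below are illustrative only.
  ctx = MutableContext()
  ctx['CFLAGS'] = '-Wall'
  ctx['CFLAGS'] = '$CFLAGS -O2'  # the old value is substituted in place
  print(ctx.get_macro('CFLAGS').eval(ctx, []))  # -> -Wall -O2
  ctx['upper'] = upper  # make the built-in upper() callable as $(upper ...)
  print(parse('$(upper $CFLAGS)', ctx).eval(ctx, []))  # -> -WALL -O2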
| mit | 8,583,543,912,065,867,000 | 28.142061 | 79 | 0.654034 | false | 3.986283 | false | false | false |
netroby/vitess | test/queryservice_tests/cases_framework.py | 9 | 7554 | import ast
import json
import os
import re
import time
import urllib2
import environment
import framework
import utils
def cases_iterator(cases):
for case in cases:
if isinstance(case, MultiCase):
for c in case:
yield c
else:
yield case
class Log(object):
def __init__(self, line):
self.line = line
try:
(self.method,
self.remote_address,
self.username,
self.start_time,
self.end_time,
self.total_time,
self.plan_type,
self.original_sql,
self.bind_variables,
self.number_of_queries,
self.rewritten_sql,
self.query_sources,
self.mysql_response_time,
self.waiting_for_connection_time,
self.rowcount,
self.size_of_response,
self.cache_hits,
self.cache_misses,
self.cache_absent,
self.cache_invalidations,
self.error) = line.strip().split('\t')
except ValueError:
print "Wrong looking line: %r" % line
raise
def check(self, case):
if isinstance(case, basestring):
return []
if isinstance(case, MultiCase):
return sum((self.check(subcase) for subcase in case.sqls_and_cases), [])
failures = []
for method in dir(self):
if method.startswith('check_'):
if not case.is_testing_cache and method.startswith('check_cache_'):
continue
fail = getattr(self, method)(case)
if fail:
failures.append(fail)
return failures
def fail(self, reason, should, is_):
return "FAIL: %s: %r != %r" % (reason, should, is_)
def check_original_sql(self, case):
# The following is necessary because Python and Go use different
# notations for bindings: %(foo)s vs :foo.
sql = re.sub(r'%\((\w+)\)s', r':\1', case.sql)
# Eval is a cheap hack - Go always uses doublequotes, Python
# prefers single quotes.
if sql != eval(self.original_sql):
return self.fail('wrong sql', case.sql, self.original_sql)
def check_rowcount(self, case):
if case.rowcount is not None and int(self.rowcount) != case.rowcount:
return self.fail("Bad rowcount", case.rowcount, self.rowcount)
def check_cache_hits(self, case):
if case.cache_hits is not None and int(self.cache_hits) != case.cache_hits:
return self.fail("Bad Cache Hits", case.cache_hits, self.cache_hits)
def check_cache_absent(self, case):
if case.cache_absent is not None and int(self.cache_absent) != case.cache_absent:
return self.fail("Bad Cache Absent", case.cache_absent, self.cache_absent)
def check_cache_misses(self, case):
if case.cache_misses is not None and int(self.cache_misses) != case.cache_misses:
return self.fail("Bad Cache Misses", case.cache_misses, self.cache_misses)
def check_cache_invalidations(self, case):
if case.cache_invalidations is not None and int(self.cache_invalidations) != case.cache_invalidations:
return self.fail("Bad Cache Invalidations", case.cache_invalidations, self.cache_invalidations)
def check_query_plan(self, case):
if case.query_plan is not None and case.query_plan != self.plan_type:
return self.fail("Bad query plan", case.query_plan, self.plan_type)
def check_rewritten_sql(self, case):
if case.rewritten is None:
return
queries = []
for q in ast.literal_eval(self.rewritten_sql).split(';'):
q = q.strip()
if q and q != '*/':
queries.append(q)
if case.rewritten != queries:
return self.fail("Bad rewritten SQL", case.rewritten, queries)
def check_number_of_queries(self, case):
if case.rewritten is not None and int(self.number_of_queries) != len(case.rewritten):
return self.fail("wrong number of queries", len(case.rewritten), int(self.number_of_queries))
class Case(object):
def __init__(self, sql, bindings=None, result=None, rewritten=None, doc='',
rowcount=None, cache_table=None, query_plan=None, cache_hits=None,
cache_misses=None, cache_absent=None, cache_invalidations=None,
remote_address="[::1]"):
# For all cache_* parameters, a number n means "check this value
# is exactly n," while None means "I am not interested in this
# value, leave it alone."
self.sql = sql
self.bindings = bindings or {}
self.result = result
if isinstance(rewritten, basestring):
rewritten = [rewritten]
self.rewritten = rewritten
self.rowcount = rowcount
self.doc = doc
self.query_plan = query_plan
self.cache_table = cache_table
self.cache_hits= cache_hits
self.cache_misses = cache_misses
self.cache_absent = cache_absent
self.cache_invalidations = cache_invalidations
self.remote_address = remote_address
@property
def is_testing_cache(self):
return any(attr is not None for attr in [self.cache_hits,
self.cache_misses,
self.cache_absent,
self.cache_invalidations])
def run(self, cursor, env):
failures = []
env.querylog.reset()
if self.is_testing_cache:
tstart = self.table_stats(env)
if self.sql in ('begin', 'commit', 'rollback'):
getattr(cursor.connection, self.sql)()
else:
cursor.execute(self.sql, self.bindings)
if self.result is not None:
result = list(cursor)
if self.result != result:
failures.append("%r:\n%s !=\n%s" % (self.sql, self.result, result))
for i in range(30):
lines = env.querylog.tailer.readLines()
if not lines:
time.sleep(0.1)
continue
break
for line in lines:
case_failures = Log(line).check(self)
if case_failures:
failures.extend(case_failures)
if self.is_testing_cache:
tdelta = self.table_stats_delta(tstart, env)
if self.cache_hits is not None and tdelta['Hits'] != self.cache_hits:
failures.append("Bad Cache Hits: %s != %s" % (self.cache_hits, tdelta['Hits']))
if self.cache_absent is not None and tdelta['Absent'] != self.cache_absent:
failures.append("Bad Cache Absent: %s != %s" % (self.cache_absent, tdelta['Absent']))
if self.cache_misses is not None and tdelta['Misses'] != self.cache_misses:
failures.append("Bad Cache Misses: %s != %s" % (self.cache_misses, tdelta['Misses']))
if self.cache_invalidations is not None and tdelta['Invalidations'] != self.cache_invalidations:
failures.append("Bad Cache Invalidations: %s != %s" % (self.cache_invalidations, tdelta['Invalidations']))
return failures
def table_stats_delta(self, old, env):
result = {}
new = self.table_stats(env)
for k, v in new.items():
result[k] = new[k] - old[k]
return result
def table_stats(self, env):
return env.http_get('/debug/table_stats')[self.cache_table]
def __str__(self):
return "Case %r" % self.doc
class MultiCase(object):
def __init__(self, doc, sqls_and_cases):
self.doc = doc
self.sqls_and_cases = sqls_and_cases
def run(self, cursor, env):
failures = []
for case in self.sqls_and_cases:
if isinstance(case, basestring):
if case in ('begin', 'commit', 'rollback'):
getattr(cursor.connection, case)()
else:
cursor.execute(case)
continue
failures += case.run(cursor, env)
return failures
def __iter__(self):
return iter(self.sqls_and_cases)
def __str__(self):
return "MultiCase: %s" % self.doc
| bsd-3-clause | -104,220,655,460,630,460 | 31.843478 | 114 | 0.631454 | false | 3.568257 | false | false | false |
ursky/metaWRAP | bin/metawrap-scripts/binning_refiner.py | 1 | 13714 | #!/usr/bin/env python2.7
# Copyright (C) 2017, Weizhi Song, Torsten Thomas.
# [email protected]
# [email protected]
# Binning_refiner is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Binning_refiner is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# metaWRAP author notes:
# I thank the original creator of this script! This is a great idea! To make
# this script more usable as part of the metaWRAP binning pipeline, I
# removed unnecessary visual aspects of the original Binning_refiner script
# and made it python2 compatible.
# Check out the original program: https://github.com/songweizhi/Binning_refiner
# And the publication: https://www.ncbi.nlm.nih.gov/pubmed/28186226
import os
import glob
import shutil
import argparse
from time import sleep
from sys import stdout
from Bio import SeqIO
##################################################### CONFIGURATION ####################################################
parser = argparse.ArgumentParser()
parser.add_argument('-1',
required=True,
help='first bin folder name')
parser.add_argument('-2',
required=True,
help='second bin folder name')
parser.add_argument('-3',
required=False,
help='third bin folder name')
parser.add_argument('-o',
required=True,
help='output folder name')
parser.add_argument('-ms',
required=False,
default=524288,
type=int,
help='(optional) minimum size for refined bins, default = 524288 (0.5Mbp)')
args = vars(parser.parse_args())
output_dir = args['o']
if output_dir[-1] == '/':
    output_dir = output_dir[:-1]
input_bin_folder_1 = args['1']
if input_bin_folder_1[-1] == '/':
input_bin_folder_1 = input_bin_folder_1[:-1]
input_bin_folder_2 = args['2']
if input_bin_folder_2[-1] == '/':
input_bin_folder_2 = input_bin_folder_2[:-1]
if args['3'] is not None:
input_bin_folder_3 = args['3']
if input_bin_folder_3[-1] == '/':
input_bin_folder_3 = input_bin_folder_3[:-1]
bin_size_cutoff = args['ms']
bin_size_cutoff_MB = float("{0:.2f}".format(bin_size_cutoff / (1024.0 * 1024)))  # float division (Python 2 would otherwise truncate to 0)
# get input bin folder list
input_bin_folder_list = []
if args['3'] is None:
print('Specified 2 input bin sets: -1 %s -2 %s' % (input_bin_folder_1, input_bin_folder_2))
input_bin_folder_list = [input_bin_folder_1, input_bin_folder_2]
else:
print('Specified 3 input bin sets: -1 %s -2 %s -3 %s' % (input_bin_folder_1, input_bin_folder_2, input_bin_folder_3))
input_bin_folder_list = [input_bin_folder_1, input_bin_folder_2, input_bin_folder_3]
################################################ Define folder/file name ###############################################
wd = os.getcwd()
output_folder = output_dir
pwd_output_folder = '%s/%s' % (wd, output_folder)
########################################################################################################################
# get bin name list
bin_folder_1_bins_files = '%s/%s/*.fa*' % (wd, input_bin_folder_1)
bin_folder_2_bins_files = '%s/%s/*.fa*' % (wd, input_bin_folder_2)
# check input files
folder_bins_dict = {}
all_input_bins_list = []
all_input_bins_number_list = []
for bin_folder in input_bin_folder_list:
bins_files = '%s/%s/*.fa*' % (wd, bin_folder)
bin_folder_bins = [os.path.basename(file_name) for file_name in glob.glob(bins_files)]
all_input_bins_list.append(bin_folder_bins)
all_input_bins_number_list.append(len(bin_folder_bins))
folder_bins_dict[bin_folder] = bin_folder_bins
if len(bin_folder_bins) == 0:
print('No input bin detected from %s folder, please double-check!' % (bin_folder))
exit()
bin_folder_bins_ext_list = []
for bin in bin_folder_bins:
bin_file_name, bin_file_ext = os.path.splitext(bin)
bin_folder_bins_ext_list.append(bin_file_ext)
bin_folder_bins_ext_list_uniq = []
for each in bin_folder_bins_ext_list:
if each not in bin_folder_bins_ext_list_uniq:
bin_folder_bins_ext_list_uniq.append(each)
# check whether bins in the same folder have same extension, exit if not
if len(bin_folder_bins_ext_list_uniq) > 1:
print('Different file extensions were found from %s bins, please use same extension (fa, fas or fasta) '
'for all bins in the same folder.' % (bin_folder))
exit()
# create output folder
if os.path.isdir(output_folder):
shutil.rmtree(output_folder)
os.mkdir(output_folder)
else:
os.mkdir(output_folder)
# create folder to hold bins with renamed contig name
combined_all_bins_file = '%s/%s/combined_all_bins.fasta' % (wd, output_folder)
separator = '__'
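# Renamed contig ids take the form <folder>__<bin file name>__<original contig id>,
# e.g. a contig 'NODE_12' from 'bin.3.fasta' in folder 'maxbin' would become
# 'maxbin__bin.3__NODE_12' (folder, bin and contig names here are illustrative only).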
for each_folder in input_bin_folder_list:
sleep(1)
print('Add folder/bin name to contig name for %s bins' % each_folder)
os.mkdir('%s/%s/%s_new' % (wd, output_folder, each_folder))
# add binning program and bin id to metabat_bin's contig name
each_folder_bins = folder_bins_dict[each_folder]
for each_bin in each_folder_bins:
bin_file_name, bin_file_ext = os.path.splitext(each_bin)
each_bin_content = SeqIO.parse('%s/%s/%s' % (wd, each_folder, each_bin), 'fasta')
new = open('%s/%s/%s_new/%s_%s.fasta' % (wd, output_folder, each_folder, each_folder, bin_file_name), 'w')
for each_contig in each_bin_content:
each_contig_new_id = '%s%s%s%s%s' % (each_folder, separator, bin_file_name, separator, each_contig.id)
each_contig.id = each_contig_new_id
each_contig.description = ''
SeqIO.write(each_contig, new, 'fasta')
new.close()
# Combine all new bins
os.system('cat %s/%s/%s_new/*.fasta > %s/%s/combined_%s_bins.fa' % (wd, output_folder, each_folder, wd, output_folder, each_folder))
os.system('rm -r %s/%s/%s_new' % (wd, output_folder, each_folder))
# combine all modified bins together
sleep(1)
print('Combine all bins together')
if len(input_bin_folder_list) == 2:
pwd_combined_folder_1_bins = '%s/%s/combined_%s_bins.fa' % (wd, output_folder, input_bin_folder_1)
pwd_combined_folder_2_bins = '%s/%s/combined_%s_bins.fa' % (wd, output_folder, input_bin_folder_2)
os.system('cat %s %s > %s' % (pwd_combined_folder_1_bins, pwd_combined_folder_2_bins, combined_all_bins_file))
if len(input_bin_folder_list) == 3:
pwd_combined_folder_1_bins = '%s/%s/combined_%s_bins.fa' % (wd, output_folder, input_bin_folder_1)
pwd_combined_folder_2_bins = '%s/%s/combined_%s_bins.fa' % (wd, output_folder, input_bin_folder_2)
pwd_combined_folder_3_bins = '%s/%s/combined_%s_bins.fa' % (wd, output_folder, input_bin_folder_3)
os.system('cat %s %s %s > %s' % (pwd_combined_folder_1_bins, pwd_combined_folder_2_bins, pwd_combined_folder_3_bins, combined_all_bins_file))
combined_all_bins = SeqIO.parse(combined_all_bins_file, 'fasta')
contig_bin_dict = {}
contig_length_dict = {}
for each in combined_all_bins:
each_id_split = each.id.split(separator)
folder_name = each_id_split[0]
bin_name = each_id_split[1]
contig_id = each_id_split[2]
length = len(each.seq)
if contig_id not in contig_bin_dict:
contig_bin_dict[contig_id] = ['%s%s%s' % (folder_name, separator, bin_name)]
contig_length_dict[contig_id] = length
    else:
contig_bin_dict[contig_id].append('%s%s%s' % (folder_name, separator, bin_name))
contig_assignments_file = '%s/%s/contig_assignments.txt' % (wd, output_folder)
contig_assignments = open(contig_assignments_file, 'w')
for each in contig_bin_dict:
if len(contig_bin_dict[each]) == len(input_bin_folder_list):
contig_assignments.write('%s\t%s\t%s\n' % ('\t'.join(contig_bin_dict[each]), each, contig_length_dict[each]))
contig_assignments.close()
contig_assignments_file_sorted = '%s/%s/contig_assignments_sorted.txt' % (wd, output_folder)
contig_assignments_file_sorted_one_line = '%s/%s/contig_assignments_sorted_one_line.txt' % (wd, output_folder)
os.system('cat %s | sort > %s' % (contig_assignments_file, contig_assignments_file_sorted))
contig_assignments_sorted = open(contig_assignments_file_sorted)
contig_assignments_sorted_one_line = open(contig_assignments_file_sorted_one_line, 'w')
current_match = ''
current_match_contigs = []
current_length_total = 0
n = 1
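# Walk the sorted assignments and group consecutive contigs that share the same
# combination of source bins; groups smaller than bin_size_cutoff are discarded.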
for each in contig_assignments_sorted:
each_split = each.strip().split('\t')
current_contig = each_split[-2]
current_length = int(each_split[-1])
matched_bins = '\t'.join(each_split[:-2])
if current_match == '':
current_match = matched_bins
current_match_contigs.append(current_contig)
current_length_total += current_length
elif current_match == matched_bins:
current_match_contigs.append(current_contig)
current_length_total += current_length
elif current_match != matched_bins:
refined_bin_name = 'refined_bin%s' % n
if current_length_total >= bin_size_cutoff:
contig_assignments_sorted_one_line.write('Refined_%s\t%s\t%sbp\t%s\n' % (n, current_match, current_length_total,'\t'.join(current_match_contigs)))
n += 1
current_match = matched_bins
current_match_contigs = []
current_match_contigs.append(current_contig)
current_length_total = 0
current_length_total += current_length
if current_length_total >= bin_size_cutoff:
contig_assignments_sorted_one_line.write('Refined_%s\t%s\t%sbp\t%s\n' % (n, current_match, current_length_total,'\t'.join(current_match_contigs)))
else:
n -= 1
contig_assignments_sorted_one_line.close()
refined_bin_number = n
sleep(1)
print('The number of refined bins: %s' % refined_bin_number)
# Export refined bins and prepare input for GoogleVis
sleep(1)
print('Exporting refined bins...')
separated_1 = '%s/%s/Refined_bins_sources_and_length.txt' % (wd, output_folder)
separated_2 = '%s/%s/Refined_bins_contigs.txt' % (wd, output_folder)
googlevis_input_file = '%s/%s/GoogleVis_Sankey_%sMbp.csv' % (wd, output_folder, bin_size_cutoff_MB)
os.mkdir('%s/%s/Refined' % (wd, output_folder))
refined_bins = open(contig_assignments_file_sorted_one_line)
googlevis_input_handle = open(googlevis_input_file, 'w')
separated_1_handle = open(separated_1, 'w')
separated_2_handle = open(separated_2, 'w')
googlevis_input_handle.write('C1,C2,Length (Mbp)\n')
for each_refined_bin in refined_bins:
each_refined_bin_split = each_refined_bin.strip().split('\t')
each_refined_bin_name = each_refined_bin_split[0]
each_refined_bin_length = 0
each_refined_bin_contig = []
if len(input_bin_folder_list) == 2:
each_refined_bin_source = each_refined_bin_split[1:3]
each_refined_bin_length = int(each_refined_bin_split[3][:-2])
each_refined_bin_contig = each_refined_bin_split[4:]
separated_1_handle.write('%s\t%sbp\t%s\n' % (each_refined_bin_name, each_refined_bin_length, '\t'.join(each_refined_bin_source)))
separated_2_handle.write('%s\n%s\n' % (each_refined_bin_name, '\t'.join(each_refined_bin_contig)))
if len(input_bin_folder_list) == 3:
each_refined_bin_source = each_refined_bin_split[1:4]
each_refined_bin_length = int(each_refined_bin_split[4][:-2])
each_refined_bin_contig = each_refined_bin_split[5:]
separated_1_handle.write('%s\t%sbp\t%s\n' % (each_refined_bin_name, each_refined_bin_length, '\t'.join(each_refined_bin_source)))
separated_2_handle.write('%s\n%s\n' % (each_refined_bin_name, '\t'.join(each_refined_bin_contig)))
    each_refined_bin_length_mbp = float("{0:.2f}".format(each_refined_bin_length / (1024.0 * 1024)))  # float division (Python 2 would otherwise truncate)
m = 0
while m < len(each_refined_bin_source)-1:
googlevis_input_handle.write('%s,%s,%s\n' % (each_refined_bin_source[m], each_refined_bin_source[m+1], each_refined_bin_length_mbp))
m += 1
stdout.write('\rExtracting refined bin: %s.fasta' % each_refined_bin_name)
refined_bin_file = '%s/%s/Refined/%s.fasta' % (wd, output_folder, each_refined_bin_name)
refined_bin_handle = open(refined_bin_file, 'w')
input_contigs_file = '%s/%s/combined_%s_bins.fa' % (wd, output_folder, input_bin_folder_1)
input_contigs = SeqIO.parse(input_contigs_file, 'fasta')
for each_input_contig in input_contigs:
each_input_contig_id = each_input_contig.id.split(separator)[-1]
if each_input_contig_id in each_refined_bin_contig:
each_input_contig.id = each_input_contig_id
each_input_contig.description = ''
SeqIO.write(each_input_contig, refined_bin_handle, 'fasta')
refined_bin_handle.close()
googlevis_input_handle.close()
separated_1_handle.close()
separated_2_handle.close()
# remove temporary files
sleep(1)
print('\nDeleting temporary files')
os.system('rm %s' % contig_assignments_file)
os.system('rm %s' % (combined_all_bins_file))
os.system('rm %s/%s/*.fa' % (wd, output_folder))
os.system('rm %s' % (contig_assignments_file_sorted))
os.system('rm %s' % (contig_assignments_file_sorted_one_line))
sleep(1)
print('\nAll done!')
| mit | 2,044,512,113,421,620,700 | 41.722741 | 158 | 0.645545 | false | 3.061844 | false | false | false |
mobarski/sandbox | kv/kcv_x1_test.py | 1 | 5709 | import unittest
from kcv_x1 import KCV
def pairs(text,dlm=' ',dlm2=':',cast=str):
for row in text.split(dlm):
cols = row.split(dlm2)
yield cols[0],cast(cols[1])
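# e.g. pairs('a:3 b:1', cast=int) yields ('a', 3) and ('b', 1)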
class test_memory_kcv(unittest.TestCase):
def setUp(self):
pass
#self.db = KCV()
#self.db.set_items('k1',pairs('a:3 b:1 c:2 d:0 e:5 f:4'))
### WRITE ###
def test_set(self):
db = KCV()
db.set('k1','c1',42)
db.set('k1','c2',4.2)
db.set('k1','c3','fourty two')
db.set('k1',1,'one')
db.set(2,'c2','two')
self.assertEqual(db.get('k1','c1'),42)
self.assertEqual(db.get('k1','c2'),4.2)
self.assertEqual(db.get('k1','c3'),'fourty two')
self.assertEqual(db.get('k1','c4'),None)
self.assertEqual(db.get('k1',1),'one')
self.assertEqual(db.get(2,'c2'),'two')
def test_set_items(self):
db = KCV()
db.set_items('k1',pairs('a:aa b:bb c:cc'))
self.assertEqual(db.get('k1','a'),'aa')
self.assertEqual(db.get('k1','b'),'bb')
self.assertEqual(db.get('k1','c'),'cc')
def test_incr(self):
db = KCV()
db.incr('k1','c1',2)
self.assertEqual(db.get('k1','c1'),2,'incorrect INCR of new key new col')
db.incr('k1','c1',3)
self.assertEqual(db.get('k1','c1'),5,'incorrect INCR of existing key existing col')
db.incr('k1','c1',-1)
self.assertEqual(db.get('k1','c1'),4,'incorrect INCR with negative value')
def test_incr_items(self):
db = KCV()
db.incr_items('k1',pairs('a:11 b:22 c:33',cast=int))
self.assertEqual(db.get('k1','a'),11)
self.assertEqual(db.get('k1','b'),22)
self.assertEqual(db.get('k1','c'),33)
db.incr_items('k1',pairs('a:1 b:2 c:3',cast=int))
self.assertEqual(db.get('k1','a'),12)
self.assertEqual(db.get('k1','b'),24)
self.assertEqual(db.get('k1','c'),36)
def test_delete(self):
db = KCV()
db.set('k1','c1',123)
db.set('k1','c2',321)
self.assertEqual(db.get('k1','c1'),123)
self.assertEqual(db.get('k1','c2'),321)
db.delete('k1','c1')
self.assertEqual(db.get('k1','c1'),None)
self.assertEqual(db.get('k1','c2'),321)
db.delete('k1','c2')
self.assertEqual(db.get('k1','c2'),None)
def test_drop(self):
db = KCV()
db.set('k1','c1',1)
db.set('k2','c2',2)
db.set('k3','c3',3)
self.assertEqual(db.get('k1','c1'),1)
self.assertEqual(db.get('k2','c2'),2)
self.assertEqual(db.get('k3','c3'),3)
db.drop()
db.create()
self.assertEqual(db.get('k1','c1'),None)
self.assertEqual(db.get('k2','c2'),None)
self.assertEqual(db.get('k3','c3'),None)
### READ ###
def test_get(self):
db = KCV()
db.set('k1','c1',1)
db.set('k1','c2',2)
db.set('k2','c3',3)
self.assertEqual(db.get('k1','c1'),1)
self.assertEqual(db.get('k1','c2'),2)
self.assertEqual(db.get('k2','c3'),3)
self.assertEqual(db.get('k2','c4'),None)
self.assertEqual(db.get('k1','xxx',123),123)
self.assertEqual(db.get('xxx','zzz',123),123)
def test_items(self):
db = KCV()
db.set('k1','c1',1)
db.set('k1','c2',2)
db.set('k1','c3',3)
items = dict(db.items('k1'))
self.assertEqual(len(items),3)
self.assertEqual(items['c2'],2)
self.assertEqual(items['c3'],3)
self.assertEqual(items['c3'],3)
def test_scan_items(self):
db = KCV()
db.set('k11','c11',1)
db.set('k11','c12',2)
db.set('k12','c11',3)
db.set('k12','c12',4)
k_items = dict(db.scan_items('k1*','c11',cast=dict))
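  # 'k1*' is a key pattern: both k11 and k12 match, each mapped to a dict holding only its c11 value.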
self.assertEqual(len(k_items),2)
self.assertEqual('k11' in k_items,True)
self.assertEqual('k12' in k_items,True)
self.assertEqual(len(k_items['k11']),1)
self.assertEqual(len(k_items['k12']),1)
self.assertEqual(k_items['k11']['c11'],1)
self.assertEqual(k_items['k12']['c11'],3)
def test_scan(self):
db = KCV()
db.set('k11','c11',1)
db.set('k11','c12',2)
db.set('k12','c11',3)
db.set('k12','c12',4)
kcv = list(db.scan(order='kaca'))
self.assertEqual(len(kcv),4)
self.assertEqual(kcv[0],('k11','c11',1))
self.assertEqual(kcv[1],('k11','c12',2))
self.assertEqual(kcv[2],('k12','c11',3))
self.assertEqual(kcv[3],('k12','c12',4))
k = list(db.scan(mode='k',order='ka'))
self.assertEqual(len(k),2)
self.assertEqual(k[0],'k11')
self.assertEqual(k[1],'k12')
def test_scan_int(self):
db = KCV()
db.set(1,11,111)
db.set(1,12,123)
db.set(2,22,222)
db.set(2,11,234)
db.set(3,11,345)
kcv = list(db.scan(k=1,order='kaca'))
self.assertEqual(len(kcv),2)
self.assertEqual(kcv[0],(1,11,111))
self.assertEqual(kcv[1],(1,12,123))
kcv = list(db.scan(kin=[1,3],cin=[11,12],order='kaca'))
self.assertEqual(len(kcv),3)
self.assertEqual(kcv[0],(1,11,111))
self.assertEqual(kcv[1],(1,12,123))
self.assertEqual(kcv[2],(3,11,345))
def test_col_store(self):
db = KCV()
db.set_items('k1',pairs('a:aa b:bb c:cc'))
db.set_items('k2',pairs('d:dd e:ee f:ff'))
db.set_items('k3',pairs('g:gg h:hh i:ii'))
db.to_col_store('kcv_x1_test.db',batch=4)
self.assertEqual(db.get('k1','a'),'aa')
self.assertEqual(db.get('k2','e'),'ee')
self.assertEqual(db.get('k3','i'),'ii')
db.drop()
db.create()
self.assertEqual(db.items('k1'), {})
self.assertEqual(db.items('k2'), {})
self.assertEqual(db.items('k3'), {})
db.from_col_store('kcv_x1_test.db')
self.assertEqual(db.get('k1','a'),'aa')
self.assertEqual(db.get('k2','e'),'ee')
self.assertEqual(db.get('k3','i'),'ii')
def test_block(self):
with KCV('kcv_x1_test.db') as db:
db.set('k1','c1',42)
db2=KCV('kcv_x1_test.db')
self.assertEqual(db2.get('k1','c1'),42)
def test_compact(self):
import os
path = 'kcv_x1_test.db'
db=KCV(path)
for i in range(1000):
db.set(i,i,i)
db.sync()
size1 = os.stat(path)[6]
db.drop()
db.sync()
size2 = os.stat(path)[6]
db.sync(compact=True)
size3 = os.stat(path)[6]
self.assertTrue(size3 < size2 <= size1)
if __name__=="__main__":
unittest.main()
| mit | -4,320,885,981,795,332,000 | 26.713592 | 85 | 0.60501 | false | 2.191555 | true | false | false |
uchicago-voth/cgmap | test/molecular_map_test/same_molecules_shared_map_naive_com/test_same_molecules_shared_map_naive_com.py | 1 | 2338 | #!/usr/bin/env python2
import sys
sys.path.append('../../../src/')
import cgmap as cg
import mdtraj as md
import md_check as check
############################### config #####################################
input_traj = "dppc.trr"
input_top = "dppc.pdb"
input_maps = ["mapping_bead_1_dppc",
"mapping_bead_2_dppc",
"mapping_bead_3_dppc"]
output_traj = "dppc.trr"
output_top = "dppc.pdb"
reference_traj = "dppc.trr"
reference_top = "dppc.pdb"
output_dir ='./output/'
input_dir ='./input/'
reference_dir ='./reference/'
#collection of names of molecules.
lipid_types = ['DPPC']
############################### config proc ################################
fq_input_maps = [ input_dir + loc for loc in input_maps ]
#read maps for each bead from files.
#list of lists of strings.
mapping_atom_names_dppc = [ [ l.strip() for l in open(mp_file,'r').readlines()]
for mp_file in fq_input_maps ]
#index strings for which to atom query the trajectory when making beads.
#list of lists of strings.
name_lists = [ " or ".join( ["name %s"%mn for mn in mapping_names ])
for mapping_names in mapping_atom_names_dppc ]
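# e.g. a mapping file listing atoms C1 and C2 yields the selection string
# "name C1 or name C2" (atom names here are illustrative only).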
#names of cg beads created.
label_lists = ['DPH','DPM','DPT']
############################### run ########################################
### pull in trajectories
trj = md.load(input_dir + input_traj,top=input_dir + input_top)
#the types of each molecule in the trajectory.
molecule_types = [lipid_types.index(r.name) for r in trj.top.residues]
#actual map command
cg_trj = cg.map_molecules( trj = trj,
selection_list = [ name_lists ],
bead_label_list = [ label_lists ],
molecule_types = molecule_types,
split_shared_atoms = False)
cg_trj.save(output_dir + output_traj)
cg_trj[0].save(output_dir + output_top)
############################### check results ###############################
# reloading results from disk.
cg_traj = cg_trj.load(output_dir + output_traj,top=output_dir + output_top)
ref_cg_traj = cg_trj.load(reference_dir + reference_traj,
top=reference_dir + reference_top)
result=check.md_content_equality(cg_traj,ref_cg_traj)
sys.exit(check.check_result_to_exitval(result))
| apache-2.0 | -9,123,794,050,362,970,000 | 30.594595 | 80 | 0.557742 | false | 3.34 | false | false | false |
seims/SEIMS | preprocess/build_db.py | 2 | 5586 | #! /usr/bin/env python
# coding=utf-8
# Import all model parameters and spatial datasets to MongoDB
# Author: Junzhi Liu
# Revised: Liang-Jun Zhu
#
from pymongo import MongoClient
from pymongo.errors import ConnectionFailure
from gridfs import *
from config import *
from find_sites import FindSites
from gen_subbasins import ImportSubbasinStatistics
from generate_stream_input import GenerateReachTable
from import_bmp_scenario import ImportBMPTables
from import_parameters import (ImportLookupTables, ImportModelConfiguration,
ImportParameters)
from weights_mongo import GenerateWeightDependentParameters, GenerateWeightInfo
def BuildMongoDB():
statusFile = WORKING_DIR + os.sep + FN_STATUS_MONGO
f = open(statusFile, 'w')
# build mongodb database
try:
conn = MongoClient(host=HOSTNAME, port=PORT)
    except ConnectionFailure as e:
        sys.stderr.write("Could not connect to MongoDB: %s" % str(e))
sys.exit(1)
db = conn[SpatialDBName]
# import parameters information to MongoDB
ImportParameters(TXT_DB_DIR + os.sep + sqliteFile, db)
# import lookup tables from to MongoDB as GridFS. By LJ, 2016-6-13
ImportLookupTables(TXT_DB_DIR + os.sep + sqliteFile, db)
# import model configuration
ImportModelConfiguration(db)
f.write("10, Generating reach table...\n")
f.flush()
GenerateReachTable(WORKING_DIR, db, forCluster)
# prepare meteorology data
if not forCluster:
subbasinRaster = WORKING_DIR + os.sep + mask_to_ext # mask.tif
else:
subbasinRaster = WORKING_DIR + os.sep + subbasinOut # subbasin.tif
if stormMode:
meteoThiessenList = [PrecSitesThiessen]
meteoTypeList = [DataType_Precipitation]
else:
meteoThiessenList = [MeteorSitesThiessen, PrecSitesThiessen]
meteoTypeList = [DataType_Meteorology, DataType_Precipitation]
f.write("20, Finding nearby stations for each sub-basin...\n")
f.flush()
if not forCluster: # OMP version
basinFile = WORKING_DIR + os.sep + basinVec
nSubbasins = FindSites(db, ClimateDBName, basinFile, FLD_BASINID, meteoThiessenList, meteoTypeList, simuMode)
subbasinFile = WORKING_DIR + os.sep + DIR_NAME_SUBBSN + os.sep + subbasinVec # MPI version
nSubbasins = FindSites(db, ClimateDBName, subbasinFile, FLD_SUBBASINID, meteoThiessenList, meteoTypeList, simuMode)
    print "Meteorology sites table generated. Number of sub-basins: %d" % nSubbasins
    if not forCluster:  # changed by LJ, a SubbasinID of 0 means the whole basin!
nSubbasins = 0
# import raster data to MongoDB
f.write("40, Importing raster to MongoDB...\n")
f.flush()
tifFolder = WORKING_DIR + os.sep + DIR_NAME_TIFFIMPORT
if not os.path.exists(tifFolder):
os.mkdir(tifFolder)
subbasinStartID = 1
if not forCluster:
subbasinStartID = 0
for i in range(subbasinStartID, nSubbasins + 1):
subdir = tifFolder + os.sep + str(i)
if not os.path.exists(subdir):
os.mkdir(subdir)
strCmd = '"%s/import_raster" %s %s %s %s %s %d %s' % (
CPP_PROGRAM_DIR, subbasinRaster, WORKING_DIR, SpatialDBName,
DB_TAB_SPATIAL.upper(), HOSTNAME, PORT, tifFolder)
print strCmd
RunExternalCmd(strCmd)
# os.system(strCmd)
print 'Generating weight data...'
f.write("70, Generating weight data for interpolation of meteorology data...\n")
f.flush()
for i in range(subbasinStartID, nSubbasins + 1):
GenerateWeightInfo(conn, SpatialDBName, i, stormMode)
# added by Liangjun, 2016-6-17
GenerateWeightDependentParameters(conn, i)
if genIUH:
f.write("80, Generating IUH (Instantaneous Unit Hydrograph)...\n")
f.flush()
dt = 24
print 'Generating IUH (Instantaneous Unit Hydrograph)...'
strCmd = '"%s/iuh" %s %d %s %s %s %d' % (CPP_PROGRAM_DIR, HOSTNAME, PORT,
SpatialDBName, DB_TAB_SPATIAL.upper(), dt, nSubbasins)
print strCmd
# os.system(strCmd)
RunExternalCmd(strCmd)
f.write("90, Generating Grid layering...\n")
f.flush()
layeringDir = WORKING_DIR + os.sep + DIR_NAME_LAYERINFO
if not os.path.exists(layeringDir):
os.mkdir(layeringDir)
print 'Generating Grid layering...'
strCmd = '"%s/grid_layering" %s %d %s %s %s %d' % (
CPP_PROGRAM_DIR, HOSTNAME, PORT, layeringDir, SpatialDBName, DB_TAB_SPATIAL.upper(), nSubbasins)
print strCmd
# os.system(strCmd)
RunExternalCmd(strCmd)
# Test if the grid layering data is imported successfully. Added by LJ, 2016-11-3
gridLayeringFiles = ['%d_FLOWOUT_INDEX_D8' % nSubbasins, '%d_FLOWIN_INDEX_D8' % nSubbasins]
spatial = GridFS(db, DB_TAB_SPATIAL.upper())
needReRun = False
while not needReRun:
needReRun = True
for gridlyr in gridLayeringFiles:
if not spatial.exists(filename=gridlyr):
needReRun = False
print "%s is not imported successfully, grid_layering will be rerun!" % gridlyr
RunExternalCmd(strCmd)
break
# Import BMP scenario database to MongoDB
ImportBMPTables()
ImportLookupTables(TXT_DB_DIR + os.sep + sqliteFile, db)
ImportModelConfiguration(db)
ImportSubbasinStatistics()
f.write("100,Finished!")
f.close()
print 'Build DB: %s finished!' % SpatialDBName
# test code
if __name__ == "__main__":
LoadConfiguration(GetINIfile())
BuildMongoDB()
| gpl-2.0 | 4,762,656,225,364,247,000 | 37.510345 | 119 | 0.668517 | false | 3.337717 | true | false | false |
Azure/azure-sdk-for-python | sdk/media/azure-mgmt-media/azure/mgmt/media/models/_models.py | 1 | 321292 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class Codec(msrest.serialization.Model):
"""Describes the basic properties of all codecs.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: Audio, CopyAudio, CopyVideo, Video.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param label: An optional label for the codec. The label can be used to control muxing
behavior.
:type label: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.Audio': 'Audio', '#Microsoft.Media.CopyAudio': 'CopyAudio', '#Microsoft.Media.CopyVideo': 'CopyVideo', '#Microsoft.Media.Video': 'Video'}
}
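    # The '@odata.type' discriminator in a payload selects which of these
    # subclasses msrest deserializes the object into.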
def __init__(
self,
**kwargs
):
super(Codec, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.label = kwargs.get('label', None)
class Audio(Codec):
"""Defines the common properties for all audio codecs.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AacAudio.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param label: An optional label for the codec. The label can be used to control muxing
behavior.
:type label: str
:param channels: The number of channels in the audio.
:type channels: int
:param sampling_rate: The sampling rate to use for encoding in hertz.
:type sampling_rate: int
:param bitrate: The bitrate, in bits per second, of the output encoded audio.
:type bitrate: int
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'channels': {'key': 'channels', 'type': 'int'},
'sampling_rate': {'key': 'samplingRate', 'type': 'int'},
'bitrate': {'key': 'bitrate', 'type': 'int'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.AacAudio': 'AacAudio'}
}
def __init__(
self,
**kwargs
):
super(Audio, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.Audio' # type: str
self.channels = kwargs.get('channels', None)
self.sampling_rate = kwargs.get('sampling_rate', None)
self.bitrate = kwargs.get('bitrate', None)
class AacAudio(Audio):
"""Describes Advanced Audio Codec (AAC) audio encoding settings.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param label: An optional label for the codec. The label can be used to control muxing
behavior.
:type label: str
:param channels: The number of channels in the audio.
:type channels: int
:param sampling_rate: The sampling rate to use for encoding in hertz.
:type sampling_rate: int
:param bitrate: The bitrate, in bits per second, of the output encoded audio.
:type bitrate: int
:param profile: The encoding profile to be used when encoding audio with AAC. Possible values
include: "AacLc", "HeAacV1", "HeAacV2".
:type profile: str or ~azure.mgmt.media.models.AacAudioProfile
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'channels': {'key': 'channels', 'type': 'int'},
'sampling_rate': {'key': 'samplingRate', 'type': 'int'},
'bitrate': {'key': 'bitrate', 'type': 'int'},
'profile': {'key': 'profile', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AacAudio, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.AacAudio' # type: str
self.profile = kwargs.get('profile', None)
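# A hypothetical construction sketch (values are illustrative only):
#   AacAudio(label="aac", channels=2, sampling_rate=48000, bitrate=128000, profile="AacLc")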
class ClipTime(msrest.serialization.Model):
"""Base class for specifying a clip time. Use sub classes of this class to specify the time position in the media.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AbsoluteClipTime, UtcClipTime.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.AbsoluteClipTime': 'AbsoluteClipTime', '#Microsoft.Media.UtcClipTime': 'UtcClipTime'}
}
def __init__(
self,
**kwargs
):
super(ClipTime, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
class AbsoluteClipTime(ClipTime):
"""Specifies the clip time as an absolute time position in the media file. The absolute time can point to a different position depending on whether the media file starts from a timestamp of zero or not.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param time: Required. The time position on the timeline of the input media. It is usually
specified as an ISO8601 period. e.g PT30S for 30 seconds.
:type time: ~datetime.timedelta
"""
_validation = {
'odata_type': {'required': True},
'time': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'time': {'key': 'time', 'type': 'duration'},
}
def __init__(
self,
**kwargs
):
super(AbsoluteClipTime, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.AbsoluteClipTime' # type: str
self.time = kwargs['time']
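# A hypothetical usage sketch (illustrative only):
#   AbsoluteClipTime(time=datetime.timedelta(seconds=30))  # a position 30 seconds into the media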
class AccessControl(msrest.serialization.Model):
"""AccessControl.
:param default_action: The behavior for IP access control in Key Delivery. Possible values
include: "Allow", "Deny".
:type default_action: str or ~azure.mgmt.media.models.DefaultAction
:param ip_allow_list: The IP allow list for access control in Key Delivery. If the default
action is set to 'Allow', the IP allow list must be empty.
:type ip_allow_list: list[str]
"""
_attribute_map = {
'default_action': {'key': 'defaultAction', 'type': 'str'},
'ip_allow_list': {'key': 'ipAllowList', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(AccessControl, self).__init__(**kwargs)
self.default_action = kwargs.get('default_action', None)
self.ip_allow_list = kwargs.get('ip_allow_list', None)
class AccountEncryption(msrest.serialization.Model):
"""AccountEncryption.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of key used to encrypt the Account Key. Possible values
include: "SystemKey", "CustomerKey".
:type type: str or ~azure.mgmt.media.models.AccountEncryptionKeyType
:param key_vault_properties: The properties of the key used to encrypt the account.
:type key_vault_properties: ~azure.mgmt.media.models.KeyVaultProperties
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'key_vault_properties': {'key': 'keyVaultProperties', 'type': 'KeyVaultProperties'},
}
def __init__(
self,
**kwargs
):
super(AccountEncryption, self).__init__(**kwargs)
self.type = kwargs['type']
self.key_vault_properties = kwargs.get('key_vault_properties', None)
class Resource(msrest.serialization.Model):
"""Common fields that are returned in the response for all Azure Resource Manager resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
class ProxyResource(Resource):
"""The resource model definition for a Azure Resource Manager proxy resource. It will not have tags and a location.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ProxyResource, self).__init__(**kwargs)
class AccountFilter(ProxyResource):
"""An Account Filter.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: The system metadata relating to this resource.
:vartype system_data: ~azure.mgmt.media.models.SystemData
:param presentation_time_range: The presentation time range.
:type presentation_time_range: ~azure.mgmt.media.models.PresentationTimeRange
:param first_quality: The first quality.
:type first_quality: ~azure.mgmt.media.models.FirstQuality
:param tracks: The tracks selection conditions.
:type tracks: list[~azure.mgmt.media.models.FilterTrackSelection]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'presentation_time_range': {'key': 'properties.presentationTimeRange', 'type': 'PresentationTimeRange'},
'first_quality': {'key': 'properties.firstQuality', 'type': 'FirstQuality'},
'tracks': {'key': 'properties.tracks', 'type': '[FilterTrackSelection]'},
}
def __init__(
self,
**kwargs
):
super(AccountFilter, self).__init__(**kwargs)
self.system_data = None
self.presentation_time_range = kwargs.get('presentation_time_range', None)
self.first_quality = kwargs.get('first_quality', None)
self.tracks = kwargs.get('tracks', None)
class AccountFilterCollection(msrest.serialization.Model):
"""A collection of AccountFilter items.
:param value: A collection of AccountFilter items.
:type value: list[~azure.mgmt.media.models.AccountFilter]
:param odata_next_link: A link to the next page of the collection (when the collection contains
too many results to return in one response).
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[AccountFilter]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AccountFilterCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class AkamaiAccessControl(msrest.serialization.Model):
"""Akamai access control.
:param akamai_signature_header_authentication_key_list: authentication key list.
:type akamai_signature_header_authentication_key_list:
list[~azure.mgmt.media.models.AkamaiSignatureHeaderAuthenticationKey]
"""
_attribute_map = {
'akamai_signature_header_authentication_key_list': {'key': 'akamaiSignatureHeaderAuthenticationKeyList', 'type': '[AkamaiSignatureHeaderAuthenticationKey]'},
}
def __init__(
self,
**kwargs
):
super(AkamaiAccessControl, self).__init__(**kwargs)
self.akamai_signature_header_authentication_key_list = kwargs.get('akamai_signature_header_authentication_key_list', None)
class AkamaiSignatureHeaderAuthenticationKey(msrest.serialization.Model):
"""Akamai Signature Header authentication key.
:param identifier: identifier of the key.
:type identifier: str
:param base64_key: authentication key.
:type base64_key: str
:param expiration: The expiration time of the authentication key.
:type expiration: ~datetime.datetime
"""
_attribute_map = {
'identifier': {'key': 'identifier', 'type': 'str'},
'base64_key': {'key': 'base64Key', 'type': 'str'},
'expiration': {'key': 'expiration', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(AkamaiSignatureHeaderAuthenticationKey, self).__init__(**kwargs)
self.identifier = kwargs.get('identifier', None)
self.base64_key = kwargs.get('base64_key', None)
self.expiration = kwargs.get('expiration', None)
class ApiError(msrest.serialization.Model):
"""The API error.
:param error: The error properties.
:type error: ~azure.mgmt.media.models.ODataError
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ODataError'},
}
def __init__(
self,
**kwargs
):
super(ApiError, self).__init__(**kwargs)
self.error = kwargs.get('error', None)
class Asset(ProxyResource):
"""An Asset.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: The system metadata relating to this resource.
:vartype system_data: ~azure.mgmt.media.models.SystemData
:ivar asset_id: The Asset ID.
:vartype asset_id: str
:ivar created: The creation date of the Asset.
:vartype created: ~datetime.datetime
:ivar last_modified: The last modified date of the Asset.
:vartype last_modified: ~datetime.datetime
:param alternate_id: The alternate ID of the Asset.
:type alternate_id: str
:param description: The Asset description.
:type description: str
:param container: The name of the asset blob container.
:type container: str
:param storage_account_name: The name of the storage account.
:type storage_account_name: str
:ivar storage_encryption_format: The Asset encryption format. One of None or
MediaStorageEncryption. Possible values include: "None", "MediaStorageClientEncryption".
:vartype storage_encryption_format: str or
~azure.mgmt.media.models.AssetStorageEncryptionFormat
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'asset_id': {'readonly': True},
'created': {'readonly': True},
'last_modified': {'readonly': True},
'storage_encryption_format': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'asset_id': {'key': 'properties.assetId', 'type': 'str'},
'created': {'key': 'properties.created', 'type': 'iso-8601'},
'last_modified': {'key': 'properties.lastModified', 'type': 'iso-8601'},
'alternate_id': {'key': 'properties.alternateId', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'container': {'key': 'properties.container', 'type': 'str'},
'storage_account_name': {'key': 'properties.storageAccountName', 'type': 'str'},
'storage_encryption_format': {'key': 'properties.storageEncryptionFormat', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Asset, self).__init__(**kwargs)
self.system_data = None
self.asset_id = None
self.created = None
self.last_modified = None
self.alternate_id = kwargs.get('alternate_id', None)
self.description = kwargs.get('description', None)
self.container = kwargs.get('container', None)
self.storage_account_name = kwargs.get('storage_account_name', None)
self.storage_encryption_format = None
class AssetCollection(msrest.serialization.Model):
"""A collection of Asset items.
:param value: A collection of Asset items.
:type value: list[~azure.mgmt.media.models.Asset]
:param odata_next_link: A link to the next page of the collection (when the collection contains
too many results to return in one response).
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Asset]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AssetCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class AssetContainerSas(msrest.serialization.Model):
"""The Asset Storage container SAS URLs.
:param asset_container_sas_urls: The list of Asset container SAS URLs.
:type asset_container_sas_urls: list[str]
"""
_attribute_map = {
'asset_container_sas_urls': {'key': 'assetContainerSasUrls', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(AssetContainerSas, self).__init__(**kwargs)
self.asset_container_sas_urls = kwargs.get('asset_container_sas_urls', None)
class AssetFileEncryptionMetadata(msrest.serialization.Model):
"""The Asset File Storage encryption metadata.
All required parameters must be populated in order to send to Azure.
:param initialization_vector: The Asset File initialization vector.
:type initialization_vector: str
:param asset_file_name: The Asset File name.
:type asset_file_name: str
:param asset_file_id: Required. The Asset File Id.
:type asset_file_id: str
"""
_validation = {
'asset_file_id': {'required': True},
}
_attribute_map = {
'initialization_vector': {'key': 'initializationVector', 'type': 'str'},
'asset_file_name': {'key': 'assetFileName', 'type': 'str'},
'asset_file_id': {'key': 'assetFileId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AssetFileEncryptionMetadata, self).__init__(**kwargs)
self.initialization_vector = kwargs.get('initialization_vector', None)
self.asset_file_name = kwargs.get('asset_file_name', None)
self.asset_file_id = kwargs['asset_file_id']
class AssetFilter(ProxyResource):
"""An Asset Filter.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: The system metadata relating to this resource.
:vartype system_data: ~azure.mgmt.media.models.SystemData
:param presentation_time_range: The presentation time range.
:type presentation_time_range: ~azure.mgmt.media.models.PresentationTimeRange
:param first_quality: The first quality.
:type first_quality: ~azure.mgmt.media.models.FirstQuality
:param tracks: The tracks selection conditions.
:type tracks: list[~azure.mgmt.media.models.FilterTrackSelection]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'presentation_time_range': {'key': 'properties.presentationTimeRange', 'type': 'PresentationTimeRange'},
'first_quality': {'key': 'properties.firstQuality', 'type': 'FirstQuality'},
'tracks': {'key': 'properties.tracks', 'type': '[FilterTrackSelection]'},
}
def __init__(
self,
**kwargs
):
super(AssetFilter, self).__init__(**kwargs)
self.system_data = None
self.presentation_time_range = kwargs.get('presentation_time_range', None)
self.first_quality = kwargs.get('first_quality', None)
self.tracks = kwargs.get('tracks', None)
class AssetFilterCollection(msrest.serialization.Model):
"""A collection of AssetFilter items.
:param value: A collection of AssetFilter items.
:type value: list[~azure.mgmt.media.models.AssetFilter]
:param odata_next_link: A link to the next page of the collection (when the collection contains
too many results to return in one response).
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[AssetFilter]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AssetFilterCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class AssetStreamingLocator(msrest.serialization.Model):
"""Properties of the Streaming Locator.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Streaming Locator name.
:vartype name: str
:ivar asset_name: Asset Name.
:vartype asset_name: str
:ivar created: The creation time of the Streaming Locator.
:vartype created: ~datetime.datetime
:ivar start_time: The start time of the Streaming Locator.
:vartype start_time: ~datetime.datetime
:ivar end_time: The end time of the Streaming Locator.
:vartype end_time: ~datetime.datetime
:ivar streaming_locator_id: StreamingLocatorId of the Streaming Locator.
:vartype streaming_locator_id: str
:ivar streaming_policy_name: Name of the Streaming Policy used by this Streaming Locator.
:vartype streaming_policy_name: str
:ivar default_content_key_policy_name: Name of the default ContentKeyPolicy used by this
Streaming Locator.
:vartype default_content_key_policy_name: str
"""
_validation = {
'name': {'readonly': True},
'asset_name': {'readonly': True},
'created': {'readonly': True},
'start_time': {'readonly': True},
'end_time': {'readonly': True},
'streaming_locator_id': {'readonly': True},
'streaming_policy_name': {'readonly': True},
'default_content_key_policy_name': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'asset_name': {'key': 'assetName', 'type': 'str'},
'created': {'key': 'created', 'type': 'iso-8601'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'streaming_locator_id': {'key': 'streamingLocatorId', 'type': 'str'},
'streaming_policy_name': {'key': 'streamingPolicyName', 'type': 'str'},
'default_content_key_policy_name': {'key': 'defaultContentKeyPolicyName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AssetStreamingLocator, self).__init__(**kwargs)
self.name = None
self.asset_name = None
self.created = None
self.start_time = None
self.end_time = None
self.streaming_locator_id = None
self.streaming_policy_name = None
self.default_content_key_policy_name = None
class Preset(msrest.serialization.Model):
"""Base type for all Presets, which define the recipe or instructions on how the input media files should be processed.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AudioAnalyzerPreset, BuiltInStandardEncoderPreset, FaceDetectorPreset, StandardEncoderPreset.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.AudioAnalyzerPreset': 'AudioAnalyzerPreset', '#Microsoft.Media.BuiltInStandardEncoderPreset': 'BuiltInStandardEncoderPreset', '#Microsoft.Media.FaceDetectorPreset': 'FaceDetectorPreset', '#Microsoft.Media.StandardEncoderPreset': 'StandardEncoderPreset'}
}
def __init__(
self,
**kwargs
):
super(Preset, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
class AudioAnalyzerPreset(Preset):
"""The Audio Analyzer preset applies a pre-defined set of AI-based analysis operations, including speech transcription. Currently, the preset supports processing of content with a single audio track.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: VideoAnalyzerPreset.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param audio_language: The language for the audio payload in the input using the BCP-47 format
of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is
recommended that you specify it. The language must be specified explicitly for
AudioAnalysisMode::Basic, since automatic language detection is not included in basic mode. If
the language isn't specified or set to null, automatic language detection will choose the first
language detected and process with the selected language for the duration of the file. It does
not currently support dynamically switching between languages after the first language is
detected. The automatic detection works best with audio recordings with clearly discernable
    speech. If automatic detection fails to find the language, transcription falls back to
    'en-US'. The list of supported languages is available here:
https://go.microsoft.com/fwlink/?linkid=2109463.
:type audio_language: str
:param mode: Determines the set of audio analysis operations to be performed. If unspecified,
the Standard AudioAnalysisMode would be chosen. Possible values include: "Standard", "Basic".
:type mode: str or ~azure.mgmt.media.models.AudioAnalysisMode
:param experimental_options: Dictionary containing key value pairs for parameters not exposed
in the preset itself.
:type experimental_options: dict[str, str]
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'audio_language': {'key': 'audioLanguage', 'type': 'str'},
'mode': {'key': 'mode', 'type': 'str'},
'experimental_options': {'key': 'experimentalOptions', 'type': '{str}'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.VideoAnalyzerPreset': 'VideoAnalyzerPreset'}
}
def __init__(
self,
**kwargs
):
super(AudioAnalyzerPreset, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.AudioAnalyzerPreset' # type: str
self.audio_language = kwargs.get('audio_language', None)
self.mode = kwargs.get('mode', None)
self.experimental_options = kwargs.get('experimental_options', None)
class Overlay(msrest.serialization.Model):
"""Base type for all overlays - image, audio or video.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AudioOverlay, VideoOverlay.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param input_label: Required. The label of the job input which is to be used as an overlay. The
Input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP
format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file. See
https://aka.ms/mesformats for the complete list of supported audio and video file formats.
:type input_label: str
:param start: The start position, with reference to the input video, at which the overlay
starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5
seconds into the input video. If not specified the overlay starts from the beginning of the
input video.
:type start: ~datetime.timedelta
:param end: The end position, with reference to the input video, at which the overlay ends. The
value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into
the input video. If not specified or the value is greater than the input video duration, the
overlay will be applied until the end of the input video if the overlay media duration is
greater than the input video duration, else the overlay will last as long as the overlay media
duration.
:type end: ~datetime.timedelta
:param fade_in_duration: The duration over which the overlay fades in onto the input video. The
value should be in ISO 8601 duration format. If not specified the default behavior is to have
no fade in (same as PT0S).
:type fade_in_duration: ~datetime.timedelta
:param fade_out_duration: The duration over which the overlay fades out of the input video. The
value should be in ISO 8601 duration format. If not specified the default behavior is to have
no fade out (same as PT0S).
:type fade_out_duration: ~datetime.timedelta
:param audio_gain_level: The gain level of audio in the overlay. The value should be in the
range [0, 1.0]. The default is 1.0.
:type audio_gain_level: float
"""
_validation = {
'odata_type': {'required': True},
'input_label': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'input_label': {'key': 'inputLabel', 'type': 'str'},
'start': {'key': 'start', 'type': 'duration'},
'end': {'key': 'end', 'type': 'duration'},
'fade_in_duration': {'key': 'fadeInDuration', 'type': 'duration'},
'fade_out_duration': {'key': 'fadeOutDuration', 'type': 'duration'},
'audio_gain_level': {'key': 'audioGainLevel', 'type': 'float'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.AudioOverlay': 'AudioOverlay', '#Microsoft.Media.VideoOverlay': 'VideoOverlay'}
}
def __init__(
self,
**kwargs
):
super(Overlay, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.input_label = kwargs['input_label']
self.start = kwargs.get('start', None)
self.end = kwargs.get('end', None)
self.fade_in_duration = kwargs.get('fade_in_duration', None)
self.fade_out_duration = kwargs.get('fade_out_duration', None)
self.audio_gain_level = kwargs.get('audio_gain_level', None)
class AudioOverlay(Overlay):
"""Describes the properties of an audio overlay.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param input_label: Required. The label of the job input which is to be used as an overlay. The
Input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP
format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file. See
https://aka.ms/mesformats for the complete list of supported audio and video file formats.
:type input_label: str
:param start: The start position, with reference to the input video, at which the overlay
starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5
seconds into the input video. If not specified the overlay starts from the beginning of the
input video.
:type start: ~datetime.timedelta
:param end: The end position, with reference to the input video, at which the overlay ends. The
value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into
the input video. If not specified or the value is greater than the input video duration, the
overlay will be applied until the end of the input video if the overlay media duration is
greater than the input video duration, else the overlay will last as long as the overlay media
duration.
:type end: ~datetime.timedelta
:param fade_in_duration: The duration over which the overlay fades in onto the input video. The
value should be in ISO 8601 duration format. If not specified the default behavior is to have
no fade in (same as PT0S).
:type fade_in_duration: ~datetime.timedelta
:param fade_out_duration: The duration over which the overlay fades out of the input video. The
value should be in ISO 8601 duration format. If not specified the default behavior is to have
no fade out (same as PT0S).
:type fade_out_duration: ~datetime.timedelta
:param audio_gain_level: The gain level of audio in the overlay. The value should be in the
range [0, 1.0]. The default is 1.0.
:type audio_gain_level: float
"""
_validation = {
'odata_type': {'required': True},
'input_label': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'input_label': {'key': 'inputLabel', 'type': 'str'},
'start': {'key': 'start', 'type': 'duration'},
'end': {'key': 'end', 'type': 'duration'},
'fade_in_duration': {'key': 'fadeInDuration', 'type': 'duration'},
'fade_out_duration': {'key': 'fadeOutDuration', 'type': 'duration'},
'audio_gain_level': {'key': 'audioGainLevel', 'type': 'float'},
}
def __init__(
self,
**kwargs
):
super(AudioOverlay, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.AudioOverlay' # type: str
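# Usage sketch (editor's illustrative addition): an AudioOverlay positioned with standard-library
# datetime.timedelta values, which msrest serializes to the ISO 8601 durations described above.
# The "audioTrack" label and the timing values are assumed example inputs.
def _example_audio_overlay():
    import datetime
    return AudioOverlay(
        input_label="audioTrack",
        start=datetime.timedelta(seconds=5),
        fade_in_duration=datetime.timedelta(seconds=1),
        audio_gain_level=0.5,
    )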
class TrackDescriptor(msrest.serialization.Model):
"""Base type for all TrackDescriptor types, which define the metadata and selection for tracks that should be processed by a Job.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AudioTrackDescriptor, VideoTrackDescriptor.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.AudioTrackDescriptor': 'AudioTrackDescriptor', '#Microsoft.Media.VideoTrackDescriptor': 'VideoTrackDescriptor'}
}
def __init__(
self,
**kwargs
):
super(TrackDescriptor, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
class AudioTrackDescriptor(TrackDescriptor):
"""A TrackSelection to select audio tracks.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: SelectAudioTrackByAttribute, SelectAudioTrackById.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param channel_mapping: Optional designation for single channel audio tracks. Can be used to
combine the tracks into stereo or multi-channel audio tracks. Possible values include:
"FrontLeft", "FrontRight", "Center", "LowFrequencyEffects", "BackLeft", "BackRight",
"StereoLeft", "StereoRight".
:type channel_mapping: str or ~azure.mgmt.media.models.ChannelMapping
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'channel_mapping': {'key': 'channelMapping', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.SelectAudioTrackByAttribute': 'SelectAudioTrackByAttribute', '#Microsoft.Media.SelectAudioTrackById': 'SelectAudioTrackById'}
}
def __init__(
self,
**kwargs
):
super(AudioTrackDescriptor, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.AudioTrackDescriptor' # type: str
self.channel_mapping = kwargs.get('channel_mapping', None)
class BuiltInStandardEncoderPreset(Preset):
"""Describes a built-in preset for encoding the input video with the Standard Encoder.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param preset_name: Required. The built-in preset to be used for encoding videos. Possible
values include: "H264SingleBitrateSD", "H264SingleBitrate720p", "H264SingleBitrate1080p",
"AdaptiveStreaming", "AACGoodQualityAudio", "ContentAwareEncodingExperimental",
"ContentAwareEncoding", "CopyAllBitrateNonInterleaved", "H264MultipleBitrate1080p",
"H264MultipleBitrate720p", "H264MultipleBitrateSD", "H265ContentAwareEncoding",
"H265AdaptiveStreaming", "H265SingleBitrate720p", "H265SingleBitrate1080p",
"H265SingleBitrate4K".
:type preset_name: str or ~azure.mgmt.media.models.EncoderNamedPreset
"""
_validation = {
'odata_type': {'required': True},
'preset_name': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'preset_name': {'key': 'presetName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(BuiltInStandardEncoderPreset, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.BuiltInStandardEncoderPreset' # type: str
self.preset_name = kwargs['preset_name']
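# Usage sketch (editor's illustrative addition): the built-in presets are selected purely by
# name, so a Transform output only needs the preset_name value. "AdaptiveStreaming" is one of
# the possible values listed in the docstring above.
def _example_built_in_preset():
    return BuiltInStandardEncoderPreset(preset_name="AdaptiveStreaming")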
class CbcsDrmConfiguration(msrest.serialization.Model):
"""Class to specify DRM configurations of CommonEncryptionCbcs scheme in Streaming Policy.
:param fair_play: FairPlay configurations.
:type fair_play: ~azure.mgmt.media.models.StreamingPolicyFairPlayConfiguration
:param play_ready: PlayReady configurations.
:type play_ready: ~azure.mgmt.media.models.StreamingPolicyPlayReadyConfiguration
:param widevine: Widevine configurations.
:type widevine: ~azure.mgmt.media.models.StreamingPolicyWidevineConfiguration
"""
_attribute_map = {
'fair_play': {'key': 'fairPlay', 'type': 'StreamingPolicyFairPlayConfiguration'},
'play_ready': {'key': 'playReady', 'type': 'StreamingPolicyPlayReadyConfiguration'},
'widevine': {'key': 'widevine', 'type': 'StreamingPolicyWidevineConfiguration'},
}
def __init__(
self,
**kwargs
):
super(CbcsDrmConfiguration, self).__init__(**kwargs)
self.fair_play = kwargs.get('fair_play', None)
self.play_ready = kwargs.get('play_ready', None)
self.widevine = kwargs.get('widevine', None)
class CencDrmConfiguration(msrest.serialization.Model):
"""Class to specify DRM configurations of CommonEncryptionCenc scheme in Streaming Policy.
:param play_ready: PlayReady configurations.
:type play_ready: ~azure.mgmt.media.models.StreamingPolicyPlayReadyConfiguration
:param widevine: Widevine configurations.
:type widevine: ~azure.mgmt.media.models.StreamingPolicyWidevineConfiguration
"""
_attribute_map = {
'play_ready': {'key': 'playReady', 'type': 'StreamingPolicyPlayReadyConfiguration'},
'widevine': {'key': 'widevine', 'type': 'StreamingPolicyWidevineConfiguration'},
}
def __init__(
self,
**kwargs
):
super(CencDrmConfiguration, self).__init__(**kwargs)
self.play_ready = kwargs.get('play_ready', None)
self.widevine = kwargs.get('widevine', None)
class CheckNameAvailabilityInput(msrest.serialization.Model):
"""The input to the check name availability request.
:param name: The account name.
:type name: str
:param type: The account type. For a Media Services account, this should be 'MediaServices'.
:type type: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CheckNameAvailabilityInput, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.type = kwargs.get('type', None)
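# Usage sketch (editor's illustrative addition): checking availability of a Media Services
# account name. Per the docstring above, the type should be 'MediaServices' for a Media
# Services account; "contosomedia" is an assumed example name.
def _example_check_name_availability_input():
    return CheckNameAvailabilityInput(name="contosomedia", type="MediaServices")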
class CommonEncryptionCbcs(msrest.serialization.Model):
"""Class for CommonEncryptionCbcs encryption scheme.
:param enabled_protocols: Representing supported protocols.
:type enabled_protocols: ~azure.mgmt.media.models.EnabledProtocols
:param clear_tracks: Representing which tracks should not be encrypted.
:type clear_tracks: list[~azure.mgmt.media.models.TrackSelection]
:param content_keys: Representing default content key for each encryption scheme and separate
content keys for specific tracks.
:type content_keys: ~azure.mgmt.media.models.StreamingPolicyContentKeys
:param drm: Configuration of DRMs for current encryption scheme.
:type drm: ~azure.mgmt.media.models.CbcsDrmConfiguration
"""
_attribute_map = {
'enabled_protocols': {'key': 'enabledProtocols', 'type': 'EnabledProtocols'},
'clear_tracks': {'key': 'clearTracks', 'type': '[TrackSelection]'},
'content_keys': {'key': 'contentKeys', 'type': 'StreamingPolicyContentKeys'},
'drm': {'key': 'drm', 'type': 'CbcsDrmConfiguration'},
}
def __init__(
self,
**kwargs
):
super(CommonEncryptionCbcs, self).__init__(**kwargs)
self.enabled_protocols = kwargs.get('enabled_protocols', None)
self.clear_tracks = kwargs.get('clear_tracks', None)
self.content_keys = kwargs.get('content_keys', None)
self.drm = kwargs.get('drm', None)
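# Usage sketch (editor's illustrative addition): a minimal CommonEncryptionCbcs block for a
# Streaming Policy, enabling DASH/HLS and attaching an (empty) CbcsDrmConfiguration. It assumes
# EnabledProtocols, defined later in this module, is available at call time; real policies
# would also populate content_keys and the DRM settings.
def _example_common_encryption_cbcs():
    protocols = EnabledProtocols(download=False, dash=True, hls=True, smooth_streaming=True)
    return CommonEncryptionCbcs(enabled_protocols=protocols, drm=CbcsDrmConfiguration())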
class CommonEncryptionCenc(msrest.serialization.Model):
"""Class for envelope encryption scheme.
:param enabled_protocols: Representing supported protocols.
:type enabled_protocols: ~azure.mgmt.media.models.EnabledProtocols
:param clear_tracks: Representing which tracks should not be encrypted.
:type clear_tracks: list[~azure.mgmt.media.models.TrackSelection]
:param content_keys: Representing default content key for each encryption scheme and separate
content keys for specific tracks.
:type content_keys: ~azure.mgmt.media.models.StreamingPolicyContentKeys
:param drm: Configuration of DRMs for CommonEncryptionCenc encryption scheme.
:type drm: ~azure.mgmt.media.models.CencDrmConfiguration
"""
_attribute_map = {
'enabled_protocols': {'key': 'enabledProtocols', 'type': 'EnabledProtocols'},
'clear_tracks': {'key': 'clearTracks', 'type': '[TrackSelection]'},
'content_keys': {'key': 'contentKeys', 'type': 'StreamingPolicyContentKeys'},
'drm': {'key': 'drm', 'type': 'CencDrmConfiguration'},
}
def __init__(
self,
**kwargs
):
super(CommonEncryptionCenc, self).__init__(**kwargs)
self.enabled_protocols = kwargs.get('enabled_protocols', None)
self.clear_tracks = kwargs.get('clear_tracks', None)
self.content_keys = kwargs.get('content_keys', None)
self.drm = kwargs.get('drm', None)
class ContentKeyPolicy(ProxyResource):
"""A Content Key Policy resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: The system metadata relating to this resource.
:vartype system_data: ~azure.mgmt.media.models.SystemData
:ivar policy_id: The legacy Policy ID.
:vartype policy_id: str
:ivar created: The creation date of the Policy.
:vartype created: ~datetime.datetime
:ivar last_modified: The last modified date of the Policy.
:vartype last_modified: ~datetime.datetime
:param description: A description for the Policy.
:type description: str
:param options: The Key Policy options.
:type options: list[~azure.mgmt.media.models.ContentKeyPolicyOption]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'policy_id': {'readonly': True},
'created': {'readonly': True},
'last_modified': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'policy_id': {'key': 'properties.policyId', 'type': 'str'},
'created': {'key': 'properties.created', 'type': 'iso-8601'},
'last_modified': {'key': 'properties.lastModified', 'type': 'iso-8601'},
'description': {'key': 'properties.description', 'type': 'str'},
'options': {'key': 'properties.options', 'type': '[ContentKeyPolicyOption]'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicy, self).__init__(**kwargs)
self.system_data = None
self.policy_id = None
self.created = None
self.last_modified = None
self.description = kwargs.get('description', None)
self.options = kwargs.get('options', None)
class ContentKeyPolicyConfiguration(msrest.serialization.Model):
"""Base class for Content Key Policy configuration. A derived class must be used to create a configuration.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: ContentKeyPolicyClearKeyConfiguration, ContentKeyPolicyFairPlayConfiguration, ContentKeyPolicyPlayReadyConfiguration, ContentKeyPolicyUnknownConfiguration, ContentKeyPolicyWidevineConfiguration.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.ContentKeyPolicyClearKeyConfiguration': 'ContentKeyPolicyClearKeyConfiguration', '#Microsoft.Media.ContentKeyPolicyFairPlayConfiguration': 'ContentKeyPolicyFairPlayConfiguration', '#Microsoft.Media.ContentKeyPolicyPlayReadyConfiguration': 'ContentKeyPolicyPlayReadyConfiguration', '#Microsoft.Media.ContentKeyPolicyUnknownConfiguration': 'ContentKeyPolicyUnknownConfiguration', '#Microsoft.Media.ContentKeyPolicyWidevineConfiguration': 'ContentKeyPolicyWidevineConfiguration'}
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyConfiguration, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
class ContentKeyPolicyClearKeyConfiguration(ContentKeyPolicyConfiguration):
"""Represents a configuration for non-DRM keys.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyClearKeyConfiguration, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.ContentKeyPolicyClearKeyConfiguration' # type: str
class ContentKeyPolicyCollection(msrest.serialization.Model):
"""A collection of ContentKeyPolicy items.
:param value: A collection of ContentKeyPolicy items.
:type value: list[~azure.mgmt.media.models.ContentKeyPolicy]
:param odata_next_link: A link to the next page of the collection (when the collection contains
too many results to return in one response).
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ContentKeyPolicy]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class ContentKeyPolicyFairPlayConfiguration(ContentKeyPolicyConfiguration):
"""Specifies a configuration for FairPlay licenses.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param ask: Required. The key that must be used as FairPlay Application Secret key.
:type ask: bytearray
:param fair_play_pfx_password: Required. The password encrypting FairPlay certificate in PKCS
12 (pfx) format.
:type fair_play_pfx_password: str
:param fair_play_pfx: Required. The Base64 representation of FairPlay certificate in PKCS 12
(pfx) format (including private key).
:type fair_play_pfx: str
:param rental_and_lease_key_type: Required. The rental and lease key type. Possible values
include: "Unknown", "Undefined", "DualExpiry", "PersistentUnlimited", "PersistentLimited".
:type rental_and_lease_key_type: str or
~azure.mgmt.media.models.ContentKeyPolicyFairPlayRentalAndLeaseKeyType
:param rental_duration: Required. The rental duration. Must be greater than or equal to 0.
:type rental_duration: long
:param offline_rental_configuration: Offline rental policy.
:type offline_rental_configuration:
~azure.mgmt.media.models.ContentKeyPolicyFairPlayOfflineRentalConfiguration
"""
_validation = {
'odata_type': {'required': True},
'ask': {'required': True},
'fair_play_pfx_password': {'required': True},
'fair_play_pfx': {'required': True},
'rental_and_lease_key_type': {'required': True},
'rental_duration': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'ask': {'key': 'ask', 'type': 'bytearray'},
'fair_play_pfx_password': {'key': 'fairPlayPfxPassword', 'type': 'str'},
'fair_play_pfx': {'key': 'fairPlayPfx', 'type': 'str'},
'rental_and_lease_key_type': {'key': 'rentalAndLeaseKeyType', 'type': 'str'},
'rental_duration': {'key': 'rentalDuration', 'type': 'long'},
'offline_rental_configuration': {'key': 'offlineRentalConfiguration', 'type': 'ContentKeyPolicyFairPlayOfflineRentalConfiguration'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyFairPlayConfiguration, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.ContentKeyPolicyFairPlayConfiguration' # type: str
self.ask = kwargs['ask']
self.fair_play_pfx_password = kwargs['fair_play_pfx_password']
self.fair_play_pfx = kwargs['fair_play_pfx']
self.rental_and_lease_key_type = kwargs['rental_and_lease_key_type']
self.rental_duration = kwargs['rental_duration']
self.offline_rental_configuration = kwargs.get('offline_rental_configuration', None)
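# Usage sketch (editor's illustrative addition): a FairPlay configuration with placeholder
# secrets. The bytearray and string values below are dummies standing in for the real
# Application Secret Key and PKCS 12 certificate; the rental values are assumed examples and
# satisfy the required fields of the model.
def _example_fairplay_configuration():
    return ContentKeyPolicyFairPlayConfiguration(
        ask=bytearray(16),
        fair_play_pfx_password="<pfx password>",
        fair_play_pfx="<base64-encoded pfx>",
        rental_and_lease_key_type="PersistentUnlimited",
        rental_duration=0,
    )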
class ContentKeyPolicyFairPlayOfflineRentalConfiguration(msrest.serialization.Model):
"""ContentKeyPolicyFairPlayOfflineRentalConfiguration.
All required parameters must be populated in order to send to Azure.
:param playback_duration_seconds: Required. Playback duration.
:type playback_duration_seconds: long
:param storage_duration_seconds: Required. Storage duration.
:type storage_duration_seconds: long
"""
_validation = {
'playback_duration_seconds': {'required': True},
'storage_duration_seconds': {'required': True},
}
_attribute_map = {
'playback_duration_seconds': {'key': 'playbackDurationSeconds', 'type': 'long'},
'storage_duration_seconds': {'key': 'storageDurationSeconds', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyFairPlayOfflineRentalConfiguration, self).__init__(**kwargs)
self.playback_duration_seconds = kwargs['playback_duration_seconds']
self.storage_duration_seconds = kwargs['storage_duration_seconds']
class ContentKeyPolicyRestriction(msrest.serialization.Model):
"""Base class for Content Key Policy restrictions. A derived class must be used to create a restriction.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: ContentKeyPolicyOpenRestriction, ContentKeyPolicyTokenRestriction, ContentKeyPolicyUnknownRestriction.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.ContentKeyPolicyOpenRestriction': 'ContentKeyPolicyOpenRestriction', '#Microsoft.Media.ContentKeyPolicyTokenRestriction': 'ContentKeyPolicyTokenRestriction', '#Microsoft.Media.ContentKeyPolicyUnknownRestriction': 'ContentKeyPolicyUnknownRestriction'}
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyRestriction, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
class ContentKeyPolicyOpenRestriction(ContentKeyPolicyRestriction):
"""Represents an open restriction. License or key will be delivered on every request.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyOpenRestriction, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.ContentKeyPolicyOpenRestriction' # type: str
class ContentKeyPolicyOption(msrest.serialization.Model):
"""Represents a policy option.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar policy_option_id: The legacy Policy Option ID.
:vartype policy_option_id: str
:param name: The Policy Option description.
:type name: str
:param configuration: Required. The key delivery configuration.
:type configuration: ~azure.mgmt.media.models.ContentKeyPolicyConfiguration
:param restriction: Required. The requirements that must be met to deliver keys with this
configuration.
:type restriction: ~azure.mgmt.media.models.ContentKeyPolicyRestriction
"""
_validation = {
'policy_option_id': {'readonly': True},
'configuration': {'required': True},
'restriction': {'required': True},
}
_attribute_map = {
'policy_option_id': {'key': 'policyOptionId', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'configuration': {'key': 'configuration', 'type': 'ContentKeyPolicyConfiguration'},
'restriction': {'key': 'restriction', 'type': 'ContentKeyPolicyRestriction'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyOption, self).__init__(**kwargs)
self.policy_option_id = None
self.name = kwargs.get('name', None)
self.configuration = kwargs['configuration']
self.restriction = kwargs['restriction']
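# Usage sketch (editor's illustrative addition): a policy option pairs a key delivery
# configuration with a restriction. This combines the clear-key configuration and open
# restriction defined above; the option name is an assumed example.
def _example_content_key_policy_option():
    return ContentKeyPolicyOption(
        name="ClearKeyOpenOption",
        configuration=ContentKeyPolicyClearKeyConfiguration(),
        restriction=ContentKeyPolicyOpenRestriction(),
    )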
class ContentKeyPolicyPlayReadyConfiguration(ContentKeyPolicyConfiguration):
"""Specifies a configuration for PlayReady licenses.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param licenses: Required. The PlayReady licenses.
:type licenses: list[~azure.mgmt.media.models.ContentKeyPolicyPlayReadyLicense]
:param response_custom_data: The custom response data.
:type response_custom_data: str
"""
_validation = {
'odata_type': {'required': True},
'licenses': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'licenses': {'key': 'licenses', 'type': '[ContentKeyPolicyPlayReadyLicense]'},
'response_custom_data': {'key': 'responseCustomData', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyPlayReadyConfiguration, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.ContentKeyPolicyPlayReadyConfiguration' # type: str
self.licenses = kwargs['licenses']
self.response_custom_data = kwargs.get('response_custom_data', None)
class ContentKeyPolicyPlayReadyContentKeyLocation(msrest.serialization.Model):
"""Base class for content key ID location. A derived class must be used to represent the location.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: ContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader, ContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.ContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader': 'ContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader', '#Microsoft.Media.ContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier': 'ContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier'}
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyPlayReadyContentKeyLocation, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
class ContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader(ContentKeyPolicyPlayReadyContentKeyLocation):
"""Specifies that the content key ID is in the PlayReady header.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.ContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader' # type: str
class ContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier(ContentKeyPolicyPlayReadyContentKeyLocation):
"""Specifies that the content key ID is specified in the PlayReady configuration.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param key_id: Required. The content key ID.
:type key_id: str
"""
_validation = {
'odata_type': {'required': True},
'key_id': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'key_id': {'key': 'keyId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.ContentKeyPolicyPlayReadyContentEncryptionKeyFromKeyIdentifier' # type: str
self.key_id = kwargs['key_id']
class ContentKeyPolicyPlayReadyExplicitAnalogTelevisionRestriction(msrest.serialization.Model):
"""Configures the Explicit Analog Television Output Restriction control bits. For further details see the PlayReady Compliance Rules.
All required parameters must be populated in order to send to Azure.
:param best_effort: Required. Indicates whether this restriction is enforced on a Best Effort
basis.
:type best_effort: bool
:param configuration_data: Required. Configures the restriction control bits. Must be between 0
and 3 inclusive.
:type configuration_data: int
"""
_validation = {
'best_effort': {'required': True},
'configuration_data': {'required': True},
}
_attribute_map = {
'best_effort': {'key': 'bestEffort', 'type': 'bool'},
'configuration_data': {'key': 'configurationData', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyPlayReadyExplicitAnalogTelevisionRestriction, self).__init__(**kwargs)
self.best_effort = kwargs['best_effort']
self.configuration_data = kwargs['configuration_data']
class ContentKeyPolicyPlayReadyLicense(msrest.serialization.Model):
"""The PlayReady license.
All required parameters must be populated in order to send to Azure.
:param allow_test_devices: Required. A flag indicating whether test devices can use the
license.
:type allow_test_devices: bool
:param begin_date: The begin date of license.
:type begin_date: ~datetime.datetime
:param expiration_date: The expiration date of license.
:type expiration_date: ~datetime.datetime
:param relative_begin_date: The relative begin date of license.
:type relative_begin_date: ~datetime.timedelta
:param relative_expiration_date: The relative expiration date of license.
:type relative_expiration_date: ~datetime.timedelta
:param grace_period: The grace period of license.
:type grace_period: ~datetime.timedelta
:param play_right: The license PlayRight.
:type play_right: ~azure.mgmt.media.models.ContentKeyPolicyPlayReadyPlayRight
:param license_type: Required. The license type. Possible values include: "Unknown",
"NonPersistent", "Persistent".
:type license_type: str or ~azure.mgmt.media.models.ContentKeyPolicyPlayReadyLicenseType
:param content_key_location: Required. The content key location.
:type content_key_location:
~azure.mgmt.media.models.ContentKeyPolicyPlayReadyContentKeyLocation
:param content_type: Required. The PlayReady content type. Possible values include: "Unknown",
"Unspecified", "UltraVioletDownload", "UltraVioletStreaming".
:type content_type: str or ~azure.mgmt.media.models.ContentKeyPolicyPlayReadyContentType
"""
_validation = {
'allow_test_devices': {'required': True},
'license_type': {'required': True},
'content_key_location': {'required': True},
'content_type': {'required': True},
}
_attribute_map = {
'allow_test_devices': {'key': 'allowTestDevices', 'type': 'bool'},
'begin_date': {'key': 'beginDate', 'type': 'iso-8601'},
'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'},
'relative_begin_date': {'key': 'relativeBeginDate', 'type': 'duration'},
'relative_expiration_date': {'key': 'relativeExpirationDate', 'type': 'duration'},
'grace_period': {'key': 'gracePeriod', 'type': 'duration'},
'play_right': {'key': 'playRight', 'type': 'ContentKeyPolicyPlayReadyPlayRight'},
'license_type': {'key': 'licenseType', 'type': 'str'},
'content_key_location': {'key': 'contentKeyLocation', 'type': 'ContentKeyPolicyPlayReadyContentKeyLocation'},
'content_type': {'key': 'contentType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyPlayReadyLicense, self).__init__(**kwargs)
self.allow_test_devices = kwargs['allow_test_devices']
self.begin_date = kwargs.get('begin_date', None)
self.expiration_date = kwargs.get('expiration_date', None)
self.relative_begin_date = kwargs.get('relative_begin_date', None)
self.relative_expiration_date = kwargs.get('relative_expiration_date', None)
self.grace_period = kwargs.get('grace_period', None)
self.play_right = kwargs.get('play_right', None)
self.license_type = kwargs['license_type']
self.content_key_location = kwargs['content_key_location']
self.content_type = kwargs['content_type']
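# Usage sketch (editor's illustrative addition): a minimal non-persistent PlayReady license.
# The content key location reuses the "from header" variant defined above; license_type and
# content_type use values from the documented enums, and a real license would usually also
# carry a play_right.
def _example_playready_license():
    return ContentKeyPolicyPlayReadyLicense(
        allow_test_devices=False,
        license_type="NonPersistent",
        content_key_location=ContentKeyPolicyPlayReadyContentEncryptionKeyFromHeader(),
        content_type="Unspecified",
    )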
class ContentKeyPolicyPlayReadyPlayRight(msrest.serialization.Model):
"""Configures the Play Right in the PlayReady license.
All required parameters must be populated in order to send to Azure.
:param first_play_expiration: The amount of time that the license is valid after the license is
first used to play content.
:type first_play_expiration: ~datetime.timedelta
:param scms_restriction: Configures the Serial Copy Management System (SCMS) in the license.
Must be between 0 and 3 inclusive.
:type scms_restriction: int
:param agc_and_color_stripe_restriction: Configures Automatic Gain Control (AGC) and Color
Stripe in the license. Must be between 0 and 3 inclusive.
:type agc_and_color_stripe_restriction: int
:param explicit_analog_television_output_restriction: Configures the Explicit Analog Television
Output Restriction in the license. Configuration data must be between 0 and 3 inclusive.
:type explicit_analog_television_output_restriction:
~azure.mgmt.media.models.ContentKeyPolicyPlayReadyExplicitAnalogTelevisionRestriction
:param digital_video_only_content_restriction: Required. Enables the Digital Video Only Content
Restriction in the license.
:type digital_video_only_content_restriction: bool
:param image_constraint_for_analog_component_video_restriction: Required. Enables the Image
Constraint For Analog Component Video Restriction in the license.
:type image_constraint_for_analog_component_video_restriction: bool
:param image_constraint_for_analog_computer_monitor_restriction: Required. Enables the Image
Constraint For Analog Computer Monitor Restriction in the license.
:type image_constraint_for_analog_computer_monitor_restriction: bool
:param allow_passing_video_content_to_unknown_output: Required. Configures Unknown output
handling settings of the license. Possible values include: "Unknown", "NotAllowed", "Allowed",
"AllowedWithVideoConstriction".
:type allow_passing_video_content_to_unknown_output: str or
~azure.mgmt.media.models.ContentKeyPolicyPlayReadyUnknownOutputPassingOption
:param uncompressed_digital_video_opl: Specifies the output protection level for uncompressed
digital video.
:type uncompressed_digital_video_opl: int
:param compressed_digital_video_opl: Specifies the output protection level for compressed
digital video.
:type compressed_digital_video_opl: int
:param analog_video_opl: Specifies the output protection level for analog video.
:type analog_video_opl: int
:param compressed_digital_audio_opl: Specifies the output protection level for compressed
digital audio.
:type compressed_digital_audio_opl: int
:param uncompressed_digital_audio_opl: Specifies the output protection level for uncompressed
digital audio.
:type uncompressed_digital_audio_opl: int
"""
_validation = {
'digital_video_only_content_restriction': {'required': True},
'image_constraint_for_analog_component_video_restriction': {'required': True},
'image_constraint_for_analog_computer_monitor_restriction': {'required': True},
'allow_passing_video_content_to_unknown_output': {'required': True},
}
_attribute_map = {
'first_play_expiration': {'key': 'firstPlayExpiration', 'type': 'duration'},
'scms_restriction': {'key': 'scmsRestriction', 'type': 'int'},
'agc_and_color_stripe_restriction': {'key': 'agcAndColorStripeRestriction', 'type': 'int'},
'explicit_analog_television_output_restriction': {'key': 'explicitAnalogTelevisionOutputRestriction', 'type': 'ContentKeyPolicyPlayReadyExplicitAnalogTelevisionRestriction'},
'digital_video_only_content_restriction': {'key': 'digitalVideoOnlyContentRestriction', 'type': 'bool'},
'image_constraint_for_analog_component_video_restriction': {'key': 'imageConstraintForAnalogComponentVideoRestriction', 'type': 'bool'},
'image_constraint_for_analog_computer_monitor_restriction': {'key': 'imageConstraintForAnalogComputerMonitorRestriction', 'type': 'bool'},
'allow_passing_video_content_to_unknown_output': {'key': 'allowPassingVideoContentToUnknownOutput', 'type': 'str'},
'uncompressed_digital_video_opl': {'key': 'uncompressedDigitalVideoOpl', 'type': 'int'},
'compressed_digital_video_opl': {'key': 'compressedDigitalVideoOpl', 'type': 'int'},
'analog_video_opl': {'key': 'analogVideoOpl', 'type': 'int'},
'compressed_digital_audio_opl': {'key': 'compressedDigitalAudioOpl', 'type': 'int'},
'uncompressed_digital_audio_opl': {'key': 'uncompressedDigitalAudioOpl', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyPlayReadyPlayRight, self).__init__(**kwargs)
self.first_play_expiration = kwargs.get('first_play_expiration', None)
self.scms_restriction = kwargs.get('scms_restriction', None)
self.agc_and_color_stripe_restriction = kwargs.get('agc_and_color_stripe_restriction', None)
self.explicit_analog_television_output_restriction = kwargs.get('explicit_analog_television_output_restriction', None)
self.digital_video_only_content_restriction = kwargs['digital_video_only_content_restriction']
self.image_constraint_for_analog_component_video_restriction = kwargs['image_constraint_for_analog_component_video_restriction']
self.image_constraint_for_analog_computer_monitor_restriction = kwargs['image_constraint_for_analog_computer_monitor_restriction']
self.allow_passing_video_content_to_unknown_output = kwargs['allow_passing_video_content_to_unknown_output']
self.uncompressed_digital_video_opl = kwargs.get('uncompressed_digital_video_opl', None)
self.compressed_digital_video_opl = kwargs.get('compressed_digital_video_opl', None)
self.analog_video_opl = kwargs.get('analog_video_opl', None)
self.compressed_digital_audio_opl = kwargs.get('compressed_digital_audio_opl', None)
self.uncompressed_digital_audio_opl = kwargs.get('uncompressed_digital_audio_opl', None)
class ContentKeyPolicyProperties(msrest.serialization.Model):
"""The properties of the Content Key Policy.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar policy_id: The legacy Policy ID.
:vartype policy_id: str
:ivar created: The creation date of the Policy.
:vartype created: ~datetime.datetime
:ivar last_modified: The last modified date of the Policy.
:vartype last_modified: ~datetime.datetime
:param description: A description for the Policy.
:type description: str
:param options: Required. The Key Policy options.
:type options: list[~azure.mgmt.media.models.ContentKeyPolicyOption]
"""
_validation = {
'policy_id': {'readonly': True},
'created': {'readonly': True},
'last_modified': {'readonly': True},
'options': {'required': True},
}
_attribute_map = {
'policy_id': {'key': 'policyId', 'type': 'str'},
'created': {'key': 'created', 'type': 'iso-8601'},
'last_modified': {'key': 'lastModified', 'type': 'iso-8601'},
'description': {'key': 'description', 'type': 'str'},
'options': {'key': 'options', 'type': '[ContentKeyPolicyOption]'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyProperties, self).__init__(**kwargs)
self.policy_id = None
self.created = None
self.last_modified = None
self.description = kwargs.get('description', None)
self.options = kwargs['options']
class ContentKeyPolicyRestrictionTokenKey(msrest.serialization.Model):
"""Base class for Content Key Policy key for token validation. A derived class must be used to create a token key.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: ContentKeyPolicyRsaTokenKey, ContentKeyPolicySymmetricTokenKey, ContentKeyPolicyX509CertificateTokenKey.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.ContentKeyPolicyRsaTokenKey': 'ContentKeyPolicyRsaTokenKey', '#Microsoft.Media.ContentKeyPolicySymmetricTokenKey': 'ContentKeyPolicySymmetricTokenKey', '#Microsoft.Media.ContentKeyPolicyX509CertificateTokenKey': 'ContentKeyPolicyX509CertificateTokenKey'}
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyRestrictionTokenKey, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
class ContentKeyPolicyRsaTokenKey(ContentKeyPolicyRestrictionTokenKey):
"""Specifies a RSA key for token validation.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param exponent: Required. The RSA Parameter exponent.
:type exponent: bytearray
:param modulus: Required. The RSA Parameter modulus.
:type modulus: bytearray
"""
_validation = {
'odata_type': {'required': True},
'exponent': {'required': True},
'modulus': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'exponent': {'key': 'exponent', 'type': 'bytearray'},
'modulus': {'key': 'modulus', 'type': 'bytearray'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyRsaTokenKey, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.ContentKeyPolicyRsaTokenKey' # type: str
self.exponent = kwargs['exponent']
self.modulus = kwargs['modulus']
class ContentKeyPolicySymmetricTokenKey(ContentKeyPolicyRestrictionTokenKey):
"""Specifies a symmetric key for token validation.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param key_value: Required. The key value of the key.
:type key_value: bytearray
"""
_validation = {
'odata_type': {'required': True},
'key_value': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'key_value': {'key': 'keyValue', 'type': 'bytearray'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicySymmetricTokenKey, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.ContentKeyPolicySymmetricTokenKey' # type: str
self.key_value = kwargs['key_value']
class ContentKeyPolicyTokenClaim(msrest.serialization.Model):
"""Represents a token claim.
:param claim_type: Token claim type.
:type claim_type: str
:param claim_value: Token claim value.
:type claim_value: str
"""
_attribute_map = {
'claim_type': {'key': 'claimType', 'type': 'str'},
'claim_value': {'key': 'claimValue', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyTokenClaim, self).__init__(**kwargs)
self.claim_type = kwargs.get('claim_type', None)
self.claim_value = kwargs.get('claim_value', None)
class ContentKeyPolicyTokenRestriction(ContentKeyPolicyRestriction):
"""Represents a token restriction. Provided token must match these requirements for successful license or key delivery.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param issuer: Required. The token issuer.
:type issuer: str
:param audience: Required. The audience for the token.
:type audience: str
:param primary_verification_key: Required. The primary verification key.
:type primary_verification_key: ~azure.mgmt.media.models.ContentKeyPolicyRestrictionTokenKey
:param alternate_verification_keys: A list of alternative verification keys.
:type alternate_verification_keys:
list[~azure.mgmt.media.models.ContentKeyPolicyRestrictionTokenKey]
:param required_claims: A list of required token claims.
:type required_claims: list[~azure.mgmt.media.models.ContentKeyPolicyTokenClaim]
:param restriction_token_type: Required. The type of token. Possible values include: "Unknown",
"Swt", "Jwt".
:type restriction_token_type: str or
~azure.mgmt.media.models.ContentKeyPolicyRestrictionTokenType
:param open_id_connect_discovery_document: The OpenID connect discovery document.
:type open_id_connect_discovery_document: str
"""
_validation = {
'odata_type': {'required': True},
'issuer': {'required': True},
'audience': {'required': True},
'primary_verification_key': {'required': True},
'restriction_token_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'issuer': {'key': 'issuer', 'type': 'str'},
'audience': {'key': 'audience', 'type': 'str'},
'primary_verification_key': {'key': 'primaryVerificationKey', 'type': 'ContentKeyPolicyRestrictionTokenKey'},
'alternate_verification_keys': {'key': 'alternateVerificationKeys', 'type': '[ContentKeyPolicyRestrictionTokenKey]'},
'required_claims': {'key': 'requiredClaims', 'type': '[ContentKeyPolicyTokenClaim]'},
'restriction_token_type': {'key': 'restrictionTokenType', 'type': 'str'},
'open_id_connect_discovery_document': {'key': 'openIdConnectDiscoveryDocument', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyTokenRestriction, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.ContentKeyPolicyTokenRestriction' # type: str
self.issuer = kwargs['issuer']
self.audience = kwargs['audience']
self.primary_verification_key = kwargs['primary_verification_key']
self.alternate_verification_keys = kwargs.get('alternate_verification_keys', None)
self.required_claims = kwargs.get('required_claims', None)
self.restriction_token_type = kwargs['restriction_token_type']
self.open_id_connect_discovery_document = kwargs.get('open_id_connect_discovery_document', None)
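# Usage sketch (editor's illustrative addition): a JWT token restriction verified with the
# symmetric key type defined above. The issuer, audience, secret and claim values are assumed
# placeholders; required_claims and alternate_verification_keys remain optional.
def _example_token_restriction():
    return ContentKeyPolicyTokenRestriction(
        issuer="https://issuer.example.com",
        audience="urn:example:audience",
        primary_verification_key=ContentKeyPolicySymmetricTokenKey(
            key_value=bytearray(b"<shared signing secret>")
        ),
        restriction_token_type="Jwt",
        required_claims=[ContentKeyPolicyTokenClaim(claim_type="urn:example:claim")],
    )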
class ContentKeyPolicyUnknownConfiguration(ContentKeyPolicyConfiguration):
"""Represents a ContentKeyPolicyConfiguration that is unavailable in the current API version.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyUnknownConfiguration, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.ContentKeyPolicyUnknownConfiguration' # type: str
class ContentKeyPolicyUnknownRestriction(ContentKeyPolicyRestriction):
"""Represents a ContentKeyPolicyRestriction that is unavailable in the current API version.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyUnknownRestriction, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.ContentKeyPolicyUnknownRestriction' # type: str
class ContentKeyPolicyWidevineConfiguration(ContentKeyPolicyConfiguration):
"""Specifies a configuration for Widevine licenses.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param widevine_template: Required. The Widevine template.
:type widevine_template: str
"""
_validation = {
'odata_type': {'required': True},
'widevine_template': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'widevine_template': {'key': 'widevineTemplate', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyWidevineConfiguration, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.ContentKeyPolicyWidevineConfiguration' # type: str
self.widevine_template = kwargs['widevine_template']
class ContentKeyPolicyX509CertificateTokenKey(ContentKeyPolicyRestrictionTokenKey):
"""Specifies a certificate for token validation.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param raw_body: Required. The raw data field of a certificate in PKCS 12 format
(X509Certificate2 in .NET).
:type raw_body: bytearray
"""
_validation = {
'odata_type': {'required': True},
'raw_body': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'raw_body': {'key': 'rawBody', 'type': 'bytearray'},
}
def __init__(
self,
**kwargs
):
super(ContentKeyPolicyX509CertificateTokenKey, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.ContentKeyPolicyX509CertificateTokenKey' # type: str
self.raw_body = kwargs['raw_body']
class CopyAudio(Codec):
"""A codec flag, which tells the encoder to copy the input audio bitstream.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param label: An optional label for the codec. The label can be used to control muxing
behavior.
:type label: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CopyAudio, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.CopyAudio' # type: str
class CopyVideo(Codec):
"""A codec flag, which tells the encoder to copy the input video bitstream without re-encoding.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param label: An optional label for the codec. The label can be used to control muxing
behavior.
:type label: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CopyVideo, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.CopyVideo' # type: str
class CrossSiteAccessPolicies(msrest.serialization.Model):
"""The client access policy.
:param client_access_policy: The content of clientaccesspolicy.xml used by Silverlight.
:type client_access_policy: str
:param cross_domain_policy: The content of crossdomain.xml used by Silverlight.
:type cross_domain_policy: str
"""
_attribute_map = {
'client_access_policy': {'key': 'clientAccessPolicy', 'type': 'str'},
'cross_domain_policy': {'key': 'crossDomainPolicy', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CrossSiteAccessPolicies, self).__init__(**kwargs)
self.client_access_policy = kwargs.get('client_access_policy', None)
self.cross_domain_policy = kwargs.get('cross_domain_policy', None)
class DefaultKey(msrest.serialization.Model):
"""Class to specify properties of default content key for each encryption scheme.
:param label: Label can be used to specify Content Key when creating a Streaming Locator.
:type label: str
:param policy_name: Policy used by Default Key.
:type policy_name: str
"""
_attribute_map = {
'label': {'key': 'label', 'type': 'str'},
'policy_name': {'key': 'policyName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DefaultKey, self).__init__(**kwargs)
self.label = kwargs.get('label', None)
self.policy_name = kwargs.get('policy_name', None)
class Deinterlace(msrest.serialization.Model):
"""Describes the de-interlacing settings.
:param parity: The field parity for de-interlacing, defaults to Auto. Possible values include:
"Auto", "TopFieldFirst", "BottomFieldFirst".
:type parity: str or ~azure.mgmt.media.models.DeinterlaceParity
:param mode: The deinterlacing mode. Defaults to AutoPixelAdaptive. Possible values include:
"Off", "AutoPixelAdaptive".
:type mode: str or ~azure.mgmt.media.models.DeinterlaceMode
"""
_attribute_map = {
'parity': {'key': 'parity', 'type': 'str'},
'mode': {'key': 'mode', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Deinterlace, self).__init__(**kwargs)
self.parity = kwargs.get('parity', None)
self.mode = kwargs.get('mode', None)
class EdgePolicies(msrest.serialization.Model):
"""EdgePolicies.
:param usage_data_collection_policy:
:type usage_data_collection_policy: ~azure.mgmt.media.models.EdgeUsageDataCollectionPolicy
"""
_attribute_map = {
'usage_data_collection_policy': {'key': 'usageDataCollectionPolicy', 'type': 'EdgeUsageDataCollectionPolicy'},
}
def __init__(
self,
**kwargs
):
super(EdgePolicies, self).__init__(**kwargs)
self.usage_data_collection_policy = kwargs.get('usage_data_collection_policy', None)
class EdgeUsageDataCollectionPolicy(msrest.serialization.Model):
"""EdgeUsageDataCollectionPolicy.
:param data_collection_frequency: Usage data collection frequency in ISO 8601 duration format
e.g. PT10M, PT5H.
:type data_collection_frequency: str
:param data_reporting_frequency: Usage data reporting frequency in ISO 8601 duration format
e.g. PT10M, PT5H.
:type data_reporting_frequency: str
:param max_allowed_unreported_usage_duration: Maximum time the device can go without reporting
usage data before its functionality is restricted.
:type max_allowed_unreported_usage_duration: str
:param event_hub_details: Details of Event Hub where the usage will be reported.
:type event_hub_details: ~azure.mgmt.media.models.EdgeUsageDataEventHub
"""
_attribute_map = {
'data_collection_frequency': {'key': 'dataCollectionFrequency', 'type': 'str'},
'data_reporting_frequency': {'key': 'dataReportingFrequency', 'type': 'str'},
'max_allowed_unreported_usage_duration': {'key': 'maxAllowedUnreportedUsageDuration', 'type': 'str'},
'event_hub_details': {'key': 'eventHubDetails', 'type': 'EdgeUsageDataEventHub'},
}
def __init__(
self,
**kwargs
):
super(EdgeUsageDataCollectionPolicy, self).__init__(**kwargs)
self.data_collection_frequency = kwargs.get('data_collection_frequency', None)
self.data_reporting_frequency = kwargs.get('data_reporting_frequency', None)
self.max_allowed_unreported_usage_duration = kwargs.get('max_allowed_unreported_usage_duration', None)
self.event_hub_details = kwargs.get('event_hub_details', None)
class EdgeUsageDataEventHub(msrest.serialization.Model):
"""EdgeUsageDataEventHub.
:param name: Name of the Event Hub where usage will be reported.
:type name: str
:param namespace: Namespace of the Event Hub where usage will be reported.
:type namespace: str
:param token: SAS token needed to interact with Event Hub.
:type token: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'namespace': {'key': 'namespace', 'type': 'str'},
'token': {'key': 'token', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EdgeUsageDataEventHub, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.namespace = kwargs.get('namespace', None)
self.token = kwargs.get('token', None)
class EnabledProtocols(msrest.serialization.Model):
"""Class to specify which protocols are enabled.
All required parameters must be populated in order to send to Azure.
:param download: Required. Enable Download protocol or not.
:type download: bool
:param dash: Required. Enable DASH protocol or not.
:type dash: bool
:param hls: Required. Enable HLS protocol or not.
:type hls: bool
:param smooth_streaming: Required. Enable SmoothStreaming protocol or not.
:type smooth_streaming: bool
"""
_validation = {
'download': {'required': True},
'dash': {'required': True},
'hls': {'required': True},
'smooth_streaming': {'required': True},
}
_attribute_map = {
'download': {'key': 'download', 'type': 'bool'},
'dash': {'key': 'dash', 'type': 'bool'},
'hls': {'key': 'hls', 'type': 'bool'},
'smooth_streaming': {'key': 'smoothStreaming', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(EnabledProtocols, self).__init__(**kwargs)
self.download = kwargs['download']
self.dash = kwargs['dash']
self.hls = kwargs['hls']
self.smooth_streaming = kwargs['smooth_streaming']
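# Usage sketch (editor's illustrative addition): all four protocol flags are required by the
# model, so an EnabledProtocols instance always states each one explicitly; here only the
# adaptive streaming protocols are switched on.
def _example_enabled_protocols():
    return EnabledProtocols(download=False, dash=True, hls=True, smooth_streaming=True)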
class EntityNameAvailabilityCheckOutput(msrest.serialization.Model):
"""The response from the check name availability request.
All required parameters must be populated in order to send to Azure.
:param name_available: Required. Specifies if the name is available.
:type name_available: bool
:param reason: Specifies the reason if the name is not available.
:type reason: str
:param message: Specifies the detailed reason if the name is not available.
:type message: str
"""
_validation = {
'name_available': {'required': True},
}
_attribute_map = {
'name_available': {'key': 'nameAvailable', 'type': 'bool'},
'reason': {'key': 'reason', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EntityNameAvailabilityCheckOutput, self).__init__(**kwargs)
self.name_available = kwargs['name_available']
self.reason = kwargs.get('reason', None)
self.message = kwargs.get('message', None)
class EnvelopeEncryption(msrest.serialization.Model):
"""Class for EnvelopeEncryption encryption scheme.
:param enabled_protocols: Representing supported protocols.
:type enabled_protocols: ~azure.mgmt.media.models.EnabledProtocols
:param clear_tracks: Representing which tracks should not be encrypted.
:type clear_tracks: list[~azure.mgmt.media.models.TrackSelection]
:param content_keys: Representing default content key for each encryption scheme and separate
content keys for specific tracks.
:type content_keys: ~azure.mgmt.media.models.StreamingPolicyContentKeys
:param custom_key_acquisition_url_template: Template for the URL of the custom service
delivering keys to end user players. Not required when using Azure Media Services for issuing
keys. The template supports replaceable tokens that the service will update at runtime with
the value specific to the request. The currently supported token values are
{AlternativeMediaId}, which is replaced with the value of
StreamingLocatorId.AlternativeMediaId, and {ContentKeyId}, which is replaced with the value of
identifier of the key being requested.
:type custom_key_acquisition_url_template: str
"""
_attribute_map = {
'enabled_protocols': {'key': 'enabledProtocols', 'type': 'EnabledProtocols'},
'clear_tracks': {'key': 'clearTracks', 'type': '[TrackSelection]'},
'content_keys': {'key': 'contentKeys', 'type': 'StreamingPolicyContentKeys'},
'custom_key_acquisition_url_template': {'key': 'customKeyAcquisitionUrlTemplate', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EnvelopeEncryption, self).__init__(**kwargs)
self.enabled_protocols = kwargs.get('enabled_protocols', None)
self.clear_tracks = kwargs.get('clear_tracks', None)
self.content_keys = kwargs.get('content_keys', None)
self.custom_key_acquisition_url_template = kwargs.get('custom_key_acquisition_url_template', None)
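# Hedged usage sketch (editor's addition): an EnvelopeEncryption scheme that reuses the
# EnabledProtocols example above. The key-delivery URL template is a hypothetical value
# using the {ContentKeyId} token documented in the docstring; clear_tracks and content_keys
# are left at their defaults.
def _example_envelope_encryption():
    return EnvelopeEncryption(
        enabled_protocols=_example_enabled_protocols(),
        custom_key_acquisition_url_template="https://keyserver.example.com/key/{ContentKeyId}",
    )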
class FaceDetectorPreset(Preset):
"""Describes all the settings to be used when analyzing a video in order to detect (and optionally redact) all the faces present.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param resolution: Specifies the maximum resolution at which your video is analyzed. The
default behavior is "SourceResolution," which will keep the input video at its original
resolution when analyzed. Using "StandardDefinition" will resize input videos to standard
definition while preserving the appropriate aspect ratio. It will only resize if the video is
of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before
processing. Switching to "StandardDefinition" will reduce the time it takes to process high
resolution video. It may also reduce the cost of using this component (see
https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details).
However, faces that end up being too small in the resized video may not be detected. Possible
values include: "SourceResolution", "StandardDefinition".
:type resolution: str or ~azure.mgmt.media.models.AnalysisResolution
:param mode: This mode provides the ability to choose between the following settings: 1)
Analyze - For detection only. This mode generates a metadata JSON file marking appearances of
faces throughout the video. Where possible, appearances of the same person are assigned the
same ID. 2) Combined - Additionally redacts (blurs) detected faces. 3) Redact - This enables a
2-pass process, allowing for selective redaction of a subset of detected faces. It takes in the
metadata file from a prior analyze pass, along with the source video, and a user-selected
subset of IDs that require redaction. Possible values include: "Analyze", "Redact", "Combined".
:type mode: str or ~azure.mgmt.media.models.FaceRedactorMode
:param blur_type: Blur type. Possible values include: "Box", "Low", "Med", "High", "Black".
:type blur_type: str or ~azure.mgmt.media.models.BlurType
:param experimental_options: Dictionary containing key value pairs for parameters not exposed
in the preset itself.
:type experimental_options: dict[str, str]
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'resolution': {'key': 'resolution', 'type': 'str'},
'mode': {'key': 'mode', 'type': 'str'},
'blur_type': {'key': 'blurType', 'type': 'str'},
'experimental_options': {'key': 'experimentalOptions', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(FaceDetectorPreset, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.FaceDetectorPreset' # type: str
self.resolution = kwargs.get('resolution', None)
self.mode = kwargs.get('mode', None)
self.blur_type = kwargs.get('blur_type', None)
self.experimental_options = kwargs.get('experimental_options', None)
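# Hedged usage sketch (editor's addition): a FaceDetectorPreset configured for analysis only
# at standard definition. The enum-like values are passed as plain strings, which the
# serializer accepts alongside the AnalysisResolution / FaceRedactorMode enums.
def _example_face_detector_preset():
    return FaceDetectorPreset(
        resolution="StandardDefinition",
        mode="Analyze",
    )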
class Filters(msrest.serialization.Model):
"""Describes all the filtering operations, such as de-interlacing, rotation etc. that are to be applied to the input media before encoding.
:param deinterlace: The de-interlacing settings.
:type deinterlace: ~azure.mgmt.media.models.Deinterlace
:param rotation: The rotation, if any, to be applied to the input video, before it is encoded.
Default is Auto. Possible values include: "Auto", "None", "Rotate0", "Rotate90", "Rotate180",
"Rotate270".
:type rotation: str or ~azure.mgmt.media.models.Rotation
:param crop: The parameters for the rectangular window with which to crop the input video.
:type crop: ~azure.mgmt.media.models.Rectangle
:param overlays: The properties of overlays to be applied to the input video. These could be
audio, image or video overlays.
:type overlays: list[~azure.mgmt.media.models.Overlay]
"""
_attribute_map = {
'deinterlace': {'key': 'deinterlace', 'type': 'Deinterlace'},
'rotation': {'key': 'rotation', 'type': 'str'},
'crop': {'key': 'crop', 'type': 'Rectangle'},
'overlays': {'key': 'overlays', 'type': '[Overlay]'},
}
def __init__(
self,
**kwargs
):
super(Filters, self).__init__(**kwargs)
self.deinterlace = kwargs.get('deinterlace', None)
self.rotation = kwargs.get('rotation', None)
self.crop = kwargs.get('crop', None)
self.overlays = kwargs.get('overlays', None)
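# Hedged usage sketch (editor's addition): a minimal Filters object that only sets the
# rotation; deinterlace, crop and overlays are left at their defaults (None).
def _example_filters():
    return Filters(rotation="Rotate90")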
class FilterTrackPropertyCondition(msrest.serialization.Model):
"""The class to specify one track property condition.
All required parameters must be populated in order to send to Azure.
:param property: Required. The track property type. Possible values include: "Unknown", "Type",
"Name", "Language", "FourCC", "Bitrate".
:type property: str or ~azure.mgmt.media.models.FilterTrackPropertyType
:param value: Required. The track property value.
:type value: str
:param operation: Required. The track property condition operation. Possible values include:
"Equal", "NotEqual".
:type operation: str or ~azure.mgmt.media.models.FilterTrackPropertyCompareOperation
"""
_validation = {
'property': {'required': True},
'value': {'required': True},
'operation': {'required': True},
}
_attribute_map = {
'property': {'key': 'property', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(FilterTrackPropertyCondition, self).__init__(**kwargs)
self.property = kwargs['property']
self.value = kwargs['value']
self.operation = kwargs['operation']
class FilterTrackSelection(msrest.serialization.Model):
"""Representing a list of FilterTrackPropertyConditions to select a track. The filters are combined using a logical AND operation.
All required parameters must be populated in order to send to Azure.
:param track_selections: Required. The track selections.
:type track_selections: list[~azure.mgmt.media.models.FilterTrackPropertyCondition]
"""
_validation = {
'track_selections': {'required': True},
}
_attribute_map = {
'track_selections': {'key': 'trackSelections', 'type': '[FilterTrackPropertyCondition]'},
}
def __init__(
self,
**kwargs
):
super(FilterTrackSelection, self).__init__(**kwargs)
self.track_selections = kwargs['track_selections']
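# Hedged usage sketch (editor's addition): selecting English audio tracks by combining two
# FilterTrackPropertyConditions through the implicit logical AND of FilterTrackSelection.
def _example_filter_track_selection():
    return FilterTrackSelection(
        track_selections=[
            FilterTrackPropertyCondition(property="Type", value="Audio", operation="Equal"),
            FilterTrackPropertyCondition(property="Language", value="en", operation="Equal"),
        ]
    )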
class FirstQuality(msrest.serialization.Model):
"""Filter First Quality.
All required parameters must be populated in order to send to Azure.
:param bitrate: Required. The first quality bitrate.
:type bitrate: int
"""
_validation = {
'bitrate': {'required': True},
}
_attribute_map = {
'bitrate': {'key': 'bitrate', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(FirstQuality, self).__init__(**kwargs)
self.bitrate = kwargs['bitrate']
class Format(msrest.serialization.Model):
"""Base class for output.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: ImageFormat, MultiBitrateFormat.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param filename_pattern: Required. The pattern of the file names for the generated output
files. The following macros are supported in the file name: {Basename} - An expansion macro
that will use the name of the input video file. If the base name (the file suffix is not
included) of the input video file is less than 32 characters long, the base name of input video
files will be used. If the length of base name of the input video file exceeds 32 characters,
the base name is truncated to the first 32 characters in total length. {Extension} - The
appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index}
- A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video
bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec.
{Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed
from the filename.
:type filename_pattern: str
"""
_validation = {
'odata_type': {'required': True},
'filename_pattern': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'filename_pattern': {'key': 'filenamePattern', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.ImageFormat': 'ImageFormat', '#Microsoft.Media.MultiBitrateFormat': 'MultiBitrateFormat'}
}
def __init__(
self,
**kwargs
):
super(Format, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.filename_pattern = kwargs['filename_pattern']
class InputDefinition(msrest.serialization.Model):
"""Base class for defining an input. Use sub classes of this class to specify tracks selections and related metadata.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: FromAllInputFile, FromEachInputFile, InputFile.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param included_tracks: The list of TrackDescriptors which define the metadata and selection of
tracks in the input.
:type included_tracks: list[~azure.mgmt.media.models.TrackDescriptor]
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'included_tracks': {'key': 'includedTracks', 'type': '[TrackDescriptor]'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.FromAllInputFile': 'FromAllInputFile', '#Microsoft.Media.FromEachInputFile': 'FromEachInputFile', '#Microsoft.Media.InputFile': 'InputFile'}
}
def __init__(
self,
**kwargs
):
super(InputDefinition, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.included_tracks = kwargs.get('included_tracks', None)
class FromAllInputFile(InputDefinition):
"""An InputDefinition that looks across all of the files provided to select tracks specified by the IncludedTracks property. Generally used with the AudioTrackByAttribute and VideoTrackByAttribute to allow selection of a single track across a set of input files.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param included_tracks: The list of TrackDescriptors which define the metadata and selection of
tracks in the input.
:type included_tracks: list[~azure.mgmt.media.models.TrackDescriptor]
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'included_tracks': {'key': 'includedTracks', 'type': '[TrackDescriptor]'},
}
def __init__(
self,
**kwargs
):
super(FromAllInputFile, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.FromAllInputFile' # type: str
class FromEachInputFile(InputDefinition):
"""An InputDefinition that looks at each input file provided to select tracks specified by the IncludedTracks property. Generally used with the AudioTrackByAttribute and VideoTrackByAttribute to select tracks from each file given.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param included_tracks: The list of TrackDescriptors which define the metadata and selection of
tracks in the input.
:type included_tracks: list[~azure.mgmt.media.models.TrackDescriptor]
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'included_tracks': {'key': 'includedTracks', 'type': '[TrackDescriptor]'},
}
def __init__(
self,
**kwargs
):
super(FromEachInputFile, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.FromEachInputFile' # type: str
class Layer(msrest.serialization.Model):
"""The encoder can be configured to produce video and/or images (thumbnails) at different resolutions, by specifying a layer for each desired resolution. A layer represents the properties for the video or image at a resolution.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: H265VideoLayer, JpgLayer, PngLayer, VideoLayer.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param width: The width of the output video for this layer. The value can be absolute (in
pixels) or relative (in percentage). For example 50% means the output video has half as many
pixels in width as the input.
:type width: str
:param height: The height of the output video for this layer. The value can be absolute (in
pixels) or relative (in percentage). For example 50% means the output video has half as many
pixels in height as the input.
:type height: str
:param label: The alphanumeric label for this layer, which can be used in multiplexing
different video and audio layers, or in naming the output file.
:type label: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'width': {'key': 'width', 'type': 'str'},
'height': {'key': 'height', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.H265VideoLayer': 'H265VideoLayer', '#Microsoft.Media.JpgLayer': 'JpgLayer', '#Microsoft.Media.PngLayer': 'PngLayer', '#Microsoft.Media.VideoLayer': 'VideoLayer'}
}
def __init__(
self,
**kwargs
):
super(Layer, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.width = kwargs.get('width', None)
self.height = kwargs.get('height', None)
self.label = kwargs.get('label', None)
class VideoLayer(Layer):
"""Describes the settings to be used when encoding the input video into a desired output bitrate layer.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: H264Layer.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param width: The width of the output video for this layer. The value can be absolute (in
pixels) or relative (in percentage). For example 50% means the output video has half as many
pixels in width as the input.
:type width: str
:param height: The height of the output video for this layer. The value can be absolute (in
pixels) or relative (in percentage). For example 50% means the output video has half as many
pixels in height as the input.
:type height: str
:param label: The alphanumeric label for this layer, which can be used in multiplexing
different video and audio layers, or in naming the output file.
:type label: str
:param bitrate: Required. The average bitrate in bits per second at which to encode the input
video when generating this layer. This is a required field.
:type bitrate: int
:param max_bitrate: The maximum bitrate (in bits per second), at which the VBV buffer should be
assumed to refill. If not specified, defaults to the same value as bitrate.
:type max_bitrate: int
:param b_frames: The number of B-frames to be used when encoding this layer. If not specified,
the encoder chooses an appropriate number based on the video profile and level.
:type b_frames: int
:param frame_rate: The frame rate (in frames per second) at which to encode this layer. The
value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the
form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame
rates based on the profile and level. If it is not specified, the encoder will use the same
frame rate as the input video.
:type frame_rate: str
:param slices: The number of slices to be used when encoding this layer. If not specified,
default is zero, which means that the encoder will use a single slice for each frame.
:type slices: int
:param adaptive_b_frame: Whether or not adaptive B-frames are to be used when encoding this
layer. If not specified, the encoder will turn it on whenever the video profile permits its
use.
:type adaptive_b_frame: bool
"""
_validation = {
'odata_type': {'required': True},
'bitrate': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'width': {'key': 'width', 'type': 'str'},
'height': {'key': 'height', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'bitrate': {'key': 'bitrate', 'type': 'int'},
'max_bitrate': {'key': 'maxBitrate', 'type': 'int'},
'b_frames': {'key': 'bFrames', 'type': 'int'},
'frame_rate': {'key': 'frameRate', 'type': 'str'},
'slices': {'key': 'slices', 'type': 'int'},
'adaptive_b_frame': {'key': 'adaptiveBFrame', 'type': 'bool'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.H264Layer': 'H264Layer'}
}
def __init__(
self,
**kwargs
):
super(VideoLayer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.VideoLayer' # type: str
self.bitrate = kwargs['bitrate']
self.max_bitrate = kwargs.get('max_bitrate', None)
self.b_frames = kwargs.get('b_frames', None)
self.frame_rate = kwargs.get('frame_rate', None)
self.slices = kwargs.get('slices', None)
self.adaptive_b_frame = kwargs.get('adaptive_b_frame', None)
class H264Layer(VideoLayer):
"""Describes the settings to be used when encoding the input video into a desired output bitrate layer with the H.264 video codec.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param width: The width of the output video for this layer. The value can be absolute (in
pixels) or relative (in percentage). For example 50% means the output video has half as many
pixels in width as the input.
:type width: str
:param height: The height of the output video for this layer. The value can be absolute (in
pixels) or relative (in percentage). For example 50% means the output video has half as many
pixels in height as the input.
:type height: str
:param label: The alphanumeric label for this layer, which can be used in multiplexing
different video and audio layers, or in naming the output file.
:type label: str
:param bitrate: Required. The average bitrate in bits per second at which to encode the input
video when generating this layer. This is a required field.
:type bitrate: int
:param max_bitrate: The maximum bitrate (in bits per second), at which the VBV buffer should be
assumed to refill. If not specified, defaults to the same value as bitrate.
:type max_bitrate: int
:param b_frames: The number of B-frames to be used when encoding this layer. If not specified,
the encoder chooses an appropriate number based on the video profile and level.
:type b_frames: int
:param frame_rate: The frame rate (in frames per second) at which to encode this layer. The
value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the
form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame
rates based on the profile and level. If it is not specified, the encoder will use the same
frame rate as the input video.
:type frame_rate: str
:param slices: The number of slices to be used when encoding this layer. If not specified,
default is zero, which means that the encoder will use a single slice for each frame.
:type slices: int
:param adaptive_b_frame: Whether or not adaptive B-frames are to be used when encoding this
layer. If not specified, the encoder will turn it on whenever the video profile permits its
use.
:type adaptive_b_frame: bool
:param profile: We currently support Baseline, Main, High, High422, High444. Default is Auto.
Possible values include: "Auto", "Baseline", "Main", "High", "High422", "High444".
:type profile: str or ~azure.mgmt.media.models.H264VideoProfile
:param level: We currently support Level up to 6.2. The value can be Auto, or a number that
matches the H.264 profile. If not specified, the default is Auto, which lets the encoder choose
the Level that is appropriate for this layer.
:type level: str
:param buffer_window: The VBV buffer window length. The value should be in ISO 8601 format. The
value should be in the range [0.1-100] seconds. The default is 5 seconds (for example, PT5S).
:type buffer_window: ~datetime.timedelta
:param reference_frames: The number of reference frames to be used when encoding this layer. If
not specified, the encoder determines an appropriate number based on the encoder complexity
setting.
:type reference_frames: int
:param entropy_mode: The entropy mode to be used for this layer. If not specified, the encoder
chooses the mode that is appropriate for the profile and level. Possible values include:
"Cabac", "Cavlc".
:type entropy_mode: str or ~azure.mgmt.media.models.EntropyMode
"""
_validation = {
'odata_type': {'required': True},
'bitrate': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'width': {'key': 'width', 'type': 'str'},
'height': {'key': 'height', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'bitrate': {'key': 'bitrate', 'type': 'int'},
'max_bitrate': {'key': 'maxBitrate', 'type': 'int'},
'b_frames': {'key': 'bFrames', 'type': 'int'},
'frame_rate': {'key': 'frameRate', 'type': 'str'},
'slices': {'key': 'slices', 'type': 'int'},
'adaptive_b_frame': {'key': 'adaptiveBFrame', 'type': 'bool'},
'profile': {'key': 'profile', 'type': 'str'},
'level': {'key': 'level', 'type': 'str'},
'buffer_window': {'key': 'bufferWindow', 'type': 'duration'},
'reference_frames': {'key': 'referenceFrames', 'type': 'int'},
'entropy_mode': {'key': 'entropyMode', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(H264Layer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.H264Layer' # type: str
self.profile = kwargs.get('profile', None)
self.level = kwargs.get('level', None)
self.buffer_window = kwargs.get('buffer_window', None)
self.reference_frames = kwargs.get('reference_frames', None)
self.entropy_mode = kwargs.get('entropy_mode', None)
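# Hedged usage sketch (editor's addition): a single 720p H.264 layer. Only bitrate is
# required; width, height, label, profile and level are optional and shown with example
# values only.
def _example_h264_layer():
    return H264Layer(
        bitrate=3000000,     # 3 Mbps, expressed in bits per second
        width="1280",
        height="720",
        label="HD-3000kbps",
        profile="Main",
        level="4.0",
    )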
class Video(Codec):
"""Describes the basic properties for encoding the input video.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: H264Video, H265Video, Image.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param label: An optional label for the codec. The label can be used to control muxing
behavior.
:type label: str
:param key_frame_interval: The distance between two key frames. The value should be non-zero in
the range [0.5, 20] seconds, specified in ISO 8601 format. The default is 2 seconds (PT2S). Note
that this setting is ignored if VideoSyncMode.Passthrough is set, where the KeyFrameInterval
value will follow the input source setting.
:type key_frame_interval: ~datetime.timedelta
:param stretch_mode: The resizing mode - how the input video will be resized to fit the desired
output resolution(s). Default is AutoSize. Possible values include: "None", "AutoSize",
"AutoFit".
:type stretch_mode: str or ~azure.mgmt.media.models.StretchMode
:param sync_mode: The Video Sync Mode. Possible values include: "Auto", "Passthrough", "Cfr",
"Vfr".
:type sync_mode: str or ~azure.mgmt.media.models.VideoSyncMode
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'key_frame_interval': {'key': 'keyFrameInterval', 'type': 'duration'},
'stretch_mode': {'key': 'stretchMode', 'type': 'str'},
'sync_mode': {'key': 'syncMode', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.H264Video': 'H264Video', '#Microsoft.Media.H265Video': 'H265Video', '#Microsoft.Media.Image': 'Image'}
}
def __init__(
self,
**kwargs
):
super(Video, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.Video' # type: str
self.key_frame_interval = kwargs.get('key_frame_interval', None)
self.stretch_mode = kwargs.get('stretch_mode', None)
self.sync_mode = kwargs.get('sync_mode', None)
class H264Video(Video):
"""Describes all the properties for encoding a video with the H.264 codec.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param label: An optional label for the codec. The label can be used to control muxing
behavior.
:type label: str
:param key_frame_interval: The distance between two key frames. The value should be non-zero in
the range [0.5, 20] seconds, specified in ISO 8601 format. The default is 2 seconds (PT2S). Note
that this setting is ignored if VideoSyncMode.Passthrough is set, where the KeyFrameInterval
value will follow the input source setting.
:type key_frame_interval: ~datetime.timedelta
:param stretch_mode: The resizing mode - how the input video will be resized to fit the desired
output resolution(s). Default is AutoSize. Possible values include: "None", "AutoSize",
"AutoFit".
:type stretch_mode: str or ~azure.mgmt.media.models.StretchMode
:param sync_mode: The Video Sync Mode. Possible values include: "Auto", "Passthrough", "Cfr",
"Vfr".
:type sync_mode: str or ~azure.mgmt.media.models.VideoSyncMode
:param scene_change_detection: Whether or not the encoder should insert key frames at scene
changes. If not specified, the default is false. This flag should be set to true only when the
encoder is being configured to produce a single output video.
:type scene_change_detection: bool
:param complexity: Tells the encoder how to choose its encoding settings. The default value is
Balanced. Possible values include: "Speed", "Balanced", "Quality".
:type complexity: str or ~azure.mgmt.media.models.H264Complexity
:param layers: The collection of output H.264 layers to be produced by the encoder.
:type layers: list[~azure.mgmt.media.models.H264Layer]
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'key_frame_interval': {'key': 'keyFrameInterval', 'type': 'duration'},
'stretch_mode': {'key': 'stretchMode', 'type': 'str'},
'sync_mode': {'key': 'syncMode', 'type': 'str'},
'scene_change_detection': {'key': 'sceneChangeDetection', 'type': 'bool'},
'complexity': {'key': 'complexity', 'type': 'str'},
'layers': {'key': 'layers', 'type': '[H264Layer]'},
}
def __init__(
self,
**kwargs
):
super(H264Video, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.H264Video' # type: str
self.scene_change_detection = kwargs.get('scene_change_detection', None)
self.complexity = kwargs.get('complexity', None)
self.layers = kwargs.get('layers', None)
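# Hedged usage sketch (editor's addition): an H264Video codec with two output layers built
# the same way as the H264Layer example above; key_frame_interval takes a datetime.timedelta,
# matching the 'duration' serialization type declared in _attribute_map.
def _example_h264_video():
    import datetime  # local import keeps the sketch self-contained
    return H264Video(
        key_frame_interval=datetime.timedelta(seconds=2),
        complexity="Balanced",
        layers=[
            H264Layer(bitrate=3000000, width="1280", height="720", label="HD"),
            H264Layer(bitrate=1000000, width="640", height="360", label="SD"),
        ],
    )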
class H265VideoLayer(Layer):
"""Describes the settings to be used when encoding the input video into a desired output bitrate layer.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: H265Layer.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param width: The width of the output video for this layer. The value can be absolute (in
pixels) or relative (in percentage). For example 50% means the output video has half as many
pixels in width as the input.
:type width: str
:param height: The height of the output video for this layer. The value can be absolute (in
pixels) or relative (in percentage). For example 50% means the output video has half as many
pixels in height as the input.
:type height: str
:param label: The alphanumeric label for this layer, which can be used in multiplexing
different video and audio layers, or in naming the output file.
:type label: str
:param bitrate: Required. The average bitrate in bits per second at which to encode the input
video when generating this layer. For example: a target bitrate of 3000Kbps or 3Mbps means this
value should be 3000000. This is a required field.
:type bitrate: int
:param max_bitrate: The maximum bitrate (in bits per second), at which the VBV buffer should be
assumed to refill. If not specified, defaults to the same value as bitrate.
:type max_bitrate: int
:param b_frames: The number of B-frames to be used when encoding this layer. If not specified,
the encoder chooses an appropriate number based on the video profile and level.
:type b_frames: int
:param frame_rate: The frame rate (in frames per second) at which to encode this layer. The
value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the
form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame
rates based on the profile and level. If it is not specified, the encoder will use the same
frame rate as the input video.
:type frame_rate: str
:param slices: The number of slices to be used when encoding this layer. If not specified,
default is zero, which means that the encoder will use a single slice for each frame.
:type slices: int
:param adaptive_b_frame: Specifies whether or not adaptive B-frames are to be used when
encoding this layer. If not specified, the encoder will turn it on whenever the video profile
permits its use.
:type adaptive_b_frame: bool
"""
_validation = {
'odata_type': {'required': True},
'bitrate': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'width': {'key': 'width', 'type': 'str'},
'height': {'key': 'height', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'bitrate': {'key': 'bitrate', 'type': 'int'},
'max_bitrate': {'key': 'maxBitrate', 'type': 'int'},
'b_frames': {'key': 'bFrames', 'type': 'int'},
'frame_rate': {'key': 'frameRate', 'type': 'str'},
'slices': {'key': 'slices', 'type': 'int'},
'adaptive_b_frame': {'key': 'adaptiveBFrame', 'type': 'bool'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.H265Layer': 'H265Layer'}
}
def __init__(
self,
**kwargs
):
super(H265VideoLayer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.H265VideoLayer' # type: str
self.bitrate = kwargs['bitrate']
self.max_bitrate = kwargs.get('max_bitrate', None)
self.b_frames = kwargs.get('b_frames', None)
self.frame_rate = kwargs.get('frame_rate', None)
self.slices = kwargs.get('slices', None)
self.adaptive_b_frame = kwargs.get('adaptive_b_frame', None)
class H265Layer(H265VideoLayer):
"""Describes the settings to be used when encoding the input video into a desired output bitrate layer with the H.265 video codec.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param width: The width of the output video for this layer. The value can be absolute (in
pixels) or relative (in percentage). For example 50% means the output video has half as many
pixels in width as the input.
:type width: str
:param height: The height of the output video for this layer. The value can be absolute (in
pixels) or relative (in percentage). For example 50% means the output video has half as many
pixels in height as the input.
:type height: str
:param label: The alphanumeric label for this layer, which can be used in multiplexing
different video and audio layers, or in naming the output file.
:type label: str
:param bitrate: Required. The average bitrate in bits per second at which to encode the input
video when generating this layer. For example: a target bitrate of 3000Kbps or 3Mbps means this
value should be 3000000. This is a required field.
:type bitrate: int
:param max_bitrate: The maximum bitrate (in bits per second), at which the VBV buffer should be
assumed to refill. If not specified, defaults to the same value as bitrate.
:type max_bitrate: int
:param b_frames: The number of B-frames to be used when encoding this layer. If not specified,
the encoder chooses an appropriate number based on the video profile and level.
:type b_frames: int
:param frame_rate: The frame rate (in frames per second) at which to encode this layer. The
value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the
form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame
rates based on the profile and level. If it is not specified, the encoder will use the same
frame rate as the input video.
:type frame_rate: str
:param slices: The number of slices to be used when encoding this layer. If not specified,
default is zero, which means that the encoder will use a single slice for each frame.
:type slices: int
:param adaptive_b_frame: Specifies whether or not adaptive B-frames are to be used when
encoding this layer. If not specified, the encoder will turn it on whenever the video profile
permits its use.
:type adaptive_b_frame: bool
:param profile: We currently support Main. Default is Auto. Possible values include: "Auto",
"Main".
:type profile: str or ~azure.mgmt.media.models.H265VideoProfile
:param level: We currently support Level up to 6.2. The value can be Auto, or a number that
matches the H.265 profile. If not specified, the default is Auto, which lets the encoder choose
the Level that is appropriate for this layer.
:type level: str
:param buffer_window: The VBV buffer window length. The value should be in ISO 8601 format. The
value should be in the range [0.1-100] seconds. The default is 5 seconds (for example, PT5S).
:type buffer_window: ~datetime.timedelta
:param reference_frames: The number of reference frames to be used when encoding this layer. If
not specified, the encoder determines an appropriate number based on the encoder complexity
setting.
:type reference_frames: int
"""
_validation = {
'odata_type': {'required': True},
'bitrate': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'width': {'key': 'width', 'type': 'str'},
'height': {'key': 'height', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'bitrate': {'key': 'bitrate', 'type': 'int'},
'max_bitrate': {'key': 'maxBitrate', 'type': 'int'},
'b_frames': {'key': 'bFrames', 'type': 'int'},
'frame_rate': {'key': 'frameRate', 'type': 'str'},
'slices': {'key': 'slices', 'type': 'int'},
'adaptive_b_frame': {'key': 'adaptiveBFrame', 'type': 'bool'},
'profile': {'key': 'profile', 'type': 'str'},
'level': {'key': 'level', 'type': 'str'},
'buffer_window': {'key': 'bufferWindow', 'type': 'duration'},
'reference_frames': {'key': 'referenceFrames', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(H265Layer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.H265Layer' # type: str
self.profile = kwargs.get('profile', None)
self.level = kwargs.get('level', None)
self.buffer_window = kwargs.get('buffer_window', None)
self.reference_frames = kwargs.get('reference_frames', None)
class H265Video(Video):
"""Describes all the properties for encoding a video with the H.265 codec.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param label: An optional label for the codec. The label can be used to control muxing
behavior.
:type label: str
:param key_frame_interval: The distance between two key frames. The value should be non-zero in
the range [0.5, 20] seconds, specified in ISO 8601 format. The default is 2 seconds (PT2S). Note
that this setting is ignored if VideoSyncMode.Passthrough is set, where the KeyFrameInterval
value will follow the input source setting.
:type key_frame_interval: ~datetime.timedelta
:param stretch_mode: The resizing mode - how the input video will be resized to fit the desired
output resolution(s). Default is AutoSize. Possible values include: "None", "AutoSize",
"AutoFit".
:type stretch_mode: str or ~azure.mgmt.media.models.StretchMode
:param sync_mode: The Video Sync Mode. Possible values include: "Auto", "Passthrough", "Cfr",
"Vfr".
:type sync_mode: str or ~azure.mgmt.media.models.VideoSyncMode
:param scene_change_detection: Specifies whether or not the encoder should insert key frames at
scene changes. If not specified, the default is false. This flag should be set to true only
when the encoder is being configured to produce a single output video.
:type scene_change_detection: bool
:param complexity: Tells the encoder how to choose its encoding settings. Quality will provide
for a higher compression ratio but at a higher cost and longer compute time. Speed will
produce a relatively larger file but is faster and more economical. The default value is
Balanced. Possible values include: "Speed", "Balanced", "Quality".
:type complexity: str or ~azure.mgmt.media.models.H265Complexity
:param layers: The collection of output H.265 layers to be produced by the encoder.
:type layers: list[~azure.mgmt.media.models.H265Layer]
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'key_frame_interval': {'key': 'keyFrameInterval', 'type': 'duration'},
'stretch_mode': {'key': 'stretchMode', 'type': 'str'},
'sync_mode': {'key': 'syncMode', 'type': 'str'},
'scene_change_detection': {'key': 'sceneChangeDetection', 'type': 'bool'},
'complexity': {'key': 'complexity', 'type': 'str'},
'layers': {'key': 'layers', 'type': '[H265Layer]'},
}
def __init__(
self,
**kwargs
):
super(H265Video, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.H265Video' # type: str
self.scene_change_detection = kwargs.get('scene_change_detection', None)
self.complexity = kwargs.get('complexity', None)
self.layers = kwargs.get('layers', None)
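# Hedged usage sketch (editor's addition): the H.265 classes mirror the H.264 ones, so a
# single-layer H265Video looks almost identical; bitrate is again expressed in bits per
# second (3 Mbps -> 3000000) and all values below are illustrative.
def _example_h265_video():
    return H265Video(
        complexity="Quality",
        layers=[H265Layer(bitrate=3000000, width="1920", height="1080", profile="Main")],
    )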
class Hls(msrest.serialization.Model):
"""HTTP Live Streaming (HLS) packing setting for the live output.
:param fragments_per_ts_segment: The number of fragments in an HTTP Live Streaming (HLS) TS
segment in the output of the live event. This value does not affect the packing ratio for HLS
CMAF output.
:type fragments_per_ts_segment: int
"""
_attribute_map = {
'fragments_per_ts_segment': {'key': 'fragmentsPerTsSegment', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(Hls, self).__init__(**kwargs)
self.fragments_per_ts_segment = kwargs.get('fragments_per_ts_segment', None)
class Image(Video):
"""Describes the basic properties for generating thumbnails from the input video.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: JpgImage, PngImage.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param label: An optional label for the codec. The label can be used to control muxing
behavior.
:type label: str
:param key_frame_interval: The distance between two key frames. The value should be non-zero in
the range [0.5, 20] seconds, specified in ISO 8601 format. The default is 2 seconds (PT2S). Note
that this setting is ignored if VideoSyncMode.Passthrough is set, where the KeyFrameInterval
value will follow the input source setting.
:type key_frame_interval: ~datetime.timedelta
:param stretch_mode: The resizing mode - how the input video will be resized to fit the desired
output resolution(s). Default is AutoSize. Possible values include: "None", "AutoSize",
"AutoFit".
:type stretch_mode: str or ~azure.mgmt.media.models.StretchMode
:param sync_mode: The Video Sync Mode. Possible values include: "Auto", "Passthrough", "Cfr",
"Vfr".
:type sync_mode: str or ~azure.mgmt.media.models.VideoSyncMode
:param start: Required. The position in the input video from where to start generating
thumbnails. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or
a frame count (For example, 10 to start at the 10th frame), or a relative value to stream
duration (For example, 10% to start at 10% of stream duration). Also supports a macro {Best},
which tells the encoder to select the best thumbnail from the first few seconds of the video
and will only produce one thumbnail, no matter what other settings are for Step and Range. The
default value is macro {Best}.
:type start: str
:param step: The intervals at which thumbnails are generated. The value can be in ISO 8601
format (For example, PT05S for one image every 5 seconds), or a frame count (For example, 30
for one image every 30 frames), or a relative value to stream duration (For example, 10% for
one image every 10% of stream duration). Note: Step value will affect the first generated
thumbnail, which may not be exactly the one specified at transform preset start time. This is
due to the encoder, which tries to select the best thumbnail between start time and Step
position from start time as the first output. Since the default value is 10%, if the stream
has a long duration, the first generated thumbnail might be far away from the one specified at
start time. Try to select a reasonable value for Step if the first thumbnail is expected close
to start time, or set the Range value to 1 if only one thumbnail is needed at start time.
:type step: str
:param range: The position relative to transform preset start time in the input video at which
to stop generating thumbnails. The value can be in ISO 8601 format (For example, PT5M30S to
stop at 5 minutes and 30 seconds from start time), or a frame count (For example, 300 to stop
at the 300th frame from the frame at start time. If this value is 1, it means only producing
one thumbnail at start time), or a relative value to the stream duration (For example, 50% to
stop at half of stream duration from start time). The default value is 100%, which means to
stop at the end of the stream.
:type range: str
"""
_validation = {
'odata_type': {'required': True},
'start': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'key_frame_interval': {'key': 'keyFrameInterval', 'type': 'duration'},
'stretch_mode': {'key': 'stretchMode', 'type': 'str'},
'sync_mode': {'key': 'syncMode', 'type': 'str'},
'start': {'key': 'start', 'type': 'str'},
'step': {'key': 'step', 'type': 'str'},
'range': {'key': 'range', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.JpgImage': 'JpgImage', '#Microsoft.Media.PngImage': 'PngImage'}
}
def __init__(
self,
**kwargs
):
super(Image, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.Image' # type: str
self.start = kwargs['start']
self.step = kwargs.get('step', None)
self.range = kwargs.get('range', None)
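# Hedged usage sketch (editor's addition): Image is normally used through its JpgImage or
# PngImage sub-classes (defined elsewhere in this module); the base class is constructed here
# purely to illustrate the start/step/range strings described in the docstring above.
def _example_image_timing():
    return Image(
        start="PT05S",  # begin generating thumbnails 5 seconds into the input
        step="10%",     # one thumbnail every 10% of the stream duration
        range="50%",    # stop halfway through the stream
    )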
class ImageFormat(Format):
"""Describes the properties for an output image file.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: JpgFormat, PngFormat.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param filename_pattern: Required. The pattern of the file names for the generated output
files. The following macros are supported in the file name: {Basename} - An expansion macro
that will use the name of the input video file. If the base name (the file suffix is not
included) of the input video file is less than 32 characters long, the base name of input video
files will be used. If the length of base name of the input video file exceeds 32 characters,
the base name is truncated to the first 32 characters in total length. {Extension} - The
appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index}
- A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video
bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec.
{Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed
from the filename.
:type filename_pattern: str
"""
_validation = {
'odata_type': {'required': True},
'filename_pattern': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'filename_pattern': {'key': 'filenamePattern', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.JpgFormat': 'JpgFormat', '#Microsoft.Media.PngFormat': 'PngFormat'}
}
def __init__(
self,
**kwargs
):
super(ImageFormat, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.ImageFormat' # type: str
class InputFile(InputDefinition):
"""An InputDefinition for a single file. TrackSelections are scoped to the file specified.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param included_tracks: The list of TrackDescriptors which define the metadata and selection of
tracks in the input.
:type included_tracks: list[~azure.mgmt.media.models.TrackDescriptor]
:param filename: Name of the file that this input definition applies to.
:type filename: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'included_tracks': {'key': 'includedTracks', 'type': '[TrackDescriptor]'},
'filename': {'key': 'filename', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(InputFile, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.InputFile' # type: str
self.filename = kwargs.get('filename', None)
class IPAccessControl(msrest.serialization.Model):
"""The IP access control.
:param allow: The IP allow list.
:type allow: list[~azure.mgmt.media.models.IPRange]
"""
_attribute_map = {
'allow': {'key': 'allow', 'type': '[IPRange]'},
}
def __init__(
self,
**kwargs
):
super(IPAccessControl, self).__init__(**kwargs)
self.allow = kwargs.get('allow', None)
class IPRange(msrest.serialization.Model):
"""The IP address range in the CIDR scheme.
:param name: The friendly name for the IP address range.
:type name: str
:param address: The IP address.
:type address: str
:param subnet_prefix_length: The subnet mask prefix length (see CIDR notation).
:type subnet_prefix_length: int
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'address': {'key': 'address', 'type': 'str'},
'subnet_prefix_length': {'key': 'subnetPrefixLength', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(IPRange, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.address = kwargs.get('address', None)
self.subnet_prefix_length = kwargs.get('subnet_prefix_length', None)
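# Hedged usage sketch (editor's addition): an allow-list with a single CIDR range; the name,
# address and prefix length below are example values only.
def _example_ip_access_control():
    return IPAccessControl(
        allow=[IPRange(name="office", address="192.168.0.0", subnet_prefix_length=24)]
    )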
class Job(ProxyResource):
"""A Job resource type. The progress and state can be obtained by polling a Job or subscribing to events using EventGrid.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: The system metadata relating to this resource.
:vartype system_data: ~azure.mgmt.media.models.SystemData
:ivar created: The UTC date and time when the customer has created the Job, in
'YYYY-MM-DDThh:mm:ssZ' format.
:vartype created: ~datetime.datetime
:ivar state: The current state of the job. Possible values include: "Canceled", "Canceling",
"Error", "Finished", "Processing", "Queued", "Scheduled".
:vartype state: str or ~azure.mgmt.media.models.JobState
:param description: Optional customer supplied description of the Job.
:type description: str
:param input: The inputs for the Job.
:type input: ~azure.mgmt.media.models.JobInput
:ivar last_modified: The UTC date and time when the customer has last updated the Job, in
'YYYY-MM-DDThh:mm:ssZ' format.
:vartype last_modified: ~datetime.datetime
:param outputs: The outputs for the Job.
:type outputs: list[~azure.mgmt.media.models.JobOutput]
:param priority: Priority with which the job should be processed. Higher priority jobs are
processed before lower priority jobs. If not set, the default is normal. Possible values
include: "Low", "Normal", "High".
:type priority: str or ~azure.mgmt.media.models.Priority
:param correlation_data: Customer provided key, value pairs that will be returned in Job and
JobOutput state events.
:type correlation_data: dict[str, str]
:ivar start_time: The UTC date and time at which this Job began processing.
:vartype start_time: ~datetime.datetime
:ivar end_time: The UTC date and time at which this Job finished processing.
:vartype end_time: ~datetime.datetime
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'created': {'readonly': True},
'state': {'readonly': True},
'last_modified': {'readonly': True},
'start_time': {'readonly': True},
'end_time': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'created': {'key': 'properties.created', 'type': 'iso-8601'},
'state': {'key': 'properties.state', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'input': {'key': 'properties.input', 'type': 'JobInput'},
'last_modified': {'key': 'properties.lastModified', 'type': 'iso-8601'},
'outputs': {'key': 'properties.outputs', 'type': '[JobOutput]'},
'priority': {'key': 'properties.priority', 'type': 'str'},
'correlation_data': {'key': 'properties.correlationData', 'type': '{str}'},
'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
'end_time': {'key': 'properties.endTime', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(Job, self).__init__(**kwargs)
self.system_data = None
self.created = None
self.state = None
self.description = kwargs.get('description', None)
self.input = kwargs.get('input', None)
self.last_modified = None
self.outputs = kwargs.get('outputs', None)
self.priority = kwargs.get('priority', None)
self.correlation_data = kwargs.get('correlation_data', None)
self.start_time = None
self.end_time = None
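# Hedged usage sketch (editor's addition): only writable Job properties are set here; id,
# name, state and the timestamps are read-only and populated by the service. In a real
# submission, 'input' and 'outputs' would carry JobInput / JobOutput sub-classes such as
# JobInputAsset (defined further below) and JobOutputAsset.
def _example_job():
    return Job(
        description="Example encode job",
        priority="Normal",
        correlation_data={"requestId": "1234"},  # hypothetical correlation key/value pair
    )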
class JobCollection(msrest.serialization.Model):
"""A collection of Job items.
:param value: A collection of Job items.
:type value: list[~azure.mgmt.media.models.Job]
:param odata_next_link: A link to the next page of the collection (when the collection contains
too many results to return in one response).
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Job]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JobCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class JobError(msrest.serialization.Model):
"""Details of JobOutput errors.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: Error code describing the error. Possible values include: "ServiceError",
"ServiceTransientError", "DownloadNotAccessible", "DownloadTransientError",
"UploadNotAccessible", "UploadTransientError", "ConfigurationUnsupported", "ContentMalformed",
"ContentUnsupported".
:vartype code: str or ~azure.mgmt.media.models.JobErrorCode
:ivar message: A human-readable language-dependent representation of the error.
:vartype message: str
:ivar category: Helps with categorization of errors. Possible values include: "Service",
"Download", "Upload", "Configuration", "Content".
:vartype category: str or ~azure.mgmt.media.models.JobErrorCategory
:ivar retry: Indicates that it may be possible to retry the Job. If retry is unsuccessful,
please contact Azure support via Azure Portal. Possible values include: "DoNotRetry",
"MayRetry".
:vartype retry: str or ~azure.mgmt.media.models.JobRetry
:ivar details: An array of details about specific errors that led to this reported error.
:vartype details: list[~azure.mgmt.media.models.JobErrorDetail]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'category': {'readonly': True},
'retry': {'readonly': True},
'details': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'category': {'key': 'category', 'type': 'str'},
'retry': {'key': 'retry', 'type': 'str'},
'details': {'key': 'details', 'type': '[JobErrorDetail]'},
}
def __init__(
self,
**kwargs
):
super(JobError, self).__init__(**kwargs)
self.code = None
self.message = None
self.category = None
self.retry = None
self.details = None
class JobErrorDetail(msrest.serialization.Model):
"""Details of JobOutput errors.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: Code describing the error detail.
:vartype code: str
:ivar message: A human-readable representation of the error.
:vartype message: str
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JobErrorDetail, self).__init__(**kwargs)
self.code = None
self.message = None
class JobInput(msrest.serialization.Model):
"""Base class for inputs to a Job.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: JobInputClip, JobInputSequence, JobInputs.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.JobInputClip': 'JobInputClip', '#Microsoft.Media.JobInputSequence': 'JobInputSequence', '#Microsoft.Media.JobInputs': 'JobInputs'}
}
def __init__(
self,
**kwargs
):
super(JobInput, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
class JobInputClip(JobInput):
"""Represents input files for a Job.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: JobInputAsset, JobInputHttp.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param files: List of files. Required for JobInputHttp. Maximum of 4000 characters each.
:type files: list[str]
:param start: Defines a point on the timeline of the input media at which processing will
start. Defaults to the beginning of the input media.
:type start: ~azure.mgmt.media.models.ClipTime
:param end: Defines a point on the timeline of the input media at which processing will end.
Defaults to the end of the input media.
:type end: ~azure.mgmt.media.models.ClipTime
:param label: A label that is assigned to a JobInputClip, that is used to satisfy a reference
used in the Transform. For example, a Transform can be authored so as to take an image file
with the label 'xyz' and apply it as an overlay onto the input video before it is encoded. When
submitting a Job, exactly one of the JobInputs should be the image file, and it should have the
label 'xyz'.
:type label: str
:param input_definitions: Defines a list of InputDefinitions. For each InputDefinition, it
defines a list of track selections and related metadata.
:type input_definitions: list[~azure.mgmt.media.models.InputDefinition]
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'files': {'key': 'files', 'type': '[str]'},
'start': {'key': 'start', 'type': 'ClipTime'},
'end': {'key': 'end', 'type': 'ClipTime'},
'label': {'key': 'label', 'type': 'str'},
'input_definitions': {'key': 'inputDefinitions', 'type': '[InputDefinition]'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.JobInputAsset': 'JobInputAsset', '#Microsoft.Media.JobInputHttp': 'JobInputHttp'}
}
def __init__(
self,
**kwargs
):
super(JobInputClip, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.JobInputClip' # type: str
self.files = kwargs.get('files', None)
self.start = kwargs.get('start', None)
self.end = kwargs.get('end', None)
self.label = kwargs.get('label', None)
self.input_definitions = kwargs.get('input_definitions', None)
class JobInputAsset(JobInputClip):
"""Represents an Asset for input into a Job.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param files: List of files. Required for JobInputHttp. Maximum of 4000 characters each.
:type files: list[str]
:param start: Defines a point on the timeline of the input media at which processing will
start. Defaults to the beginning of the input media.
:type start: ~azure.mgmt.media.models.ClipTime
:param end: Defines a point on the timeline of the input media at which processing will end.
Defaults to the end of the input media.
:type end: ~azure.mgmt.media.models.ClipTime
:param label: A label that is assigned to a JobInputClip, that is used to satisfy a reference
used in the Transform. For example, a Transform can be authored so as to take an image file
with the label 'xyz' and apply it as an overlay onto the input video before it is encoded. When
submitting a Job, exactly one of the JobInputs should be the image file, and it should have the
label 'xyz'.
:type label: str
:param input_definitions: Defines a list of InputDefinitions. For each InputDefinition, it
defines a list of track selections and related metadata.
:type input_definitions: list[~azure.mgmt.media.models.InputDefinition]
:param asset_name: Required. The name of the input Asset.
:type asset_name: str
"""
_validation = {
'odata_type': {'required': True},
'asset_name': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'files': {'key': 'files', 'type': '[str]'},
'start': {'key': 'start', 'type': 'ClipTime'},
'end': {'key': 'end', 'type': 'ClipTime'},
'label': {'key': 'label', 'type': 'str'},
'input_definitions': {'key': 'inputDefinitions', 'type': '[InputDefinition]'},
'asset_name': {'key': 'assetName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JobInputAsset, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.JobInputAsset' # type: str
self.asset_name = kwargs['asset_name']
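# A minimal usage sketch (assumption, not part of the generated code): building
# a JobInputAsset that points at an existing Asset and carries the label a
# Transform overlay would reference. The asset name and label are hypothetical.
def _example_job_input_asset():
    return JobInputAsset(
        asset_name='input-video-asset',  # name of an existing Asset (hypothetical)
        label='xyz',  # must match the label referenced by the Transform
    )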
class JobInputHttp(JobInputClip):
"""Represents HTTPS job input.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param files: List of files. Required for JobInputHttp. Maximum of 4000 characters each.
:type files: list[str]
:param start: Defines a point on the timeline of the input media at which processing will
start. Defaults to the beginning of the input media.
:type start: ~azure.mgmt.media.models.ClipTime
:param end: Defines a point on the timeline of the input media at which processing will end.
Defaults to the end of the input media.
:type end: ~azure.mgmt.media.models.ClipTime
:param label: A label that is assigned to a JobInputClip, that is used to satisfy a reference
used in the Transform. For example, a Transform can be authored so as to take an image file
with the label 'xyz' and apply it as an overlay onto the input video before it is encoded. When
submitting a Job, exactly one of the JobInputs should be the image file, and it should have the
label 'xyz'.
:type label: str
:param input_definitions: Defines a list of InputDefinitions. For each InputDefinition, it
defines a list of track selections and related metadata.
:type input_definitions: list[~azure.mgmt.media.models.InputDefinition]
:param base_uri: Base URI for HTTPS job input. It will be concatenated with provided file
names. If no base uri is given, then the provided file list is assumed to be fully qualified
uris. Maximum length of 4000 characters.
:type base_uri: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'files': {'key': 'files', 'type': '[str]'},
'start': {'key': 'start', 'type': 'ClipTime'},
'end': {'key': 'end', 'type': 'ClipTime'},
'label': {'key': 'label', 'type': 'str'},
'input_definitions': {'key': 'inputDefinitions', 'type': '[InputDefinition]'},
'base_uri': {'key': 'baseUri', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JobInputHttp, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.JobInputHttp' # type: str
self.base_uri = kwargs.get('base_uri', None)
class JobInputs(JobInput):
"""Describes a list of inputs to a Job.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param inputs: List of inputs to a Job.
:type inputs: list[~azure.mgmt.media.models.JobInput]
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[JobInput]'},
}
def __init__(
self,
**kwargs
):
super(JobInputs, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.JobInputs' # type: str
self.inputs = kwargs.get('inputs', None)
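# Illustrative sketch (assumption): combining an HTTPS input and an Asset input
# into a single JobInputs payload. All names and URIs below are hypothetical.
def _example_job_inputs():
    http_input = JobInputHttp(
        base_uri='https://example.blob.core.windows.net/container/',  # hypothetical container URI
        files=['intro.mp4'],
    )
    asset_input = JobInputAsset(asset_name='bumper-asset')  # hypothetical Asset name
    return JobInputs(inputs=[http_input, asset_input])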
class JobInputSequence(JobInput):
"""A Sequence contains an ordered list of Clips where each clip is a JobInput. The Sequence will be treated as a single input.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param inputs: JobInputs that make up the timeline.
:type inputs: list[~azure.mgmt.media.models.JobInputClip]
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[JobInputClip]'},
}
def __init__(
self,
**kwargs
):
super(JobInputSequence, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.JobInputSequence' # type: str
self.inputs = kwargs.get('inputs', None)
class JobOutput(msrest.serialization.Model):
"""Describes all the properties of a JobOutput.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: JobOutputAsset.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:ivar error: If the JobOutput is in the Error state, it contains the details of the error.
:vartype error: ~azure.mgmt.media.models.JobError
:ivar state: Describes the state of the JobOutput. Possible values include: "Canceled",
"Canceling", "Error", "Finished", "Processing", "Queued", "Scheduled".
:vartype state: str or ~azure.mgmt.media.models.JobState
:ivar progress: If the JobOutput is in a Processing state, this contains the Job completion
percentage. The value is an estimate and not intended to be used to predict Job completion
times. To determine if the JobOutput is complete, use the State property.
:vartype progress: int
:param label: A label that is assigned to a JobOutput in order to help uniquely identify it.
This is useful when your Transform has more than one TransformOutput, whereby your Job has more
than one JobOutput. In such cases, when you submit the Job, you will add two or more
JobOutputs, in the same order as TransformOutputs in the Transform. Subsequently, when you
retrieve the Job, either through events or on a GET request, you can use the label to easily
identify the JobOutput. If a label is not provided, a default value of
'{presetName}_{outputIndex}' will be used, where the preset name is the name of the preset in
the corresponding TransformOutput and the output index is the relative index of this
JobOutput within the Job. Note that this index is the same as the relative index of the
corresponding TransformOutput within its Transform.
:type label: str
:ivar start_time: The UTC date and time at which this Job Output began processing.
:vartype start_time: ~datetime.datetime
:ivar end_time: The UTC date and time at which this Job Output finished processing.
:vartype end_time: ~datetime.datetime
"""
_validation = {
'odata_type': {'required': True},
'error': {'readonly': True},
'state': {'readonly': True},
'progress': {'readonly': True},
'start_time': {'readonly': True},
'end_time': {'readonly': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'error': {'key': 'error', 'type': 'JobError'},
'state': {'key': 'state', 'type': 'str'},
'progress': {'key': 'progress', 'type': 'int'},
'label': {'key': 'label', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.JobOutputAsset': 'JobOutputAsset'}
}
def __init__(
self,
**kwargs
):
super(JobOutput, self).__init__(**kwargs)
self.odata_type = None # type: Optional[str]
self.error = None
self.state = None
self.progress = None
self.label = kwargs.get('label', None)
self.start_time = None
self.end_time = None
class JobOutputAsset(JobOutput):
"""Represents an Asset used as a JobOutput.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:ivar error: If the JobOutput is in the Error state, it contains the details of the error.
:vartype error: ~azure.mgmt.media.models.JobError
:ivar state: Describes the state of the JobOutput. Possible values include: "Canceled",
"Canceling", "Error", "Finished", "Processing", "Queued", "Scheduled".
:vartype state: str or ~azure.mgmt.media.models.JobState
:ivar progress: If the JobOutput is in a Processing state, this contains the Job completion
percentage. The value is an estimate and not intended to be used to predict Job completion
times. To determine if the JobOutput is complete, use the State property.
:vartype progress: int
:param label: A label that is assigned to a JobOutput in order to help uniquely identify it.
This is useful when your Transform has more than one TransformOutput, whereby your Job has more
than one JobOutput. In such cases, when you submit the Job, you will add two or more
JobOutputs, in the same order as TransformOutputs in the Transform. Subsequently, when you
retrieve the Job, either through events or on a GET request, you can use the label to easily
identify the JobOutput. If a label is not provided, a default value of
'{presetName}_{outputIndex}' will be used, where the preset name is the name of the preset in
the corresponding TransformOutput and the output index is the relative index of this
JobOutput within the Job. Note that this index is the same as the relative index of the
corresponding TransformOutput within its Transform.
:type label: str
:ivar start_time: The UTC date and time at which this Job Output began processing.
:vartype start_time: ~datetime.datetime
:ivar end_time: The UTC date and time at which this Job Output finished processing.
:vartype end_time: ~datetime.datetime
:param asset_name: Required. The name of the output Asset.
:type asset_name: str
"""
_validation = {
'odata_type': {'required': True},
'error': {'readonly': True},
'state': {'readonly': True},
'progress': {'readonly': True},
'start_time': {'readonly': True},
'end_time': {'readonly': True},
'asset_name': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'error': {'key': 'error', 'type': 'JobError'},
'state': {'key': 'state', 'type': 'str'},
'progress': {'key': 'progress', 'type': 'int'},
'label': {'key': 'label', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'asset_name': {'key': 'assetName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JobOutputAsset, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.JobOutputAsset' # type: str
self.asset_name = kwargs['asset_name']
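# Illustrative sketch (assumption): a JobOutputAsset directing a Job's output
# into an existing Asset; the label helps identify this output when the
# Transform has several TransformOutputs. The names are hypothetical.
def _example_job_output_asset():
    return JobOutputAsset(
        asset_name='encoded-output-asset',  # hypothetical output Asset name
        label='AdaptiveStreaming_0',
    )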
class JpgFormat(ImageFormat):
"""Describes the settings for producing JPEG thumbnails.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param filename_pattern: Required. The pattern of the file names for the generated output
files. The following macros are supported in the file name: {Basename} - An expansion macro
that will use the name of the input video file. If the base name (the file suffix is not
included) of the input video file is less than 32 characters long, the base name of the input
video file will be used. If the length of the base name of the input video file exceeds 32 characters,
the base name is truncated to the first 32 characters in total length. {Extension} - The
appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index}
- A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video
bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec.
{Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed
from the filename.
:type filename_pattern: str
"""
_validation = {
'odata_type': {'required': True},
'filename_pattern': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'filename_pattern': {'key': 'filenamePattern', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JpgFormat, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.JpgFormat' # type: str
class JpgImage(Image):
"""Describes the properties for producing a series of JPEG images from the input video.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param label: An optional label for the codec. The label can be used to control muxing
behavior.
:type label: str
:param key_frame_interval: The distance between two key frames. The value should be non-zero in
the range [0.5, 20] seconds, specified in ISO 8601 format. The default is 2 seconds (PT2S). Note
that this setting is ignored if VideoSyncMode.Passthrough is set, where the KeyFrameInterval
value will follow the input source setting.
:type key_frame_interval: ~datetime.timedelta
:param stretch_mode: The resizing mode - how the input video will be resized to fit the desired
output resolution(s). Default is AutoSize. Possible values include: "None", "AutoSize",
"AutoFit".
:type stretch_mode: str or ~azure.mgmt.media.models.StretchMode
:param sync_mode: The Video Sync Mode. Possible values include: "Auto", "Passthrough", "Cfr",
"Vfr".
:type sync_mode: str or ~azure.mgmt.media.models.VideoSyncMode
:param start: Required. The position in the input video from where to start generating
thumbnails. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or
a frame count (For example, 10 to start at the 10th frame), or a relative value to stream
duration (For example, 10% to start at 10% of stream duration). Also supports a macro {Best},
which tells the encoder to select the best thumbnail from the first few seconds of the video
and will only produce one thumbnail, no matter what other settings are for Step and Range. The
default value is macro {Best}.
:type start: str
:param step: The intervals at which thumbnails are generated. The value can be in ISO 8601
format (For example, PT05S for one image every 5 seconds), or a frame count (For example, 30
for one image every 30 frames), or a relative value to stream duration (For example, 10% for
one image every 10% of stream duration). Note: Step value will affect the first generated
thumbnail, which may not be exactly the one specified at transform preset start time. This is
due to the encoder, which tries to select the best thumbnail between start time and Step
position from start time as the first output. As the default value is 10%, this means that if the
stream has a long duration, the first generated thumbnail might be far away from the one specified
at start time. Try to select a reasonable value for Step if the first thumbnail is expected to be
close to start time, or set the Range value to 1 if only one thumbnail is needed at start time.
:type step: str
:param range: The position relative to transform preset start time in the input video at which
to stop generating thumbnails. The value can be in ISO 8601 format (For example, PT5M30S to
stop at 5 minutes and 30 seconds from start time), or a frame count (For example, 300 to stop
at the 300th frame from the frame at start time. If this value is 1, it means only producing
one thumbnail at start time), or a relative value to the stream duration (For example, 50% to
stop at half of stream duration from start time). The default value is 100%, which means to
stop at the end of the stream.
:type range: str
:param layers: A collection of output JPEG image layers to be produced by the encoder.
:type layers: list[~azure.mgmt.media.models.JpgLayer]
:param sprite_column: Sets the number of columns used in the thumbnail sprite image. The number
of rows is automatically calculated and a VTT file is generated with the coordinate mappings for
each thumbnail in the sprite. Note: this value should be a positive integer and a proper value
is recommended so that the output image resolution will not go beyond the JPEG maximum pixel
resolution limit of 65535x65535.
:type sprite_column: int
"""
_validation = {
'odata_type': {'required': True},
'start': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'key_frame_interval': {'key': 'keyFrameInterval', 'type': 'duration'},
'stretch_mode': {'key': 'stretchMode', 'type': 'str'},
'sync_mode': {'key': 'syncMode', 'type': 'str'},
'start': {'key': 'start', 'type': 'str'},
'step': {'key': 'step', 'type': 'str'},
'range': {'key': 'range', 'type': 'str'},
'layers': {'key': 'layers', 'type': '[JpgLayer]'},
'sprite_column': {'key': 'spriteColumn', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(JpgImage, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.JpgImage' # type: str
self.layers = kwargs.get('layers', None)
self.sprite_column = kwargs.get('sprite_column', None)
class JpgLayer(Layer):
"""Describes the settings to produce a JPEG image from the input video.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param width: The width of the output video for this layer. The value can be absolute (in
pixels) or relative (in percentage). For example 50% means the output video has half as many
pixels in width as the input.
:type width: str
:param height: The height of the output video for this layer. The value can be absolute (in
pixels) or relative (in percentage). For example 50% means the output video has half as many
pixels in height as the input.
:type height: str
:param label: The alphanumeric label for this layer, which can be used in multiplexing
different video and audio layers, or in naming the output file.
:type label: str
:param quality: The compression quality of the JPEG output. Range is from 0-100 and the default
is 70.
:type quality: int
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'width': {'key': 'width', 'type': 'str'},
'height': {'key': 'height', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'quality': {'key': 'quality', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(JpgLayer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.JpgLayer' # type: str
self.quality = kwargs.get('quality', None)
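# Illustrative sketch (assumption): composing the JPEG thumbnail models defined
# above into a single configuration, with one half-resolution layer, one
# thumbnail every 30 seconds, and a file name pattern built from the documented
# macros. The pattern and values are only examples.
def _example_jpg_thumbnails():
    layer = JpgLayer(width='50%', height='50%', quality=70)
    image = JpgImage(
        start='{Best}',  # required; lets the encoder pick the best frame
        step='PT30S',
        range='100%',
        layers=[layer],
    )
    output_format = JpgFormat(filename_pattern='Thumbnail-{Basename}-{Index}{Extension}')
    return image, output_format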
class KeyDelivery(msrest.serialization.Model):
"""KeyDelivery.
:param access_control: The access control properties for Key Delivery.
:type access_control: ~azure.mgmt.media.models.AccessControl
"""
_attribute_map = {
'access_control': {'key': 'accessControl', 'type': 'AccessControl'},
}
def __init__(
self,
**kwargs
):
super(KeyDelivery, self).__init__(**kwargs)
self.access_control = kwargs.get('access_control', None)
class KeyVaultProperties(msrest.serialization.Model):
"""KeyVaultProperties.
Variables are only populated by the server, and will be ignored when sending a request.
:param key_identifier: The URL of the Key Vault key used to encrypt the account. The key may
either be versioned (for example https://vault/keys/mykey/version1) or reference a key without
a version (for example https://vault/keys/mykey).
:type key_identifier: str
:ivar current_key_identifier: The current key used to encrypt the Media Services account,
including the key version.
:vartype current_key_identifier: str
"""
_validation = {
'current_key_identifier': {'readonly': True},
}
_attribute_map = {
'key_identifier': {'key': 'keyIdentifier', 'type': 'str'},
'current_key_identifier': {'key': 'currentKeyIdentifier', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(KeyVaultProperties, self).__init__(**kwargs)
self.key_identifier = kwargs.get('key_identifier', None)
self.current_key_identifier = None
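# Illustrative sketch (assumption): customer-managed key settings pointing at a
# versionless Key Vault key. The vault URL below is hypothetical.
def _example_key_vault_properties():
    return KeyVaultProperties(key_identifier='https://myvault.vault.azure.net/keys/mykey')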
class ListContainerSasInput(msrest.serialization.Model):
"""The parameters to the list SAS request.
:param permissions: The permissions to set on the SAS URL. Possible values include: "Read",
"ReadWrite", "ReadWriteDelete".
:type permissions: str or ~azure.mgmt.media.models.AssetContainerPermission
:param expiry_time: The SAS URL expiration time. This must be less than 24 hours from the
current time.
:type expiry_time: ~datetime.datetime
"""
_attribute_map = {
'permissions': {'key': 'permissions', 'type': 'str'},
'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(ListContainerSasInput, self).__init__(**kwargs)
self.permissions = kwargs.get('permissions', None)
self.expiry_time = kwargs.get('expiry_time', None)
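# Illustrative sketch (assumption): requesting a read/write container SAS URL
# that expires in one hour. ``datetime`` is imported locally to keep the
# example self-contained.
def _example_list_container_sas_input():
    import datetime
    expiry = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
    return ListContainerSasInput(permissions='ReadWrite', expiry_time=expiry)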
class ListContentKeysResponse(msrest.serialization.Model):
"""Class of response for listContentKeys action.
:param content_keys: ContentKeys used by current Streaming Locator.
:type content_keys: list[~azure.mgmt.media.models.StreamingLocatorContentKey]
"""
_attribute_map = {
'content_keys': {'key': 'contentKeys', 'type': '[StreamingLocatorContentKey]'},
}
def __init__(
self,
**kwargs
):
super(ListContentKeysResponse, self).__init__(**kwargs)
self.content_keys = kwargs.get('content_keys', None)
class ListEdgePoliciesInput(msrest.serialization.Model):
"""ListEdgePoliciesInput.
:param device_id: Unique identifier of the edge device.
:type device_id: str
"""
_attribute_map = {
'device_id': {'key': 'deviceId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ListEdgePoliciesInput, self).__init__(**kwargs)
self.device_id = kwargs.get('device_id', None)
class ListPathsResponse(msrest.serialization.Model):
"""Class of response for listPaths action.
:param streaming_paths: Streaming Paths supported by current Streaming Locator.
:type streaming_paths: list[~azure.mgmt.media.models.StreamingPath]
:param download_paths: Download Paths supported by current Streaming Locator.
:type download_paths: list[str]
"""
_attribute_map = {
'streaming_paths': {'key': 'streamingPaths', 'type': '[StreamingPath]'},
'download_paths': {'key': 'downloadPaths', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(ListPathsResponse, self).__init__(**kwargs)
self.streaming_paths = kwargs.get('streaming_paths', None)
self.download_paths = kwargs.get('download_paths', None)
class ListStreamingLocatorsResponse(msrest.serialization.Model):
"""The Streaming Locators associated with this Asset.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar streaming_locators: The list of Streaming Locators.
:vartype streaming_locators: list[~azure.mgmt.media.models.AssetStreamingLocator]
"""
_validation = {
'streaming_locators': {'readonly': True},
}
_attribute_map = {
'streaming_locators': {'key': 'streamingLocators', 'type': '[AssetStreamingLocator]'},
}
def __init__(
self,
**kwargs
):
super(ListStreamingLocatorsResponse, self).__init__(**kwargs)
self.streaming_locators = None
class TrackedResource(Resource):
"""The resource model definition for an Azure Resource Manager tracked top level resource which has 'tags' and a 'location'.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TrackedResource, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.location = kwargs['location']
class LiveEvent(TrackedResource):
"""The live event.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
:ivar system_data: The system metadata relating to this resource.
:vartype system_data: ~azure.mgmt.media.models.SystemData
:param description: A description for the live event.
:type description: str
:param input: Live event input settings. It defines how the live event receives input from a
contribution encoder.
:type input: ~azure.mgmt.media.models.LiveEventInput
:param preview: Live event preview settings. Preview allows live event producers to preview the
live streaming content without creating any live output.
:type preview: ~azure.mgmt.media.models.LiveEventPreview
:param encoding: Encoding settings for the live event. It configures whether a live encoder is
used for the live event and settings for the live encoder if it is used.
:type encoding: ~azure.mgmt.media.models.LiveEventEncoding
:param transcriptions: Live transcription settings for the live event. See
https://go.microsoft.com/fwlink/?linkid=2133742 for more information about the live
transcription feature.
:type transcriptions: list[~azure.mgmt.media.models.LiveEventTranscription]
:ivar provisioning_state: The provisioning state of the live event.
:vartype provisioning_state: str
:ivar resource_state: The resource state of the live event. See
https://go.microsoft.com/fwlink/?linkid=2139012 for more information. Possible values include:
"Stopped", "Allocating", "StandBy", "Starting", "Running", "Stopping", "Deleting".
:vartype resource_state: str or ~azure.mgmt.media.models.LiveEventResourceState
:param cross_site_access_policies: Live event cross site access policies.
:type cross_site_access_policies: ~azure.mgmt.media.models.CrossSiteAccessPolicies
:param use_static_hostname: Specifies whether a static hostname would be assigned to the live
event preview and ingest endpoints. This value can only be updated if the live event is in
Standby state.
:type use_static_hostname: bool
:param hostname_prefix: When useStaticHostname is set to true, the hostnamePrefix specifies the
first part of the hostname assigned to the live event preview and ingest endpoints. The final
hostname would be a combination of this prefix, the media service account name and a short code
for the Azure Media Services data center.
:type hostname_prefix: str
:param stream_options: The options to use for the LiveEvent. This value is specified at
creation time and cannot be updated. The valid values for the array entry values are 'Default'
and 'LowLatency'.
:type stream_options: list[str or ~azure.mgmt.media.models.StreamOptionsFlag]
:ivar created: The creation time for the live event.
:vartype created: ~datetime.datetime
:ivar last_modified: The last modified time of the live event.
:vartype last_modified: ~datetime.datetime
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'system_data': {'readonly': True},
'provisioning_state': {'readonly': True},
'resource_state': {'readonly': True},
'created': {'readonly': True},
'last_modified': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'description': {'key': 'properties.description', 'type': 'str'},
'input': {'key': 'properties.input', 'type': 'LiveEventInput'},
'preview': {'key': 'properties.preview', 'type': 'LiveEventPreview'},
'encoding': {'key': 'properties.encoding', 'type': 'LiveEventEncoding'},
'transcriptions': {'key': 'properties.transcriptions', 'type': '[LiveEventTranscription]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'resource_state': {'key': 'properties.resourceState', 'type': 'str'},
'cross_site_access_policies': {'key': 'properties.crossSiteAccessPolicies', 'type': 'CrossSiteAccessPolicies'},
'use_static_hostname': {'key': 'properties.useStaticHostname', 'type': 'bool'},
'hostname_prefix': {'key': 'properties.hostnamePrefix', 'type': 'str'},
'stream_options': {'key': 'properties.streamOptions', 'type': '[str]'},
'created': {'key': 'properties.created', 'type': 'iso-8601'},
'last_modified': {'key': 'properties.lastModified', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(LiveEvent, self).__init__(**kwargs)
self.system_data = None
self.description = kwargs.get('description', None)
self.input = kwargs.get('input', None)
self.preview = kwargs.get('preview', None)
self.encoding = kwargs.get('encoding', None)
self.transcriptions = kwargs.get('transcriptions', None)
self.provisioning_state = None
self.resource_state = None
self.cross_site_access_policies = kwargs.get('cross_site_access_policies', None)
self.use_static_hostname = kwargs.get('use_static_hostname', None)
self.hostname_prefix = kwargs.get('hostname_prefix', None)
self.stream_options = kwargs.get('stream_options', None)
self.created = None
self.last_modified = None
class LiveEventActionInput(msrest.serialization.Model):
"""The LiveEvent action input parameter definition.
:param remove_outputs_on_stop: The flag indicates whether live outputs are automatically
deleted when the live event is being stopped. Deleting live outputs does not delete the
underlying assets.
:type remove_outputs_on_stop: bool
"""
_attribute_map = {
'remove_outputs_on_stop': {'key': 'removeOutputsOnStop', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(LiveEventActionInput, self).__init__(**kwargs)
self.remove_outputs_on_stop = kwargs.get('remove_outputs_on_stop', None)
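# Illustrative sketch (assumption): the action payload a caller might pass when
# stopping a live event so that its live outputs are removed while the
# underlying assets are kept.
def _example_live_event_stop_input():
    return LiveEventActionInput(remove_outputs_on_stop=True)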
class LiveEventEncoding(msrest.serialization.Model):
"""Specifies the live event type and optional encoding settings for encoding live events.
:param encoding_type: Live event type. When encodingType is set to None, the service simply
passes through the incoming video and audio layer(s) to the output. When encodingType is set to
Standard or Premium1080p, a live encoder transcodes the incoming stream into multiple bitrates
or layers. See https://go.microsoft.com/fwlink/?linkid=2095101 for more information. This
property cannot be modified after the live event is created. Possible values include: "None",
"Standard", "Premium1080p".
:type encoding_type: str or ~azure.mgmt.media.models.LiveEventEncodingType
:param preset_name: The optional encoding preset name, used when encodingType is not None. This
value is specified at creation time and cannot be updated. If the encodingType is set to
Standard, then the default preset name is ‘Default720p’. Else if the encodingType is set to
Premium1080p, the default preset is ‘Default1080p’.
:type preset_name: str
:param stretch_mode: Specifies how the input video will be resized to fit the desired output
resolution(s). Default is None. Possible values include: "None", "AutoSize", "AutoFit".
:type stretch_mode: str or ~azure.mgmt.media.models.StretchMode
:param key_frame_interval: Use an ISO 8601 time value between 0.5 to 20 seconds to specify the
output fragment length for the video and audio tracks of an encoding live event. For example,
use PT2S to indicate 2 seconds. For the video track it also defines the key frame interval, or
the length of a GoP (group of pictures). If this value is not set for an encoding live event,
the fragment duration defaults to 2 seconds. The value cannot be set for pass-through live
events.
:type key_frame_interval: ~datetime.timedelta
"""
_attribute_map = {
'encoding_type': {'key': 'encodingType', 'type': 'str'},
'preset_name': {'key': 'presetName', 'type': 'str'},
'stretch_mode': {'key': 'stretchMode', 'type': 'str'},
'key_frame_interval': {'key': 'keyFrameInterval', 'type': 'duration'},
}
def __init__(
self,
**kwargs
):
super(LiveEventEncoding, self).__init__(**kwargs)
self.encoding_type = kwargs.get('encoding_type', None)
self.preset_name = kwargs.get('preset_name', None)
self.stretch_mode = kwargs.get('stretch_mode', None)
self.key_frame_interval = kwargs.get('key_frame_interval', None)
class LiveEventEndpoint(msrest.serialization.Model):
"""The live event endpoint.
:param protocol: The endpoint protocol.
:type protocol: str
:param url: The endpoint URL.
:type url: str
"""
_attribute_map = {
'protocol': {'key': 'protocol', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LiveEventEndpoint, self).__init__(**kwargs)
self.protocol = kwargs.get('protocol', None)
self.url = kwargs.get('url', None)
class LiveEventInput(msrest.serialization.Model):
"""The live event input.
All required parameters must be populated in order to send to Azure.
:param streaming_protocol: Required. The input protocol for the live event. This is specified
at creation time and cannot be updated. Possible values include: "FragmentedMP4", "RTMP".
:type streaming_protocol: str or ~azure.mgmt.media.models.LiveEventInputProtocol
:param access_control: Access control for live event input.
:type access_control: ~azure.mgmt.media.models.LiveEventInputAccessControl
:param key_frame_interval_duration: ISO 8601 time duration of the key frame interval duration
of the input. This value sets the EXT-X-TARGETDURATION property in the HLS output. For example,
use PT2S to indicate 2 seconds. Leave the value empty for encoding live events.
:type key_frame_interval_duration: str
:param access_token: A UUID in string form to uniquely identify the stream. This can be
specified at creation time but cannot be updated. If omitted, the service will generate a
unique value.
:type access_token: str
:param endpoints: The input endpoints for the live event.
:type endpoints: list[~azure.mgmt.media.models.LiveEventEndpoint]
"""
_validation = {
'streaming_protocol': {'required': True},
}
_attribute_map = {
'streaming_protocol': {'key': 'streamingProtocol', 'type': 'str'},
'access_control': {'key': 'accessControl', 'type': 'LiveEventInputAccessControl'},
'key_frame_interval_duration': {'key': 'keyFrameIntervalDuration', 'type': 'str'},
'access_token': {'key': 'accessToken', 'type': 'str'},
'endpoints': {'key': 'endpoints', 'type': '[LiveEventEndpoint]'},
}
def __init__(
self,
**kwargs
):
super(LiveEventInput, self).__init__(**kwargs)
self.streaming_protocol = kwargs['streaming_protocol']
self.access_control = kwargs.get('access_control', None)
self.key_frame_interval_duration = kwargs.get('key_frame_interval_duration', None)
self.access_token = kwargs.get('access_token', None)
self.endpoints = kwargs.get('endpoints', None)
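# Illustrative sketch (assumption): a minimal live event definition combining
# the models above, with an RTMP ingest, standard live encoding and a required
# location. The region and description are hypothetical.
def _example_live_event():
    return LiveEvent(
        location='westus2',  # required; hypothetical region
        description='Example live event',
        input=LiveEventInput(streaming_protocol='RTMP'),
        encoding=LiveEventEncoding(encoding_type='Standard'),
    )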
class LiveEventInputAccessControl(msrest.serialization.Model):
"""The IP access control for live event input.
:param ip: The IP access control properties.
:type ip: ~azure.mgmt.media.models.IPAccessControl
"""
_attribute_map = {
'ip': {'key': 'ip', 'type': 'IPAccessControl'},
}
def __init__(
self,
**kwargs
):
super(LiveEventInputAccessControl, self).__init__(**kwargs)
self.ip = kwargs.get('ip', None)
class LiveEventInputTrackSelection(msrest.serialization.Model):
"""A track selection condition. This property is reserved for future use, any value set on this property will be ignored.
:param property: Property name to select. This property is reserved for future use, any value
set on this property will be ignored.
:type property: str
:param operation: Comparing operation. This property is reserved for future use, any value set
on this property will be ignored.
:type operation: str
:param value: Property value to select. This property is reserved for future use, any value set
on this property will be ignored.
:type value: str
"""
_attribute_map = {
'property': {'key': 'property', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LiveEventInputTrackSelection, self).__init__(**kwargs)
self.property = kwargs.get('property', None)
self.operation = kwargs.get('operation', None)
self.value = kwargs.get('value', None)
class LiveEventListResult(msrest.serialization.Model):
"""The LiveEvent list result.
:param value: The result of the List Live Event operation.
:type value: list[~azure.mgmt.media.models.LiveEvent]
:param odata_count: The number of results.
:type odata_count: int
:param odata_next_link: The link to the next set of results. Not empty if value contains an
incomplete list of live events.
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[LiveEvent]'},
'odata_count': {'key': '@odata\\.count', 'type': 'int'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LiveEventListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.odata_count = kwargs.get('odata_count', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class LiveEventOutputTranscriptionTrack(msrest.serialization.Model):
"""Describes a transcription track in the output of a live event, generated using speech-to-text transcription. This property is reserved for future use, any value set on this property will be ignored.
All required parameters must be populated in order to send to Azure.
:param track_name: Required. The output track name. This property is reserved for future use,
any value set on this property will be ignored.
:type track_name: str
"""
_validation = {
'track_name': {'required': True},
}
_attribute_map = {
'track_name': {'key': 'trackName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LiveEventOutputTranscriptionTrack, self).__init__(**kwargs)
self.track_name = kwargs['track_name']
class LiveEventPreview(msrest.serialization.Model):
"""Live event preview settings.
:param endpoints: The endpoints for preview. Do not share the preview URL with the live event
audience.
:type endpoints: list[~azure.mgmt.media.models.LiveEventEndpoint]
:param access_control: The access control for live event preview.
:type access_control: ~azure.mgmt.media.models.LiveEventPreviewAccessControl
:param preview_locator: The identifier of the preview locator in Guid format. Specifying this
at creation time allows the caller to know the preview locator url before the event is created.
If omitted, the service will generate a random identifier. This value cannot be updated once
the live event is created.
:type preview_locator: str
:param streaming_policy_name: The name of streaming policy used for the live event preview.
This value is specified at creation time and cannot be updated.
:type streaming_policy_name: str
:param alternative_media_id: An alternative media identifier associated with the streaming
locator created for the preview. This value is specified at creation time and cannot be
updated. The identifier can be used in the CustomLicenseAcquisitionUrlTemplate or the
CustomKeyAcquisitionUrlTemplate of the StreamingPolicy specified in the StreamingPolicyName
field.
:type alternative_media_id: str
"""
_attribute_map = {
'endpoints': {'key': 'endpoints', 'type': '[LiveEventEndpoint]'},
'access_control': {'key': 'accessControl', 'type': 'LiveEventPreviewAccessControl'},
'preview_locator': {'key': 'previewLocator', 'type': 'str'},
'streaming_policy_name': {'key': 'streamingPolicyName', 'type': 'str'},
'alternative_media_id': {'key': 'alternativeMediaId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LiveEventPreview, self).__init__(**kwargs)
self.endpoints = kwargs.get('endpoints', None)
self.access_control = kwargs.get('access_control', None)
self.preview_locator = kwargs.get('preview_locator', None)
self.streaming_policy_name = kwargs.get('streaming_policy_name', None)
self.alternative_media_id = kwargs.get('alternative_media_id', None)
class LiveEventPreviewAccessControl(msrest.serialization.Model):
"""The IP access control for the live event preview endpoint.
:param ip: The IP access control properties.
:type ip: ~azure.mgmt.media.models.IPAccessControl
"""
_attribute_map = {
'ip': {'key': 'ip', 'type': 'IPAccessControl'},
}
def __init__(
self,
**kwargs
):
super(LiveEventPreviewAccessControl, self).__init__(**kwargs)
self.ip = kwargs.get('ip', None)
class LiveEventTranscription(msrest.serialization.Model):
"""Describes the transcription tracks in the output of a live event, generated using speech-to-text transcription. This property is reserved for future use, any value set on this property will be ignored.
:param language: Specifies the language (locale) to be used for speech-to-text transcription –
it should match the spoken language in the audio track. The value should be in BCP-47 format
(e.g: 'en-US'). See https://go.microsoft.com/fwlink/?linkid=2133742 for more information about
the live transcription feature and the list of supported languages.
:type language: str
:param input_track_selection: Provides a mechanism to select the audio track in the input live
feed, to which speech-to-text transcription is applied. This property is reserved for future
use, any value set on this property will be ignored.
:type input_track_selection: list[~azure.mgmt.media.models.LiveEventInputTrackSelection]
:param output_transcription_track: Describes a transcription track in the output of a live
event, generated using speech-to-text transcription. This property is reserved for future use,
any value set on this property will be ignored.
:type output_transcription_track: ~azure.mgmt.media.models.LiveEventOutputTranscriptionTrack
"""
_attribute_map = {
'language': {'key': 'language', 'type': 'str'},
'input_track_selection': {'key': 'inputTrackSelection', 'type': '[LiveEventInputTrackSelection]'},
'output_transcription_track': {'key': 'outputTranscriptionTrack', 'type': 'LiveEventOutputTranscriptionTrack'},
}
def __init__(
self,
**kwargs
):
super(LiveEventTranscription, self).__init__(**kwargs)
self.language = kwargs.get('language', None)
self.input_track_selection = kwargs.get('input_track_selection', None)
self.output_transcription_track = kwargs.get('output_transcription_track', None)
class LiveOutput(ProxyResource):
"""The Live Output.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:param description: The description of the live output.
:type description: str
:param asset_name: The asset that the live output will write to.
:type asset_name: str
:param archive_window_length: ISO 8601 time between 1 minute and 25 hours to indicate the
maximum content length that can be archived in the asset for this live output. This also sets
the maximum content length for the rewind window. For example, use PT1H30M to indicate 1 hour
and 30 minutes of archive window.
:type archive_window_length: ~datetime.timedelta
:param manifest_name: The manifest file name. If not provided, the service will generate one
automatically.
:type manifest_name: str
:param hls: HTTP Live Streaming (HLS) packing setting for the live output.
:type hls: ~azure.mgmt.media.models.Hls
:param output_snap_time: The initial timestamp that the live output will start at; any content
before this value will not be archived.
:type output_snap_time: long
:ivar created: The creation time of the live output.
:vartype created: ~datetime.datetime
:ivar last_modified: The time the live output was last modified.
:vartype last_modified: ~datetime.datetime
:ivar provisioning_state: The provisioning state of the live output.
:vartype provisioning_state: str
:ivar resource_state: The resource state of the live output. Possible values include:
"Creating", "Running", "Deleting".
:vartype resource_state: str or ~azure.mgmt.media.models.LiveOutputResourceState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'created': {'readonly': True},
'last_modified': {'readonly': True},
'provisioning_state': {'readonly': True},
'resource_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'asset_name': {'key': 'properties.assetName', 'type': 'str'},
'archive_window_length': {'key': 'properties.archiveWindowLength', 'type': 'duration'},
'manifest_name': {'key': 'properties.manifestName', 'type': 'str'},
'hls': {'key': 'properties.hls', 'type': 'Hls'},
'output_snap_time': {'key': 'properties.outputSnapTime', 'type': 'long'},
'created': {'key': 'properties.created', 'type': 'iso-8601'},
'last_modified': {'key': 'properties.lastModified', 'type': 'iso-8601'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'resource_state': {'key': 'properties.resourceState', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LiveOutput, self).__init__(**kwargs)
self.description = kwargs.get('description', None)
self.asset_name = kwargs.get('asset_name', None)
self.archive_window_length = kwargs.get('archive_window_length', None)
self.manifest_name = kwargs.get('manifest_name', None)
self.hls = kwargs.get('hls', None)
self.output_snap_time = kwargs.get('output_snap_time', None)
self.created = None
self.last_modified = None
self.provisioning_state = None
self.resource_state = None
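# Illustrative sketch (assumption): a live output that archives one hour of
# content into an existing Asset. The asset and manifest names are hypothetical.
def _example_live_output():
    import datetime
    return LiveOutput(
        asset_name='live-archive-asset',
        archive_window_length=datetime.timedelta(hours=1),
        manifest_name='output',
    )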
class LiveOutputListResult(msrest.serialization.Model):
"""The LiveOutput list result.
:param value: The result of the List LiveOutput operation.
:type value: list[~azure.mgmt.media.models.LiveOutput]
:param odata_count: The number of results.
:type odata_count: int
:param odata_next_link: The link to the next set of results. Not empty if value contains
incomplete list of live outputs.
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[LiveOutput]'},
'odata_count': {'key': '@odata\\.count', 'type': 'int'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LiveOutputListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.odata_count = kwargs.get('odata_count', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class Location(msrest.serialization.Model):
"""Location.
All required parameters must be populated in order to send to Azure.
:param name: Required.
:type name: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Location, self).__init__(**kwargs)
self.name = kwargs['name']
class LogSpecification(msrest.serialization.Model):
"""A diagnostic log emitted by service.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The diagnostic log category name.
:vartype name: str
:ivar display_name: The diagnostic log category display name.
:vartype display_name: str
:ivar blob_duration: The time range for requests in each blob.
:vartype blob_duration: str
"""
_validation = {
'name': {'readonly': True},
'display_name': {'readonly': True},
'blob_duration': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'blob_duration': {'key': 'blobDuration', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LogSpecification, self).__init__(**kwargs)
self.name = None
self.display_name = None
self.blob_duration = None
class MediaService(TrackedResource):
"""A Media Services account.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
:param identity: The Managed Identity for the Media Services account.
:type identity: ~azure.mgmt.media.models.MediaServiceIdentity
:ivar system_data: The system metadata relating to this resource.
:vartype system_data: ~azure.mgmt.media.models.SystemData
:ivar media_service_id: The Media Services account ID.
:vartype media_service_id: str
:param storage_accounts: The storage accounts for this resource.
:type storage_accounts: list[~azure.mgmt.media.models.StorageAccount]
:param storage_authentication: Possible values include: "System", "ManagedIdentity".
:type storage_authentication: str or ~azure.mgmt.media.models.StorageAuthentication
:param encryption: The account encryption properties.
:type encryption: ~azure.mgmt.media.models.AccountEncryption
:param key_delivery: The Key Delivery properties for Media Services account.
:type key_delivery: ~azure.mgmt.media.models.KeyDelivery
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'system_data': {'readonly': True},
'media_service_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'MediaServiceIdentity'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'media_service_id': {'key': 'properties.mediaServiceId', 'type': 'str'},
'storage_accounts': {'key': 'properties.storageAccounts', 'type': '[StorageAccount]'},
'storage_authentication': {'key': 'properties.storageAuthentication', 'type': 'str'},
'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'},
'key_delivery': {'key': 'properties.keyDelivery', 'type': 'KeyDelivery'},
}
def __init__(
self,
**kwargs
):
super(MediaService, self).__init__(**kwargs)
self.identity = kwargs.get('identity', None)
self.system_data = None
self.media_service_id = None
self.storage_accounts = kwargs.get('storage_accounts', None)
self.storage_authentication = kwargs.get('storage_authentication', None)
self.encryption = kwargs.get('encryption', None)
self.key_delivery = kwargs.get('key_delivery', None)
class MediaServiceCollection(msrest.serialization.Model):
"""A collection of MediaService items.
:param value: A collection of MediaService items.
:type value: list[~azure.mgmt.media.models.MediaService]
:param odata_next_link: A link to the next page of the collection (when the collection contains
too many results to return in one response).
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[MediaService]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MediaServiceCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class MediaServiceIdentity(msrest.serialization.Model):
"""MediaServiceIdentity.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param type: Required. The identity type. Possible values include: "SystemAssigned", "None".
:type type: str or ~azure.mgmt.media.models.ManagedIdentityType
:ivar principal_id: The Principal ID of the identity.
:vartype principal_id: str
:ivar tenant_id: The Tenant ID of the identity.
:vartype tenant_id: str
"""
_validation = {
'type': {'required': True},
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MediaServiceIdentity, self).__init__(**kwargs)
self.type = kwargs['type']
self.principal_id = None
self.tenant_id = None
class MediaServiceUpdate(msrest.serialization.Model):
"""A Media Services account update.
Variables are only populated by the server, and will be ignored when sending a request.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param identity: The Managed Identity for the Media Services account.
:type identity: ~azure.mgmt.media.models.MediaServiceIdentity
:ivar media_service_id: The Media Services account ID.
:vartype media_service_id: str
:param storage_accounts: The storage accounts for this resource.
:type storage_accounts: list[~azure.mgmt.media.models.StorageAccount]
:param storage_authentication: Possible values include: "System", "ManagedIdentity".
:type storage_authentication: str or ~azure.mgmt.media.models.StorageAuthentication
:param encryption: The account encryption properties.
:type encryption: ~azure.mgmt.media.models.AccountEncryption
    :param key_delivery: The Key Delivery properties for the Media Services account.
:type key_delivery: ~azure.mgmt.media.models.KeyDelivery
"""
_validation = {
'media_service_id': {'readonly': True},
}
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'identity': {'key': 'identity', 'type': 'MediaServiceIdentity'},
'media_service_id': {'key': 'properties.mediaServiceId', 'type': 'str'},
'storage_accounts': {'key': 'properties.storageAccounts', 'type': '[StorageAccount]'},
'storage_authentication': {'key': 'properties.storageAuthentication', 'type': 'str'},
'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'},
'key_delivery': {'key': 'properties.keyDelivery', 'type': 'KeyDelivery'},
}
def __init__(
self,
**kwargs
):
super(MediaServiceUpdate, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.identity = kwargs.get('identity', None)
self.media_service_id = None
self.storage_accounts = kwargs.get('storage_accounts', None)
self.storage_authentication = kwargs.get('storage_authentication', None)
self.encryption = kwargs.get('encryption', None)
self.key_delivery = kwargs.get('key_delivery', None)
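# Illustrative sketch: a partial update payload built from MediaServiceUpdate. Only the
# supplied properties are serialized; read-only fields such as media_service_id stay None.
# The tag values are placeholders.
def _example_media_service_update():
    return MediaServiceUpdate(
        tags={"environment": "dev"},
        storage_authentication="System",
    )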
class MetricDimension(msrest.serialization.Model):
"""A metric dimension.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The metric dimension name.
:vartype name: str
:ivar display_name: The display name for the dimension.
:vartype display_name: str
:ivar to_be_exported_for_shoebox: Whether to export metric to shoebox.
:vartype to_be_exported_for_shoebox: bool
"""
_validation = {
'name': {'readonly': True},
'display_name': {'readonly': True},
'to_be_exported_for_shoebox': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(MetricDimension, self).__init__(**kwargs)
self.name = None
self.display_name = None
self.to_be_exported_for_shoebox = None
class MetricSpecification(msrest.serialization.Model):
"""A metric emitted by service.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The metric name.
:vartype name: str
:ivar display_name: The metric display name.
:vartype display_name: str
:ivar display_description: The metric display description.
:vartype display_description: str
:ivar unit: The metric unit. Possible values include: "Bytes", "Count", "Milliseconds".
:vartype unit: str or ~azure.mgmt.media.models.MetricUnit
:ivar aggregation_type: The metric aggregation type. Possible values include: "Average",
"Count", "Total".
:vartype aggregation_type: str or ~azure.mgmt.media.models.MetricAggregationType
:ivar lock_aggregation_type: The metric lock aggregation type. Possible values include:
"Average", "Count", "Total".
:vartype lock_aggregation_type: str or ~azure.mgmt.media.models.MetricAggregationType
:param supported_aggregation_types: Supported aggregation types.
:type supported_aggregation_types: list[str]
:ivar dimensions: The metric dimensions.
:vartype dimensions: list[~azure.mgmt.media.models.MetricDimension]
:ivar enable_regional_mdm_account: Indicates whether regional MDM account is enabled.
:vartype enable_regional_mdm_account: bool
:ivar source_mdm_account: The source MDM account.
:vartype source_mdm_account: str
:ivar source_mdm_namespace: The source MDM namespace.
:vartype source_mdm_namespace: str
:ivar supported_time_grain_types: The supported time grain types.
:vartype supported_time_grain_types: list[str]
"""
_validation = {
'name': {'readonly': True},
'display_name': {'readonly': True},
'display_description': {'readonly': True},
'unit': {'readonly': True},
'aggregation_type': {'readonly': True},
'lock_aggregation_type': {'readonly': True},
'dimensions': {'readonly': True},
'enable_regional_mdm_account': {'readonly': True},
'source_mdm_account': {'readonly': True},
'source_mdm_namespace': {'readonly': True},
'supported_time_grain_types': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'display_description': {'key': 'displayDescription', 'type': 'str'},
'unit': {'key': 'unit', 'type': 'str'},
'aggregation_type': {'key': 'aggregationType', 'type': 'str'},
'lock_aggregation_type': {'key': 'lockAggregationType', 'type': 'str'},
'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type': '[str]'},
'dimensions': {'key': 'dimensions', 'type': '[MetricDimension]'},
'enable_regional_mdm_account': {'key': 'enableRegionalMdmAccount', 'type': 'bool'},
'source_mdm_account': {'key': 'sourceMdmAccount', 'type': 'str'},
'source_mdm_namespace': {'key': 'sourceMdmNamespace', 'type': 'str'},
'supported_time_grain_types': {'key': 'supportedTimeGrainTypes', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(MetricSpecification, self).__init__(**kwargs)
self.name = None
self.display_name = None
self.display_description = None
self.unit = None
self.aggregation_type = None
self.lock_aggregation_type = None
self.supported_aggregation_types = kwargs.get('supported_aggregation_types', None)
self.dimensions = None
self.enable_regional_mdm_account = None
self.source_mdm_account = None
self.source_mdm_namespace = None
self.supported_time_grain_types = None
class MultiBitrateFormat(Format):
"""Describes the properties for producing a collection of GOP aligned multi-bitrate files. The default behavior is to produce one output file for each video layer which is muxed together with all the audios. The exact output files produced can be controlled by specifying the outputFiles collection.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: Mp4Format, TransportStreamFormat.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param filename_pattern: Required. The pattern of the file names for the generated output
files. The following macros are supported in the file name: {Basename} - An expansion macro
     that will use the name of the input video file. If the base name (the file suffix is not
     included) of the input video file is less than 32 characters long, the base name of the
     input video file will be used. If the length of the base name of the input video file exceeds 32 characters,
the base name is truncated to the first 32 characters in total length. {Extension} - The
appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index}
- A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video
bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec.
{Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed
from the filename.
:type filename_pattern: str
:param output_files: The list of output files to produce. Each entry in the list is a set of
     audio and video layer labels to be muxed together.
:type output_files: list[~azure.mgmt.media.models.OutputFile]
"""
_validation = {
'odata_type': {'required': True},
'filename_pattern': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'filename_pattern': {'key': 'filenamePattern', 'type': 'str'},
'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.Mp4Format': 'Mp4Format', '#Microsoft.Media.TransportStreamFormat': 'TransportStreamFormat'}
}
def __init__(
self,
**kwargs
):
super(MultiBitrateFormat, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.MultiBitrateFormat' # type: str
self.output_files = kwargs.get('output_files', None)
class Mp4Format(MultiBitrateFormat):
"""Describes the properties for an output ISO MP4 file.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param filename_pattern: Required. The pattern of the file names for the generated output
files. The following macros are supported in the file name: {Basename} - An expansion macro
     that will use the name of the input video file. If the base name (the file suffix is not
     included) of the input video file is less than 32 characters long, the base name of the
     input video file will be used. If the length of the base name of the input video file exceeds 32 characters,
the base name is truncated to the first 32 characters in total length. {Extension} - The
appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index}
- A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video
bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec.
{Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed
from the filename.
:type filename_pattern: str
:param output_files: The list of output files to produce. Each entry in the list is a set of
     audio and video layer labels to be muxed together.
:type output_files: list[~azure.mgmt.media.models.OutputFile]
"""
_validation = {
'odata_type': {'required': True},
'filename_pattern': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'filename_pattern': {'key': 'filenamePattern', 'type': 'str'},
'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'},
}
def __init__(
self,
**kwargs
):
super(Mp4Format, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.Mp4Format' # type: str
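# Illustrative sketch: an MP4 output format that muxes the video layer labelled "v1"
# with the audio layer labelled "a1" into a single file. OutputFile is defined later in
# this module; the filename pattern below uses the macros documented above and is only
# an example value.
def _example_mp4_format():
    return Mp4Format(
        filename_pattern="{Basename}_{Label}_{Bitrate}{Extension}",
        output_files=[OutputFile(labels=["v1", "a1"])],
    )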
class NoEncryption(msrest.serialization.Model):
"""Class for NoEncryption scheme.
:param enabled_protocols: Representing supported protocols.
:type enabled_protocols: ~azure.mgmt.media.models.EnabledProtocols
"""
_attribute_map = {
'enabled_protocols': {'key': 'enabledProtocols', 'type': 'EnabledProtocols'},
}
def __init__(
self,
**kwargs
):
super(NoEncryption, self).__init__(**kwargs)
self.enabled_protocols = kwargs.get('enabled_protocols', None)
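# Illustrative sketch: a NoEncryption (clear streaming) configuration that enables DASH,
# HLS and Smooth Streaming while disabling progressive download. EnabledProtocols is
# assumed to be the model defined elsewhere in this module, with the four boolean
# protocol flags shown below.
def _example_no_encryption():
    return NoEncryption(
        enabled_protocols=EnabledProtocols(
            download=False, dash=True, hls=True, smooth_streaming=True
        )
    )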
class ODataError(msrest.serialization.Model):
"""Information about an error.
:param code: A language-independent error name.
:type code: str
:param message: The error message.
:type message: str
:param target: The target of the error (for example, the name of the property in error).
:type target: str
:param details: The error details.
:type details: list[~azure.mgmt.media.models.ODataError]
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[ODataError]'},
}
def __init__(
self,
**kwargs
):
super(ODataError, self).__init__(**kwargs)
self.code = kwargs.get('code', None)
self.message = kwargs.get('message', None)
self.target = kwargs.get('target', None)
self.details = kwargs.get('details', None)
class Operation(msrest.serialization.Model):
"""An operation.
All required parameters must be populated in order to send to Azure.
:param name: Required. The operation name.
:type name: str
:param display: The operation display name.
:type display: ~azure.mgmt.media.models.OperationDisplay
:param origin: Origin of the operation.
:type origin: str
:param properties: Operation properties format.
:type properties: ~azure.mgmt.media.models.Properties
:param is_data_action: Whether the operation applies to data-plane.
:type is_data_action: bool
:param action_type: Indicates the action type. Possible values include: "Internal".
:type action_type: str or ~azure.mgmt.media.models.ActionType
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
'origin': {'key': 'origin', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'Properties'},
'is_data_action': {'key': 'isDataAction', 'type': 'bool'},
'action_type': {'key': 'actionType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.name = kwargs['name']
self.display = kwargs.get('display', None)
self.origin = kwargs.get('origin', None)
self.properties = kwargs.get('properties', None)
self.is_data_action = kwargs.get('is_data_action', None)
self.action_type = kwargs.get('action_type', None)
class OperationCollection(msrest.serialization.Model):
"""A collection of Operation items.
:param value: A collection of Operation items.
:type value: list[~azure.mgmt.media.models.Operation]
:param odata_next_link: A link to the next page of the collection (when the collection contains
too many results to return in one response).
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class OperationDisplay(msrest.serialization.Model):
"""Operation details.
:param provider: The service provider.
:type provider: str
:param resource: Resource on which the operation is performed.
:type resource: str
:param operation: The operation type.
:type operation: str
:param description: The operation description.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = kwargs.get('provider', None)
self.resource = kwargs.get('resource', None)
self.operation = kwargs.get('operation', None)
self.description = kwargs.get('description', None)
class OutputFile(msrest.serialization.Model):
"""Represents an output file produced.
All required parameters must be populated in order to send to Azure.
:param labels: Required. The list of labels that describe how the encoder should multiplex
video and audio into an output file. For example, if the encoder is producing two video layers
with labels v1 and v2, and one audio layer with label a1, then an array like '[v1, a1]' tells
the encoder to produce an output file with the video track represented by v1 and the audio
track represented by a1.
:type labels: list[str]
"""
_validation = {
'labels': {'required': True},
}
_attribute_map = {
'labels': {'key': 'labels', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(OutputFile, self).__init__(**kwargs)
self.labels = kwargs['labels']
class PngFormat(ImageFormat):
"""Describes the settings for producing PNG thumbnails.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param filename_pattern: Required. The pattern of the file names for the generated output
files. The following macros are supported in the file name: {Basename} - An expansion macro
that will use the name of the input video file. If the base name(the file suffix is not
included) of the input video file is less than 32 characters long, the base name of input video
files will be used. If the length of base name of the input video file exceeds 32 characters,
the base name is truncated to the first 32 characters in total length. {Extension} - The
appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index}
- A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video
bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec.
{Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed
from the filename.
:type filename_pattern: str
"""
_validation = {
'odata_type': {'required': True},
'filename_pattern': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'filename_pattern': {'key': 'filenamePattern', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PngFormat, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.PngFormat' # type: str
class PngImage(Image):
"""Describes the properties for producing a series of PNG images from the input video.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param label: An optional label for the codec. The label can be used to control muxing
behavior.
:type label: str
:param key_frame_interval: The distance between two key frames. The value should be non-zero in
     the range [0.5, 20] seconds, specified in ISO 8601 format. The default is 2 seconds (PT2S). Note
that this setting is ignored if VideoSyncMode.Passthrough is set, where the KeyFrameInterval
value will follow the input source setting.
:type key_frame_interval: ~datetime.timedelta
:param stretch_mode: The resizing mode - how the input video will be resized to fit the desired
output resolution(s). Default is AutoSize. Possible values include: "None", "AutoSize",
"AutoFit".
:type stretch_mode: str or ~azure.mgmt.media.models.StretchMode
:param sync_mode: The Video Sync Mode. Possible values include: "Auto", "Passthrough", "Cfr",
"Vfr".
:type sync_mode: str or ~azure.mgmt.media.models.VideoSyncMode
:param start: Required. The position in the input video from where to start generating
thumbnails. The value can be in ISO 8601 format (For example, PT05S to start at 5 seconds), or
a frame count (For example, 10 to start at the 10th frame), or a relative value to stream
duration (For example, 10% to start at 10% of stream duration). Also supports a macro {Best},
which tells the encoder to select the best thumbnail from the first few seconds of the video
and will only produce one thumbnail, no matter what other settings are for Step and Range. The
default value is macro {Best}.
:type start: str
:param step: The intervals at which thumbnails are generated. The value can be in ISO 8601
format (For example, PT05S for one image every 5 seconds), or a frame count (For example, 30
for one image every 30 frames), or a relative value to stream duration (For example, 10% for
one image every 10% of stream duration). Note: Step value will affect the first generated
thumbnail, which may not be exactly the one specified at transform preset start time. This is
due to the encoder, which tries to select the best thumbnail between start time and Step
position from start time as the first output. As the default value is 10%, it means if stream
has long duration, the first generated thumbnail might be far away from the one specified at
start time. Try to select reasonable value for Step if the first thumbnail is expected close to
start time, or set Range value at 1 if only one thumbnail is needed at start time.
:type step: str
:param range: The position relative to transform preset start time in the input video at which
to stop generating thumbnails. The value can be in ISO 8601 format (For example, PT5M30S to
stop at 5 minutes and 30 seconds from start time), or a frame count (For example, 300 to stop
at the 300th frame from the frame at start time. If this value is 1, it means only producing
one thumbnail at start time), or a relative value to the stream duration (For example, 50% to
stop at half of stream duration from start time). The default value is 100%, which means to
stop at the end of the stream.
:type range: str
:param layers: A collection of output PNG image layers to be produced by the encoder.
:type layers: list[~azure.mgmt.media.models.PngLayer]
"""
_validation = {
'odata_type': {'required': True},
'start': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'key_frame_interval': {'key': 'keyFrameInterval', 'type': 'duration'},
'stretch_mode': {'key': 'stretchMode', 'type': 'str'},
'sync_mode': {'key': 'syncMode', 'type': 'str'},
'start': {'key': 'start', 'type': 'str'},
'step': {'key': 'step', 'type': 'str'},
'range': {'key': 'range', 'type': 'str'},
'layers': {'key': 'layers', 'type': '[PngLayer]'},
}
def __init__(
self,
**kwargs
):
super(PngImage, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.PngImage' # type: str
self.layers = kwargs.get('layers', None)
class PngLayer(Layer):
"""Describes the settings to produce a PNG image from the input video.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param width: The width of the output video for this layer. The value can be absolute (in
pixels) or relative (in percentage). For example 50% means the output video has half as many
pixels in width as the input.
:type width: str
:param height: The height of the output video for this layer. The value can be absolute (in
pixels) or relative (in percentage). For example 50% means the output video has half as many
pixels in height as the input.
:type height: str
:param label: The alphanumeric label for this layer, which can be used in multiplexing
different video and audio layers, or in naming the output file.
:type label: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'width': {'key': 'width', 'type': 'str'},
'height': {'key': 'height', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PngLayer, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.PngLayer' # type: str
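# Illustrative sketch: a PNG thumbnail codec that starts 5 seconds into the input and
# produces one half-resolution image every 30 seconds for the first 5 minutes. The
# width/height strings follow the relative-size convention documented on PngLayer; all
# values are placeholders.
def _example_png_thumbnails():
    return PngImage(
        start="PT5S",
        step="PT30S",
        range="PT5M",
        layers=[PngLayer(width="50%", height="50%")],
    )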
class PresentationTimeRange(msrest.serialization.Model):
"""The presentation time range, this is asset related and not recommended for Account Filter.
:param start_timestamp: The absolute start time boundary.
:type start_timestamp: long
:param end_timestamp: The absolute end time boundary.
:type end_timestamp: long
:param presentation_window_duration: The relative to end sliding window.
:type presentation_window_duration: long
:param live_backoff_duration: The relative to end right edge.
:type live_backoff_duration: long
:param timescale: The time scale of time stamps.
:type timescale: long
:param force_end_timestamp: The indicator of forcing existing of end time stamp.
:type force_end_timestamp: bool
"""
_attribute_map = {
'start_timestamp': {'key': 'startTimestamp', 'type': 'long'},
'end_timestamp': {'key': 'endTimestamp', 'type': 'long'},
'presentation_window_duration': {'key': 'presentationWindowDuration', 'type': 'long'},
'live_backoff_duration': {'key': 'liveBackoffDuration', 'type': 'long'},
'timescale': {'key': 'timescale', 'type': 'long'},
'force_end_timestamp': {'key': 'forceEndTimestamp', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(PresentationTimeRange, self).__init__(**kwargs)
self.start_timestamp = kwargs.get('start_timestamp', None)
self.end_timestamp = kwargs.get('end_timestamp', None)
self.presentation_window_duration = kwargs.get('presentation_window_duration', None)
self.live_backoff_duration = kwargs.get('live_backoff_duration', None)
self.timescale = kwargs.get('timescale', None)
self.force_end_timestamp = kwargs.get('force_end_timestamp', None)
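# Illustrative sketch: an asset-filter time window that keeps the first 30 seconds of the
# presentation. With a timescale of 10,000,000 (100-nanosecond units), an end timestamp of
# 300,000,000 corresponds to 30 seconds. The values are placeholders.
def _example_presentation_time_range():
    return PresentationTimeRange(
        start_timestamp=0,
        end_timestamp=300000000,
        timescale=10000000,
        force_end_timestamp=False,
    )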
class PrivateEndpoint(msrest.serialization.Model):
"""The Private Endpoint resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The ARM identifier for Private Endpoint.
:vartype id: str
"""
_validation = {
'id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpoint, self).__init__(**kwargs)
self.id = None
class PrivateEndpointConnection(Resource):
"""The Private Endpoint Connection resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:param private_endpoint: The resource of private end point.
:type private_endpoint: ~azure.mgmt.media.models.PrivateEndpoint
:param private_link_service_connection_state: A collection of information about the state of
the connection between service consumer and provider.
:type private_link_service_connection_state:
~azure.mgmt.media.models.PrivateLinkServiceConnectionState
:ivar provisioning_state: The provisioning state of the private endpoint connection resource.
Possible values include: "Succeeded", "Creating", "Deleting", "Failed".
:vartype provisioning_state: str or
~azure.mgmt.media.models.PrivateEndpointConnectionProvisioningState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'private_endpoint': {'key': 'properties.privateEndpoint', 'type': 'PrivateEndpoint'},
'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionState'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpointConnection, self).__init__(**kwargs)
self.private_endpoint = kwargs.get('private_endpoint', None)
self.private_link_service_connection_state = kwargs.get('private_link_service_connection_state', None)
self.provisioning_state = None
class PrivateEndpointConnectionListResult(msrest.serialization.Model):
"""List of private endpoint connection associated with the specified storage account.
:param value: Array of private endpoint connections.
:type value: list[~azure.mgmt.media.models.PrivateEndpointConnection]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PrivateEndpointConnection]'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpointConnectionListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class PrivateLinkResource(Resource):
"""A private link resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar group_id: The private link resource group id.
:vartype group_id: str
:ivar required_members: The private link resource required member names.
:vartype required_members: list[str]
:param required_zone_names: The private link resource Private link DNS zone name.
:type required_zone_names: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'group_id': {'readonly': True},
'required_members': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'group_id': {'key': 'properties.groupId', 'type': 'str'},
'required_members': {'key': 'properties.requiredMembers', 'type': '[str]'},
'required_zone_names': {'key': 'properties.requiredZoneNames', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(PrivateLinkResource, self).__init__(**kwargs)
self.group_id = None
self.required_members = None
self.required_zone_names = kwargs.get('required_zone_names', None)
class PrivateLinkResourceListResult(msrest.serialization.Model):
"""A list of private link resources.
:param value: Array of private link resources.
:type value: list[~azure.mgmt.media.models.PrivateLinkResource]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PrivateLinkResource]'},
}
def __init__(
self,
**kwargs
):
super(PrivateLinkResourceListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class PrivateLinkServiceConnectionState(msrest.serialization.Model):
"""A collection of information about the state of the connection between service consumer and provider.
:param status: Indicates whether the connection has been Approved/Rejected/Removed by the owner
of the service. Possible values include: "Pending", "Approved", "Rejected".
:type status: str or ~azure.mgmt.media.models.PrivateEndpointServiceConnectionStatus
:param description: The reason for approval/rejection of the connection.
:type description: str
:param actions_required: A message indicating if changes on the service provider require any
updates on the consumer.
:type actions_required: str
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'actions_required': {'key': 'actionsRequired', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateLinkServiceConnectionState, self).__init__(**kwargs)
self.status = kwargs.get('status', None)
self.description = kwargs.get('description', None)
self.actions_required = kwargs.get('actions_required', None)
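# Illustrative sketch: the connection-state payload a resource owner might send when
# approving a pending private endpoint connection. The description text is a placeholder.
def _example_connection_state():
    return PrivateLinkServiceConnectionState(
        status="Approved",
        description="Approved by the account administrator",
    )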
class Properties(msrest.serialization.Model):
"""The service specification property.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar service_specification: The service specifications.
:vartype service_specification: ~azure.mgmt.media.models.ServiceSpecification
"""
_validation = {
'service_specification': {'readonly': True},
}
_attribute_map = {
'service_specification': {'key': 'serviceSpecification', 'type': 'ServiceSpecification'},
}
def __init__(
self,
**kwargs
):
super(Properties, self).__init__(**kwargs)
self.service_specification = None
class Provider(msrest.serialization.Model):
"""A resource provider.
All required parameters must be populated in order to send to Azure.
:param provider_name: Required. The provider name.
:type provider_name: str
"""
_validation = {
'provider_name': {'required': True},
}
_attribute_map = {
'provider_name': {'key': 'providerName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Provider, self).__init__(**kwargs)
self.provider_name = kwargs['provider_name']
class Rectangle(msrest.serialization.Model):
"""Describes the properties of a rectangular window applied to the input media before processing it.
    :param left: The number of pixels from the left-margin. This can be an absolute pixel value
     (e.g. 100), or relative to the size of the video (For example, 50%).
    :type left: str
    :param top: The number of pixels from the top-margin. This can be an absolute pixel value
     (e.g. 100), or relative to the size of the video (For example, 50%).
    :type top: str
    :param width: The width of the rectangular region in pixels. This can be an absolute pixel
     value (e.g. 100), or relative to the size of the video (For example, 50%).
    :type width: str
    :param height: The height of the rectangular region in pixels. This can be an absolute pixel
     value (e.g. 100), or relative to the size of the video (For example, 50%).
    :type height: str
"""
_attribute_map = {
'left': {'key': 'left', 'type': 'str'},
'top': {'key': 'top', 'type': 'str'},
'width': {'key': 'width', 'type': 'str'},
'height': {'key': 'height', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Rectangle, self).__init__(**kwargs)
self.left = kwargs.get('left', None)
self.top = kwargs.get('top', None)
self.width = kwargs.get('width', None)
self.height = kwargs.get('height', None)
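# Illustrative sketch: a crop window that removes a 10% border on every side of the input
# video, expressed with the relative (percentage) form documented above.
def _example_crop_rectangle():
    return Rectangle(left="10%", top="10%", width="80%", height="80%")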
class SelectAudioTrackByAttribute(AudioTrackDescriptor):
"""Select audio tracks from the input by specifying an attribute and an attribute filter.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param channel_mapping: Optional designation for single channel audio tracks. Can be used to
combine the tracks into stereo or multi-channel audio tracks. Possible values include:
"FrontLeft", "FrontRight", "Center", "LowFrequencyEffects", "BackLeft", "BackRight",
"StereoLeft", "StereoRight".
:type channel_mapping: str or ~azure.mgmt.media.models.ChannelMapping
:param attribute: Required. The TrackAttribute to filter the tracks by. Possible values
include: "Bitrate", "Language".
:type attribute: str or ~azure.mgmt.media.models.TrackAttribute
:param filter: Required. The type of AttributeFilter to apply to the TrackAttribute in order to
select the tracks. Possible values include: "All", "Top", "Bottom", "ValueEquals".
:type filter: str or ~azure.mgmt.media.models.AttributeFilter
:param filter_value: The value to filter the tracks by. Only used when
AttributeFilter.ValueEquals is specified for the Filter property.
:type filter_value: str
"""
_validation = {
'odata_type': {'required': True},
'attribute': {'required': True},
'filter': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'channel_mapping': {'key': 'channelMapping', 'type': 'str'},
'attribute': {'key': 'attribute', 'type': 'str'},
'filter': {'key': 'filter', 'type': 'str'},
'filter_value': {'key': 'filterValue', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SelectAudioTrackByAttribute, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.SelectAudioTrackByAttribute' # type: str
self.attribute = kwargs['attribute']
self.filter = kwargs['filter']
self.filter_value = kwargs.get('filter_value', None)
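# Illustrative sketch: a track selector that keeps only the highest-bitrate audio track of
# the input and maps it to the stereo-left channel. The attribute/filter/channel strings
# come from the enums documented above.
def _example_top_bitrate_audio():
    return SelectAudioTrackByAttribute(
        attribute="Bitrate",
        filter="Top",
        channel_mapping="StereoLeft",
    )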
class SelectAudioTrackById(AudioTrackDescriptor):
"""Select audio tracks from the input by specifying a track identifier.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param channel_mapping: Optional designation for single channel audio tracks. Can be used to
combine the tracks into stereo or multi-channel audio tracks. Possible values include:
"FrontLeft", "FrontRight", "Center", "LowFrequencyEffects", "BackLeft", "BackRight",
"StereoLeft", "StereoRight".
:type channel_mapping: str or ~azure.mgmt.media.models.ChannelMapping
:param track_id: Required. Track identifier to select.
:type track_id: long
"""
_validation = {
'odata_type': {'required': True},
'track_id': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'channel_mapping': {'key': 'channelMapping', 'type': 'str'},
'track_id': {'key': 'trackId', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
super(SelectAudioTrackById, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.SelectAudioTrackById' # type: str
self.track_id = kwargs['track_id']
class VideoTrackDescriptor(TrackDescriptor):
"""A TrackSelection to select video tracks.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: SelectVideoTrackByAttribute, SelectVideoTrackById.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
}
_subtype_map = {
'odata_type': {'#Microsoft.Media.SelectVideoTrackByAttribute': 'SelectVideoTrackByAttribute', '#Microsoft.Media.SelectVideoTrackById': 'SelectVideoTrackById'}
}
def __init__(
self,
**kwargs
):
super(VideoTrackDescriptor, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.VideoTrackDescriptor' # type: str
class SelectVideoTrackByAttribute(VideoTrackDescriptor):
"""Select video tracks from the input by specifying an attribute and an attribute filter.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param attribute: Required. The TrackAttribute to filter the tracks by. Possible values
include: "Bitrate", "Language".
:type attribute: str or ~azure.mgmt.media.models.TrackAttribute
:param filter: Required. The type of AttributeFilter to apply to the TrackAttribute in order to
select the tracks. Possible values include: "All", "Top", "Bottom", "ValueEquals".
:type filter: str or ~azure.mgmt.media.models.AttributeFilter
:param filter_value: The value to filter the tracks by. Only used when
AttributeFilter.ValueEquals is specified for the Filter property. For TrackAttribute.Bitrate,
     this should be an integer value in bits per second (e.g. '1500000'). The
TrackAttribute.Language is not supported for video tracks.
:type filter_value: str
"""
_validation = {
'odata_type': {'required': True},
'attribute': {'required': True},
'filter': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'attribute': {'key': 'attribute', 'type': 'str'},
'filter': {'key': 'filter', 'type': 'str'},
'filter_value': {'key': 'filterValue', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SelectVideoTrackByAttribute, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.SelectVideoTrackByAttribute' # type: str
self.attribute = kwargs['attribute']
self.filter = kwargs['filter']
self.filter_value = kwargs.get('filter_value', None)
class SelectVideoTrackById(VideoTrackDescriptor):
"""Select video tracks from the input by specifying a track identifier.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param track_id: Required. Track identifier to select.
:type track_id: long
"""
_validation = {
'odata_type': {'required': True},
'track_id': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'track_id': {'key': 'trackId', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
super(SelectVideoTrackById, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.SelectVideoTrackById' # type: str
self.track_id = kwargs['track_id']
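# Illustrative sketch: selecting a single video track by its identifier (track 1 here is a
# placeholder value).
def _example_video_track_by_id():
    return SelectVideoTrackById(track_id=1)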
class ServiceSpecification(msrest.serialization.Model):
"""The service metric specifications.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar log_specifications: List of log specifications.
:vartype log_specifications: list[~azure.mgmt.media.models.LogSpecification]
:ivar metric_specifications: List of metric specifications.
:vartype metric_specifications: list[~azure.mgmt.media.models.MetricSpecification]
"""
_validation = {
'log_specifications': {'readonly': True},
'metric_specifications': {'readonly': True},
}
_attribute_map = {
'log_specifications': {'key': 'logSpecifications', 'type': '[LogSpecification]'},
'metric_specifications': {'key': 'metricSpecifications', 'type': '[MetricSpecification]'},
}
def __init__(
self,
**kwargs
):
super(ServiceSpecification, self).__init__(**kwargs)
self.log_specifications = None
self.metric_specifications = None
class StandardEncoderPreset(Preset):
"""Describes all the settings to be used when encoding the input video with the Standard Encoder.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param filters: One or more filtering operations that are applied to the input media before
encoding.
:type filters: ~azure.mgmt.media.models.Filters
:param codecs: Required. The list of codecs to be used when encoding the input video.
:type codecs: list[~azure.mgmt.media.models.Codec]
:param formats: Required. The list of outputs to be produced by the encoder.
:type formats: list[~azure.mgmt.media.models.Format]
"""
_validation = {
'odata_type': {'required': True},
'codecs': {'required': True},
'formats': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'filters': {'key': 'filters', 'type': 'Filters'},
'codecs': {'key': 'codecs', 'type': '[Codec]'},
'formats': {'key': 'formats', 'type': '[Format]'},
}
def __init__(
self,
**kwargs
):
super(StandardEncoderPreset, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.StandardEncoderPreset' # type: str
self.filters = kwargs.get('filters', None)
self.codecs = kwargs['codecs']
self.formats = kwargs['formats']
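# Illustrative sketch: a minimal custom transform preset with one H.264 video layer and one
# AAC audio codec, written out as a single MP4. AacAudio, H264Video and H264Layer are
# assumed to be the codec models defined elsewhere in this module; the bitrates and
# resolution are placeholders, not recommended settings.
def _example_standard_encoder_preset():
    return StandardEncoderPreset(
        codecs=[
            AacAudio(channels=2, sampling_rate=48000, bitrate=128000),
            H264Video(layers=[H264Layer(bitrate=1000000, width="1280", height="720")]),
        ],
        formats=[Mp4Format(filename_pattern="{Basename}_{Bitrate}{Extension}")],
    )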
class StorageAccount(msrest.serialization.Model):
"""The storage account details.
All required parameters must be populated in order to send to Azure.
:param id: The ID of the storage account resource. Media Services relies on tables and queues
as well as blobs, so the primary storage account must be a Standard Storage account (either
Microsoft.ClassicStorage or Microsoft.Storage). Blob only storage accounts can be added as
secondary storage accounts.
:type id: str
:param type: Required. The type of the storage account. Possible values include: "Primary",
"Secondary".
:type type: str or ~azure.mgmt.media.models.StorageAccountType
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(StorageAccount, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.type = kwargs['type']
class StorageEncryptedAssetDecryptionData(msrest.serialization.Model):
"""Data needed to decrypt asset files encrypted with legacy storage encryption.
:param key: The Asset File storage encryption key.
:type key: bytearray
:param asset_file_encryption_metadata: Asset File encryption metadata.
:type asset_file_encryption_metadata:
list[~azure.mgmt.media.models.AssetFileEncryptionMetadata]
"""
_attribute_map = {
'key': {'key': 'key', 'type': 'bytearray'},
'asset_file_encryption_metadata': {'key': 'assetFileEncryptionMetadata', 'type': '[AssetFileEncryptionMetadata]'},
}
def __init__(
self,
**kwargs
):
super(StorageEncryptedAssetDecryptionData, self).__init__(**kwargs)
self.key = kwargs.get('key', None)
self.asset_file_encryption_metadata = kwargs.get('asset_file_encryption_metadata', None)
class StreamingEndpoint(TrackedResource):
"""The streaming endpoint.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
:ivar system_data: The system metadata relating to this resource.
:vartype system_data: ~azure.mgmt.media.models.SystemData
:param description: The streaming endpoint description.
:type description: str
:param scale_units: The number of scale units. Use the Scale operation to adjust this value.
:type scale_units: int
:param availability_set_name: This feature is deprecated, do not set a value for this property.
:type availability_set_name: str
:param access_control: The access control definition of the streaming endpoint.
:type access_control: ~azure.mgmt.media.models.StreamingEndpointAccessControl
:param max_cache_age: Max cache age.
:type max_cache_age: long
:param custom_host_names: The custom host names of the streaming endpoint.
:type custom_host_names: list[str]
:ivar host_name: The streaming endpoint host name.
:vartype host_name: str
:param cdn_enabled: The CDN enabled flag.
:type cdn_enabled: bool
:param cdn_provider: The CDN provider name.
:type cdn_provider: str
:param cdn_profile: The CDN profile name.
:type cdn_profile: str
:ivar provisioning_state: The provisioning state of the streaming endpoint.
:vartype provisioning_state: str
:ivar resource_state: The resource state of the streaming endpoint. Possible values include:
"Stopped", "Starting", "Running", "Stopping", "Deleting", "Scaling".
:vartype resource_state: str or ~azure.mgmt.media.models.StreamingEndpointResourceState
:param cross_site_access_policies: The streaming endpoint access policies.
:type cross_site_access_policies: ~azure.mgmt.media.models.CrossSiteAccessPolicies
:ivar free_trial_end_time: The free trial expiration time.
:vartype free_trial_end_time: ~datetime.datetime
:ivar created: The exact time the streaming endpoint was created.
:vartype created: ~datetime.datetime
:ivar last_modified: The exact time the streaming endpoint was last modified.
:vartype last_modified: ~datetime.datetime
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'system_data': {'readonly': True},
'host_name': {'readonly': True},
'provisioning_state': {'readonly': True},
'resource_state': {'readonly': True},
'free_trial_end_time': {'readonly': True},
'created': {'readonly': True},
'last_modified': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'description': {'key': 'properties.description', 'type': 'str'},
'scale_units': {'key': 'properties.scaleUnits', 'type': 'int'},
'availability_set_name': {'key': 'properties.availabilitySetName', 'type': 'str'},
'access_control': {'key': 'properties.accessControl', 'type': 'StreamingEndpointAccessControl'},
'max_cache_age': {'key': 'properties.maxCacheAge', 'type': 'long'},
'custom_host_names': {'key': 'properties.customHostNames', 'type': '[str]'},
'host_name': {'key': 'properties.hostName', 'type': 'str'},
'cdn_enabled': {'key': 'properties.cdnEnabled', 'type': 'bool'},
'cdn_provider': {'key': 'properties.cdnProvider', 'type': 'str'},
'cdn_profile': {'key': 'properties.cdnProfile', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'resource_state': {'key': 'properties.resourceState', 'type': 'str'},
'cross_site_access_policies': {'key': 'properties.crossSiteAccessPolicies', 'type': 'CrossSiteAccessPolicies'},
'free_trial_end_time': {'key': 'properties.freeTrialEndTime', 'type': 'iso-8601'},
'created': {'key': 'properties.created', 'type': 'iso-8601'},
'last_modified': {'key': 'properties.lastModified', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(StreamingEndpoint, self).__init__(**kwargs)
self.system_data = None
self.description = kwargs.get('description', None)
self.scale_units = kwargs.get('scale_units', None)
self.availability_set_name = kwargs.get('availability_set_name', None)
self.access_control = kwargs.get('access_control', None)
self.max_cache_age = kwargs.get('max_cache_age', None)
self.custom_host_names = kwargs.get('custom_host_names', None)
self.host_name = None
self.cdn_enabled = kwargs.get('cdn_enabled', None)
self.cdn_provider = kwargs.get('cdn_provider', None)
self.cdn_profile = kwargs.get('cdn_profile', None)
self.provisioning_state = None
self.resource_state = None
self.cross_site_access_policies = kwargs.get('cross_site_access_policies', None)
self.free_trial_end_time = None
self.created = None
self.last_modified = None
class StreamingEndpointAccessControl(msrest.serialization.Model):
"""Streaming endpoint access control definition.
:param akamai: The access control of Akamai.
:type akamai: ~azure.mgmt.media.models.AkamaiAccessControl
:param ip: The IP access control of the streaming endpoint.
:type ip: ~azure.mgmt.media.models.IPAccessControl
"""
_attribute_map = {
'akamai': {'key': 'akamai', 'type': 'AkamaiAccessControl'},
'ip': {'key': 'ip', 'type': 'IPAccessControl'},
}
def __init__(
self,
**kwargs
):
super(StreamingEndpointAccessControl, self).__init__(**kwargs)
self.akamai = kwargs.get('akamai', None)
self.ip = kwargs.get('ip', None)
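# Illustrative sketch: an access-control block that allows a single client IP range on the
# streaming endpoint. IPAccessControl and IPRange are assumed to be the models defined
# elsewhere in this module; the address and prefix length are placeholders.
def _example_endpoint_access_control():
    return StreamingEndpointAccessControl(
        ip=IPAccessControl(
            allow=[IPRange(name="AllowedRange", address="192.168.0.0", subnet_prefix_length=24)]
        )
    )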
class StreamingEndpointListResult(msrest.serialization.Model):
"""The streaming endpoint list result.
:param value: The result of the List StreamingEndpoint operation.
:type value: list[~azure.mgmt.media.models.StreamingEndpoint]
    :param odata_count: The number of results.
:type odata_count: int
    :param odata_next_link: The link to the next set of results. Not empty if the value contains
     an incomplete list of streaming endpoints.
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[StreamingEndpoint]'},
'odata_count': {'key': '@odata\\.count', 'type': 'int'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(StreamingEndpointListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.odata_count = kwargs.get('odata_count', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class StreamingEntityScaleUnit(msrest.serialization.Model):
"""scale units definition.
:param scale_unit: The scale unit number of the streaming endpoint.
:type scale_unit: int
"""
_attribute_map = {
'scale_unit': {'key': 'scaleUnit', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(StreamingEntityScaleUnit, self).__init__(**kwargs)
self.scale_unit = kwargs.get('scale_unit', None)
class StreamingLocator(ProxyResource):
"""A Streaming Locator resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: The system metadata relating to this resource.
:vartype system_data: ~azure.mgmt.media.models.SystemData
:param asset_name: Asset Name.
:type asset_name: str
:ivar created: The creation time of the Streaming Locator.
:vartype created: ~datetime.datetime
:param start_time: The start time of the Streaming Locator.
:type start_time: ~datetime.datetime
:param end_time: The end time of the Streaming Locator.
:type end_time: ~datetime.datetime
:param streaming_locator_id: The StreamingLocatorId of the Streaming Locator.
:type streaming_locator_id: str
:param streaming_policy_name: Name of the Streaming Policy used by this Streaming Locator.
Either specify the name of Streaming Policy you created or use one of the predefined Streaming
Policies. The predefined Streaming Policies available are: 'Predefined_DownloadOnly',
'Predefined_ClearStreamingOnly', 'Predefined_DownloadAndClearStreaming', 'Predefined_ClearKey',
'Predefined_MultiDrmCencStreaming' and 'Predefined_MultiDrmStreaming'.
:type streaming_policy_name: str
:param default_content_key_policy_name: Name of the default ContentKeyPolicy used by this
Streaming Locator.
:type default_content_key_policy_name: str
:param content_keys: The ContentKeys used by this Streaming Locator.
:type content_keys: list[~azure.mgmt.media.models.StreamingLocatorContentKey]
:param alternative_media_id: Alternative Media ID of this Streaming Locator.
:type alternative_media_id: str
:param filters: A list of asset or account filters which apply to this streaming locator.
:type filters: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'created': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'asset_name': {'key': 'properties.assetName', 'type': 'str'},
'created': {'key': 'properties.created', 'type': 'iso-8601'},
'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
'end_time': {'key': 'properties.endTime', 'type': 'iso-8601'},
'streaming_locator_id': {'key': 'properties.streamingLocatorId', 'type': 'str'},
'streaming_policy_name': {'key': 'properties.streamingPolicyName', 'type': 'str'},
'default_content_key_policy_name': {'key': 'properties.defaultContentKeyPolicyName', 'type': 'str'},
'content_keys': {'key': 'properties.contentKeys', 'type': '[StreamingLocatorContentKey]'},
'alternative_media_id': {'key': 'properties.alternativeMediaId', 'type': 'str'},
'filters': {'key': 'properties.filters', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(StreamingLocator, self).__init__(**kwargs)
self.system_data = None
self.asset_name = kwargs.get('asset_name', None)
self.created = None
self.start_time = kwargs.get('start_time', None)
self.end_time = kwargs.get('end_time', None)
self.streaming_locator_id = kwargs.get('streaming_locator_id', None)
self.streaming_policy_name = kwargs.get('streaming_policy_name', None)
self.default_content_key_policy_name = kwargs.get('default_content_key_policy_name', None)
self.content_keys = kwargs.get('content_keys', None)
self.alternative_media_id = kwargs.get('alternative_media_id', None)
self.filters = kwargs.get('filters', None)
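# Illustrative sketch: a streaming locator that publishes an asset with one of the
# predefined clear-streaming policies listed in the docstring above. The asset name is a
# placeholder.
def _example_streaming_locator():
    return StreamingLocator(
        asset_name="my-encoded-asset",
        streaming_policy_name="Predefined_ClearStreamingOnly",
    )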
class StreamingLocatorCollection(msrest.serialization.Model):
"""A collection of StreamingLocator items.
:param value: A collection of StreamingLocator items.
:type value: list[~azure.mgmt.media.models.StreamingLocator]
:param odata_next_link: A link to the next page of the collection (when the collection contains
too many results to return in one response).
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[StreamingLocator]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(StreamingLocatorCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class StreamingLocatorContentKey(msrest.serialization.Model):
"""Class for content key in Streaming Locator.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Required. ID of Content Key.
:type id: str
:ivar type: Encryption type of Content Key. Possible values include: "CommonEncryptionCenc",
"CommonEncryptionCbcs", "EnvelopeEncryption".
:vartype type: str or ~azure.mgmt.media.models.StreamingLocatorContentKeyType
:param label_reference_in_streaming_policy: Label of Content Key as specified in the Streaming
Policy.
:type label_reference_in_streaming_policy: str
:param value: Value of Content Key.
:type value: str
:ivar policy_name: ContentKeyPolicy used by Content Key.
:vartype policy_name: str
:ivar tracks: Tracks which use this Content Key.
:vartype tracks: list[~azure.mgmt.media.models.TrackSelection]
"""
_validation = {
'id': {'required': True},
'type': {'readonly': True},
'policy_name': {'readonly': True},
'tracks': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'label_reference_in_streaming_policy': {'key': 'labelReferenceInStreamingPolicy', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
'policy_name': {'key': 'policyName', 'type': 'str'},
'tracks': {'key': 'tracks', 'type': '[TrackSelection]'},
}
def __init__(
self,
**kwargs
):
super(StreamingLocatorContentKey, self).__init__(**kwargs)
self.id = kwargs['id']
self.type = None
self.label_reference_in_streaming_policy = kwargs.get('label_reference_in_streaming_policy', None)
self.value = kwargs.get('value', None)
self.policy_name = None
self.tracks = None
class StreamingPath(msrest.serialization.Model):
"""Class of paths for streaming.
All required parameters must be populated in order to send to Azure.
:param streaming_protocol: Required. Streaming protocol. Possible values include: "Hls",
"Dash", "SmoothStreaming", "Download".
:type streaming_protocol: str or ~azure.mgmt.media.models.StreamingPolicyStreamingProtocol
:param encryption_scheme: Required. Encryption scheme. Possible values include: "NoEncryption",
"EnvelopeEncryption", "CommonEncryptionCenc", "CommonEncryptionCbcs".
:type encryption_scheme: str or ~azure.mgmt.media.models.EncryptionScheme
:param paths: Streaming paths for each protocol and encryptionScheme pair.
:type paths: list[str]
"""
_validation = {
'streaming_protocol': {'required': True},
'encryption_scheme': {'required': True},
}
_attribute_map = {
'streaming_protocol': {'key': 'streamingProtocol', 'type': 'str'},
'encryption_scheme': {'key': 'encryptionScheme', 'type': 'str'},
'paths': {'key': 'paths', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(StreamingPath, self).__init__(**kwargs)
self.streaming_protocol = kwargs['streaming_protocol']
self.encryption_scheme = kwargs['encryption_scheme']
self.paths = kwargs.get('paths', None)
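# Illustrative example only (not part of the generated SDK): a StreamingPath
# such as the list-paths operation deserializes into. The protocol, scheme and
# path string below are assumed sample data.
_example_streaming_path = StreamingPath(
    streaming_protocol='Hls',
    encryption_scheme='NoEncryption',
    paths=['/example-locator/manifest.ism/manifest(format=m3u8-aapl)'])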
class StreamingPolicy(ProxyResource):
"""A Streaming Policy resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: The system metadata relating to this resource.
:vartype system_data: ~azure.mgmt.media.models.SystemData
:ivar created: Creation time of Streaming Policy.
:vartype created: ~datetime.datetime
:param default_content_key_policy_name: Default ContentKey used by current Streaming Policy.
:type default_content_key_policy_name: str
:param envelope_encryption: Configuration of EnvelopeEncryption.
:type envelope_encryption: ~azure.mgmt.media.models.EnvelopeEncryption
:param common_encryption_cenc: Configuration of CommonEncryptionCenc.
:type common_encryption_cenc: ~azure.mgmt.media.models.CommonEncryptionCenc
:param common_encryption_cbcs: Configuration of CommonEncryptionCbcs.
:type common_encryption_cbcs: ~azure.mgmt.media.models.CommonEncryptionCbcs
:param no_encryption: Configurations of NoEncryption.
:type no_encryption: ~azure.mgmt.media.models.NoEncryption
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'created': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'created': {'key': 'properties.created', 'type': 'iso-8601'},
'default_content_key_policy_name': {'key': 'properties.defaultContentKeyPolicyName', 'type': 'str'},
'envelope_encryption': {'key': 'properties.envelopeEncryption', 'type': 'EnvelopeEncryption'},
'common_encryption_cenc': {'key': 'properties.commonEncryptionCenc', 'type': 'CommonEncryptionCenc'},
'common_encryption_cbcs': {'key': 'properties.commonEncryptionCbcs', 'type': 'CommonEncryptionCbcs'},
'no_encryption': {'key': 'properties.noEncryption', 'type': 'NoEncryption'},
}
def __init__(
self,
**kwargs
):
super(StreamingPolicy, self).__init__(**kwargs)
self.system_data = None
self.created = None
self.default_content_key_policy_name = kwargs.get('default_content_key_policy_name', None)
self.envelope_encryption = kwargs.get('envelope_encryption', None)
self.common_encryption_cenc = kwargs.get('common_encryption_cenc', None)
self.common_encryption_cbcs = kwargs.get('common_encryption_cbcs', None)
self.no_encryption = kwargs.get('no_encryption', None)
class StreamingPolicyCollection(msrest.serialization.Model):
"""A collection of StreamingPolicy items.
:param value: A collection of StreamingPolicy items.
:type value: list[~azure.mgmt.media.models.StreamingPolicy]
:param odata_next_link: A link to the next page of the collection (when the collection contains
too many results to return in one response).
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[StreamingPolicy]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(StreamingPolicyCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class StreamingPolicyContentKey(msrest.serialization.Model):
"""Class to specify properties of content key.
:param label: Label can be used to specify Content Key when creating a Streaming Locator.
:type label: str
:param policy_name: Policy used by Content Key.
:type policy_name: str
:param tracks: Tracks which use this content key.
:type tracks: list[~azure.mgmt.media.models.TrackSelection]
"""
_attribute_map = {
'label': {'key': 'label', 'type': 'str'},
'policy_name': {'key': 'policyName', 'type': 'str'},
'tracks': {'key': 'tracks', 'type': '[TrackSelection]'},
}
def __init__(
self,
**kwargs
):
super(StreamingPolicyContentKey, self).__init__(**kwargs)
self.label = kwargs.get('label', None)
self.policy_name = kwargs.get('policy_name', None)
self.tracks = kwargs.get('tracks', None)
class StreamingPolicyContentKeys(msrest.serialization.Model):
"""Class to specify properties of all content keys in Streaming Policy.
:param default_key: Default content key for an encryption scheme.
:type default_key: ~azure.mgmt.media.models.DefaultKey
:param key_to_track_mappings: Mappings for tracks that need a separate content key.
:type key_to_track_mappings: list[~azure.mgmt.media.models.StreamingPolicyContentKey]
"""
_attribute_map = {
'default_key': {'key': 'defaultKey', 'type': 'DefaultKey'},
'key_to_track_mappings': {'key': 'keyToTrackMappings', 'type': '[StreamingPolicyContentKey]'},
}
def __init__(
self,
**kwargs
):
super(StreamingPolicyContentKeys, self).__init__(**kwargs)
self.default_key = kwargs.get('default_key', None)
self.key_to_track_mappings = kwargs.get('key_to_track_mappings', None)
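# Illustrative example only: composing the content keys section of a Streaming
# Policy. DefaultKey is assumed to be defined earlier in this module (it is the
# type referenced by the docstring above); the labels are sample values.
_example_policy_content_keys = StreamingPolicyContentKeys(
    default_key=DefaultKey(label='cencDefaultKey'),
    key_to_track_mappings=[
        StreamingPolicyContentKey(label='audioKey')])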
class StreamingPolicyFairPlayConfiguration(msrest.serialization.Model):
"""Class to specify configurations of FairPlay in Streaming Policy.
All required parameters must be populated in order to send to Azure.
:param custom_license_acquisition_url_template: Template for the URL of the custom service
delivering licenses to end user players. Not required when using Azure Media Services for
issuing licenses. The template supports replaceable tokens that the service will update at
runtime with the value specific to the request. The currently supported token values are
{AlternativeMediaId}, which is replaced with the value of
StreamingLocatorId.AlternativeMediaId, and {ContentKeyId}, which is replaced with the
identifier of the key being requested.
:type custom_license_acquisition_url_template: str
:param allow_persistent_license: Required. Whether all licenses are to be persistent or not.
:type allow_persistent_license: bool
"""
_validation = {
'allow_persistent_license': {'required': True},
}
_attribute_map = {
'custom_license_acquisition_url_template': {'key': 'customLicenseAcquisitionUrlTemplate', 'type': 'str'},
'allow_persistent_license': {'key': 'allowPersistentLicense', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(StreamingPolicyFairPlayConfiguration, self).__init__(**kwargs)
self.custom_license_acquisition_url_template = kwargs.get('custom_license_acquisition_url_template', None)
self.allow_persistent_license = kwargs['allow_persistent_license']
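# Illustrative example only: a FairPlay configuration whose custom license
# acquisition URL uses the {ContentKeyId} replaceable token described above.
# The URL is a placeholder, not a real endpoint.
_example_fairplay_config = StreamingPolicyFairPlayConfiguration(
    allow_persistent_license=False,
    custom_license_acquisition_url_template=(
        'https://license.example.com/fairplay?keyId={ContentKeyId}'))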
class StreamingPolicyPlayReadyConfiguration(msrest.serialization.Model):
"""Class to specify configurations of PlayReady in Streaming Policy.
:param custom_license_acquisition_url_template: Template for the URL of the custom service
delivering licenses to end user players. Not required when using Azure Media Services for
issuing licenses. The template supports replaceable tokens that the service will update at
runtime with the value specific to the request. The currently supported token values are
{AlternativeMediaId}, which is replaced with the value of
StreamingLocatorId.AlternativeMediaId, and {ContentKeyId}, which is replaced with the
identifier of the key being requested.
:type custom_license_acquisition_url_template: str
:param play_ready_custom_attributes: Custom attributes for PlayReady.
:type play_ready_custom_attributes: str
"""
_attribute_map = {
'custom_license_acquisition_url_template': {'key': 'customLicenseAcquisitionUrlTemplate', 'type': 'str'},
'play_ready_custom_attributes': {'key': 'playReadyCustomAttributes', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(StreamingPolicyPlayReadyConfiguration, self).__init__(**kwargs)
self.custom_license_acquisition_url_template = kwargs.get('custom_license_acquisition_url_template', None)
self.play_ready_custom_attributes = kwargs.get('play_ready_custom_attributes', None)
class StreamingPolicyWidevineConfiguration(msrest.serialization.Model):
"""Class to specify configurations of Widevine in Streaming Policy.
:param custom_license_acquisition_url_template: Template for the URL of the custom service
delivering licenses to end user players. Not required when using Azure Media Services for
issuing licenses. The template supports replaceable tokens that the service will update at
runtime with the value specific to the request. The currently supported token values are
{AlternativeMediaId}, which is replaced with the value of
StreamingLocatorId.AlternativeMediaId, and {ContentKeyId}, which is replaced with the
identifier of the key being requested.
:type custom_license_acquisition_url_template: str
"""
_attribute_map = {
'custom_license_acquisition_url_template': {'key': 'customLicenseAcquisitionUrlTemplate', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(StreamingPolicyWidevineConfiguration, self).__init__(**kwargs)
self.custom_license_acquisition_url_template = kwargs.get('custom_license_acquisition_url_template', None)
class SyncStorageKeysInput(msrest.serialization.Model):
"""The input to the sync storage keys request.
:param id: The ID of the storage account resource.
:type id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SyncStorageKeysInput, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
class SystemData(msrest.serialization.Model):
"""Metadata pertaining to creation and last modification of the resource.
:param created_by: The identity that created the resource.
:type created_by: str
:param created_by_type: The type of identity that created the resource. Possible values
include: "User", "Application", "ManagedIdentity", "Key".
:type created_by_type: str or ~azure.mgmt.media.models.CreatedByType
:param created_at: The timestamp of resource creation (UTC).
:type created_at: ~datetime.datetime
:param last_modified_by: The identity that last modified the resource.
:type last_modified_by: str
:param last_modified_by_type: The type of identity that last modified the resource. Possible
values include: "User", "Application", "ManagedIdentity", "Key".
:type last_modified_by_type: str or ~azure.mgmt.media.models.CreatedByType
:param last_modified_at: The timestamp of resource last modification (UTC).
:type last_modified_at: ~datetime.datetime
"""
_attribute_map = {
'created_by': {'key': 'createdBy', 'type': 'str'},
'created_by_type': {'key': 'createdByType', 'type': 'str'},
'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(SystemData, self).__init__(**kwargs)
self.created_by = kwargs.get('created_by', None)
self.created_by_type = kwargs.get('created_by_type', None)
self.created_at = kwargs.get('created_at', None)
self.last_modified_by = kwargs.get('last_modified_by', None)
self.last_modified_by_type = kwargs.get('last_modified_by_type', None)
self.last_modified_at = kwargs.get('last_modified_at', None)
class TrackPropertyCondition(msrest.serialization.Model):
"""Class to specify one track property condition.
All required parameters must be populated in order to send to Azure.
:param property: Required. Track property type. Possible values include: "Unknown", "FourCC".
:type property: str or ~azure.mgmt.media.models.TrackPropertyType
:param operation: Required. Track property condition operation. Possible values include:
"Unknown", "Equal".
:type operation: str or ~azure.mgmt.media.models.TrackPropertyCompareOperation
:param value: Track property value.
:type value: str
"""
_validation = {
'property': {'required': True},
'operation': {'required': True},
}
_attribute_map = {
'property': {'key': 'property', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TrackPropertyCondition, self).__init__(**kwargs)
self.property = kwargs['property']
self.operation = kwargs['operation']
self.value = kwargs.get('value', None)
class TrackSelection(msrest.serialization.Model):
"""Class to select a track.
:param track_selections: A list of track property conditions which can be used to specify
the track(s) to select.
:type track_selections: list[~azure.mgmt.media.models.TrackPropertyCondition]
"""
_attribute_map = {
'track_selections': {'key': 'trackSelections', 'type': '[TrackPropertyCondition]'},
}
def __init__(
self,
**kwargs
):
super(TrackSelection, self).__init__(**kwargs)
self.track_selections = kwargs.get('track_selections', None)
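# Illustrative example only: selecting audio tracks by FourCC, as used for
# example in StreamingPolicyContentKey.tracks above. The FourCC value is
# sample data.
_example_track_selection = TrackSelection(track_selections=[
    TrackPropertyCondition(property='FourCC', operation='Equal', value='mp4a')])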
class Transform(ProxyResource):
"""A Transform encapsulates the rules or instructions for generating desired outputs from input media, such as by transcoding or by extracting insights. After the Transform is created, it can be applied to input media by creating Jobs.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: The system metadata relating to this resource.
:vartype system_data: ~azure.mgmt.media.models.SystemData
:ivar created: The UTC date and time when the Transform was created, in 'YYYY-MM-DDThh:mm:ssZ'
format.
:vartype created: ~datetime.datetime
:param description: An optional verbose description of the Transform.
:type description: str
:ivar last_modified: The UTC date and time when the Transform was last updated, in
'YYYY-MM-DDThh:mm:ssZ' format.
:vartype last_modified: ~datetime.datetime
:param outputs: An array of one or more TransformOutputs that the Transform should generate.
:type outputs: list[~azure.mgmt.media.models.TransformOutput]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'created': {'readonly': True},
'last_modified': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'created': {'key': 'properties.created', 'type': 'iso-8601'},
'description': {'key': 'properties.description', 'type': 'str'},
'last_modified': {'key': 'properties.lastModified', 'type': 'iso-8601'},
'outputs': {'key': 'properties.outputs', 'type': '[TransformOutput]'},
}
def __init__(
self,
**kwargs
):
super(Transform, self).__init__(**kwargs)
self.system_data = None
self.created = None
self.description = kwargs.get('description', None)
self.last_modified = None
self.outputs = kwargs.get('outputs', None)
class TransformCollection(msrest.serialization.Model):
"""A collection of Transform items.
:param value: A collection of Transform items.
:type value: list[~azure.mgmt.media.models.Transform]
:param odata_next_link: A link to the next page of the collection (when the collection contains
too many results to return in one response).
:type odata_next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Transform]'},
'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TransformCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.odata_next_link = kwargs.get('odata_next_link', None)
class TransformOutput(msrest.serialization.Model):
"""Describes the properties of a TransformOutput, which are the rules to be applied while generating the desired output.
All required parameters must be populated in order to send to Azure.
:param on_error: A Transform can define more than one output. This property defines what the
service should do when one output fails - either continue to produce other outputs, or stop
the other outputs. The overall Job state will not reflect failures of outputs that are
specified with 'ContinueJob'. The default is 'StopProcessingJob'. Possible values include:
"StopProcessingJob", "ContinueJob".
:type on_error: str or ~azure.mgmt.media.models.OnErrorType
:param relative_priority: Sets the relative priority of the TransformOutputs within a
Transform. This sets the priority that the service uses for processing TransformOutputs. The
default priority is Normal. Possible values include: "Low", "Normal", "High".
:type relative_priority: str or ~azure.mgmt.media.models.Priority
:param preset: Required. Preset that describes the operations that will be used to modify,
transcode, or extract insights from the source file to generate the output.
:type preset: ~azure.mgmt.media.models.Preset
"""
_validation = {
'preset': {'required': True},
}
_attribute_map = {
'on_error': {'key': 'onError', 'type': 'str'},
'relative_priority': {'key': 'relativePriority', 'type': 'str'},
'preset': {'key': 'preset', 'type': 'Preset'},
}
def __init__(
self,
**kwargs
):
super(TransformOutput, self).__init__(**kwargs)
self.on_error = kwargs.get('on_error', None)
self.relative_priority = kwargs.get('relative_priority', None)
self.preset = kwargs['preset']
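# Illustrative example only: a TransformOutput that continues the Job when
# other outputs fail. AudioAnalyzerPreset is assumed to be defined earlier in
# this module (it is the base class of VideoAnalyzerPreset below).
_example_transform_output = TransformOutput(
    preset=AudioAnalyzerPreset(audio_language='en-US'),
    on_error='ContinueJob',
    relative_priority='Normal')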
class TransportStreamFormat(MultiBitrateFormat):
"""Describes the properties for generating an MPEG-2 Transport Stream (ISO/IEC 13818-1) output video file(s).
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param filename_pattern: Required. The pattern of the file names for the generated output
files. The following macros are supported in the file name: {Basename} - An expansion macro
that will use the name of the input video file. If the base name (the file suffix is not
included) of the input video file is less than 32 characters long, the base name of input video
files will be used. If the length of base name of the input video file exceeds 32 characters,
the base name is truncated to the first 32 characters in total length. {Extension} - The
appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index}
- A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video
bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec.
{Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed
from the filename.
:type filename_pattern: str
:param output_files: The list of output files to produce. Each entry in the list is a set of
audio and video layer labels to be muxed together.
:type output_files: list[~azure.mgmt.media.models.OutputFile]
"""
_validation = {
'odata_type': {'required': True},
'filename_pattern': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'filename_pattern': {'key': 'filenamePattern', 'type': 'str'},
'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'},
}
def __init__(
self,
**kwargs
):
super(TransportStreamFormat, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.TransportStreamFormat' # type: str
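# Illustrative example only: a Transport Stream output format whose file name
# pattern uses the macros described in the docstring above.
_example_ts_format = TransportStreamFormat(
    filename_pattern='{Basename}_{Bitrate}{Extension}')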
class UtcClipTime(ClipTime):
"""Specifies the clip time as a Utc time position in the media file. The Utc time can point to a different position depending on whether the media file starts from a timestamp of zero or not.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param time: Required. The time position on the timeline of the input media based on Utc time.
:type time: ~datetime.datetime
"""
_validation = {
'odata_type': {'required': True},
'time': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'time': {'key': 'time', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(UtcClipTime, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.UtcClipTime' # type: str
self.time = kwargs['time']
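# Illustrative example only: clipping at an absolute UTC position.
import datetime
_example_clip_time = UtcClipTime(
    time=datetime.datetime(2021, 1, 1, 12, 0, 0))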
class VideoAnalyzerPreset(AudioAnalyzerPreset):
"""A video analyzer preset that extracts insights (rich metadata) from both audio and video, and outputs a JSON format file.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param audio_language: The language for the audio payload in the input using the BCP-47 format
of 'language tag-region' (e.g: 'en-US'). If you know the language of your content, it is
recommended that you specify it. The language must be specified explicitly for
AudioAnalysisMode::Basic, since automatic language detection is not included in basic mode. If
the language isn't specified or set to null, automatic language detection will choose the first
language detected and process with the selected language for the duration of the file. It does
not currently support dynamically switching between languages after the first language is
detected. The automatic detection works best with audio recordings with clearly discernable
speech. If automatic detection fails to find the language, transcription falls back to
'en-US'. The list of supported languages is available here:
https://go.microsoft.com/fwlink/?linkid=2109463.
:type audio_language: str
:param mode: Determines the set of audio analysis operations to be performed. If unspecified,
the Standard AudioAnalysisMode would be chosen. Possible values include: "Standard", "Basic".
:type mode: str or ~azure.mgmt.media.models.AudioAnalysisMode
:param experimental_options: Dictionary containing key value pairs for parameters not exposed
in the preset itself.
:type experimental_options: dict[str, str]
:param insights_to_extract: Defines the type of insights that you want the service to generate.
The allowed values are 'AudioInsightsOnly', 'VideoInsightsOnly', and 'AllInsights'. The default
is AllInsights. If you set this to AllInsights and the input is audio only, then only audio
insights are generated. Similarly if the input is video only, then only video insights are
generated. It is recommended that you not use AudioInsightsOnly if you expect some of your
inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio
only. Your Jobs in such conditions would error out. Possible values include:
"AudioInsightsOnly", "VideoInsightsOnly", "AllInsights".
:type insights_to_extract: str or ~azure.mgmt.media.models.InsightsType
"""
_validation = {
'odata_type': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'audio_language': {'key': 'audioLanguage', 'type': 'str'},
'mode': {'key': 'mode', 'type': 'str'},
'experimental_options': {'key': 'experimentalOptions', 'type': '{str}'},
'insights_to_extract': {'key': 'insightsToExtract', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoAnalyzerPreset, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.VideoAnalyzerPreset' # type: str
self.insights_to_extract = kwargs.get('insights_to_extract', None)
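# Illustrative example only: a video analyzer preset restricted to video
# insights, with an explicitly specified audio language.
_example_video_analyzer_preset = VideoAnalyzerPreset(
    audio_language='en-US',
    insights_to_extract='VideoInsightsOnly')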
class VideoOverlay(Overlay):
"""Describes the properties of a video overlay.
All required parameters must be populated in order to send to Azure.
:param odata_type: Required. The discriminator for derived types.Constant filled by server.
:type odata_type: str
:param input_label: Required. The label of the job input which is to be used as an overlay. The
Input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP
format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file. See
https://aka.ms/mesformats for the complete list of supported audio and video file formats.
:type input_label: str
:param start: The start position, with reference to the input video, at which the overlay
starts. The value should be in ISO 8601 format. For example, PT05S to start the overlay at 5
seconds into the input video. If not specified the overlay starts from the beginning of the
input video.
:type start: ~datetime.timedelta
:param end: The end position, with reference to the input video, at which the overlay ends. The
value should be in ISO 8601 format. For example, PT30S to end the overlay at 30 seconds into
the input video. If not specified or the value is greater than the input video duration, the
overlay will be applied until the end of the input video if the overlay media duration is
greater than the input video duration, else the overlay will last as long as the overlay media
duration.
:type end: ~datetime.timedelta
:param fade_in_duration: The duration over which the overlay fades in onto the input video. The
value should be in ISO 8601 duration format. If not specified the default behavior is to have
no fade in (same as PT0S).
:type fade_in_duration: ~datetime.timedelta
:param fade_out_duration: The duration over which the overlay fades out of the input video. The
value should be in ISO 8601 duration format. If not specified the default behavior is to have
no fade out (same as PT0S).
:type fade_out_duration: ~datetime.timedelta
:param audio_gain_level: The gain level of audio in the overlay. The value should be in the
range [0, 1.0]. The default is 1.0.
:type audio_gain_level: float
:param position: The location in the input video where the overlay is applied.
:type position: ~azure.mgmt.media.models.Rectangle
:param opacity: The opacity of the overlay. This is a value in the range [0 - 1.0]. Default is
1.0, which means the overlay is opaque.
:type opacity: float
:param crop_rectangle: An optional rectangular window used to crop the overlay image or video.
:type crop_rectangle: ~azure.mgmt.media.models.Rectangle
"""
_validation = {
'odata_type': {'required': True},
'input_label': {'required': True},
}
_attribute_map = {
'odata_type': {'key': '@odata\\.type', 'type': 'str'},
'input_label': {'key': 'inputLabel', 'type': 'str'},
'start': {'key': 'start', 'type': 'duration'},
'end': {'key': 'end', 'type': 'duration'},
'fade_in_duration': {'key': 'fadeInDuration', 'type': 'duration'},
'fade_out_duration': {'key': 'fadeOutDuration', 'type': 'duration'},
'audio_gain_level': {'key': 'audioGainLevel', 'type': 'float'},
'position': {'key': 'position', 'type': 'Rectangle'},
'opacity': {'key': 'opacity', 'type': 'float'},
'crop_rectangle': {'key': 'cropRectangle', 'type': 'Rectangle'},
}
def __init__(
self,
**kwargs
):
super(VideoOverlay, self).__init__(**kwargs)
self.odata_type = '#Microsoft.Media.VideoOverlay' # type: str
self.position = kwargs.get('position', None)
self.opacity = kwargs.get('opacity', None)
self.crop_rectangle = kwargs.get('crop_rectangle', None)
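# Illustrative example only: overlaying an input labelled 'logo' from 5 to 30
# seconds into the video at half opacity. The label is assumed sample data.
import datetime
_example_video_overlay = VideoOverlay(
    input_label='logo',
    start=datetime.timedelta(seconds=5),
    end=datetime.timedelta(seconds=30),
    opacity=0.5)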
| mit | -1,920,390,849,565,047,300 | 40.975699 | 517 | 0.657248 | false | 4.012915 | true | false | false |
noironetworks/group-based-policy | gbpservice/neutron/services/grouppolicy/drivers/cisco/apic/aim_mapping.py | 1 | 110494 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import re
from aim import aim_manager
from aim.api import resource as aim_resource
from aim import context as aim_context
from aim import utils as aim_utils
from neutron import policy
from neutron_lib import constants as n_constants
from neutron_lib import context as n_context
from neutron_lib import exceptions as n_exc
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import helpers as log
from oslo_log import log as logging
from oslo_utils import excutils
import six
import sqlalchemy as sa
from sqlalchemy.ext import baked
from gbpservice._i18n import _
from gbpservice.common import utils as gbp_utils
from gbpservice.neutron.db import api as db_api
from gbpservice.neutron.db.grouppolicy import group_policy_db as gpdb
from gbpservice.neutron.db.grouppolicy import group_policy_mapping_db as gpmdb
from gbpservice.neutron.extensions import cisco_apic
from gbpservice.neutron.extensions import cisco_apic_gbp as aim_ext
from gbpservice.neutron.extensions import cisco_apic_l3
from gbpservice.neutron.extensions import group_policy as gpolicy
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import (
mechanism_driver as md)
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import apic_mapper
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import nova_client
from gbpservice.neutron.services.grouppolicy.common import (
constants as gp_const)
from gbpservice.neutron.services.grouppolicy.common import constants as g_const
from gbpservice.neutron.services.grouppolicy.common import exceptions as exc
from gbpservice.neutron.services.grouppolicy.drivers import (
neutron_resources as nrd)
from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import (
aim_mapping_rpc as aim_rpc)
from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import (
aim_validation)
from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import (
apic_mapping_lib as alib)
from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import config # noqa
from gbpservice.neutron.services.grouppolicy import plugin as gbp_plugin
LOG = logging.getLogger(__name__)
BAKERY = baked.bakery(_size_alert=lambda c: LOG.warning(
"sqlalchemy baked query cache size exceeded in %s", __name__))
FORWARD = 'Forward'
REVERSE = 'Reverse'
FILTER_DIRECTIONS = {FORWARD: False, REVERSE: True}
FORWARD_FILTER_ENTRIES = 'Forward-FilterEntries'
REVERSE_FILTER_ENTRIES = 'Reverse-FilterEntries'
AUTO_PTG_NAME_PREFIX = 'ptg-for-l2p-%s'
# Note that this prefix should not exceed 4 characters
AUTO_PTG_PREFIX = 'auto'
AUTO_PTG_ID_PREFIX = AUTO_PTG_PREFIX + '%s'
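# For illustration: the auto PTG id and name are derived from the owning L2
# policy by helper methods defined later in this module (_get_auto_ptg_id,
# _get_auto_ptg_name). A sketch of the id derivation, assuming an md5-based
# scheme over the L2 policy UUID, would be:
#     AUTO_PTG_ID_PREFIX % hashlib.md5(l2p_id).hexdigest()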
# Definitions duplicated from apicapi lib
APIC_OWNED = 'apic_owned_'
CONTRACTS = 'contracts'
CONTRACT_SUBJECTS = 'contract_subjects'
FILTERS = 'filters'
FILTER_ENTRIES = 'filter_entries'
ENFORCED = aim_resource.EndpointGroup.POLICY_ENFORCED
UNENFORCED = aim_resource.EndpointGroup.POLICY_UNENFORCED
DEFAULT_SG_NAME = 'gbp_default'
COMMON_TENANT_AIM_RESOURCES = [aim_resource.Contract.__name__,
aim_resource.ContractSubject.__name__,
aim_resource.Filter.__name__,
aim_resource.FilterEntry.__name__]
# REVISIT: override add_router_interface L3 API check for now
NO_VALIDATE = cisco_apic_l3.OVERRIDE_NETWORK_ROUTING_TOPOLOGY_VALIDATION
class InvalidVrfForDualStackAddressScopes(exc.GroupPolicyBadRequest):
message = _("User-specified address scopes for both address families, "
"(IPv4 and IPv6) must use the same ACI VRF.")
class AutoPTGDeleteNotSupported(exc.GroupPolicyBadRequest):
message = _("Auto PTG %(id)s cannot be deleted.")
class ExplicitAPGAssociationNotSupportedForAutoPTG(
exc.GroupPolicyBadRequest):
message = _("Explicit APG association not supported for Auto PTG, "
"with AIM GBP driver")
class SharedAttributeUpdateNotSupported(exc.GroupPolicyBadRequest):
message = _("Resource shared attribute update not supported with AIM "
"GBP driver for resource of type %(type)s")
class IncorrectSubnetpoolUpdate(exc.GroupPolicyBadRequest):
message = _("Subnetpool %(subnetpool_id)s cannot be disassociated "
"from L3 Policy %(l3p_id)s since it has allocated subnet(s) "
"associated with that L3 Policy")
class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
"""AIM Mapping Orchestration driver.
This driver maps GBP resources to the ACI-Integration-Module (AIM).
"""
@log.log_method_call
def initialize(self):
LOG.info("APIC AIM Policy Driver initializing")
super(AIMMappingDriver, self).initialize()
self._apic_aim_mech_driver = None
self._apic_segmentation_label_driver = None
self._apic_allowed_vm_name_driver = None
self._aim = None
self._name_mapper = None
self.create_auto_ptg = cfg.CONF.aim_mapping.create_auto_ptg
if self.create_auto_ptg:
LOG.info('Auto PTG creation configuration set, '
'this will result in automatic creation of a PTG '
'per L2 Policy')
self.create_per_l3p_implicit_contracts = (
cfg.CONF.aim_mapping.create_per_l3p_implicit_contracts)
self.advertise_mtu = cfg.CONF.aim_mapping.advertise_mtu
if self.create_per_l3p_implicit_contracts:
LOG.info('Implicit AIM contracts will be created '
'for l3_policies which do not have them.')
self._create_per_l3p_implicit_contracts()
self._nested_host_vlan = (
cfg.CONF.aim_mapping.nested_host_vlan)
@log.log_method_call
def start_rpc_listeners(self):
return []
def validate_state(self, repair, resources, tenants):
mgr = aim_validation.ValidationManager()
return mgr.validate(repair, resources, tenants)
@property
def aim_mech_driver(self):
if not self._apic_aim_mech_driver:
self._apic_aim_mech_driver = (
self._core_plugin.mechanism_manager.mech_drivers[
'apic_aim'].obj)
return self._apic_aim_mech_driver
@property
def aim(self):
if not self._aim:
self._aim = self.aim_mech_driver.aim
return self._aim
@property
def name_mapper(self):
if not self._name_mapper:
self._name_mapper = self.aim_mech_driver.name_mapper
return self._name_mapper
@property
def apic_segmentation_label_driver(self):
if not self._apic_segmentation_label_driver:
ext_drivers = self.gbp_plugin.extension_manager.ordered_ext_drivers
for driver in ext_drivers:
if 'apic_segmentation_label' == driver.name:
self._apic_segmentation_label_driver = (
driver.obj)
break
return self._apic_segmentation_label_driver
@property
def apic_allowed_vm_name_driver(self):
if self._apic_allowed_vm_name_driver is False:
return False
if not self._apic_allowed_vm_name_driver:
ext_drivers = (self.gbp_plugin.extension_manager.
ordered_ext_drivers)
for driver in ext_drivers:
if 'apic_allowed_vm_name' == driver.name:
self._apic_allowed_vm_name_driver = driver.obj
break
if not self._apic_allowed_vm_name_driver:
self._apic_allowed_vm_name_driver = False
return self._apic_allowed_vm_name_driver
@log.log_method_call
def ensure_tenant(self, plugin_context, tenant_id):
self.aim_mech_driver.ensure_tenant(plugin_context, tenant_id)
def aim_display_name(self, name):
return aim_utils.sanitize_display_name(name)
def _use_implicit_address_scope(self, context, ip_version, **kwargs):
# Ensure ipv4 and ipv6 address scope have same vrf
kwargs = {}
if context.saved_scope_vrf:
kwargs.update({cisco_apic.DIST_NAMES: context.saved_scope_vrf})
address_scope = super(AIMMappingDriver,
self)._use_implicit_address_scope(context,
ip_version,
**kwargs)
context.saved_scope_vrf = address_scope[cisco_apic.DIST_NAMES]
return address_scope
# TODO(tbachman): remove once non-isomorphic address scopes
# are supported
def _validate_address_scopes(self, context):
l3p_db = context._plugin._get_l3_policy(
context._plugin_context, context.current['id'])
v4_scope_id = l3p_db['address_scope_v4_id']
v6_scope_id = l3p_db['address_scope_v6_id']
if v4_scope_id and v6_scope_id:
v4_scope = self._get_address_scope(
context._plugin_context, v4_scope_id)
v6_scope = self._get_address_scope(
context._plugin_context, v6_scope_id)
if (v4_scope[cisco_apic.DIST_NAMES][cisco_apic.VRF] !=
v6_scope[cisco_apic.DIST_NAMES][cisco_apic.VRF]):
raise InvalidVrfForDualStackAddressScopes()
@log.log_method_call
def create_l3_policy_precommit(self, context):
l3p_req = context.current
self._check_l3policy_ext_segment(context, l3p_req)
self._validate_address_scopes(context)
# REVISIT: Check if the following constraint still holds.
if len(l3p_req['routers']) > 1:
raise exc.L3PolicyMultipleRoutersNotSupported()
# REVISIT: Validate non overlapping IPs in the same tenant.
# Currently this validation is not required for the
# AIM driver, and since the AIM driver is the only
# driver inheriting from this driver, we are okay
# without the check.
self._reject_invalid_router_access(context)
@log.log_method_call
def create_l3_policy_postcommit(self, context):
l3p_req = context.current
# Save VRF DN from v4 family address scope, if implicitly created,
# as we will need to reuse it if we also implicitly create a v6
# address scopes.
context.saved_scope_vrf = None
self._create_l3p_subnetpools_postcommit(context)
# Reset the temporarily saved scope.
context.saved_scope_vrf = None
if not l3p_req['routers']:
self._use_implicit_router(context)
if not l3p_req['external_segments']:
self._use_implicit_external_segment(context)
external_segments = l3p_req['external_segments']
if external_segments:
self._plug_l3p_routers_to_ext_segment(context, l3p_req,
external_segments)
self._create_implicit_contracts(context, l3p_req)
@log.log_method_call
def update_l3_policy_precommit(self, context):
self._reject_shared_update(context, 'l3_policy')
if context.current['routers'] != context.original['routers']:
raise exc.L3PolicyRoutersUpdateNotSupported()
# Currently there is no support for router update in l3p update.
# Added this check just in case it is supported in future.
self._reject_invalid_router_access(context)
self._validate_in_use_by_nsp(context)
self._update_l3p_subnetpools_precommit(context)
self._check_l3policy_ext_segment(context, context.current)
# TODO(Sumit): For extra safety add validation for address_scope change
@log.log_method_call
def update_l3_policy_postcommit(self, context):
self._update_l3p_subnetpools_postcommit(context)
l3p_orig = context.original
l3p_curr = context.current
old_segment_dict = l3p_orig['external_segments']
new_segment_dict = l3p_curr['external_segments']
if (l3p_curr['external_segments'] !=
l3p_orig['external_segments']):
new_segments = set(new_segment_dict.keys())
old_segments = set(old_segment_dict.keys())
removed = old_segments - new_segments
self._unplug_l3p_routers_from_ext_segment(context,
l3p_curr,
removed)
added_dict = {s: new_segment_dict[s]
for s in (new_segments - old_segments)}
if added_dict:
self._plug_l3p_routers_to_ext_segment(context,
l3p_curr,
added_dict)
@log.log_method_call
def delete_l3_policy_precommit(self, context):
pass
@log.log_method_call
def delete_l3_policy_postcommit(self, context):
external_segments = context.current['external_segments']
if external_segments:
self._unplug_l3p_routers_from_ext_segment(
context, context.current, list(external_segments.keys()), True)
self._delete_l3p_subnetpools_postcommit(context)
for router_id in context.current['routers']:
self._cleanup_router(context._plugin_context, router_id)
self._delete_implicit_contracts(context, context.current)
@log.log_method_call
def get_l3_policy_status(self, context):
# Not all of the neutron resources that l3_policy maps to
# have a status attribute, hence we derive the status
# from the AIM resources that the neutron resources map to
session = context._plugin_context.session
l3p_db = context._plugin._get_l3_policy(
context._plugin_context, context.current['id'])
mapped_aim_resources = []
# Note: Subnetpool is not mapped to any AIM resource, hence it is not
# considered for deriving the status
mapped_status = []
for ascp in self.L3P_ADDRESS_SCOPE_KEYS.values():
if l3p_db[ascp]:
ascp_id = l3p_db[ascp]
ascope = self._get_address_scope(
context._plugin_context, ascp_id)
vrf_dn = ascope[cisco_apic.DIST_NAMES][cisco_apic.VRF]
aim_vrf = self._get_vrf_by_dn(context, vrf_dn)
mapped_aim_resources.append(aim_vrf)
routers = [router.router_id for router in l3p_db.routers]
for router_id in routers:
# elevated context is used here to enable router retrieval in
# shared L3P cases wherein the call to get_l3_policy might be
# made in the context of a different tenant
router = self._get_router(
context._plugin_context.elevated(), router_id)
mapped_status.append(
{'status': self._map_ml2plus_status(router)})
mapped_status.append({'status': self._merge_aim_status(
session, mapped_aim_resources)})
context.current['status'] = self._merge_gbp_status(mapped_status)
@log.log_method_call
def create_l2_policy_precommit(self, context):
self._reject_invalid_network_access(context)
self._reject_non_shared_net_on_shared_l2p(context)
@log.log_method_call
def create_l2_policy_postcommit(self, context):
if not context.current['l3_policy_id']:
self._use_implicit_l3_policy(context)
l3p_db = context._plugin._get_l3_policy(
context._plugin_context, context.current['l3_policy_id'])
if not context.current['network_id']:
self._use_implicit_network(
context, address_scope_v4=l3p_db['address_scope_v4_id'],
address_scope_v6=l3p_db['address_scope_v6_id'])
l2p = context.current
net = self._get_network(context._plugin_context,
l2p['network_id'])
default_epg_dn = net[cisco_apic.DIST_NAMES][cisco_apic.EPG]
self._configure_contracts_for_default_epg(
context, l3p_db, default_epg_dn)
if self.create_auto_ptg:
default_epg = self._get_epg_by_dn(context, default_epg_dn)
desc = "System created PTG for L2P (UUID: %s)" % l2p['id']
data = {
"id": self._get_auto_ptg_id(l2p['id']),
"name": self._get_auto_ptg_name(l2p),
"description": desc,
"l2_policy_id": l2p['id'],
"proxied_group_id": None,
"proxy_type": None,
"proxy_group_id": n_constants.ATTR_NOT_SPECIFIED,
"network_service_policy_id": None,
"service_management": False,
"shared": l2p['shared'],
"intra_ptg_allow":
self._map_policy_enforcement_pref(default_epg),
}
self._create_policy_target_group(context._plugin_context, data)
@log.log_method_call
def update_l2_policy_precommit(self, context):
super(AIMMappingDriver, self).update_l2_policy_precommit(context)
@log.log_method_call
def update_l2_policy_postcommit(self, context):
pass
@log.log_method_call
def delete_l2_policy_precommit(self, context):
l2p_id = context.current['id']
auto_ptg_id = self._get_auto_ptg_id(l2p_id)
try:
auto_ptg = context._plugin._get_policy_target_group(
context._plugin_context, auto_ptg_id)
if auto_ptg['l2_policy_id']:
auto_ptg.update({'l2_policy_id': None})
except gpolicy.PolicyTargetGroupNotFound:
LOG.info("Auto PTG with ID %(id)s for "
"for L2P %(l2p)s not found. If create_auto_ptg "
"configuration was not set at the time of the L2P "
"creation, you can safely ignore this, else this "
"could potentially be indication of an error.",
{'id': auto_ptg_id, 'l2p': l2p_id})
@log.log_method_call
def delete_l2_policy_postcommit(self, context):
auto_ptg_id = self._get_auto_ptg_id(context.current['id'])
try:
auto_ptg = context._plugin._get_policy_target_group(
context._plugin_context, auto_ptg_id)
l3p_db = context._plugin._get_l3_policy(
context._plugin_context, context.current['l3_policy_id'])
subnet_ids = [assoc['subnet_id'] for assoc in auto_ptg.subnets]
router_ids = [assoc.router_id for assoc in l3p_db.routers]
context._plugin._remove_subnets_from_policy_target_group(
context._plugin_context, auto_ptg_id)
self._process_subnets_for_ptg_delete(
context, subnet_ids, router_ids)
# REVISIT: Consider calling the actual GBP plugin instead
# of it's base DB mixin class, eliminating the need to
# call _process_subnets_for_ptg_delete above.
self._db_plugin(
context._plugin).delete_policy_target_group(
context._plugin_context, auto_ptg['id'])
except gpolicy.PolicyTargetGroupNotFound:
# Logged in precommit.
pass
super(AIMMappingDriver, self).delete_l2_policy_postcommit(context)
@log.log_method_call
def get_l2_policy_status(self, context):
l2p_db = context._plugin._get_l2_policy(
context._plugin_context, context.current['id'])
net = self._get_network(context._plugin_context,
l2p_db['network_id'])
if net:
context.current['status'] = net['status']
default_epg_dn = net[cisco_apic.DIST_NAMES][cisco_apic.EPG]
l3p_db = context._plugin._get_l3_policy(
context._plugin_context, l2p_db['l3_policy_id'])
aim_resources = self._get_implicit_contracts_for_default_epg(
context, l3p_db, default_epg_dn)
aim_resources_list = []
for k in aim_resources.keys():
if not aim_resources[k] or not all(
x for x in aim_resources[k]):
# We expected an AIM mapped resource but did not find
# it, so something seems to be wrong
context.current['status'] = gp_const.STATUS_ERROR
return
aim_resources_list.extend(aim_resources[k])
merged_aim_status = self._merge_aim_status(
context._plugin_context.session, aim_resources_list)
context.current['status'] = self._merge_gbp_status(
[context.current, {'status': merged_aim_status}])
else:
context.current['status'] = gp_const.STATUS_ERROR
@log.log_method_call
def create_policy_target_group_precommit(self, context):
if self._is_auto_ptg(context.current):
if context.current['application_policy_group_id']:
raise ExplicitAPGAssociationNotSupportedForAutoPTG()
return
if context.current['subnets']:
raise alib.ExplicitSubnetAssociationNotSupported()
@log.log_method_call
def create_policy_target_group_postcommit(self, context):
if self._is_auto_ptg(context.current):
self._use_implicit_subnet(context)
self._handle_create_network_service_policy(context)
return
if not context.current['l2_policy_id']:
self._use_implicit_l2_policy(context)
self._use_implicit_subnet(context)
self._handle_create_network_service_policy(context)
with db_api.CONTEXT_WRITER.using(context) as session:
l2p_db = context._plugin._get_l2_policy(
context._plugin_context, context.current['l2_policy_id'])
net = self._get_network(
context._plugin_context, l2p_db['network_id'])
bd = self.aim_mech_driver.get_bd_for_network(session, net)
provided_contracts = self._get_aim_contract_names(
session, context.current['provided_policy_rule_sets'])
consumed_contracts = self._get_aim_contract_names(
session, context.current['consumed_policy_rule_sets'])
self._create_aim_ap_for_ptg_conditionally(context, context.current)
aim_epg = self._aim_endpoint_group(
session, context.current, bd.name, bd.tenant_name,
provided_contracts=provided_contracts,
consumed_contracts=consumed_contracts,
policy_enforcement_pref=(
self._get_policy_enforcement_pref(context.current)))
# AIM EPG will be persisted in the following call
self._add_implicit_svc_contracts_to_epg(context, l2p_db, aim_epg)
@log.log_method_call
def update_policy_target_group_precommit(self, context):
self._reject_shared_update(context, 'policy_target_group')
session = context._plugin_context.session
old_provided_contracts = self._get_aim_contract_names(
session, context.original['provided_policy_rule_sets'])
old_consumed_contracts = self._get_aim_contract_names(
session, context.original['consumed_policy_rule_sets'])
new_provided_contracts = self._get_aim_contract_names(
session, context.current['provided_policy_rule_sets'])
new_consumed_contracts = self._get_aim_contract_names(
session, context.current['consumed_policy_rule_sets'])
if (context.current['network_service_policy_id'] !=
context.original['network_service_policy_id']):
self._validate_nat_pool_for_nsp(context)
# The "original" version of the ptg is being used here since we
# want to retrieve the aim_epg based on the existing AP that is
# a part of its identity
aim_epg = self._get_aim_endpoint_group(session, context.original)
if aim_epg:
if not self._is_auto_ptg(context.current):
aim_epg.display_name = (
self.aim_display_name(context.current['name']))
if (context.current['application_policy_group_id'] !=
context.original['application_policy_group_id']):
ap = self._create_aim_ap_for_ptg_conditionally(
context, context.current)
aim_epg = self._move_epg_to_new_ap(context, aim_epg, ap)
self._delete_aim_ap_for_ptg_conditionally(
context, context.original)
elif context.current['application_policy_group_id']:
raise ExplicitAPGAssociationNotSupportedForAutoPTG()
aim_epg.policy_enforcement_pref = (
self._get_policy_enforcement_pref(context.current))
aim_epg.provided_contract_names = (
list((set(aim_epg.provided_contract_names) -
set(old_provided_contracts)) |
set(new_provided_contracts)))
aim_epg.consumed_contract_names = (
list((set(aim_epg.consumed_contract_names) -
set(old_consumed_contracts)) |
set(new_consumed_contracts)))
self._add_contracts_for_epg(
aim_context.AimContext(session), aim_epg)
@log.log_method_call
def update_policy_target_group_postcommit(self, context):
if (context.current['network_service_policy_id'] !=
context.original['network_service_policy_id']):
self._handle_nsp_update_on_ptg(context)
if (context.current['application_policy_group_id'] !=
context.original['application_policy_group_id']):
ptargets = context._plugin.get_policy_targets(
context._plugin_context, {'policy_target_group_id':
[context.current['id']]})
for pt in ptargets:
self.aim_mech_driver._notify_port_update(
context._plugin_context, pt['port_id'])
@log.log_method_call
def delete_policy_target_group_precommit(self, context):
plugin_context = context._plugin_context
auto_ptg_id = self._get_auto_ptg_id(context.current['l2_policy_id'])
context.nsp_cleanup_ipaddress = self._get_ptg_policy_ipaddress_mapping(
context._plugin_context, context.current['id'])
context.nsp_cleanup_fips = self._get_ptg_policy_fip_mapping(
context._plugin_context, context.current['id'])
if context.current['id'] == auto_ptg_id:
raise AutoPTGDeleteNotSupported(id=context.current['id'])
ptg_db = context._plugin._get_policy_target_group(
plugin_context, context.current['id'])
context.subnet_ids = [assoc['subnet_id'] for assoc in ptg_db.subnets]
context.router_ids = [assoc.router_id for assoc in
ptg_db.l2_policy.l3_policy.routers]
session = context._plugin_context.session
aim_ctx = self._get_aim_context(context)
epg = self._aim_endpoint_group(session, context.current)
self.aim.delete(aim_ctx, epg)
self._delete_aim_ap_for_ptg_conditionally(context, ptg_db)
if ptg_db['network_service_policy_id']:
# Also called from _cleanup_network_service_policy during
# postcommit, but needed during precommit to avoid foreign
# key constraint error.
self._delete_policy_ipaddress_mapping(plugin_context, ptg_db['id'])
@log.log_method_call
def delete_policy_target_group_postcommit(self, context):
self._process_subnets_for_ptg_delete(
context, context.subnet_ids, context.router_ids)
ptg = context.current.copy()
ptg['subnets'] = []
l2p_id = ptg['l2_policy_id']
if l2p_id:
l2p_db = context._plugin._get_l2_policy(
context._plugin_context, l2p_id)
if not l2p_db['policy_target_groups'] or (
(len(l2p_db['policy_target_groups']) == 1) and (
self._is_auto_ptg(l2p_db['policy_target_groups'][0]))):
self._cleanup_l2_policy(context, l2p_id)
if ptg['network_service_policy_id']:
# REVISIT: Note that the RMD puts the following call in a
# try/except block since in deployment it was observed
# that there are certain situations where
# sa_exc.ObjectDeletedError is thrown.
self._cleanup_network_service_policy(
context, ptg, context.nsp_cleanup_ipaddress,
context.nsp_cleanup_fips)
@log.log_method_call
def extend_policy_target_group_dict(self, session, result):
epg = self._aim_endpoint_group(session, result)
if epg:
result[cisco_apic.DIST_NAMES] = {cisco_apic.EPG: epg.dn}
@log.log_method_call
def get_policy_target_group_status(self, context):
session = context._plugin_context.session
epg = self._aim_endpoint_group(session, context.current)
context.current['status'] = self._map_aim_status(session, epg)
@log.log_method_call
def create_application_policy_group_precommit(self, context):
pass
@log.log_method_call
def create_application_policy_group_postcommit(self, context):
pass
@log.log_method_call
def update_application_policy_group_precommit(self, context):
pass
@log.log_method_call
def update_application_policy_group_postcommit(self, context):
pass
@log.log_method_call
def delete_application_policy_group_precommit(self, context):
pass
@log.log_method_call
def delete_application_policy_group_postcommit(self, context):
pass
def _get_application_profiles_mapped_to_apg(self, session, apg):
aim_ctx = aim_context.AimContext(session)
ap_name = self.apic_ap_name_for_application_policy_group(
session, apg['id'])
return self.aim.find(
aim_ctx, aim_resource.ApplicationProfile, name=ap_name)
@log.log_method_call
def extend_application_policy_group_dict(self, session, result):
aim_aps = self._get_application_profiles_mapped_to_apg(session, result)
dn_list = [ap.dn for ap in aim_aps]
result[cisco_apic.DIST_NAMES] = {cisco_apic.AP: dn_list}
@log.log_method_call
def get_application_policy_group_status(self, context):
session = context._plugin_context.session
aim_aps = self._get_application_profiles_mapped_to_apg(
session, context.current)
context.current['status'] = self._merge_aim_status(
context._plugin_context.session, aim_aps)
@log.log_method_call
def create_policy_target_precommit(self, context):
context.ptg = self._get_policy_target_group(
context._plugin_context, context.current['policy_target_group_id'])
policy.enforce(context._plugin_context, 'get_policy_target_group',
context.ptg, pluralized='policy_target_groups')
if context.current['port_id']:
# Explicit port case.
#
# REVISIT: Add port extension to specify the EPG so the
# mechanism driver can take care of domain association
# itself.
port_context = self.aim_mech_driver.make_port_context(
context._plugin_context, context.current['port_id'])
self.aim_mech_driver.associate_domain(port_context)
@log.log_method_call
def create_policy_target_postcommit(self, context):
if not context.current['port_id']:
# Implicit port case.
subnets = self._get_subnets(
context._plugin_context, {'id': context.ptg['subnets']})
self._use_implicit_port(context, subnets=subnets)
self._associate_fip_to_pt(context)
@log.log_method_call
def update_policy_target_precommit(self, context):
pass
@log.log_method_call
def update_policy_target_postcommit(self, context):
if self.apic_segmentation_label_driver and (
set(context.current['segmentation_labels']) != (
set(context.original['segmentation_labels']))):
self.aim_mech_driver._notify_port_update(
context._plugin_context, context.current['port_id'])
@log.log_method_call
def delete_policy_target_precommit(self, context):
if context.current.get('port_id'):
# REVISIT: Add port extension to specify the EPG so the
# mechanism driver can take care of domain association
# itself.
port_context = self.aim_mech_driver.make_port_context(
context._plugin_context, context.current['port_id'])
self.aim_mech_driver.disassociate_domain(port_context)
@log.log_method_call
def delete_policy_target_postcommit(self, context):
fips = self._get_pt_floating_ip_mapping(
context._plugin_context, context.current['id'])
for fip in fips:
self._delete_fip(context._plugin_context, fip.floatingip_id)
self._cleanup_port(
context._plugin_context, context.current.get('port_id'))
@log.log_method_call
def get_policy_target_status(self, context):
pass
@log.log_method_call
def create_policy_classifier_precommit(self, context):
pass
@log.log_method_call
def create_policy_classifier_postcommit(self, context):
pass
@log.log_method_call
def update_policy_classifier_precommit(self, context):
o_dir = context.original['direction']
c_dir = context.current['direction']
o_prot = context.original['protocol']
c_prot = context.current['protocol']
o_port_min, o_port_max = (
gpmdb.GroupPolicyMappingDbPlugin._get_min_max_ports_from_range(
context.original['port_range']))
c_port_min, c_port_max = (
gpmdb.GroupPolicyMappingDbPlugin._get_min_max_ports_from_range(
context.current['port_range']))
if ((o_dir == c_dir) and (o_prot == c_prot) and (
o_port_min == c_port_min) and (o_port_max == c_port_max)):
# none of the fields relevant to the aim_mapping have changed
# so no further processing is required
return
prules = self._db_plugin(context._plugin).get_policy_rules(
context._plugin_context,
filters={'policy_classifier_id': [context.current['id']]})
if not prules:
            # this policy_classifier has not yet been associated with
# a policy_rule and hence will not have any mapped aim
# resources
return
prule_ids = [x['id'] for x in prules]
prule_sets = self._get_prss_for_policy_rules(context, prule_ids)
for pr in prules:
session = context._plugin_context.session
aim_ctx = self._get_aim_context(context)
# delete old filter_entries
self._delete_filter_entries_for_policy_rule(
session, aim_ctx, pr)
aim_filter = self._aim_filter(session, pr)
aim_reverse_filter = self._aim_filter(
session, pr, reverse_prefix=True)
entries = alib.get_filter_entries_for_policy_classifier(
context.current)
remove_aim_reverse_filter = None
if not entries['reverse_rules']:
# the updated classifier's protocol does not have
# reverse filter_entries
if self.aim.get(aim_ctx, aim_reverse_filter):
# so remove the older reverse filter if it exists
self.aim.delete(aim_ctx, aim_reverse_filter)
remove_aim_reverse_filter = aim_reverse_filter.name
                # Unset the reverse filter name so that it's not
# used in further processing
aim_reverse_filter.name = None
# create new filter_entries mapping to the updated
# classifier and associated with aim_filters
self._create_policy_rule_aim_mappings(
session, aim_ctx, pr, entries)
# update contract_subject to put the filter in the
# appropriate in/out buckets corresponding to the
# updated direction of the policy_classifier
if remove_aim_reverse_filter or (o_dir != c_dir):
for prs in prule_sets:
aim_contract_subject = self._get_aim_contract_subject(
session, prs)
# Remove the older reverse filter if needed
for filters in [aim_contract_subject.in_filters,
aim_contract_subject.out_filters]:
if remove_aim_reverse_filter in filters:
filters.remove(remove_aim_reverse_filter)
if o_dir != c_dir:
# First remove the filter from the older
# direction list
for flist in [aim_contract_subject.in_filters,
aim_contract_subject.out_filters]:
for fname in [aim_filter.name,
aim_reverse_filter.name]:
if fname in flist:
flist.remove(fname)
# Now add it to the relevant direction list(s)
if c_dir == g_const.GP_DIRECTION_IN:
aim_contract_subject.in_filters.append(
aim_filter.name)
aim_contract_subject.out_filters.append(
aim_reverse_filter.name)
elif c_dir == g_const.GP_DIRECTION_OUT:
aim_contract_subject.in_filters.append(
aim_reverse_filter.name)
aim_contract_subject.out_filters.append(
aim_filter.name)
else:
aim_contract_subject.in_filters.append(
aim_filter.name)
aim_contract_subject.out_filters.append(
aim_reverse_filter.name)
aim_contract_subject.in_filters.append(
aim_reverse_filter.name)
aim_contract_subject.out_filters.append(
aim_filter.name)
self.aim.create(aim_ctx, aim_contract_subject,
overwrite=True)
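    # To illustrate the re-bucketing above: after an update to direction
    # 'in', each affected contract subject carries the rule's forward filter
    # in in_filters and its reverse filter (if any) in out_filters; 'out'
    # swaps the two buckets, and 'bi' places both filters in both buckets.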
@log.log_method_call
def update_policy_classifier_postcommit(self, context):
pass
@log.log_method_call
def delete_policy_classifier_precommit(self, context):
pass
@log.log_method_call
def delete_policy_classifier_postcommit(self, context):
pass
@log.log_method_call
def get_policy_classifier_status(self, context):
pass
@log.log_method_call
def create_policy_action_precommit(self, context):
pass
@log.log_method_call
def create_policy_action_postcommit(self, context):
pass
@log.log_method_call
def update_policy_action_precommit(self, context):
pass
@log.log_method_call
def update_policy_action_postcommit(self, context):
pass
@log.log_method_call
def delete_policy_action_precommit(self, context):
pass
@log.log_method_call
def delete_policy_action_postcommit(self, context):
pass
@log.log_method_call
def get_policy_action_status(self, context):
pass
@log.log_method_call
def create_policy_rule_precommit(self, context):
entries = alib.get_filter_entries_for_policy_rule(context)
session = context._plugin_context.session
aim_ctx = self._get_aim_context(context)
self._create_policy_rule_aim_mappings(
session, aim_ctx, context.current, entries)
@log.log_method_call
def create_policy_rule_postcommit(self, context):
pass
@log.log_method_call
def update_policy_rule_precommit(self, context):
self.delete_policy_rule_precommit(context)
self.create_policy_rule_precommit(context)
@log.log_method_call
def update_policy_rule_postcommit(self, context):
pass
@log.log_method_call
def delete_policy_rule_precommit(self, context):
session = context._plugin_context.session
aim_ctx = self._get_aim_context(context)
self._delete_filter_entries_for_policy_rule(session,
aim_ctx, context.current)
aim_filter = self._aim_filter(session, context.current)
aim_reverse_filter = self._aim_filter(
session, context.current, reverse_prefix=True)
for afilter in filter(None, [aim_filter, aim_reverse_filter]):
self.aim.delete(aim_ctx, afilter)
@log.log_method_call
def delete_policy_rule_postcommit(self, context):
pass
@log.log_method_call
def extend_policy_rule_dict(self, session, result):
result[cisco_apic.DIST_NAMES] = {}
aim_filter_entries = self._get_aim_filter_entries(session, result)
for k, v in six.iteritems(aim_filter_entries):
dn_list = []
for entry in v:
dn_list.append(entry.dn)
if k == FORWARD:
result[cisco_apic.DIST_NAMES].update(
{aim_ext.FORWARD_FILTER_ENTRIES: dn_list})
else:
result[cisco_apic.DIST_NAMES].update(
{aim_ext.REVERSE_FILTER_ENTRIES: dn_list})
@log.log_method_call
def get_policy_rule_status(self, context):
session = context._plugin_context.session
aim_filters = self._get_aim_filters(session, context.current)
aim_filter_entries = self._get_aim_filter_entries(
session, context.current)
context.current['status'] = self._merge_aim_status(
session,
list(aim_filters.values()) + list(aim_filter_entries.values()))
@log.log_method_call
def create_policy_rule_set_precommit(self, context):
if context.current['child_policy_rule_sets']:
raise alib.HierarchicalContractsNotSupported()
aim_ctx = self._get_aim_context(context)
session = context._plugin_context.session
aim_contract = self._aim_contract(session, context.current)
self.aim.create(aim_ctx, aim_contract)
rules = self._db_plugin(context._plugin).get_policy_rules(
context._plugin_context,
filters={'id': context.current['policy_rules']})
self._populate_aim_contract_subject(context, aim_contract, rules)
@log.log_method_call
def create_policy_rule_set_postcommit(self, context):
pass
@log.log_method_call
def update_policy_rule_set_precommit(self, context):
if context.current['child_policy_rule_sets']:
raise alib.HierarchicalContractsNotSupported()
session = context._plugin_context.session
aim_contract = self._aim_contract(session, context.current)
rules = self._db_plugin(context._plugin).get_policy_rules(
context._plugin_context,
filters={'id': context.current['policy_rules']})
self._populate_aim_contract_subject(
context, aim_contract, rules)
@log.log_method_call
def update_policy_rule_set_postcommit(self, context):
pass
@log.log_method_call
def delete_policy_rule_set_precommit(self, context):
aim_ctx = self._get_aim_context(context)
session = context._plugin_context.session
aim_contract = self._aim_contract(session, context.current)
self._delete_aim_contract_subject(aim_ctx, aim_contract)
self.aim.delete(aim_ctx, aim_contract)
@log.log_method_call
def delete_policy_rule_set_postcommit(self, context):
pass
@log.log_method_call
def extend_policy_rule_set_dict(self, session, result):
result[cisco_apic.DIST_NAMES] = {}
aim_contract = self._aim_contract(session, result)
aim_contract_subject = self._aim_contract_subject(aim_contract)
result[cisco_apic.DIST_NAMES].update(
{aim_ext.CONTRACT: aim_contract.dn,
aim_ext.CONTRACT_SUBJECT: aim_contract_subject.dn})
@log.log_method_call
def get_policy_rule_set_status(self, context):
session = context._plugin_context.session
aim_contract = self._aim_contract(session, context.current)
aim_contract_subject = self._aim_contract_subject(aim_contract)
context.current['status'] = self._merge_aim_status(
session, [aim_contract, aim_contract_subject])
@log.log_method_call
def create_external_segment_precommit(self, context):
self._validate_default_external_segment(context)
if not context.current['subnet_id']:
raise exc.ImplicitSubnetNotSupported()
subnet = self._get_subnet(context._plugin_context,
context.current['subnet_id'])
network = self._get_network(context._plugin_context,
subnet['network_id'])
if not network['router:external']:
raise exc.InvalidSubnetForES(sub_id=subnet['id'],
net_id=network['id'])
db_es = context._plugin._get_external_segment(
context._plugin_context, context.current['id'])
db_es.cidr = subnet['cidr']
db_es.ip_version = subnet['ip_version']
context.current['cidr'] = db_es.cidr
context.current['ip_version'] = db_es.ip_version
context.network_id = subnet['network_id']
@log.log_method_call
def create_external_segment_postcommit(self, context):
cidrs = sorted([x['destination']
for x in context.current['external_routes']])
self._update_network(context._plugin_context,
context.network_id,
{cisco_apic.EXTERNAL_CIDRS: cidrs})
@log.log_method_call
def update_external_segment_precommit(self, context):
# REVISIT: what other attributes should we prevent an update on?
invalid = ['port_address_translation']
for attr in invalid:
if context.current[attr] != context.original[attr]:
raise exc.InvalidAttributeUpdateForES(attribute=attr)
@log.log_method_call
def update_external_segment_postcommit(self, context):
old_cidrs = sorted([x['destination']
for x in context.original['external_routes']])
new_cidrs = sorted([x['destination']
for x in context.current['external_routes']])
if old_cidrs != new_cidrs:
subnet = self._get_subnet(context._plugin_context,
context.current['subnet_id'])
self._update_network(context._plugin_context,
subnet['network_id'],
{cisco_apic.EXTERNAL_CIDRS: new_cidrs})
@log.log_method_call
def delete_external_segment_precommit(self, context):
pass
@log.log_method_call
def delete_external_segment_postcommit(self, context):
subnet = self._get_subnet(context._plugin_context,
context.current['subnet_id'])
self._update_network(context._plugin_context,
subnet['network_id'],
{cisco_apic.EXTERNAL_CIDRS: ['0.0.0.0/0']})
@log.log_method_call
def get_external_segment_status(self, context):
pass
@log.log_method_call
def create_external_policy_precommit(self, context):
self._check_external_policy(context, context.current)
@log.log_method_call
def create_external_policy_postcommit(self, context):
if not context.current['external_segments']:
self._use_implicit_external_segment(context)
routers = self._get_ext_policy_routers(context,
context.current, context.current['external_segments'])
for r in routers:
self._set_router_ext_contracts(context, r, context.current)
@log.log_method_call
def update_external_policy_precommit(self, context):
self._check_external_policy(context, context.current)
@log.log_method_call
def update_external_policy_postcommit(self, context):
ep = context.current
old_ep = context.original
removed_segments = (set(old_ep['external_segments']) -
set(ep['external_segments']))
added_segment = (set(ep['external_segments']) -
set(old_ep['external_segments']))
if removed_segments:
routers = self._get_ext_policy_routers(context, ep,
removed_segments)
for r in routers:
self._set_router_ext_contracts(context, r, None)
if (added_segment or
sorted(old_ep['provided_policy_rule_sets']) !=
sorted(ep['provided_policy_rule_sets']) or
sorted(old_ep['consumed_policy_rule_sets']) !=
sorted(ep['consumed_policy_rule_sets'])):
routers = self._get_ext_policy_routers(context, ep,
ep['external_segments'])
for r in routers:
self._set_router_ext_contracts(context, r, ep)
@log.log_method_call
def delete_external_policy_precommit(self, context):
pass
@log.log_method_call
def delete_external_policy_postcommit(self, context):
routers = self._get_ext_policy_routers(context,
context.current, context.current['external_segments'])
for r in routers:
self._set_router_ext_contracts(context, r, None)
@log.log_method_call
def get_external_policy_status(self, context):
pass
@log.log_method_call
def create_network_service_policy_precommit(self, context):
self._validate_nsp_parameters(context)
@log.log_method_call
def create_network_service_policy_postcommit(self, context):
pass
@log.log_method_call
def update_network_service_policy_precommit(self, context):
self._validate_nsp_parameters(context)
@log.log_method_call
def update_network_service_policy_postcommit(self, context):
pass
@log.log_method_call
def delete_network_service_policy_precommit(self, context):
pass
@log.log_method_call
def delete_network_service_policy_postcommit(self, context):
pass
@log.log_method_call
def get_network_service_policy_status(self, context):
pass
@log.log_method_call
def create_nat_pool_precommit(self, context):
self._add_nat_pool_to_segment(context)
@log.log_method_call
def create_nat_pool_postcommit(self, context):
self._add_implicit_subnet_for_nat_pool_create(context)
@log.log_method_call
def update_nat_pool_precommit(self, context):
self._process_ext_segment_update_for_nat_pool(context)
@log.log_method_call
def update_nat_pool_postcommit(self, context):
self._add_implicit_subnet_for_nat_pool_update(context)
@log.log_method_call
def delete_nat_pool_precommit(self, context):
self._nat_pool_in_use(context)
@log.log_method_call
def delete_nat_pool_postcommit(self, context):
self._delete_subnet_on_nat_pool_delete(context)
@log.log_method_call
def get_nat_pool_status(self, context):
pass
# REVISIT: Called by mechanism driver during port
# binding. Consider replacing with a more general hook for the PD
# to participate in port binding. Or consider removing/replacing
    # this feature, since VM names should not affect behavior.
def check_allow_vm_names(self, context, port):
ok_to_bind = True
ptg, pt = self._port_id_to_ptg(context._plugin_context, port['id'])
# enforce the allowed_vm_names rules if possible
if (ptg and port['device_id'] and
self.apic_allowed_vm_name_driver):
l2p = self._get_l2_policy(context._plugin_context,
ptg['l2_policy_id'])
l3p = self.gbp_plugin.get_l3_policy(
context._plugin_context, l2p['l3_policy_id'])
if l3p.get('allowed_vm_names'):
ok_to_bind = False
vm = nova_client.NovaClient().get_server(port['device_id'])
for allowed_vm_name in l3p['allowed_vm_names']:
match = re.search(allowed_vm_name, vm.name)
if match:
ok_to_bind = True
break
if not ok_to_bind:
LOG.warning("Failed to bind the port due to "
"allowed_vm_names rules %(rules)s "
"for VM: %(vm)s",
{'rules': l3p['allowed_vm_names'],
'vm': vm.name})
return ok_to_bind
# REVISIT: Called by mechanism driver when disassociating a
# domain. Consider a more general way for neutron ports to be
# bound using a non-default EPG.
def get_ptg_port_ids(self, context, ptg):
pts = self.gbp_plugin.get_policy_targets(
context, {'id': ptg['policy_targets']})
return [x['port_id'] for x in pts]
def _reject_shared_update(self, context, type):
if context.original.get('shared') != context.current.get('shared'):
raise SharedAttributeUpdateNotSupported(type=type)
def _aim_tenant_name(self, session, tenant_id, aim_resource_class=None,
gbp_resource=None, gbp_obj=None):
if aim_resource_class and (
aim_resource_class.__name__ in COMMON_TENANT_AIM_RESOURCES):
# COMMON_TENANT_AIM_RESOURCES will always be created in the
# ACI common tenant
aim_ctx = aim_context.AimContext(session)
self.aim_mech_driver._ensure_common_tenant(aim_ctx)
tenant_name = md.COMMON_TENANT_NAME
else:
l3p_id = None
if aim_resource_class.__name__ == (
aim_resource.EndpointGroup.__name__):
# the gbp_obj here should be a ptg
l2p_id = gbp_obj['l2_policy_id']
if l2p_id:
query = BAKERY(lambda s: s.query(
gpmdb.L2PolicyMapping))
query += lambda q: q.filter_by(
id=sa.bindparam('l2p_id'))
l2p_db = query(session).params(
l2p_id=l2p_id).first()
l3p_id = l2p_db['l3_policy_id']
elif aim_resource_class.__name__ == (
aim_resource.BridgeDomain.__name__):
# the gbp_obj here should be a l2p
l3p_id = gbp_obj['l3_policy_id']
if l3p_id:
query = BAKERY(lambda s: s.query(
gpmdb.L3PolicyMapping))
query += lambda q: q.filter_by(
id=sa.bindparam('l3p_id'))
l3p_db = query(session).params(
l3p_id=l3p_id).first()
tenant_id = l3p_db['tenant_id']
tenant_name = self.name_mapper.project(session, tenant_id)
LOG.debug("Mapped tenant_id %(id)s to %(apic_name)s",
{'id': tenant_id, 'apic_name': tenant_name})
return tenant_name
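    # For example, resources named in COMMON_TENANT_AIM_RESOURCES are always
    # placed in the ACI common tenant, while an EndpointGroup or BridgeDomain
    # is mapped to the tenant of its owning L3 policy (resolved through the
    # L2 policy when necessary) via name_mapper.project().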
def _aim_application_profile_for_ptg(self, context, ptg):
# This returns a new AIM ApplicationProfile resource if apg_id
# is set, else returns None
apg_id = ptg['application_policy_group_id']
if apg_id:
apg = context._plugin._get_application_policy_group(
context._plugin_context, apg_id)
return self._aim_application_profile(
context._plugin_context.session, apg)
def _aim_application_profile(self, session, apg):
# This returns a new AIM ApplicationProfile resource
tenant_id = apg['tenant_id']
tenant_name = self._aim_tenant_name(
session, tenant_id,
aim_resource_class=aim_resource.ApplicationProfile, gbp_obj=apg)
display_name = self.aim_display_name(apg['name'])
ap_name = self.apic_ap_name_for_application_policy_group(
session, apg['id'])
ap = aim_resource.ApplicationProfile(tenant_name=tenant_name,
display_name=display_name,
name=ap_name)
LOG.debug("Mapped apg_id %(id)s with name %(name)s to %(apic_name)s",
{'id': apg['id'], 'name': display_name,
'apic_name': ap_name})
return ap
def _get_aim_application_profile_for_ptg(self, context, ptg):
# This gets an AP from the AIM DB
ap = self._aim_application_profile_for_ptg(context, ptg)
if ap:
return self._get_aim_application_profile_from_db(
context._plugin_context.session, ap)
def _get_aim_application_profile_from_db(self, session, ap):
aim_ctx = aim_context.AimContext(session)
ap_fetched = self.aim.get(aim_ctx, ap)
if not ap_fetched:
LOG.debug("No ApplicationProfile found in AIM DB")
else:
LOG.debug("Got ApplicationProfile: %s", ap_fetched.__dict__)
return ap_fetched
def _create_aim_ap_for_ptg_conditionally(self, context, ptg):
if ptg and ptg['application_policy_group_id'] and not (
self._get_aim_application_profile_for_ptg(context, ptg)):
ap = self._aim_application_profile_for_ptg(context, ptg)
aim_ctx = aim_context.AimContext(context._plugin_context.session)
self.aim.create(aim_ctx, ap)
return ap
def _move_epg_to_new_ap(self, context, old_epg, new_ap):
session = context._plugin_context.session
aim_ctx = aim_context.AimContext(session)
self.aim.delete(aim_ctx, old_epg)
old_epg.app_profile_name = (
self.apic_ap_name_for_application_policy_group(
session, context.current['application_policy_group_id']))
self.aim.create(aim_ctx, old_epg)
return old_epg
def _delete_aim_ap_for_ptg_conditionally(self, context, ptg):
# It is assumed that this method is called after the EPG corresponding
# to the PTG has been deleted in AIM
if ptg and ptg['application_policy_group_id']:
ap = self._aim_application_profile_for_ptg(context, ptg)
apg_id = ptg['application_policy_group_id']
apg_db = context._plugin._get_application_policy_group(
context._plugin_context, apg_id)
if not apg_db['policy_target_groups'] or (
len(apg_db['policy_target_groups']) == 1 and (
apg_db['policy_target_groups'][0]['id'] == ptg['id'])):
# We lazily create the ApplicationProfile, so we delete
# it when the last PTG associated with this APG is deleted
aim_ctx = aim_context.AimContext(
context._plugin_context.session)
self.aim.delete(aim_ctx, ap)
# REVISIT: Called by mechanism driver when associating or
# disassociating a domain. Consider a more general way for neutron
# ports to be bound using a non-default EPG.
def _aim_endpoint_group(self, session, ptg, bd_name=None,
bd_tenant_name=None,
provided_contracts=None,
consumed_contracts=None,
policy_enforcement_pref=UNENFORCED):
# This returns a new AIM EPG resource
tenant_id = ptg['tenant_id']
tenant_name = self._aim_tenant_name(
session, tenant_id, aim_resource_class=aim_resource.EndpointGroup,
gbp_obj=ptg)
id = ptg['id']
name = ptg['name']
display_name = self.aim_display_name(ptg['name'])
ap_name = self.apic_ap_name_for_application_policy_group(
session, ptg['application_policy_group_id'])
epg_name = self.apic_epg_name_for_policy_target_group(
session, id, name)
LOG.debug("Using application_profile %(ap_name)s "
"for epg %(epg_name)s",
{'ap_name': ap_name, 'epg_name': epg_name})
LOG.debug("Mapped ptg_id %(id)s with name %(name)s to %(apic_name)s",
{'id': id, 'name': name, 'apic_name': epg_name})
kwargs = {'tenant_name': str(tenant_name),
'name': str(epg_name),
'display_name': display_name,
'app_profile_name': ap_name,
'policy_enforcement_pref': policy_enforcement_pref}
if bd_name:
kwargs['bd_name'] = bd_name
if bd_tenant_name:
kwargs['bd_tenant_name'] = bd_tenant_name
if provided_contracts:
kwargs['provided_contract_names'] = provided_contracts
if consumed_contracts:
kwargs['consumed_contract_names'] = consumed_contracts
epg = aim_resource.EndpointGroup(**kwargs)
return epg
def _get_aim_endpoint_group(self, session, ptg):
# This gets an EPG from the AIM DB
epg = self._aim_endpoint_group(session, ptg)
aim_ctx = aim_context.AimContext(session)
epg_fetched = self.aim.get(aim_ctx, epg)
if not epg_fetched:
LOG.debug("No EPG found in AIM DB")
else:
LOG.debug("Got epg: %s", vars(epg_fetched))
return epg_fetched
def _aim_filter(self, session, pr, reverse_prefix=False):
# This returns a new AIM Filter resource
tenant_id = pr['tenant_id']
tenant_name = self._aim_tenant_name(session, tenant_id,
aim_resource.Filter)
id = pr['id']
name = pr['name']
display_name = self.aim_display_name(pr['name'])
if reverse_prefix:
filter_name = self.name_mapper.policy_rule(
session, id, prefix=alib.REVERSE_PREFIX)
else:
filter_name = self.name_mapper.policy_rule(session, id)
LOG.debug("Mapped policy_rule_id %(id)s with name %(name)s to"
"%(apic_name)s",
{'id': id, 'name': name, 'apic_name': filter_name})
kwargs = {'tenant_name': str(tenant_name),
'name': str(filter_name),
'display_name': display_name}
aim_filter = aim_resource.Filter(**kwargs)
return aim_filter
def _aim_filter_entry(self, session, aim_filter, filter_entry_name,
filter_entry_attrs):
# This returns a new AIM FilterEntry resource
tenant_name = aim_filter.tenant_name
filter_name = aim_filter.name
display_name = self.aim_display_name(filter_name)
kwargs = {'tenant_name': tenant_name,
'filter_name': filter_name,
'name': filter_entry_name,
'display_name': display_name}
kwargs.update(filter_entry_attrs)
aim_filter_entry = aim_resource.FilterEntry(**kwargs)
return aim_filter_entry
def _create_policy_rule_aim_mappings(
self, session, aim_context, pr, entries):
if entries['forward_rules']:
aim_filter = self._aim_filter(session, pr)
self.aim.create(aim_context, aim_filter, overwrite=True)
self._create_aim_filter_entries(session, aim_context, aim_filter,
entries['forward_rules'])
if entries['reverse_rules']:
# Also create reverse rule
aim_filter = self._aim_filter(session, pr,
reverse_prefix=True)
self.aim.create(aim_context, aim_filter, overwrite=True)
self._create_aim_filter_entries(
session, aim_context, aim_filter, entries['reverse_rules'])
def _delete_aim_filter_entries(self, aim_context, aim_filter):
aim_filter_entries = self.aim.find(
aim_context, aim_resource.FilterEntry,
tenant_name=aim_filter.tenant_name,
filter_name=aim_filter.name)
for entry in aim_filter_entries:
self.aim.delete(aim_context, entry)
def _delete_filter_entries_for_policy_rule(self, session, aim_context, pr):
aim_filter = self._aim_filter(session, pr)
aim_reverse_filter = self._aim_filter(
session, pr, reverse_prefix=True)
for afilter in filter(None, [aim_filter, aim_reverse_filter]):
self._delete_aim_filter_entries(aim_context, afilter)
def _create_aim_filter_entries(self, session, aim_ctx, aim_filter,
filter_entries):
for k, v in six.iteritems(filter_entries):
self._create_aim_filter_entry(
session, aim_ctx, aim_filter, k, v)
def _create_aim_filter_entry(self, session, aim_ctx, aim_filter,
filter_entry_name, filter_entry_attrs,
overwrite=False):
aim_filter_entry = self._aim_filter_entry(
session, aim_filter, filter_entry_name,
alib.map_to_aim_filter_entry(filter_entry_attrs))
self.aim.create(aim_ctx, aim_filter_entry, overwrite)
def _get_aim_filters(self, session, policy_rule):
# This gets the Forward and Reverse Filters from the AIM DB
aim_ctx = aim_context.AimContext(session)
filters = {}
for k, v in six.iteritems(FILTER_DIRECTIONS):
aim_filter = self._aim_filter(session, policy_rule, v)
aim_filter_fetched = self.aim.get(aim_ctx, aim_filter)
if not aim_filter_fetched:
LOG.debug("No %s Filter found in AIM DB", k)
else:
LOG.debug("Got Filter: %s", vars(aim_filter_fetched))
filters[k] = aim_filter_fetched
return filters
def _get_aim_filter_names(self, session, policy_rule):
# Forward and Reverse AIM Filter names for a Policy Rule
aim_filters = self._get_aim_filters(session, policy_rule)
aim_filter_names = [f.name for f in aim_filters.values() if f]
return aim_filter_names
def _get_aim_filter_entries(self, session, policy_rule):
# This gets the Forward and Reverse FilterEntries from the AIM DB
aim_ctx = aim_context.AimContext(session)
filters = self._get_aim_filters(session, policy_rule)
filters_entries = {}
for k, v in six.iteritems(filters):
if v:
aim_filter_entries = self.aim.find(
aim_ctx, aim_resource.FilterEntry,
tenant_name=v.tenant_name, filter_name=v.name)
if not aim_filter_entries:
LOG.debug("No %s FilterEntry found in AIM DB", k)
else:
LOG.debug("Got FilterEntry: %s", str(aim_filter_entries))
filters_entries[k] = aim_filter_entries
return filters_entries
def _aim_contract(self, session, policy_rule_set):
# This returns a new AIM Contract resource
return aim_resource.Contract(
tenant_name=self._aim_tenant_name(
session, policy_rule_set['tenant_id'], aim_resource.Contract),
name=self.name_mapper.policy_rule_set(
session, policy_rule_set['id']),
display_name=policy_rule_set['name'])
def _aim_contract_subject(self, aim_contract, in_filters=None,
out_filters=None, bi_filters=None):
# This returns a new AIM ContractSubject resource
if not in_filters:
in_filters = []
if not out_filters:
out_filters = []
if not bi_filters:
bi_filters = []
display_name = self.aim_display_name(aim_contract.name)
# Since we create one ContractSubject per Contract,
# ContractSubject is given the Contract name
kwargs = {'tenant_name': aim_contract.tenant_name,
'contract_name': aim_contract.name,
'name': aim_contract.name,
'display_name': display_name,
'in_filters': in_filters,
'out_filters': out_filters,
'bi_filters': bi_filters}
aim_contract_subject = aim_resource.ContractSubject(**kwargs)
return aim_contract_subject
def _populate_aim_contract_subject(self, context, aim_contract,
policy_rules):
in_filters, out_filters = [], []
session = context._plugin_context.session
for rule in policy_rules:
aim_filters = self._get_aim_filter_names(session, rule)
classifier = context._plugin.get_policy_classifier(
context._plugin_context, rule['policy_classifier_id'])
if classifier['direction'] == g_const.GP_DIRECTION_IN:
for fltr in aim_filters:
if fltr.startswith(alib.REVERSE_PREFIX):
out_filters.append(fltr)
else:
in_filters.append(fltr)
elif classifier['direction'] == g_const.GP_DIRECTION_OUT:
for fltr in aim_filters:
if fltr.startswith(alib.REVERSE_PREFIX):
in_filters.append(fltr)
else:
out_filters.append(fltr)
else:
in_filters += aim_filters
out_filters += aim_filters
self._populate_aim_contract_subject_by_filters(
context, aim_contract, in_filters, out_filters)
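    # Example of the bucketing above: a rule whose classifier direction is
    # 'in' contributes its forward filter to in_filters and its reverse
    # filter (the one carrying alib.REVERSE_PREFIX) to out_filters; 'out'
    # does the opposite, and 'bi' adds the rule's filters to both lists.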
def _populate_aim_contract_subject_by_filters(
self, context, aim_contract, in_filters=None, out_filters=None,
bi_filters=None):
if not in_filters:
in_filters = []
if not out_filters:
out_filters = []
if not bi_filters:
bi_filters = []
aim_ctx = self._get_aim_context(context)
aim_contract_subject = self._aim_contract_subject(
aim_contract, in_filters, out_filters, bi_filters)
self.aim.create(aim_ctx, aim_contract_subject, overwrite=True)
def _get_aim_contract_names(self, session, prs_id_list):
contract_list = []
for prs_id in prs_id_list:
contract_name = self.name_mapper.policy_rule_set(session, prs_id)
contract_list.append(contract_name)
return contract_list
def _get_aim_contract_subject(self, session, policy_rule_set):
# This gets a ContractSubject from the AIM DB
aim_ctx = aim_context.AimContext(session)
contract = self._aim_contract(session, policy_rule_set)
contract_subject = self._aim_contract_subject(contract)
contract_subject_fetched = self.aim.get(aim_ctx, contract_subject)
if not contract_subject_fetched:
LOG.debug("No Contract found in AIM DB")
else:
LOG.debug("Got ContractSubject: %s",
vars(contract_subject_fetched))
return contract_subject_fetched
def _delete_aim_contract_subject(self, aim_context, aim_contract):
aim_contract_subject = self._aim_contract_subject(aim_contract)
self.aim.delete(aim_context, aim_contract_subject)
def _get_aim_default_endpoint_group(self, session, network):
return self.aim_mech_driver.get_epg_for_network(session, network)
def _get_l2p_subnets(self, context, l2p_id):
plugin_context = context._plugin_context
l2p = context._plugin.get_l2_policy(plugin_context, l2p_id)
# REVISIT: The following should be a get_subnets call via local API
return self._core_plugin.get_subnets_by_network(
plugin_context, l2p['network_id'])
def _sync_ptg_subnets(self, context, l2p):
l2p_subnets = [x['id'] for x in
self._get_l2p_subnets(context, l2p['id'])]
ptgs = context._plugin._get_policy_target_groups(
context._plugin_context.elevated(), {'l2_policy_id': [l2p['id']]})
for sub in l2p_subnets:
# Add to PTG
for ptg in ptgs:
if sub not in ptg['subnets']:
try:
(context._plugin.
_add_subnet_to_policy_target_group(
context._plugin_context.elevated(),
ptg['id'], sub))
except gpolicy.PolicyTargetGroupNotFound as e:
LOG.warning(e)
def _use_implicit_subnet(self, context, force_add=False):
"""Implicit subnet for AIM.
The first PTG in a L2P will allocate a new subnet from the L3P.
Any subsequent PTG in the same L2P will use the same subnet.
Additional subnets will be allocated as and when the currently used
subnet runs out of IP addresses.
"""
l2p_id = context.current['l2_policy_id']
with lockutils.lock(l2p_id, external=True):
subs = self._get_l2p_subnets(context, l2p_id)
subs = set([x['id'] for x in subs])
added = []
if not subs or force_add:
l2p = context._plugin.get_l2_policy(
context._plugin_context, l2p_id)
name = APIC_OWNED + l2p['name']
added = super(
AIMMappingDriver,
self)._use_implicit_subnet_from_subnetpool(
context, subnet_specifics={'name': name})
context.add_subnets(subs - set(context.current['subnets']))
if added:
self._sync_ptg_subnets(context, l2p)
l3p = self._get_l3p_for_l2policy(context, l2p_id)
for r in l3p['routers']:
self._attach_router_to_subnets(context._plugin_context,
r, added)
def _create_implicit_contracts(self, context, l3p):
self._process_contracts_for_default_epg(context, l3p)
def _configure_contracts_for_default_epg(self, context, l3p, epg_dn):
self._process_contracts_for_default_epg(
context, l3p, epg_dn=epg_dn, create=False, delete=False)
def _delete_implicit_contracts(self, context, l3p):
self._process_contracts_for_default_epg(
context, l3p, epg_dn=None, create=False, delete=True)
def _get_implicit_contracts_for_default_epg(
self, context, l3p, epg_dn):
return self._process_contracts_for_default_epg(
context, l3p, epg_dn=epg_dn, get=True)
def _process_contracts_for_default_epg(
self, context, l3p, epg_dn=None, create=True, delete=False, get=False):
# get=True overrides the create and delete cases, and returns a dict
# with the Contracts, ContractSubjects, Filters, and FilterEntries
# for the default EPG
# create=True, delete=False means create everything and add Contracts
# to the default EPG
# create=False, delete=False means only add Contracts to the default
# EPG
# create=False, delete=True means only remove Contracts from the
# default EPG and delete them
# create=True, delete=True is not a valid combination
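        # For instance, _create_implicit_contracts() uses the defaults
        # (create=True, delete=False), _configure_contracts_for_default_epg()
        # passes create=False, delete=False, _delete_implicit_contracts()
        # passes create=False, delete=True, and get=True is reserved for the
        # read-only lookup in _get_implicit_contracts_for_default_epg().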
if create and delete:
LOG.error("Incorrect use of internal method "
"_process_contracts_for_default_epg(), create and "
"delete cannot be True at the same time")
            raise exc.GroupPolicyInternalError()
session = context._plugin_context.session
aim_ctx = aim_context.AimContext(session)
# Infra Services' FilterEntries and attributes
infra_entries = alib.get_service_contract_filter_entries()
# ARP FilterEntry and attributes
arp_entries = alib.get_arp_filter_entry()
contracts = {alib.SERVICE_PREFIX: infra_entries,
alib.IMPLICIT_PREFIX: arp_entries}
for contract_name_prefix, entries in six.iteritems(contracts):
contract_name = self.name_mapper.l3_policy(
session, l3p['id'], prefix=contract_name_prefix)
# Create Contract (one per l3_policy)
aim_contract = aim_resource.Contract(
tenant_name=self._aim_tenant_name(
session, l3p['tenant_id'], aim_resource.Contract),
name=contract_name, display_name=contract_name)
if get:
aim_resources = {}
aim_resources[FILTERS] = []
aim_resources[FILTER_ENTRIES] = []
aim_resources[CONTRACT_SUBJECTS] = []
contract_fetched = self.aim.get(aim_ctx, aim_contract)
aim_resources[CONTRACTS] = [contract_fetched]
else:
if create:
self.aim.create(aim_ctx, aim_contract, overwrite=True)
if not delete and epg_dn:
aim_epg = self.aim.get(
aim_ctx, aim_resource.EndpointGroup.from_dn(epg_dn))
# Add Contracts to the default EPG
if contract_name_prefix == alib.IMPLICIT_PREFIX:
# Default EPG provides and consumes ARP Contract
self._add_contracts_for_epg(
aim_ctx, aim_epg,
provided_contracts=[contract_name],
consumed_contracts=[contract_name])
else:
# Default EPG provides Infra Services' Contract
self._add_contracts_for_epg(
aim_ctx, aim_epg,
provided_contracts=[contract_name])
continue
filter_names = []
for k, v in six.iteritems(entries):
filter_name = self.name_mapper.l3_policy(
session, l3p['id'],
prefix=''.join([contract_name_prefix, k, '-']))
# Create Filter (one per l3_policy)
aim_filter = aim_resource.Filter(
tenant_name=self._aim_tenant_name(
session, l3p['tenant_id'], aim_resource.Filter),
name=filter_name, display_name=filter_name)
if get:
filter_fetched = self.aim.get(aim_ctx, aim_filter)
aim_resources[FILTERS].append(filter_fetched)
aim_filter_entry = self._aim_filter_entry(
session, aim_filter, k,
alib.map_to_aim_filter_entry(v))
entry_fetched = self.aim.get(aim_ctx, aim_filter_entry)
aim_resources[FILTER_ENTRIES].append(entry_fetched)
else:
if create:
self.aim.create(aim_ctx, aim_filter, overwrite=True)
# Create FilterEntries (one per l3_policy) and
# associate with Filter
self._create_aim_filter_entry(
session, aim_ctx, aim_filter, k, v, overwrite=True)
filter_names.append(aim_filter.name)
if delete:
self._delete_aim_filter_entries(aim_ctx, aim_filter)
self.aim.delete(aim_ctx, aim_filter)
if get:
aim_contract_subject = self._aim_contract_subject(aim_contract)
subject_fetched = self.aim.get(aim_ctx, aim_contract_subject)
aim_resources[CONTRACT_SUBJECTS].append(subject_fetched)
return aim_resources
else:
if create:
# Create ContractSubject (one per l3_policy) with relevant
# Filters, and associate with Contract
self._populate_aim_contract_subject_by_filters(
context, aim_contract, bi_filters=filter_names)
if delete:
self._delete_aim_contract_subject(aim_ctx, aim_contract)
self.aim.delete(aim_ctx, aim_contract)
def _add_implicit_svc_contracts_to_epg(self, context, l2p, aim_epg):
session = context._plugin_context.session
aim_ctx = aim_context.AimContext(session)
implicit_contract_name = self.name_mapper.l3_policy(
session, l2p['l3_policy_id'], prefix=alib.IMPLICIT_PREFIX)
service_contract_name = self.name_mapper.l3_policy(
session, l2p['l3_policy_id'], prefix=alib.SERVICE_PREFIX)
self._add_contracts_for_epg(aim_ctx, aim_epg,
provided_contracts=[implicit_contract_name],
consumed_contracts=[implicit_contract_name, service_contract_name])
def _add_contracts_for_epg(self, aim_ctx, aim_epg, provided_contracts=None,
consumed_contracts=None):
if provided_contracts:
aim_epg.provided_contract_names += provided_contracts
if consumed_contracts:
aim_epg.consumed_contract_names += consumed_contracts
self.aim.create(aim_ctx, aim_epg, overwrite=True)
def _merge_gbp_status(self, gbp_resource_list):
merged_status = gp_const.STATUS_ACTIVE
for gbp_resource in gbp_resource_list:
if gbp_resource['status'] == gp_const.STATUS_BUILD:
merged_status = gp_const.STATUS_BUILD
elif gbp_resource['status'] == gp_const.STATUS_ERROR:
merged_status = gp_const.STATUS_ERROR
break
return merged_status
def _map_ml2plus_status(self, sync_status):
if not sync_status:
            # REVISIT(Sumit)
return gp_const.STATUS_BUILD
if sync_status == cisco_apic.SYNC_ERROR:
return gp_const.STATUS_ERROR
elif sync_status == cisco_apic.SYNC_BUILD:
return gp_const.STATUS_BUILD
else:
return gp_const.STATUS_ACTIVE
def _process_subnets_for_ptg_delete(self, context, subnet_ids, router_ids):
plugin_context = context._plugin_context
if subnet_ids:
for subnet_id in subnet_ids:
# Clean-up subnet if this is the last PTG using the L2P.
if not context._plugin._get_ptgs_for_subnet(
plugin_context, subnet_id):
for router_id in router_ids:
# If the subnet interface for this router has
# already been removed (say manually), the
# call to Neutron's remove_router_interface
# will cause the transaction to exit immediately.
# To avoid this, we first check if this subnet
# still has an interface on this router.
if self._get_router_interface_port_by_subnet(
plugin_context, router_id, subnet_id):
self._detach_router_from_subnets(
plugin_context, router_id, [subnet_id])
self._cleanup_subnet(plugin_context, subnet_id)
def _map_aim_status(self, session, aim_resource_obj):
# Note that this implementation assumes that this driver
# is the only policy driver configured, and no merging
# with any previous status is required.
aim_ctx = aim_context.AimContext(session)
aim_status = self.aim.get_status(
aim_ctx, aim_resource_obj, create_if_absent=False)
if not aim_status:
            # REVISIT(Sumit)
return gp_const.STATUS_BUILD
if aim_status.is_error():
return gp_const.STATUS_ERROR
elif aim_status.is_build():
return gp_const.STATUS_BUILD
else:
return gp_const.STATUS_ACTIVE
def _merge_aim_status(self, session, aim_resource_obj_list):
# Note that this implementation assumes that this driver
# is the only policy driver configured, and no merging
# with any previous status is required.
# When merging states of multiple AIM objects, the status
# priority is ERROR > BUILD > ACTIVE.
merged_status = gp_const.STATUS_ACTIVE
for aim_obj in aim_resource_obj_list:
status = self._map_aim_status(session, aim_obj)
if status != gp_const.STATUS_ACTIVE:
merged_status = status
if merged_status == gp_const.STATUS_ERROR:
break
return merged_status
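    # For example, merging [ACTIVE, BUILD, ACTIVE] yields BUILD, while any
    # ERROR in the list short-circuits the merge and returns ERROR.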
def _db_plugin(self, plugin_obj):
return super(gbp_plugin.GroupPolicyPlugin, plugin_obj)
def _get_aim_context(self, context):
if hasattr(context, 'session'):
session = context.session
else:
session = context._plugin_context.session
return aim_context.AimContext(session)
# REVISIT: Called by mechanism driver when binding a port using
# DVS. Consider a more general way for neutron ports to be bound
# using a non-default EPG.
def _get_port_epg(self, plugin_context, port):
ptg, pt = self._port_id_to_ptg(plugin_context, port['id'])
if ptg:
# TODO(Kent): optimize this also for GBP workflow?
return self._get_aim_endpoint_group(plugin_context.session, ptg)
else:
# Return default EPG based on network
network = self._get_network(plugin_context, port['network_id'])
epg = self._get_aim_default_endpoint_group(plugin_context.session,
network)
if not epg:
# Something is wrong, default EPG doesn't exist.
                # TODO(ivar): should raise an exception
LOG.error("Default EPG doesn't exist for "
"port %s", port['id'])
return epg
def _get_vrf_by_dn(self, context, vrf_dn):
aim_context = self._get_aim_context(context)
vrf = self.aim.get(
aim_context, aim_resource.VRF.from_dn(vrf_dn))
return vrf
def _check_l3policy_ext_segment(self, context, l3policy):
if l3policy['external_segments']:
for allocations in l3policy['external_segments'].values():
if len(allocations) > 1:
raise alib.OnlyOneAddressIsAllowedPerExternalSegment()
# if NAT is disabled, allow only one L3P per ES
ess = context._plugin.get_external_segments(
context._plugin_context,
filters={'id': list(l3policy['external_segments'].keys())})
for es in ess:
ext_net = self._ext_segment_2_ext_network(context, es)
if (ext_net and
ext_net.get(cisco_apic.NAT_TYPE) in
('distributed', 'edge')):
continue
if [x for x in es['l3_policies'] if x != l3policy['id']]:
raise alib.OnlyOneL3PolicyIsAllowedPerExternalSegment()
def _check_external_policy(self, context, ep):
if ep.get('shared', False):
# REVISIT(amitbose) This could be relaxed
raise alib.SharedExternalPolicyUnsupported()
ess = context._plugin.get_external_segments(
context._plugin_context,
filters={'id': ep['external_segments']})
for es in ess:
other_eps = context._plugin.get_external_policies(
context._plugin_context,
filters={'id': es['external_policies'],
'tenant_id': [ep['tenant_id']]})
if [x for x in other_eps if x['id'] != ep['id']]:
raise alib.MultipleExternalPoliciesForL3Policy()
def _get_l3p_subnets(self, context, l3policy):
l2p_sn = []
for l2p_id in l3policy['l2_policies']:
l2p_sn.extend(self._get_l2p_subnets(context, l2p_id))
return l2p_sn
def _ext_segment_2_ext_network(self, context, ext_segment):
subnet = self._get_subnet(context._plugin_context,
ext_segment['subnet_id'])
if subnet:
return self._get_network(context._plugin_context,
subnet['network_id'])
def _map_ext_segment_to_routers(self, context, ext_segments,
routers):
net_to_router = {r['external_gateway_info']['network_id']: r
for r in routers
if r.get('external_gateway_info')}
result = {}
for es in ext_segments:
sn = self._get_subnet(context._plugin_context, es['subnet_id'])
router = net_to_router.get(sn['network_id']) if sn else None
if router:
result[es['id']] = router
return result
def _plug_l3p_routers_to_ext_segment(self, context, l3policy,
ext_seg_info):
plugin_context = context._plugin_context
es_list = self._get_external_segments(plugin_context,
filters={'id': list(ext_seg_info.keys())})
l3p_subs = self._get_l3p_subnets(context, l3policy)
# REVISIT: We are not re-using the first router created
# implicitly for the L3Policy (or provided explicitly by the
# user). Consider using that for the first external segment
for es in es_list:
router_id = self._use_implicit_router(context,
router_name=l3policy['name'] + '-' + es['name'])
router = self._create_router_gw_for_external_segment(
context._plugin_context, es, ext_seg_info, router_id)
if not ext_seg_info[es['id']] or not ext_seg_info[es['id']][0]:
# Update L3P assigned address
efi = router['external_gateway_info']['external_fixed_ips']
assigned_ips = [x['ip_address'] for x in efi
if x['subnet_id'] == es['subnet_id']]
context.set_external_fixed_ips(es['id'], assigned_ips)
if es['external_policies']:
ext_policy = self._get_external_policies(plugin_context,
filters={'id': es['external_policies'],
'tenant_id': [l3policy['tenant_id']]})
if ext_policy:
self._set_router_ext_contracts(context, router_id,
ext_policy[0])
self._attach_router_to_subnets(plugin_context, router_id, l3p_subs)
def _unplug_l3p_routers_from_ext_segment(self, context, l3policy,
ext_seg_ids, deleting=False):
plugin_context = context._plugin_context
es_list = self._get_external_segments(plugin_context,
filters={'id': ext_seg_ids})
routers = self._get_routers(plugin_context,
filters={'id': l3policy['routers']})
es_2_router = self._map_ext_segment_to_routers(context, es_list,
routers)
for r in es_2_router.values():
router_subs = self._get_router_interface_subnets(plugin_context,
r['id'])
self._detach_router_from_subnets(plugin_context, r['id'],
router_subs)
if not deleting:
context.remove_router(r['id'])
self._cleanup_router(plugin_context, r['id'])
def _get_router_interface_subnets(self, plugin_context, router_id):
router_ports = self._get_ports(plugin_context,
filters={'device_owner': [n_constants.DEVICE_OWNER_ROUTER_INTF],
'device_id': [router_id]})
return set(y['subnet_id']
for x in router_ports for y in x['fixed_ips'])
def _get_router_interface_port_by_subnet(self, plugin_context,
router_id, subnet_id):
router_ports = self._get_ports(plugin_context,
filters={'device_owner': [n_constants.DEVICE_OWNER_ROUTER_INTF],
'device_id': [router_id],
'fixed_ips': {'subnet_id': [subnet_id]}})
return (router_ports or [None])[0]
def _attach_router_to_subnets(self, plugin_context, router_id, subs):
# On account of sharing configuration, the router and subnets might
# be in different tenants, hence we need to use admin context
plugin_context = plugin_context.elevated()
rtr_sn = self._get_router_interface_subnets(plugin_context, router_id)
for subnet in subs:
if subnet['id'] in rtr_sn: # already attached
continue
gw_port = self._get_ports(plugin_context,
filters={'fixed_ips': {'ip_address': [subnet['gateway_ip']],
'subnet_id': [subnet['id']]}})
if gw_port:
# Gateway port is in use, create new interface port
attrs = {'tenant_id': subnet['tenant_id'],
'network_id': subnet['network_id'],
'fixed_ips': [{'subnet_id': subnet['id']}],
'device_id': '',
'device_owner': n_constants.DEVICE_OWNER_ROUTER_INTF,
'mac_address': n_constants.ATTR_NOT_SPECIFIED,
'name': '%s-%s' % (router_id, subnet['id']),
'admin_state_up': True}
try:
intf_port = self._create_port(plugin_context, attrs)
except n_exc.NeutronException:
with excutils.save_and_reraise_exception():
LOG.exception('Failed to create explicit router '
'interface port in subnet '
'%(subnet)s',
{'subnet': subnet['id']})
interface_info = {'port_id': intf_port['id'],
NO_VALIDATE: True}
try:
self._add_router_interface(plugin_context, router_id,
interface_info)
except n_exc.BadRequest:
self._delete_port(plugin_context, intf_port['id'])
with excutils.save_and_reraise_exception():
LOG.exception('Attaching router %(router)s to '
'%(subnet)s with explicit port '
                                      '%(port)s failed',
{'subnet': subnet['id'],
'router': router_id,
'port': intf_port['id']})
else:
self._plug_router_to_subnet(plugin_context, subnet['id'],
router_id)
def _plug_router_to_subnet(self, plugin_context, subnet_id, router_id):
interface_info = {'subnet_id': subnet_id,
NO_VALIDATE: True}
if router_id:
try:
self._add_router_interface(plugin_context, router_id,
interface_info)
except n_exc.BadRequest as e:
LOG.exception("Adding subnet to router failed, exception:"
"%s", e)
raise exc.GroupPolicyInternalError()
def _detach_router_from_subnets(self, plugin_context, router_id, sn_ids):
for subnet_id in sn_ids:
# Use admin context because router and subnet may be in
# different tenants
self._remove_router_interface(plugin_context.elevated(),
router_id,
{'subnet_id': subnet_id})
def _set_router_ext_contracts(self, context, router_id, ext_policy):
session = context._plugin_context.session
prov = []
cons = []
if ext_policy:
prov = self._get_aim_contract_names(session,
ext_policy['provided_policy_rule_sets'])
cons = self._get_aim_contract_names(session,
ext_policy['consumed_policy_rule_sets'])
attr = {cisco_apic_l3.EXTERNAL_PROVIDED_CONTRACTS: prov,
cisco_apic_l3.EXTERNAL_CONSUMED_CONTRACTS: cons}
self._update_router(context._plugin_context, router_id, attr)
def _get_ext_policy_routers(self, context, ext_policy, ext_seg_ids):
plugin_context = context._plugin_context
es = self._get_external_segments(plugin_context,
filters={'id': ext_seg_ids})
subs = self._get_subnets(context._plugin_context,
filters={'id': [e['subnet_id'] for e in es]})
ext_net = {s['network_id'] for s in subs}
l3ps = set([l3p for e in es for l3p in e['l3_policies']])
l3ps = self._get_l3_policies(plugin_context,
filters={'id': l3ps,
'tenant_id': [ext_policy['tenant_id']]})
routers = self._get_routers(plugin_context,
filters={'id': [r for l in l3ps for r in l['routers']]})
return [r['id'] for r in routers
if (r['external_gateway_info'] or {}).get('network_id') in ext_net]
def _get_auto_ptg_name(self, l2p):
return AUTO_PTG_NAME_PREFIX % l2p['id']
def _get_auto_ptg_id(self, l2p_id):
if l2p_id:
return AUTO_PTG_ID_PREFIX % hashlib.md5(
l2p_id.encode('utf-8')).hexdigest()
def _is_auto_ptg(self, ptg):
return ptg['id'].startswith(AUTO_PTG_PREFIX)
def _get_policy_enforcement_pref(self, ptg):
if ptg['intra_ptg_allow']:
policy_enforcement_pref = UNENFORCED
else:
policy_enforcement_pref = ENFORCED
return policy_enforcement_pref
def _map_policy_enforcement_pref(self, epg):
if epg.policy_enforcement_pref == UNENFORCED:
return True
else:
return False
def _get_epg_by_dn(self, context, epg_dn):
aim_context = self._get_aim_context(context)
epg = self.aim.get(
aim_context, aim_resource.EndpointGroup.from_dn(epg_dn))
return epg
def _get_epg_name_from_dn(self, context, epg_dn):
aim_context = self._get_aim_context(context)
default_epg_name = self.aim.get(
aim_context, aim_resource.EndpointGroup.from_dn(epg_dn)).name
return default_epg_name
def apic_epg_name_for_policy_target_group(self, session, ptg_id,
name=None, context=None):
if not context:
context = gbp_utils.get_current_context()
# get_network can do a DB write, hence we use a writer
with db_api.CONTEXT_WRITER.using(context):
query = BAKERY(lambda s: s.query(
gpmdb.PolicyTargetGroupMapping))
query += lambda q: q.filter_by(
id=sa.bindparam('ptg_id'))
ptg_db = query(session).params(
ptg_id=ptg_id).first()
if ptg_db and self._is_auto_ptg(ptg_db):
query = BAKERY(lambda s: s.query(
gpmdb.L2PolicyMapping))
query += lambda q: q.filter_by(
id=sa.bindparam('l2p_id'))
l2p_db = query(session).params(
l2p_id=ptg_db['l2_policy_id']).first()
network_id = l2p_db['network_id']
admin_context = n_context.get_admin_context()
net = self._get_network(admin_context, network_id)
default_epg_dn = net[cisco_apic.DIST_NAMES][cisco_apic.EPG]
default_epg_name = self._get_epg_name_from_dn(
admin_context, default_epg_dn)
return default_epg_name
else:
return ptg_id
def apic_ap_name_for_application_policy_group(self, session, apg_id):
if apg_id:
return self.name_mapper.application_policy_group(
session, apg_id)
else:
return self.aim_mech_driver.ap_name
def _get_default_security_group(self, plugin_context, ptg_id,
tenant_id):
filters = {'name': [DEFAULT_SG_NAME], 'tenant_id': [tenant_id]}
default_group = self._get_sgs(plugin_context, filters)
return default_group[0]['id'] if default_group else None
def _create_default_security_group(self, plugin_context, tenant_id):
# Allow all
sg_id = self._get_default_security_group(plugin_context, '', tenant_id)
ip_v = [(n_constants.IPv4, '0.0.0.0/0'), (n_constants.IPv6, '::/0')]
if not sg_id:
sg_name = DEFAULT_SG_NAME
sg = self._create_gbp_sg(plugin_context, tenant_id, sg_name,
description='default GBP security group')
sg_id = sg['id']
for v, g in ip_v:
self._sg_rule(plugin_context, tenant_id, sg_id,
'ingress', cidr=g, ethertype=v)
self._sg_rule(plugin_context, tenant_id, sg_id,
'egress', cidr=g, ethertype=v)
def _use_implicit_port(self, context, subnets=None):
self._create_default_security_group(context._plugin_context,
context.current['tenant_id'])
super(AIMMappingDriver, self)._use_implicit_port(
context, subnets=subnets)
def _handle_create_network_service_policy(self, context):
self._validate_nat_pool_for_nsp(context)
self._handle_network_service_policy(context)
def _get_prss_for_policy_rules(self, context, pr_ids):
if not pr_ids:
return []
query = BAKERY(lambda s: s.query(
gpdb.PolicyRuleSet))
query += lambda q: q.join(
gpdb.PRSToPRAssociation,
gpdb.PRSToPRAssociation.policy_rule_set_id ==
gpdb.PolicyRuleSet.id)
query += lambda q: q.join(
gpdb.PolicyRule,
gpdb.PRSToPRAssociation.policy_rule_id == gpdb.PolicyRule.id)
query += lambda q: q.filter(
gpdb.PolicyRule.id.in_(sa.bindparam('pr_ids', expanding=True)))
return [self._get_policy_rule_set(
context._plugin_context, x['id']) for x in (
query(context._plugin_context.session).params(
pr_ids=pr_ids).all())]
def _create_per_l3p_implicit_contracts(self):
admin_context = n_context.get_admin_context()
context = type('', (object,), {})()
context._plugin_context = admin_context
session = admin_context.session
aim_ctx = aim_context.AimContext(session)
contract_name_prefix = list(alib.get_service_contract_filter_entries(
).keys())[0]
query = BAKERY(lambda s: s.query(
gpmdb.L3PolicyMapping))
l3ps = query(session).all()
name_mapper = apic_mapper.APICNameMapper()
aim_mgr = aim_manager.AimManager()
self._aim = aim_mgr
self._name_mapper = name_mapper
orig_aim_tenant_name = self._aim_tenant_name
def _aim_tenant_name(self, session, tenant_id, aim_resource_class=None,
gbp_resource=None, gbp_obj=None):
attrs = aim_resource.Tenant(
name=md.COMMON_TENANT_NAME, display_name='')
tenant = aim_mgr.get(aim_ctx, attrs)
if not tenant:
tenant = aim_mgr.create(aim_ctx, attrs)
return md.COMMON_TENANT_NAME
self._aim_tenant_name = _aim_tenant_name
for l3p in l3ps:
implicit_contract_name = name_mapper.l3_policy(
session, l3p['id'], prefix=contract_name_prefix)
if not aim_mgr.find(
aim_ctx, aim_resource.Contract,
name=implicit_contract_name):
self._create_implicit_contracts(context, l3p)
self._aim = None
self._name_mapper = None
self._aim_tenant_name = orig_aim_tenant_name
def validate_neutron_mapping(self, mgr):
# REVISIT: Implement.
pass
def validate_aim_mapping(self, mgr):
# REVISIT: Register all AIM resource types used by GBP mapping
# but not the Neutron mapping.
# REVISIT: Register DB tables to be validated.
# Determine expected AIM resources and DB records for each
# GBP resource type.
self._validate_l3_policies(mgr)
self._validate_l2_policies(mgr)
self._validate_policy_target_groups(mgr)
self._validate_policy_targets(mgr)
self._validate_application_policy_groups(mgr)
self._validate_policy_classifiers(mgr)
self._validate_policy_rule_sets(mgr)
self._validate_external_segments(mgr)
self._validate_external_policies(mgr)
# REVISIT: Do any of the following top-level GBP resources map
        # to or affect AIM resources: NetworkServicePolicy,
# PolicyAction, NATPool?
def _validate_l3_policies(self, mgr):
# REVISIT: Implement validation of actual mapping to AIM
# resources.
query = BAKERY(lambda s: s.query(
gpdb.L3Policy))
if query(mgr.actual_session).first():
mgr.validation_failed(
"GBP->AIM validation for L3P not yet implemented")
def _validate_l2_policies(self, mgr):
# REVISIT: Implement validation of actual mapping to AIM
# resources.
query = BAKERY(lambda s: s.query(
gpdb.L2Policy))
if query(mgr.actual_session).first():
mgr.validation_failed(
"GBP->AIM validation for L2P not yet implemented")
def _validate_policy_target_groups(self, mgr):
# REVISIT: Implement validation of actual mapping to AIM
# resources.
query = BAKERY(lambda s: s.query(
gpdb.PolicyTargetGroup))
if query(mgr.actual_session).first():
mgr.validation_failed(
"GBP->AIM validation for PTG not yet implemented")
def _validate_policy_targets(self, mgr):
# REVISIT: Implement validation of actual mapping to AIM
# resources.
query = BAKERY(lambda s: s.query(
gpdb.PolicyTarget))
if query(mgr.actual_session).first():
mgr.validation_failed(
"GBP->AIM validation for PT not yet implemented")
def _validate_application_policy_groups(self, mgr):
# REVISIT: Implement validation of actual mapping to AIM
# resources.
query = BAKERY(lambda s: s.query(
gpdb.ApplicationPolicyGroup))
if query(mgr.actual_session).first():
mgr.validation_failed(
"GBP->AIM validation for APG not yet implemented")
def _validate_policy_classifiers(self, mgr):
# REVISIT: Implement validation of actual mapping to AIM
# resources.
query = BAKERY(lambda s: s.query(
gpdb.PolicyClassifier))
if query(mgr.actual_session).first():
mgr.validation_failed(
"GBP->AIM validation for PC not yet implemented")
def _validate_policy_rule_sets(self, mgr):
# REVISIT: Implement validation of actual mapping to AIM
# resources.
query = BAKERY(lambda s: s.query(
gpdb.PolicyRuleSet))
if query(mgr.actual_session).first():
mgr.validation_failed(
"GBP->AIM validation for PRS not yet implemented")
def _validate_external_segments(self, mgr):
# REVISIT: Implement validation of actual mapping to AIM
# resources. This should probably be called from
# validate_neutron_mapping rather than validate_aim_mapping,
# since external_routes maps to the cisco_apic.EXTERNAL_CIDRS
# network extension.
query = BAKERY(lambda s: s.query(
gpdb.ExternalSegment))
if query(mgr.actual_session).first():
mgr.validation_failed(
"GBP->AIM validation for ES not yet implemented")
def _validate_external_policies(self, mgr):
# REVISIT: Implement validation of actual mapping to AIM
# resources.
query = BAKERY(lambda s: s.query(
gpdb.ExternalPolicy))
if query(mgr.actual_session).first():
mgr.validation_failed(
"GBP->AIM validation for EP not yet implemented")
| apache-2.0 | -2,766,098,963,838,720,500 | 43.806975 | 85 | 0.58033 | false | 3.867348 | false | false | false |
rudolfbyker/cart-converters | parse_rs_email.py | 1 | 1219 | #!/usr/bin/env python2
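# This script appears to convert an RS Components order/quote table, saved
# as a tab-separated text file, into a CSV of line items: the input path is
# taken from the first command-line argument and the output is written next
# to it with a .csv extension.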
import sys
import os
import unicodecsv as csv
input_filename = sys.argv[1]
output_filename = os.path.splitext(input_filename)[0] + ".csv"
start = False
lines = []
items = []
header = [
"Unit price",
"Qty",
"RS Stock No.",
"Manufacturer Part Number",
"Manufacturer",
"Description",
]
with open(input_filename, 'rb') as csvfile:
csvreader = csv.reader(csvfile, delimiter='\t')
for line in csvreader:
line = [l.strip() for l in line]
if start:
lines.append(line)
elif "RS Stock No." in line and len(line) == 9:
start = True
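# The grouping below assumes each line item spans three consecutive rows of
# the pasted table: a 7-field row (qty, stock no., part no., manufacturer,
# ..., description), a 2-field row, and a 3-field row whose second column is
# the unit price; index positions that do not match this shape yield no item.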
for i in range(len(lines) - 2):  # stop early so lines[i+2] stays in range
if len(lines[i]) == 7 and len(lines[i+1]) == 2 and len(lines[i+2]) == 3:
item = [
lines[i+2][1], # Unit price
lines[i][0], # Qty
lines[i][1], # RS Stock No.
lines[i][2], # Manufacturer Part Number
lines[i][3], # Manufacturer
lines[i][5], # Description
]
items.append(item)
with open(output_filename, 'wb') as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow(header)
csvwriter.writerows(items)
| mit | 8,708,309,153,482,511,000 | 22.901961 | 76 | 0.548811 | false | 3.376731 | false | false | false |
gulopine/steel | examples/images/bmp.py | 1 | 1701 | import steel
COMPRESSION_TYPES = (
(0, 'No compression'),
(1, '8-bit RLE'),
(2, '4-bit RLE'),
(3, 'Bit Field'),
(4, 'JPEG'), # Generally not supported for screen display
(5, 'PNG'), # Generally not supported for screen display
)
class PaletteColor(steel.Structure):
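    # BMP palette entries are stored in BGRA order, one byte per channel.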
blue = steel.Integer(size=1)
green = steel.Integer(size=1)
red = steel.Integer(size=1)
alpha = steel.Integer(size=1)
def __str__(self):
return '#%x%x%x%x' % (self.red, self.green, self.blue, self.alpha)
class BMP(steel.Structure, endianness=steel.LittleEndian):
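    # The field layout mirrors the BMP file header followed by the 40-byte
    # BITMAPINFOHEADER, hence the header_size default of 40.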
signature = steel.FixedString('BM')
filesize = steel.Integer('Total file size', size=4)
steel.Reserved(size=4)
data_offset = steel.Integer('Offset of the actual image data', size=4)
header_size = steel.Integer(size=4, default=40)
width = steel.Integer(size=4)
height = steel.Integer(size=4)
plane_count = steel.Integer(size=2, default=1)
bit_depth = steel.Integer(size=2)
compression_type = steel.Integer(size=4, choices=COMPRESSION_TYPES, default=0)
data_size = steel.Integer('Size of the actual image data', size=4)
ppm_x = steel.Integer('Pixels per meter (X axis)', size=4)
ppm_y = steel.Integer('Pixels per meter (Y axis)', size=4)
color_count = steel.Integer('Number of colors', size=4)
important_color_count = steel.Integer('Number of important colors', size=4)
palette = steel.List(PaletteColor, size=color_count)
pixel_data = steel.Bytes(size=steel.Remainder)
if __name__ == '__main__':
import sys
bmp = BMP(open(sys.argv[1], 'rb'))
print('%s x %s' % (bmp.width, bmp.height))
| bsd-3-clause | 8,662,351,827,459,457,000 | 34.978261 | 82 | 0.634333 | false | 3.092727 | false | false | false |
samba-team/samba | python/samba/tests/gpo.py | 1 | 82209 | # Unix SMB/CIFS implementation. Tests for smb manipulation
# Copyright (C) David Mulder <[email protected]> 2018
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os, grp, pwd
import errno
from samba import gpo, tests
from samba.gpclass import register_gp_extension, list_gp_extensions, \
unregister_gp_extension, GPOStorage
from samba.param import LoadParm
from samba.gpclass import check_refresh_gpo_list, check_safe_path, \
check_guid, parse_gpext_conf, atomic_write_conf, get_deleted_gpos_list
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile, TemporaryDirectory
from samba.gp_sec_ext import gp_krb_ext, gp_access_ext
from samba.gp_scripts_ext import gp_scripts_ext
from samba.gp_sudoers_ext import gp_sudoers_ext
from samba.vgp_sudoers_ext import vgp_sudoers_ext
from samba.vgp_symlink_ext import vgp_symlink_ext
from samba.gpclass import gp_inf_ext
from samba.gp_smb_conf_ext import gp_smb_conf_ext
from samba.vgp_files_ext import vgp_files_ext
from samba.vgp_openssh_ext import vgp_openssh_ext
from samba.vgp_startup_scripts_ext import vgp_startup_scripts_ext
from samba.vgp_motd_ext import vgp_motd_ext
from samba.vgp_issue_ext import vgp_issue_ext
from samba.vgp_access_ext import vgp_access_ext
from samba.gp_gnome_settings_ext import gp_gnome_settings_ext
import logging
from samba.credentials import Credentials
from samba.gp_msgs_ext import gp_msgs_ext
from samba.common import get_bytes
from samba.dcerpc import preg
from samba.ndr import ndr_pack
import codecs
from shutil import copyfile
import xml.etree.ElementTree as etree
import hashlib
from samba.gp_parse.gp_pol import GPPolParser
from glob import glob
from configparser import ConfigParser
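# Paths into the sysvol policy share are derived from the REALM exported by
# the selftest environment.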
realm = os.environ.get('REALM')
policies = realm + '/POLICIES'
realm = realm.lower()
poldir = r'\\{0}\sysvol\{0}\Policies'.format(realm)
# the first part of the base DN varies by testenv. Work it out from the realm
base_dn = 'DC={0},DC=samba,DC=example,DC=com'.format(realm.split('.')[0])
dspath = 'CN=Policies,CN=System,' + base_dn
gpt_data = '[General]\nVersion=%d'
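# Registry.pol test data in the XML form understood by GPPolParser; it locks
# down various GNOME settings and is consumed by the gp_gnome_settings_ext test.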
gnome_test_reg_pol = \
b"""
<?xml version="1.0" encoding="utf-8"?>
<PolFile num_entries="26" signature="PReg" version="1">
<Entry type="4" type_name="REG_DWORD">
<Key>GNOME Settings\Lock Down Settings</Key>
<ValueName>Lock Down Enabled Extensions</ValueName>
<Value>1</Value>
</Entry>
<Entry type="4" type_name="REG_DWORD">
<Key>GNOME Settings\Lock Down Settings</Key>
<ValueName>Lock Down Specific Settings</ValueName>
<Value>1</Value>
</Entry>
<Entry type="4" type_name="REG_DWORD">
<Key>GNOME Settings\Lock Down Settings</Key>
<ValueName>Disable Printing</ValueName>
<Value>1</Value>
</Entry>
<Entry type="4" type_name="REG_DWORD">
<Key>GNOME Settings\Lock Down Settings</Key>
<ValueName>Disable File Saving</ValueName>
<Value>1</Value>
</Entry>
<Entry type="4" type_name="REG_DWORD">
<Key>GNOME Settings\Lock Down Settings</Key>
<ValueName>Disable Command-Line Access</ValueName>
<Value>1</Value>
</Entry>
<Entry type="4" type_name="REG_DWORD">
<Key>GNOME Settings\Lock Down Settings</Key>
<ValueName>Disallow Login Using a Fingerprint</ValueName>
<Value>1</Value>
</Entry>
<Entry type="4" type_name="REG_DWORD">
<Key>GNOME Settings\Lock Down Settings</Key>
<ValueName>Disable User Logout</ValueName>
<Value>1</Value>
</Entry>
<Entry type="4" type_name="REG_DWORD">
<Key>GNOME Settings\Lock Down Settings</Key>
<ValueName>Disable User Switching</ValueName>
<Value>1</Value>
</Entry>
<Entry type="4" type_name="REG_DWORD">
<Key>GNOME Settings\Lock Down Settings</Key>
<ValueName>Disable Repartitioning</ValueName>
<Value>1</Value>
</Entry>
<Entry type="4" type_name="REG_DWORD">
<Key>GNOME Settings\Lock Down Settings</Key>
<ValueName>Whitelisted Online Accounts</ValueName>
<Value>1</Value>
</Entry>
<Entry type="4" type_name="REG_DWORD">
<Key>GNOME Settings\Lock Down Settings</Key>
<ValueName>Compose Key</ValueName>
<Value>1</Value>
</Entry>
<Entry type="4" type_name="REG_DWORD">
<Key>GNOME Settings\Lock Down Settings</Key>
<ValueName>Dim Screen when User is Idle</ValueName>
<Value>1</Value>
</Entry>
<Entry type="4" type_name="REG_DWORD">
<Key>GNOME Settings\Lock Down Settings</Key>
<ValueName>Enabled Extensions</ValueName>
<Value>1</Value>
</Entry>
<Entry type="1" type_name="REG_SZ">
<Key>GNOME Settings\Lock Down Settings\Compose Key</Key>
<ValueName>Key Name</ValueName>
<Value>Right Alt</Value>
</Entry>
<Entry type="4" type_name="REG_DWORD">
<Key>GNOME Settings\Lock Down Settings\Dim Screen when User is Idle</Key>
<ValueName>Delay</ValueName>
<Value>300</Value>
</Entry>
<Entry type="4" type_name="REG_DWORD">
<Key>GNOME Settings\Lock Down Settings\Dim Screen when User is Idle</Key>
<ValueName>Dim Idle Brightness</ValueName>
<Value>30</Value>
</Entry>
<Entry type="1" type_name="REG_SZ">
<Key>GNOME Settings\Lock Down Settings\Enabled Extensions</Key>
<ValueName>**delvals.</ValueName>
<Value> </Value>
</Entry>
<Entry type="1" type_name="REG_SZ">
<Key>GNOME Settings\Lock Down Settings\Enabled Extensions</Key>
<ValueName>[email protected]</ValueName>
<Value>[email protected]</Value>
</Entry>
<Entry type="1" type_name="REG_SZ">
<Key>GNOME Settings\Lock Down Settings\Enabled Extensions</Key>
<ValueName>[email protected]</ValueName>
<Value>[email protected]</Value>
</Entry>
<Entry type="1" type_name="REG_SZ">
<Key>GNOME Settings\Lock Down Settings\Lock Down Specific Settings</Key>
<ValueName>**delvals.</ValueName>
<Value> </Value>
</Entry>
<Entry type="1" type_name="REG_SZ">
<Key>GNOME Settings\Lock Down Settings\Lock Down Specific Settings</Key>
<ValueName>/org/gnome/desktop/background/picture-uri</ValueName>
<Value>/org/gnome/desktop/background/picture-uri</Value>
</Entry>
<Entry type="1" type_name="REG_SZ">
<Key>GNOME Settings\Lock Down Settings\Lock Down Specific Settings</Key>
<ValueName>/org/gnome/desktop/background/picture-options</ValueName>
<Value>/org/gnome/desktop/background/picture-options</Value>
</Entry>
<Entry type="1" type_name="REG_SZ">
<Key>GNOME Settings\Lock Down Settings\Lock Down Specific Settings</Key>
<ValueName>/org/gnome/desktop/background/primary-color</ValueName>
<Value>/org/gnome/desktop/background/primary-color</Value>
</Entry>
<Entry type="1" type_name="REG_SZ">
<Key>GNOME Settings\Lock Down Settings\Lock Down Specific Settings</Key>
<ValueName>/org/gnome/desktop/background/secondary-color</ValueName>
<Value>/org/gnome/desktop/background/secondary-color</Value>
</Entry>
<Entry type="1" type_name="REG_SZ">
<Key>GNOME Settings\Lock Down Settings\Whitelisted Online Accounts</Key>
<ValueName>**delvals.</ValueName>
<Value> </Value>
</Entry>
<Entry type="1" type_name="REG_SZ">
<Key>GNOME Settings\Lock Down Settings\Whitelisted Online Accounts</Key>
<ValueName>google</ValueName>
<Value>google</Value>
</Entry>
</PolFile>
"""
def days2rel_nttime(val):
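    # Convert a number of days to a negative relative NT time (a count of
    # 100-nanosecond ticks), the format AD uses for values such as minPwdAge.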
seconds = 60
minutes = 60
hours = 24
sam_add = 10000000
return -(val * seconds * minutes * hours * sam_add)
def gpupdate(lp, arg):
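    # Run the configured 'gpo update command' (samba-gpupdate) with the given
    # argument and return its exit code.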
gpupdate = lp.get('gpo update command')
gpupdate.append(arg)
p = Popen(gpupdate, stdout=PIPE, stderr=PIPE)
stdoutdata, stderrdata = p.communicate()
return p.returncode
def gpupdate_force(lp):
return gpupdate(lp, '--force')
def gpupdate_unapply(lp):
return gpupdate(lp, '--unapply')
def rsop(lp):
return gpupdate(lp, '--rsop')
def stage_file(path, data):
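    # Write test data to path (creating parent directories as needed), backing
    # up any existing file so unstage_file() can restore it afterwards.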
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
try:
os.makedirs(dirname)
except OSError as e:
if not (e.errno == errno.EEXIST and os.path.isdir(dirname)):
return False
if os.path.exists(path):
os.rename(path, '%s.bak' % path)
with NamedTemporaryFile(delete=False, dir=os.path.dirname(path)) as f:
f.write(get_bytes(data))
os.rename(f.name, path)
os.chmod(path, 0o644)
return True
def unstage_file(path):
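    # Restore the backup created by stage_file(), or remove the staged file.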
backup = '%s.bak' % path
if os.path.exists(backup):
os.rename(backup, path)
elif os.path.exists(path):
os.remove(path)
class GPOTests(tests.TestCase):
def setUp(self):
super(GPOTests, self).setUp()
self.server = os.environ["SERVER"]
self.dc_account = self.server.upper() + '$'
self.lp = LoadParm()
self.lp.load_default()
self.creds = self.insta_creds(template=self.get_credentials())
def tearDown(self):
super(GPOTests, self).tearDown()
def test_gpo_list(self):
global poldir, dspath
ads = gpo.ADS_STRUCT(self.server, self.lp, self.creds)
if ads.connect():
gpos = ads.get_gpo_list(self.creds.get_username())
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
names = ['Local Policy', guid]
file_sys_paths = [None, '%s\\%s' % (poldir, guid)]
ds_paths = [None, 'CN=%s,%s' % (guid, dspath)]
for i in range(0, len(gpos)):
self.assertEqual(gpos[i].name, names[i],
'The gpo name did not match expected name %s' % gpos[i].name)
self.assertEqual(gpos[i].file_sys_path, file_sys_paths[i],
'file_sys_path did not match expected %s' % gpos[i].file_sys_path)
self.assertEqual(gpos[i].ds_path, ds_paths[i],
'ds_path did not match expected %s' % gpos[i].ds_path)
def test_gpo_ads_does_not_segfault(self):
try:
ads = gpo.ADS_STRUCT(self.server, 42, self.creds)
except:
pass
def test_gpt_version(self):
global gpt_data
local_path = self.lp.cache_path('gpo_cache')
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
gpo_path = os.path.join(local_path, policies, guid)
old_vers = gpo.gpo_get_sysvol_gpt_version(gpo_path)[1]
with open(os.path.join(gpo_path, 'GPT.INI'), 'w') as gpt:
gpt.write(gpt_data % 42)
self.assertEqual(gpo.gpo_get_sysvol_gpt_version(gpo_path)[1], 42,
'gpo_get_sysvol_gpt_version() did not return the expected version')
with open(os.path.join(gpo_path, 'GPT.INI'), 'w') as gpt:
gpt.write(gpt_data % old_vers)
self.assertEqual(gpo.gpo_get_sysvol_gpt_version(gpo_path)[1], old_vers,
'gpo_get_sysvol_gpt_version() did not return the expected version')
def test_check_refresh_gpo_list(self):
cache = self.lp.cache_path('gpo_cache')
ads = gpo.ADS_STRUCT(self.server, self.lp, self.creds)
if ads.connect():
gpos = ads.get_gpo_list(self.creds.get_username())
check_refresh_gpo_list(self.server, self.lp, self.creds, gpos)
self.assertTrue(os.path.exists(cache),
'GPO cache %s was not created' % cache)
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
gpt_ini = os.path.join(cache, policies,
guid, 'GPT.INI')
self.assertTrue(os.path.exists(gpt_ini),
'GPT.INI was not cached for %s' % guid)
def test_check_refresh_gpo_list_malicious_paths(self):
# the path cannot contain ..
path = '/usr/local/samba/var/locks/sysvol/../../../../../../root/'
self.assertRaises(OSError, check_safe_path, path)
self.assertEqual(check_safe_path('/etc/passwd'), 'etc/passwd')
self.assertEqual(check_safe_path('\\\\etc/\\passwd'), 'etc/passwd')
# there should be no backslashes used to delineate paths
before = 'sysvol/' + realm + '\\Policies/' \
'{31B2F340-016D-11D2-945F-00C04FB984F9}\\GPT.INI'
after = realm + '/Policies/' \
'{31B2F340-016D-11D2-945F-00C04FB984F9}/GPT.INI'
result = check_safe_path(before)
self.assertEqual(result, after, 'check_safe_path() didn\'t'
' correctly convert \\ to /')
def test_check_safe_path_typesafe_name(self):
path = '\\\\toady.suse.de\\SysVol\\toady.suse.de\\Policies\\' \
'{31B2F340-016D-11D2-945F-00C04FB984F9}\\GPT.INI'
expected_path = 'toady.suse.de/Policies/' \
'{31B2F340-016D-11D2-945F-00C04FB984F9}/GPT.INI'
result = check_safe_path(path)
self.assertEqual(result, expected_path,
'check_safe_path unable to detect variable case sysvol components')
def test_gpt_ext_register(self):
this_path = os.path.dirname(os.path.realpath(__file__))
samba_path = os.path.realpath(os.path.join(this_path, '../../../'))
ext_path = os.path.join(samba_path, 'python/samba/gp_sec_ext.py')
ext_guid = '{827D319E-6EAC-11D2-A4EA-00C04F79F83A}'
ret = register_gp_extension(ext_guid, 'gp_access_ext', ext_path,
smb_conf=self.lp.configfile,
machine=True, user=False)
self.assertTrue(ret, 'Failed to register a gp ext')
gp_exts = list_gp_extensions(self.lp.configfile)
self.assertTrue(ext_guid in gp_exts.keys(),
'Failed to list gp exts')
self.assertEqual(gp_exts[ext_guid]['DllName'], ext_path,
'Failed to list gp exts')
unregister_gp_extension(ext_guid)
gp_exts = list_gp_extensions(self.lp.configfile)
self.assertTrue(ext_guid not in gp_exts.keys(),
'Failed to unregister gp exts')
self.assertTrue(check_guid(ext_guid), 'Failed to parse valid guid')
self.assertFalse(check_guid('AAAAAABBBBBBBCCC'), 'Parsed invalid guid')
lp, parser = parse_gpext_conf(self.lp.configfile)
self.assertTrue(lp and parser, 'parse_gpext_conf() invalid return')
parser.add_section('test_section')
parser.set('test_section', 'test_var', ext_guid)
atomic_write_conf(lp, parser)
lp, parser = parse_gpext_conf(self.lp.configfile)
self.assertTrue('test_section' in parser.sections(),
'test_section not found in gpext.conf')
self.assertEqual(parser.get('test_section', 'test_var'), ext_guid,
'Failed to find test variable in gpext.conf')
parser.remove_section('test_section')
atomic_write_conf(lp, parser)
def test_gp_log_get_applied(self):
local_path = self.lp.get('path', 'sysvol')
guids = ['{31B2F340-016D-11D2-945F-00C04FB984F9}',
'{6AC1786C-016F-11D2-945F-00C04FB984F9}']
gpofile = '%s/' + realm + '/Policies/%s/MACHINE/Microsoft/' \
'Windows NT/SecEdit/GptTmpl.inf'
stage = '[System Access]\nMinimumPasswordAge = 998\n'
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
for guid in guids:
gpttmpl = gpofile % (local_path, guid)
ret = stage_file(gpttmpl, stage)
self.assertTrue(ret, 'Could not create the target %s' % gpttmpl)
ret = gpupdate_force(self.lp)
self.assertEqual(ret, 0, 'gpupdate force failed')
gp_db = store.get_gplog(self.dc_account)
applied_guids = gp_db.get_applied_guids()
self.assertEqual(len(applied_guids), 2, 'The guids were not found')
self.assertIn(guids[0], applied_guids,
'%s not in applied guids' % guids[0])
self.assertIn(guids[1], applied_guids,
'%s not in applied guids' % guids[1])
applied_settings = gp_db.get_applied_settings(applied_guids)
for policy in applied_settings:
self.assertIn('System Access', policy[1],
'System Access policies not set')
self.assertIn('minPwdAge', policy[1]['System Access'],
'minPwdAge policy not set')
if policy[0] == guids[0]:
self.assertEqual(int(policy[1]['System Access']['minPwdAge']),
days2rel_nttime(1),
'minPwdAge policy not set')
elif policy[0] == guids[1]:
self.assertEqual(int(policy[1]['System Access']['minPwdAge']),
days2rel_nttime(998),
'minPwdAge policy not set')
ads = gpo.ADS_STRUCT(self.server, self.lp, self.creds)
if ads.connect():
gpos = ads.get_gpo_list(self.dc_account)
del_gpos = get_deleted_gpos_list(gp_db, gpos[:-1])
        self.assertEqual(len(del_gpos), 1, 'Returned deleted gpos list is incorrect')
self.assertEqual(guids[-1], del_gpos[0][0],
                         'GUID for the deleted gpo is incorrect')
self.assertIn('System Access', del_gpos[0][1],
'System Access policies not set for removal')
self.assertIn('minPwdAge', del_gpos[0][1]['System Access'],
'minPwdAge policy not set for removal')
for guid in guids:
gpttmpl = gpofile % (local_path, guid)
unstage_file(gpttmpl)
ret = gpupdate_unapply(self.lp)
self.assertEqual(ret, 0, 'gpupdate unapply failed')
def test_process_group_policy(self):
local_path = self.lp.cache_path('gpo_cache')
guids = ['{31B2F340-016D-11D2-945F-00C04FB984F9}',
'{6AC1786C-016F-11D2-945F-00C04FB984F9}']
gpofile = '%s/' + policies + '/%s/MACHINE/MICROSOFT/' \
'WINDOWS NT/SECEDIT/GPTTMPL.INF'
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
# Initialize the group policy extension
ext = gp_krb_ext(logger, self.lp, machine_creds, store)
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
stage = '[Kerberos Policy]\nMaxTicketAge = %d\n'
opts = [100, 200]
for i in range(0, 2):
gpttmpl = gpofile % (local_path, guids[i])
ret = stage_file(gpttmpl, stage % opts[i])
self.assertTrue(ret, 'Could not create the target %s' % gpttmpl)
# Process all gpos
ext.process_group_policy([], gpos)
ret = store.get_int('kdc:user_ticket_lifetime')
self.assertEqual(ret, opts[1], 'Higher priority policy was not set')
# Remove policy
gp_db = store.get_gplog(machine_creds.get_username())
del_gpos = get_deleted_gpos_list(gp_db, [])
ext.process_group_policy(del_gpos, [])
ret = store.get_int('kdc:user_ticket_lifetime')
self.assertEqual(ret, None, 'MaxTicketAge should not have applied')
# Process just the first gpo
ext.process_group_policy([], gpos[:-1])
ret = store.get_int('kdc:user_ticket_lifetime')
self.assertEqual(ret, opts[0], 'Lower priority policy was not set')
# Remove policy
ext.process_group_policy(del_gpos, [])
for guid in guids:
gpttmpl = gpofile % (local_path, guid)
unstage_file(gpttmpl)
def test_gp_scripts(self):
local_path = self.lp.cache_path('gpo_cache')
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
reg_pol = os.path.join(local_path, policies, guid,
'MACHINE/REGISTRY.POL')
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
# Initialize the group policy extension
ext = gp_scripts_ext(logger, self.lp, machine_creds, store)
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
reg_key = b'Software\\Policies\\Samba\\Unix Settings'
sections = { b'%s\\Daily Scripts' % reg_key : '.cron.daily',
b'%s\\Monthly Scripts' % reg_key : '.cron.monthly',
b'%s\\Weekly Scripts' % reg_key : '.cron.weekly',
b'%s\\Hourly Scripts' % reg_key : '.cron.hourly' }
for keyname in sections.keys():
# Stage the Registry.pol file with test data
stage = preg.file()
e = preg.entry()
e.keyname = keyname
e.valuename = b'Software\\Policies\\Samba\\Unix Settings'
e.type = 1
e.data = b'echo hello world'
stage.num_entries = 1
stage.entries = [e]
ret = stage_file(reg_pol, ndr_pack(stage))
self.assertTrue(ret, 'Could not create the target %s' % reg_pol)
# Process all gpos, with temp output directory
with TemporaryDirectory(sections[keyname]) as dname:
ext.process_group_policy([], gpos, dname)
scripts = os.listdir(dname)
self.assertEquals(len(scripts), 1,
'The %s script was not created' % keyname.decode())
out, _ = Popen([os.path.join(dname, scripts[0])], stdout=PIPE).communicate()
self.assertIn(b'hello world', out,
'%s script execution failed' % keyname.decode())
# Remove policy
gp_db = store.get_gplog(machine_creds.get_username())
del_gpos = get_deleted_gpos_list(gp_db, [])
ext.process_group_policy(del_gpos, [])
self.assertEquals(len(os.listdir(dname)), 0,
'Unapply failed to cleanup scripts')
# Unstage the Registry.pol file
unstage_file(reg_pol)
def test_gp_sudoers(self):
local_path = self.lp.cache_path('gpo_cache')
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
reg_pol = os.path.join(local_path, policies, guid,
'MACHINE/REGISTRY.POL')
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
# Initialize the group policy extension
ext = gp_sudoers_ext(logger, self.lp, machine_creds, store)
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
# Stage the Registry.pol file with test data
stage = preg.file()
e = preg.entry()
e.keyname = b'Software\\Policies\\Samba\\Unix Settings\\Sudo Rights'
e.valuename = b'Software\\Policies\\Samba\\Unix Settings'
e.type = 1
e.data = b'fakeu ALL=(ALL) NOPASSWD: ALL'
stage.num_entries = 1
stage.entries = [e]
ret = stage_file(reg_pol, ndr_pack(stage))
self.assertTrue(ret, 'Could not create the target %s' % reg_pol)
# Process all gpos, with temp output directory
with TemporaryDirectory() as dname:
ext.process_group_policy([], gpos, dname)
sudoers = os.listdir(dname)
self.assertEquals(len(sudoers), 1, 'The sudoer file was not created')
self.assertIn(e.data,
open(os.path.join(dname, sudoers[0]), 'r').read(),
'The sudoers entry was not applied')
# Remove policy
gp_db = store.get_gplog(machine_creds.get_username())
del_gpos = get_deleted_gpos_list(gp_db, [])
ext.process_group_policy(del_gpos, [])
self.assertEquals(len(os.listdir(dname)), 0,
'Unapply failed to cleanup scripts')
# Unstage the Registry.pol file
unstage_file(reg_pol)
def test_vgp_sudoers(self):
local_path = self.lp.cache_path('gpo_cache')
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
manifest = os.path.join(local_path, policies, guid, 'MACHINE',
'VGP/VTLA/SUDO/SUDOERSCONFIGURATION/MANIFEST.XML')
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
# Initialize the group policy extension
ext = vgp_sudoers_ext(logger, self.lp, machine_creds, store)
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
# Stage the manifest.xml file with test data
stage = etree.Element('vgppolicy')
policysetting = etree.Element('policysetting')
stage.append(policysetting)
version = etree.Element('version')
version.text = '1'
policysetting.append(version)
data = etree.Element('data')
sudoers_entry = etree.Element('sudoers_entry')
command = etree.Element('command')
command.text = 'ALL'
sudoers_entry.append(command)
user = etree.Element('user')
user.text = 'ALL'
sudoers_entry.append(user)
principal_list = etree.Element('listelement')
principal = etree.Element('principal')
principal.text = 'fakeu'
principal.attrib['type'] = 'user'
group = etree.Element('principal')
group.text = 'fakeg'
group.attrib['type'] = 'group'
principal_list.append(principal)
principal_list.append(group)
sudoers_entry.append(principal_list)
data.append(sudoers_entry)
# Ensure an empty principal doesn't cause a crash
sudoers_entry = etree.SubElement(data, 'sudoers_entry')
command = etree.SubElement(sudoers_entry, 'command')
command.text = 'ALL'
user = etree.SubElement(sudoers_entry, 'user')
user.text = 'ALL'
# Ensure having dispersed principals still works
sudoers_entry = etree.SubElement(data, 'sudoers_entry')
command = etree.SubElement(sudoers_entry, 'command')
command.text = 'ALL'
user = etree.SubElement(sudoers_entry, 'user')
user.text = 'ALL'
listelement = etree.SubElement(sudoers_entry, 'listelement')
principal = etree.SubElement(listelement, 'principal')
principal.text = 'fakeu2'
principal.attrib['type'] = 'user'
listelement = etree.SubElement(sudoers_entry, 'listelement')
group = etree.SubElement(listelement, 'principal')
group.text = 'fakeg2'
group.attrib['type'] = 'group'
policysetting.append(data)
ret = stage_file(manifest, etree.tostring(stage))
self.assertTrue(ret, 'Could not create the target %s' % manifest)
# Process all gpos, with temp output directory
data = 'fakeu,fakeg% ALL=(ALL) NOPASSWD: ALL'
data2 = 'fakeu2,fakeg2% ALL=(ALL) NOPASSWD: ALL'
data_no_principal = 'ALL ALL=(ALL) NOPASSWD: ALL'
with TemporaryDirectory() as dname:
ext.process_group_policy([], gpos, dname)
sudoers = os.listdir(dname)
            self.assertEquals(len(sudoers), 3, 'The sudoers files were not created')
output = open(os.path.join(dname, sudoers[0]), 'r').read() + \
open(os.path.join(dname, sudoers[1]), 'r').read() + \
open(os.path.join(dname, sudoers[2]), 'r').read()
self.assertIn(data, output,
'The sudoers entry was not applied')
self.assertIn(data2, output,
'The sudoers entry was not applied')
self.assertIn(data_no_principal, output,
'The sudoers entry was not applied')
# Remove policy
gp_db = store.get_gplog(machine_creds.get_username())
del_gpos = get_deleted_gpos_list(gp_db, [])
ext.process_group_policy(del_gpos, [])
self.assertEquals(len(os.listdir(dname)), 0,
'Unapply failed to cleanup scripts')
# Unstage the Registry.pol file
unstage_file(manifest)
def test_gp_inf_ext_utf(self):
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
ext = gp_inf_ext(logger, self.lp, machine_creds, store)
test_data = '[Kerberos Policy]\nMaxTicketAge = 99\n'
with NamedTemporaryFile() as f:
with codecs.open(f.name, 'w', 'utf-16') as w:
w.write(test_data)
try:
inf_conf = ext.read(f.name)
except UnicodeDecodeError:
self.fail('Failed to parse utf-16')
self.assertIn('Kerberos Policy', inf_conf.keys(),
'Kerberos Policy was not read from the file')
self.assertEquals(inf_conf.get('Kerberos Policy', 'MaxTicketAge'),
'99', 'MaxTicketAge was not read from the file')
with NamedTemporaryFile() as f:
with codecs.open(f.name, 'w', 'utf-8') as w:
w.write(test_data)
inf_conf = ext.read(f.name)
self.assertIn('Kerberos Policy', inf_conf.keys(),
'Kerberos Policy was not read from the file')
self.assertEquals(inf_conf.get('Kerberos Policy', 'MaxTicketAge'),
'99', 'MaxTicketAge was not read from the file')
def test_rsop(self):
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
local_path = self.lp.cache_path('gpo_cache')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
gp_extensions = []
gp_extensions.append(gp_krb_ext)
gp_extensions.append(gp_scripts_ext)
gp_extensions.append(gp_sudoers_ext)
gp_extensions.append(gp_smb_conf_ext)
gp_extensions.append(gp_msgs_ext)
# Create registry stage data
reg_pol = os.path.join(local_path, policies, '%s/MACHINE/REGISTRY.POL')
reg_stage = preg.file()
e = preg.entry()
e.keyname = b'Software\\Policies\\Samba\\Unix Settings\\Daily Scripts'
e.valuename = b'Software\\Policies\\Samba\\Unix Settings'
e.type = 1
e.data = b'echo hello world'
e2 = preg.entry()
e2.keyname = b'Software\\Policies\\Samba\\Unix Settings\\Sudo Rights'
e2.valuename = b'Software\\Policies\\Samba\\Unix Settings'
e2.type = 1
e2.data = b'fakeu ALL=(ALL) NOPASSWD: ALL'
e3 = preg.entry()
e3.keyname = 'Software\\Policies\\Samba\\smb_conf\\apply group policies'
e3.type = 4
e3.data = 1
e3.valuename = 'apply group policies'
e4 = preg.entry()
e4.keyname = b'Software\\Policies\\Samba\\Unix Settings\\Messages'
e4.valuename = b'issue'
e4.type = 1
e4.data = b'Welcome to \\s \\r \\l'
reg_stage.num_entries = 4
reg_stage.entries = [e, e2, e3, e4]
        # Create krb stage data
gpofile = os.path.join(local_path, policies, '%s/MACHINE/MICROSOFT/' \
'WINDOWS NT/SECEDIT/GPTTMPL.INF')
krb_stage = '[Kerberos Policy]\nMaxTicketAge = 99\n' \
'[System Access]\nMinimumPasswordAge = 998\n'
for g in [g for g in gpos if g.file_sys_path]:
ret = stage_file(gpofile % g.name, krb_stage)
self.assertTrue(ret, 'Could not create the target %s' %
(gpofile % g.name))
ret = stage_file(reg_pol % g.name, ndr_pack(reg_stage))
self.assertTrue(ret, 'Could not create the target %s' %
(reg_pol % g.name))
for ext in gp_extensions:
ext = ext(logger, self.lp, machine_creds, store)
ret = ext.rsop(g)
self.assertEquals(len(ret.keys()), 1,
'A single policy should have been displayed')
# Check the Security Extension
if type(ext) == gp_krb_ext:
self.assertIn('Kerberos Policy', ret.keys(),
'Kerberos Policy not found')
self.assertIn('MaxTicketAge', ret['Kerberos Policy'],
'MaxTicketAge setting not found')
self.assertEquals(ret['Kerberos Policy']['MaxTicketAge'], '99',
'MaxTicketAge was not set to 99')
# Check the Scripts Extension
elif type(ext) == gp_scripts_ext:
self.assertIn('Daily Scripts', ret.keys(),
'Daily Scripts not found')
self.assertIn('echo hello world', ret['Daily Scripts'],
'Daily script was not created')
# Check the Sudoers Extension
elif type(ext) == gp_sudoers_ext:
self.assertIn('Sudo Rights', ret.keys(),
'Sudoers not found')
self.assertIn('fakeu ALL=(ALL) NOPASSWD: ALL',
ret['Sudo Rights'],
'Sudoers policy not created')
# Check the smb.conf Extension
elif type(ext) == gp_smb_conf_ext:
self.assertIn('smb.conf', ret.keys(),
'apply group policies was not applied')
self.assertIn(e3.valuename, ret['smb.conf'],
'apply group policies was not applied')
self.assertEquals(ret['smb.conf'][e3.valuename], e3.data,
'apply group policies was not set')
# Check the Messages Extension
elif type(ext) == gp_msgs_ext:
self.assertIn('/etc/issue', ret,
'Login Prompt Message not applied')
self.assertEquals(ret['/etc/issue'], e4.data,
'Login Prompt Message not set')
unstage_file(gpofile % g.name)
unstage_file(reg_pol % g.name)
# Check that a call to gpupdate --rsop also succeeds
ret = rsop(self.lp)
self.assertEquals(ret, 0, 'gpupdate --rsop failed!')
def test_gp_unapply(self):
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
local_path = self.lp.cache_path('gpo_cache')
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
gp_extensions = []
gp_extensions.append(gp_krb_ext)
gp_extensions.append(gp_scripts_ext)
gp_extensions.append(gp_sudoers_ext)
# Create registry stage data
reg_pol = os.path.join(local_path, policies, '%s/MACHINE/REGISTRY.POL')
reg_stage = preg.file()
e = preg.entry()
e.keyname = b'Software\\Policies\\Samba\\Unix Settings\\Daily Scripts'
e.valuename = b'Software\\Policies\\Samba\\Unix Settings'
e.type = 1
e.data = b'echo hello world'
e2 = preg.entry()
e2.keyname = b'Software\\Policies\\Samba\\Unix Settings\\Sudo Rights'
e2.valuename = b'Software\\Policies\\Samba\\Unix Settings'
e2.type = 1
e2.data = b'fakeu ALL=(ALL) NOPASSWD: ALL'
reg_stage.num_entries = 2
reg_stage.entries = [e, e2]
        # Create krb stage data
gpofile = os.path.join(local_path, policies, '%s/MACHINE/MICROSOFT/' \
'WINDOWS NT/SECEDIT/GPTTMPL.INF')
krb_stage = '[Kerberos Policy]\nMaxTicketAge = 99\n'
ret = stage_file(gpofile % guid, krb_stage)
self.assertTrue(ret, 'Could not create the target %s' %
(gpofile % guid))
ret = stage_file(reg_pol % guid, ndr_pack(reg_stage))
self.assertTrue(ret, 'Could not create the target %s' %
(reg_pol % guid))
# Process all gpos, with temp output directory
remove = []
with TemporaryDirectory() as dname:
for ext in gp_extensions:
ext = ext(logger, self.lp, machine_creds, store)
if type(ext) == gp_krb_ext:
ext.process_group_policy([], gpos)
ret = store.get_int('kdc:user_ticket_lifetime')
self.assertEqual(ret, 99, 'Kerberos policy was not set')
elif type(ext) in [gp_scripts_ext, gp_sudoers_ext]:
ext.process_group_policy([], gpos, dname)
gp_db = store.get_gplog(machine_creds.get_username())
applied_settings = gp_db.get_applied_settings([guid])
for _, fname in applied_settings[-1][-1][str(ext)].items():
self.assertIn(dname, fname,
'Test file not created in tmp dir')
self.assertTrue(os.path.exists(fname),
'Test file not created')
remove.append(fname)
# Unapply policy, and ensure policies are removed
gpupdate_unapply(self.lp)
for fname in remove:
self.assertFalse(os.path.exists(fname),
'Unapply did not remove test file')
ret = store.get_int('kdc:user_ticket_lifetime')
self.assertNotEqual(ret, 99, 'Kerberos policy was not unapplied')
unstage_file(gpofile % guid)
unstage_file(reg_pol % guid)
def test_smb_conf_ext(self):
local_path = self.lp.cache_path('gpo_cache')
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
reg_pol = os.path.join(local_path, policies, guid,
'MACHINE/REGISTRY.POL')
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
entries = []
e = preg.entry()
e.keyname = 'Software\\Policies\\Samba\\smb_conf\\template homedir'
e.type = 1
e.data = '/home/samba/%D/%U'
e.valuename = 'template homedir'
entries.append(e)
e = preg.entry()
e.keyname = 'Software\\Policies\\Samba\\smb_conf\\apply group policies'
e.type = 4
e.data = 1
e.valuename = 'apply group policies'
entries.append(e)
e = preg.entry()
e.keyname = 'Software\\Policies\\Samba\\smb_conf\\ldap timeout'
e.type = 4
e.data = 9999
e.valuename = 'ldap timeout'
entries.append(e)
stage = preg.file()
stage.num_entries = len(entries)
stage.entries = entries
ret = stage_file(reg_pol, ndr_pack(stage))
self.assertTrue(ret, 'Failed to create the Registry.pol file')
with NamedTemporaryFile(suffix='_smb.conf') as f:
copyfile(self.lp.configfile, f.name)
lp = LoadParm(f.name)
# Initialize the group policy extension
ext = gp_smb_conf_ext(logger, lp, machine_creds, store)
ext.process_group_policy([], gpos)
lp = LoadParm(f.name)
template_homedir = lp.get('template homedir')
self.assertEquals(template_homedir, '/home/samba/%D/%U',
'template homedir was not applied')
apply_group_policies = lp.get('apply group policies')
self.assertTrue(apply_group_policies,
'apply group policies was not applied')
ldap_timeout = lp.get('ldap timeout')
self.assertEquals(ldap_timeout, 9999, 'ldap timeout was not applied')
# Remove policy
gp_db = store.get_gplog(machine_creds.get_username())
del_gpos = get_deleted_gpos_list(gp_db, [])
ext.process_group_policy(del_gpos, [])
lp = LoadParm(f.name)
template_homedir = lp.get('template homedir')
self.assertEquals(template_homedir, self.lp.get('template homedir'),
'template homedir was not unapplied')
apply_group_policies = lp.get('apply group policies')
self.assertEquals(apply_group_policies, self.lp.get('apply group policies'),
'apply group policies was not unapplied')
ldap_timeout = lp.get('ldap timeout')
self.assertEquals(ldap_timeout, self.lp.get('ldap timeout'),
'ldap timeout was not unapplied')
# Unstage the Registry.pol file
unstage_file(reg_pol)
def test_gp_motd(self):
local_path = self.lp.cache_path('gpo_cache')
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
reg_pol = os.path.join(local_path, policies, guid,
'MACHINE/REGISTRY.POL')
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
# Initialize the group policy extension
ext = gp_msgs_ext(logger, self.lp, machine_creds, store)
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
# Stage the Registry.pol file with test data
stage = preg.file()
e1 = preg.entry()
e1.keyname = b'Software\\Policies\\Samba\\Unix Settings\\Messages'
e1.valuename = b'motd'
e1.type = 1
e1.data = b'Have a lot of fun!'
stage.num_entries = 2
e2 = preg.entry()
e2.keyname = b'Software\\Policies\\Samba\\Unix Settings\\Messages'
e2.valuename = b'issue'
e2.type = 1
e2.data = b'Welcome to \\s \\r \\l'
stage.entries = [e1, e2]
ret = stage_file(reg_pol, ndr_pack(stage))
self.assertTrue(ret, 'Could not create the target %s' % reg_pol)
# Process all gpos, with temp output directory
with TemporaryDirectory() as dname:
ext.process_group_policy([], gpos, dname)
motd_file = os.path.join(dname, 'motd')
self.assertTrue(os.path.exists(motd_file),
'Message of the day file not created')
data = open(motd_file, 'r').read()
self.assertEquals(data, e1.data, 'Message of the day not applied')
issue_file = os.path.join(dname, 'issue')
self.assertTrue(os.path.exists(issue_file),
'Login Prompt Message file not created')
data = open(issue_file, 'r').read()
self.assertEquals(data, e2.data, 'Login Prompt Message not applied')
# Unapply policy, and ensure the test files are removed
gp_db = store.get_gplog(machine_creds.get_username())
del_gpos = get_deleted_gpos_list(gp_db, [])
ext.process_group_policy(del_gpos, [], dname)
data = open(motd_file, 'r').read()
self.assertFalse(data, 'Message of the day file not removed')
data = open(issue_file, 'r').read()
self.assertFalse(data, 'Login Prompt Message file not removed')
# Unstage the Registry.pol file
unstage_file(reg_pol)
def test_vgp_symlink(self):
local_path = self.lp.cache_path('gpo_cache')
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
manifest = os.path.join(local_path, policies, guid, 'MACHINE',
'VGP/VTLA/UNIX/SYMLINK/MANIFEST.XML')
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
# Initialize the group policy extension
ext = vgp_symlink_ext(logger, self.lp, machine_creds, store)
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
with TemporaryDirectory() as dname:
test_source = os.path.join(dname, 'test.source')
test_target = os.path.join(dname, 'test.target')
# Stage the manifest.xml file with test data
stage = etree.Element('vgppolicy')
policysetting = etree.Element('policysetting')
stage.append(policysetting)
version = etree.Element('version')
version.text = '1'
policysetting.append(version)
data = etree.Element('data')
file_properties = etree.Element('file_properties')
source = etree.Element('source')
source.text = test_source
file_properties.append(source)
target = etree.Element('target')
target.text = test_target
file_properties.append(target)
data.append(file_properties)
policysetting.append(data)
ret = stage_file(manifest, etree.tostring(stage))
self.assertTrue(ret, 'Could not create the target %s' % manifest)
# Create test source
test_source_data = 'hello world!'
with open(test_source, 'w') as w:
w.write(test_source_data)
# Process all gpos, with temp output directory
ext.process_group_policy([], gpos)
self.assertTrue(os.path.exists(test_target),
'The test symlink was not created')
self.assertTrue(os.path.islink(test_target),
'The test file is not a symlink')
self.assertIn(test_source_data, open(test_target, 'r').read(),
'Reading from symlink does not produce source data')
# Unapply the policy, ensure removal
gp_db = store.get_gplog(machine_creds.get_username())
del_gpos = get_deleted_gpos_list(gp_db, [])
ext.process_group_policy(del_gpos, [])
self.assertFalse(os.path.exists(test_target),
                             'The test symlink was not deleted')
# Verify RSOP
ret = ext.rsop([g for g in gpos if g.name == guid][0])
self.assertIn('ln -s %s %s' % (test_source, test_target),
list(ret.values())[0])
# Unstage the manifest.xml file
unstage_file(manifest)
def test_vgp_files(self):
local_path = self.lp.cache_path('gpo_cache')
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
manifest = os.path.join(local_path, policies, guid, 'MACHINE',
'VGP/VTLA/UNIX/FILES/MANIFEST.XML')
source_file = os.path.join(os.path.dirname(manifest), 'TEST.SOURCE')
source_data = '#!/bin/sh\necho hello world'
ret = stage_file(source_file, source_data)
self.assertTrue(ret, 'Could not create the target %s' % source_file)
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
# Initialize the group policy extension
ext = vgp_files_ext(logger, self.lp, machine_creds, store)
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
# Stage the manifest.xml file with test data
with TemporaryDirectory() as dname:
stage = etree.Element('vgppolicy')
policysetting = etree.Element('policysetting')
stage.append(policysetting)
version = etree.Element('version')
version.text = '1'
policysetting.append(version)
data = etree.Element('data')
file_properties = etree.SubElement(data, 'file_properties')
source = etree.SubElement(file_properties, 'source')
source.text = os.path.basename(source_file).lower()
target = etree.SubElement(file_properties, 'target')
target.text = os.path.join(dname, 'test.target')
user = etree.SubElement(file_properties, 'user')
user.text = pwd.getpwuid(os.getuid()).pw_name
group = etree.SubElement(file_properties, 'group')
group.text = grp.getgrgid(os.getgid()).gr_name
# Request permissions of 755
permissions = etree.SubElement(file_properties, 'permissions')
permissions.set('type', 'user')
etree.SubElement(permissions, 'read')
etree.SubElement(permissions, 'write')
etree.SubElement(permissions, 'execute')
permissions = etree.SubElement(file_properties, 'permissions')
permissions.set('type', 'group')
etree.SubElement(permissions, 'read')
etree.SubElement(permissions, 'execute')
permissions = etree.SubElement(file_properties, 'permissions')
permissions.set('type', 'other')
etree.SubElement(permissions, 'read')
etree.SubElement(permissions, 'execute')
policysetting.append(data)
ret = stage_file(manifest, etree.tostring(stage))
self.assertTrue(ret, 'Could not create the target %s' % manifest)
# Process all gpos, with temp output directory
ext.process_group_policy([], gpos)
self.assertTrue(os.path.exists(target.text),
'The target file does not exist')
self.assertEquals(os.stat(target.text).st_mode & 0o777, 0o755,
'The target file permissions are incorrect')
self.assertEquals(open(target.text).read(), source_data,
'The target file contents are incorrect')
# Remove policy
gp_db = store.get_gplog(machine_creds.get_username())
del_gpos = get_deleted_gpos_list(gp_db, [])
ext.process_group_policy(del_gpos, [])
self.assertFalse(os.path.exists(target.text),
'The target file was not removed')
# Test rsop
g = [g for g in gpos if g.name == guid][0]
ret = ext.rsop(g)
self.assertIn(target.text, list(ret.values())[0][0],
'The target file was not listed by rsop')
self.assertIn('-rwxr-xr-x', list(ret.values())[0][0],
'The target permissions were not listed by rsop')
# Unstage the manifest and source files
unstage_file(manifest)
unstage_file(source_file)
def test_vgp_openssh(self):
local_path = self.lp.cache_path('gpo_cache')
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
manifest = os.path.join(local_path, policies, guid, 'MACHINE',
'VGP/VTLA/SSHCFG/SSHD/MANIFEST.XML')
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
# Initialize the group policy extension
ext = vgp_openssh_ext(logger, self.lp, machine_creds, store)
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
# Stage the manifest.xml file with test data
stage = etree.Element('vgppolicy')
policysetting = etree.Element('policysetting')
stage.append(policysetting)
version = etree.Element('version')
version.text = '1'
policysetting.append(version)
data = etree.Element('data')
configfile = etree.Element('configfile')
configsection = etree.Element('configsection')
sectionname = etree.Element('sectionname')
configsection.append(sectionname)
kvpair = etree.Element('keyvaluepair')
key = etree.Element('key')
key.text = 'AddressFamily'
kvpair.append(key)
value = etree.Element('value')
value.text = 'inet6'
kvpair.append(value)
configsection.append(kvpair)
configfile.append(configsection)
data.append(configfile)
policysetting.append(data)
ret = stage_file(manifest, etree.tostring(stage))
self.assertTrue(ret, 'Could not create the target %s' % manifest)
# Process all gpos, with temp output directory
data = 'AddressFamily inet6'
with TemporaryDirectory() as dname:
ext.process_group_policy([], gpos, dname)
conf = os.listdir(dname)
self.assertEquals(len(conf), 1, 'The conf file was not created')
gp_cfg = os.path.join(dname, conf[0])
self.assertIn(data, open(gp_cfg, 'r').read(),
'The sshd_config entry was not applied')
# Remove policy
gp_db = store.get_gplog(machine_creds.get_username())
del_gpos = get_deleted_gpos_list(gp_db, [])
ext.process_group_policy(del_gpos, [], dname)
self.assertFalse(os.path.exists(gp_cfg),
'Unapply failed to cleanup config')
# Unstage the Registry.pol file
unstage_file(manifest)
def test_vgp_startup_scripts(self):
local_path = self.lp.cache_path('gpo_cache')
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
manifest = os.path.join(local_path, policies, guid, 'MACHINE',
'VGP/VTLA/UNIX/SCRIPTS/STARTUP/MANIFEST.XML')
test_script = os.path.join(os.path.dirname(manifest), 'TEST.SH')
test_data = '#!/bin/sh\necho $@ hello world'
ret = stage_file(test_script, test_data)
self.assertTrue(ret, 'Could not create the target %s' % test_script)
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
# Initialize the group policy extension
ext = vgp_startup_scripts_ext(logger, self.lp, machine_creds, store)
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
# Stage the manifest.xml file with test data
stage = etree.Element('vgppolicy')
policysetting = etree.SubElement(stage, 'policysetting')
version = etree.SubElement(policysetting, 'version')
version.text = '1'
data = etree.SubElement(policysetting, 'data')
listelement = etree.SubElement(data, 'listelement')
script = etree.SubElement(listelement, 'script')
script.text = os.path.basename(test_script).lower()
parameters = etree.SubElement(listelement, 'parameters')
parameters.text = '-n'
hash = etree.SubElement(listelement, 'hash')
hash.text = \
hashlib.md5(open(test_script, 'rb').read()).hexdigest().upper()
run_as = etree.SubElement(listelement, 'run_as')
run_as.text = 'root'
ret = stage_file(manifest, etree.tostring(stage))
self.assertTrue(ret, 'Could not create the target %s' % manifest)
# Process all gpos, with temp output directory
with TemporaryDirectory() as dname:
ext.process_group_policy([], gpos, dname)
files = os.listdir(dname)
self.assertEquals(len(files), 1,
'The target script was not created')
entry = '@reboot %s %s %s' % (run_as.text, test_script,
parameters.text)
self.assertIn(entry,
open(os.path.join(dname, files[0]), 'r').read(),
'The test entry was not found')
# Remove policy
gp_db = store.get_gplog(machine_creds.get_username())
del_gpos = get_deleted_gpos_list(gp_db, [])
ext.process_group_policy(del_gpos, [])
files = os.listdir(dname)
self.assertEquals(len(files), 0,
'The target script was not removed')
# Test rsop
g = [g for g in gpos if g.name == guid][0]
ret = ext.rsop(g)
self.assertIn(entry, list(ret.values())[0][0],
'The target entry was not listed by rsop')
# Unstage the manifest.xml and script files
unstage_file(manifest)
unstage_file(test_script)
# Stage the manifest.xml file for run once scripts
etree.SubElement(listelement, 'run_once')
run_as.text = pwd.getpwuid(os.getuid()).pw_name
ret = stage_file(manifest, etree.tostring(stage))
self.assertTrue(ret, 'Could not create the target %s' % manifest)
# Process all gpos, with temp output directory
# A run once script will be executed immediately,
# instead of creating a cron job
with TemporaryDirectory() as dname:
test_file = '%s/TESTING.txt' % dname
test_data = '#!/bin/sh\ntouch %s' % test_file
ret = stage_file(test_script, test_data)
self.assertTrue(ret, 'Could not create the target %s' % test_script)
ext.process_group_policy([], gpos, dname)
files = os.listdir(dname)
self.assertEquals(len(files), 1,
'The test file was not created')
self.assertEquals(files[0], os.path.basename(test_file),
'The test file was not created')
# Unlink the test file and ensure that processing
# policy again does not recreate it.
os.unlink(test_file)
ext.process_group_policy([], gpos, dname)
files = os.listdir(dname)
self.assertEquals(len(files), 0,
'The test file should not have been created')
# Remove policy
gp_db = store.get_gplog(machine_creds.get_username())
del_gpos = get_deleted_gpos_list(gp_db, [])
ext.process_group_policy(del_gpos, [])
# Test rsop
entry = 'Run once as: %s `%s %s`' % (run_as.text, test_script,
parameters.text)
g = [g for g in gpos if g.name == guid][0]
ret = ext.rsop(g)
self.assertIn(entry, list(ret.values())[0][0],
'The target entry was not listed by rsop')
# Unstage the manifest.xml and script files
unstage_file(manifest)
unstage_file(test_script)
def test_vgp_motd(self):
local_path = self.lp.cache_path('gpo_cache')
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
manifest = os.path.join(local_path, policies, guid, 'MACHINE',
'VGP/VTLA/UNIX/MOTD/MANIFEST.XML')
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
# Initialize the group policy extension
ext = vgp_motd_ext(logger, self.lp, machine_creds, store)
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
# Stage the manifest.xml file with test data
stage = etree.Element('vgppolicy')
policysetting = etree.SubElement(stage, 'policysetting')
version = etree.SubElement(policysetting, 'version')
version.text = '1'
data = etree.SubElement(policysetting, 'data')
filename = etree.SubElement(data, 'filename')
filename.text = 'motd'
text = etree.SubElement(data, 'text')
text.text = 'This is the message of the day'
ret = stage_file(manifest, etree.tostring(stage))
self.assertTrue(ret, 'Could not create the target %s' % manifest)
# Process all gpos, with temp output directory
with NamedTemporaryFile() as f:
ext.process_group_policy([], gpos, f.name)
self.assertEquals(open(f.name, 'r').read(), text.text,
'The motd was not applied')
# Remove policy
gp_db = store.get_gplog(machine_creds.get_username())
del_gpos = get_deleted_gpos_list(gp_db, [])
ext.process_group_policy(del_gpos, [], f.name)
self.assertNotEquals(open(f.name, 'r').read(), text.text,
'The motd was not unapplied')
# Unstage the Registry.pol file
unstage_file(manifest)
def test_vgp_issue(self):
local_path = self.lp.cache_path('gpo_cache')
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
manifest = os.path.join(local_path, policies, guid, 'MACHINE',
'VGP/VTLA/UNIX/ISSUE/MANIFEST.XML')
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
# Initialize the group policy extension
ext = vgp_issue_ext(logger, self.lp, machine_creds, store)
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
# Stage the manifest.xml file with test data
stage = etree.Element('vgppolicy')
policysetting = etree.SubElement(stage, 'policysetting')
version = etree.SubElement(policysetting, 'version')
version.text = '1'
data = etree.SubElement(policysetting, 'data')
filename = etree.SubElement(data, 'filename')
filename.text = 'issue'
text = etree.SubElement(data, 'text')
text.text = 'Welcome to Samba!'
ret = stage_file(manifest, etree.tostring(stage))
self.assertTrue(ret, 'Could not create the target %s' % manifest)
# Process all gpos, with temp output directory
with NamedTemporaryFile() as f:
ext.process_group_policy([], gpos, f.name)
self.assertEquals(open(f.name, 'r').read(), text.text,
'The issue was not applied')
# Remove policy
gp_db = store.get_gplog(machine_creds.get_username())
del_gpos = get_deleted_gpos_list(gp_db, [])
ext.process_group_policy(del_gpos, [], f.name)
self.assertNotEquals(open(f.name, 'r').read(), text.text,
'The issue was not unapplied')
# Unstage the manifest.xml file
unstage_file(manifest)
def test_vgp_access(self):
local_path = self.lp.cache_path('gpo_cache')
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
allow = os.path.join(local_path, policies, guid, 'MACHINE',
'VGP/VTLA/VAS/HOSTACCESSCONTROL/ALLOW/MANIFEST.XML')
deny = os.path.join(local_path, policies, guid, 'MACHINE',
'VGP/VTLA/VAS/HOSTACCESSCONTROL/DENY/MANIFEST.XML')
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
# Initialize the group policy extension
ext = vgp_access_ext(logger, self.lp, machine_creds, store)
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
# Stage the manifest.xml allow file
stage = etree.Element('vgppolicy')
policysetting = etree.SubElement(stage, 'policysetting')
version = etree.SubElement(policysetting, 'version')
version.text = '2'
apply_mode = etree.SubElement(policysetting, 'apply_mode')
apply_mode.text = 'merge'
data = etree.SubElement(policysetting, 'data')
# Add an allowed user
listelement = etree.SubElement(data, 'listelement')
otype = etree.SubElement(listelement, 'type')
otype.text = 'USER'
entry = etree.SubElement(listelement, 'entry')
entry.text = 'goodguy@%s' % realm
adobject = etree.SubElement(listelement, 'adobject')
name = etree.SubElement(adobject, 'name')
name.text = 'goodguy'
domain = etree.SubElement(adobject, 'domain')
domain.text = realm
otype = etree.SubElement(adobject, 'type')
otype.text = 'user'
# Add an allowed group
groupattr = etree.SubElement(data, 'groupattr')
groupattr.text = 'samAccountName'
listelement = etree.SubElement(data, 'listelement')
otype = etree.SubElement(listelement, 'type')
otype.text = 'GROUP'
entry = etree.SubElement(listelement, 'entry')
entry.text = '%s\\goodguys' % realm
dn = etree.SubElement(listelement, 'dn')
dn.text = 'CN=goodguys,CN=Users,%s' % base_dn
adobject = etree.SubElement(listelement, 'adobject')
name = etree.SubElement(adobject, 'name')
name.text = 'goodguys'
domain = etree.SubElement(adobject, 'domain')
domain.text = realm
otype = etree.SubElement(adobject, 'type')
otype.text = 'group'
ret = stage_file(allow, etree.tostring(stage))
self.assertTrue(ret, 'Could not create the target %s' % allow)
# Stage the manifest.xml deny file
stage = etree.Element('vgppolicy')
policysetting = etree.SubElement(stage, 'policysetting')
version = etree.SubElement(policysetting, 'version')
version.text = '2'
apply_mode = etree.SubElement(policysetting, 'apply_mode')
apply_mode.text = 'merge'
data = etree.SubElement(policysetting, 'data')
# Add a denied user
listelement = etree.SubElement(data, 'listelement')
otype = etree.SubElement(listelement, 'type')
otype.text = 'USER'
entry = etree.SubElement(listelement, 'entry')
entry.text = 'badguy@%s' % realm
adobject = etree.SubElement(listelement, 'adobject')
name = etree.SubElement(adobject, 'name')
name.text = 'badguy'
domain = etree.SubElement(adobject, 'domain')
domain.text = realm
otype = etree.SubElement(adobject, 'type')
otype.text = 'user'
# Add a denied group
groupattr = etree.SubElement(data, 'groupattr')
groupattr.text = 'samAccountName'
listelement = etree.SubElement(data, 'listelement')
otype = etree.SubElement(listelement, 'type')
otype.text = 'GROUP'
entry = etree.SubElement(listelement, 'entry')
entry.text = '%s\\badguys' % realm
dn = etree.SubElement(listelement, 'dn')
dn.text = 'CN=badguys,CN=Users,%s' % base_dn
adobject = etree.SubElement(listelement, 'adobject')
name = etree.SubElement(adobject, 'name')
name.text = 'badguys'
domain = etree.SubElement(adobject, 'domain')
domain.text = realm
otype = etree.SubElement(adobject, 'type')
otype.text = 'group'
ret = stage_file(deny, etree.tostring(stage))
self.assertTrue(ret, 'Could not create the target %s' % deny)
# Process all gpos, with temp output directory
with TemporaryDirectory() as dname:
ext.process_group_policy([], gpos, dname)
conf = os.listdir(dname)
self.assertEquals(len(conf), 1, 'The conf file was not created')
gp_cfg = os.path.join(dname, conf[0])
# Check the access config for the correct access.conf entries
print('Config file %s found' % gp_cfg)
data = open(gp_cfg, 'r').read()
self.assertIn('+:%s\\goodguy:ALL' % realm, data)
self.assertIn('+:%s\\goodguys:ALL' % realm, data)
self.assertIn('-:%s\\badguy:ALL' % realm, data)
self.assertIn('-:%s\\badguys:ALL' % realm, data)
# Remove policy
gp_db = store.get_gplog(machine_creds.get_username())
del_gpos = get_deleted_gpos_list(gp_db, [])
ext.process_group_policy(del_gpos, [], dname)
self.assertFalse(os.path.exists(gp_cfg),
'Unapply failed to cleanup config')
# Unstage the manifest.xml files
unstage_file(allow)
unstage_file(deny)
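# Apply the GNOME settings lockdown policy from Registry.pol and verify the
# generated dconf databases, locks and polkit rules, then confirm unapply
# removes them again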
def test_gnome_settings(self):
local_path = self.lp.cache_path('gpo_cache')
guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
reg_pol = os.path.join(local_path, policies, guid,
'MACHINE/REGISTRY.POL')
logger = logging.getLogger('gpo_tests')
cache_dir = self.lp.get('cache directory')
store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
machine_creds = Credentials()
machine_creds.guess(self.lp)
machine_creds.set_machine_account()
# Initialize the group policy extension
ext = gp_gnome_settings_ext(logger, self.lp, machine_creds, store)
ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
if ads.connect():
gpos = ads.get_gpo_list(machine_creds.get_username())
# Stage the Registry.pol file with test data
parser = GPPolParser()
parser.load_xml(etree.fromstring(gnome_test_reg_pol.strip()))
ret = stage_file(reg_pol, ndr_pack(parser.pol_file))
self.assertTrue(ret, 'Could not create the target %s' % reg_pol)
with TemporaryDirectory() as dname:
ext.process_group_policy([], gpos, dname)
local_db = os.path.join(dname, 'etc/dconf/db/local.d')
self.assertTrue(os.path.isdir(local_db),
'Local db dir not created')
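# Helper: assert a dconf keyfile matching '*-<name>' exists under local.d
# and contains the expected sections, keys and values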
def db_check(name, data, count=1):
db = glob(os.path.join(local_db, '*-%s' % name))
self.assertEquals(len(db), count, '%s not created' % name)
file_contents = ConfigParser()
file_contents.read(db)
for key in data.keys():
self.assertTrue(file_contents.has_section(key),
'Section %s not found' % key)
options = data[key]
for k, v in options.items():
v_content = file_contents.get(key, k)
self.assertEqual(v_content, v,
'%s: %s != %s' % (key, v_content, v))
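# Helper: assert no dconf keyfile matching '*-<name>' remains after unapply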
def del_db_check(name):
db = glob(os.path.join(local_db, '*-%s' % name))
self.assertEquals(len(db), 0, '%s not deleted' % name)
locks = os.path.join(local_db, 'locks')
self.assertTrue(os.path.isdir(locks), 'Locks dir not created')
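# Helper: assert a lock file matching '*<name>' exists and lists each
# locked-down dconf key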
def lock_check(name, items, count=1):
lock = glob(os.path.join(locks, '*%s' % name))
self.assertEquals(len(lock), count,
'%s lock not created' % name)
file_contents = []
for i in range(count):
file_contents.extend(open(lock[i], 'r').read().split('\n'))
for data in items:
self.assertIn(data, file_contents,
'%s lock not created' % data)
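# Helper: assert no lock file matching '*<name>' remains after unapply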
def del_lock_check(name):
lock = glob(os.path.join(locks, '*%s' % name))
self.assertEquals(len(lock), 0, '%s lock not deleted' % name)
# Check the user profile
user_profile = os.path.join(dname, 'etc/dconf/profile/user')
self.assertTrue(os.path.exists(user_profile),
'User profile not created')
# Enable the compose key
data = { 'org/gnome/desktop/input-sources':
{ 'xkb-options': '[\'compose:ralt\']' }
}
db_check('input-sources', data)
items = ['/org/gnome/desktop/input-sources/xkb-options']
lock_check('input-sources', items)
# Dim screen when user is idle
data = { 'org/gnome/settings-daemon/plugins/power':
{ 'idle-dim': 'true',
'idle-brightness': '30'
}
}
db_check('power', data)
data = { 'org/gnome/desktop/session':
{ 'idle-delay': 'uint32 300' }
}
db_check('session', data)
items = ['/org/gnome/settings-daemon/plugins/power/idle-dim',
'/org/gnome/settings-daemon/plugins/power/idle-brightness',
'/org/gnome/desktop/session/idle-delay']
lock_check('power-saving', items)
# Lock down specific settings
bg_locks = ['/org/gnome/desktop/background/picture-uri',
'/org/gnome/desktop/background/picture-options',
'/org/gnome/desktop/background/primary-color',
'/org/gnome/desktop/background/secondary-color']
lock_check('group-policy', bg_locks)
# Lock down enabled extensions
data = { 'org/gnome/shell':
{ 'enabled-extensions':
'[\'[email protected]\', \'[email protected]\']',
'development-tools': 'false' }
}
db_check('extensions', data)
items = [ '/org/gnome/shell/enabled-extensions',
'/org/gnome/shell/development-tools' ]
lock_check('extensions', items)
# Disallow login using a fingerprint
data = { 'org/gnome/login-screen':
{ 'enable-fingerprint-authentication': 'false' }
}
db_check('fingerprintreader', data)
items = ['/org/gnome/login-screen/enable-fingerprint-authentication']
lock_check('fingerprintreader', items)
# Disable user logout and user switching
data = { 'org/gnome/desktop/lockdown':
{ 'disable-log-out': 'true',
'disable-user-switching': 'true' }
}
db_check('logout', data, 2)
items = ['/org/gnome/desktop/lockdown/disable-log-out',
'/org/gnome/desktop/lockdown/disable-user-switching']
lock_check('logout', items, 2)
# Disable repartitioning
actions = os.path.join(dname, 'etc/share/polkit-1/actions')
udisk2 = glob(os.path.join(actions,
'org.freedesktop.[u|U][d|D]isks2.policy'))
self.assertEquals(len(udisk2), 1, 'udisk2 policy not created')
udisk2_tree = etree.fromstring(open(udisk2[0], 'r').read())
actions = udisk2_tree.findall('action')
md = 'org.freedesktop.udisks2.modify-device'
action = [a for a in actions if a.attrib['id'] == md]
self.assertEquals(len(action), 1, 'modify-device not found')
defaults = action[0].find('defaults')
self.assertTrue(defaults is not None,
'modify-device defaults not found')
allow_any = defaults.find('allow_any').text
self.assertEquals(allow_any, 'no',
'modify-device allow_any not set to no')
allow_inactive = defaults.find('allow_inactive').text
self.assertEquals(allow_inactive, 'no',
'modify-device allow_inactive not set to no')
allow_active = defaults.find('allow_active').text
self.assertEquals(allow_active, 'yes',
'modify-device allow_active not set to yes')
# Disable printing
data = { 'org/gnome/desktop/lockdown':
{ 'disable-printing': 'true' }
}
db_check('printing', data)
items = ['/org/gnome/desktop/lockdown/disable-printing']
lock_check('printing', items)
# Disable file saving
data = { 'org/gnome/desktop/lockdown':
{ 'disable-save-to-disk': 'true' }
}
db_check('filesaving', data)
items = ['/org/gnome/desktop/lockdown/disable-save-to-disk']
lock_check('filesaving', items)
# Disable command-line access
data = { 'org/gnome/desktop/lockdown':
{ 'disable-command-line': 'true' }
}
db_check('cmdline', data)
items = ['/org/gnome/desktop/lockdown/disable-command-line']
lock_check('cmdline', items)
# Allow or disallow online accounts
data = { 'org/gnome/online-accounts':
{ 'whitelisted-providers': '[\'google\']' }
}
db_check('goa', data)
items = ['/org/gnome/online-accounts/whitelisted-providers']
lock_check('goa', items)
# Verify RSOP does not fail
ext.rsop([g for g in gpos if g.name == guid][0])
# Remove policy
gp_db = store.get_gplog(machine_creds.get_username())
del_gpos = get_deleted_gpos_list(gp_db, [])
ext.process_group_policy(del_gpos, [], dname)
del_db_check('input-sources')
del_lock_check('input-sources')
del_db_check('power')
del_db_check('session')
del_lock_check('power-saving')
del_lock_check('group-policy')
del_db_check('extensions')
del_lock_check('extensions')
del_db_check('fingerprintreader')
del_lock_check('fingerprintreader')
del_db_check('logout')
del_lock_check('logout')
actions = os.path.join(dname, 'etc/share/polkit-1/actions')
udisk2 = glob(os.path.join(actions,
'org.freedesktop.[u|U][d|D]isks2.policy'))
self.assertEquals(len(udisk2), 0, 'udisk2 policy not deleted')
del_db_check('printing')
del_lock_check('printing')
del_db_check('filesaving')
del_lock_check('filesaving')
del_db_check('cmdline')
del_lock_check('cmdline')
del_db_check('goa')
del_lock_check('goa')
# Unstage the Registry.pol file
unstage_file(reg_pol)
| gpl-3.0 | -4,109,212,890,846,621,000 | 43.150913 | 96 | 0.578379 | false | 3.672504 | true | false | false |
Motwani/firefox-ui-tests | firefox_ui_tests/functional/security/test_safe_browsing_notification.py | 3 | 6433 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import time
from marionette_driver import By, expected, Wait
from firefox_puppeteer.testcases import FirefoxTestCase
class TestSafeBrowsingNotificationBar(FirefoxTestCase):
def setUp(self):
FirefoxTestCase.setUp(self)
self.test_data = [
# Unwanted software URL
{
# First two properties are not needed,
# since these errors are not reported
'button_property': None,
'report_page': None,
'unsafe_page': 'https://www.itisatrap.org/firefox/unwanted.html'
},
# Phishing URL
{
'button_property': 'safebrowsing.notAForgeryButton.label',
'report_page': 'www.google.com/safebrowsing/report_error',
'unsafe_page': 'https://www.itisatrap.org/firefox/its-a-trap.html'
},
# Malware URL
{
'button_property': 'safebrowsing.notAnAttackButton.label',
'report_page': 'www.stopbadware.org',
'unsafe_page': 'https://www.itisatrap.org/firefox/its-an-attack.html'
}
]
self.prefs.set_pref('browser.safebrowsing.enabled', True)
self.prefs.set_pref('browser.safebrowsing.malware.enabled', True)
# Give the browser a little time, because SafeBrowsing.jsm takes a while
# between start up and adding the example urls to the db.
# hg.mozilla.org/mozilla-central/file/46aebcd9481e/browser/base/content/browser.js#l1194
time.sleep(3)
# TODO: Bug 1139544: While we don't have a reliable way to close the safe browsing
# notification bar when a test fails, run this test in a new tab.
self.browser.tabbar.open_tab()
def tearDown(self):
try:
self.utils.remove_perms('https://www.itisatrap.org', 'safe-browsing')
self.browser.tabbar.close_all_tabs([self.browser.tabbar.tabs[0]])
finally:
FirefoxTestCase.tearDown(self)
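# Visit each unsafe page and exercise the 'ignore warning' link together with
# the notification bar's 'not badware', 'get me out of here' and close buttons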
def test_notification_bar(self):
with self.marionette.using_context('content'):
for item in self.test_data:
button_property = item['button_property']
report_page, unsafe_page = item['report_page'], item['unsafe_page']
# Navigate to the unsafe page
# Check "ignore warning" link then notification bar's "not badware" button
# Only do this if feature supports it
if button_property is not None:
self.marionette.navigate(unsafe_page)
# Wait for the DOM to receive events for about:blocked
time.sleep(1)
self.check_ignore_warning_button(unsafe_page)
self.check_not_badware_button(button_property, report_page)
# Return to the unsafe page
# Check "ignore warning" link then notification bar's "get me out" button
self.marionette.navigate(unsafe_page)
# Wait for the DOM to receive events for about:blocked
time.sleep(1)
self.check_ignore_warning_button(unsafe_page)
self.check_get_me_out_of_here_button()
# Return to the unsafe page
# Check "ignore warning" link then notification bar's "X" button
self.marionette.navigate(unsafe_page)
# Wait for the DOM to receive events for about:blocked
time.sleep(1)
self.check_ignore_warning_button(unsafe_page)
self.check_x_button()
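# Click the 'ignore warning' link on the about:blocked page, verify the unsafe
# page loads, then drop the safe-browsing permission that gets set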
def check_ignore_warning_button(self, unsafe_page):
button = self.marionette.find_element(By.ID, 'ignoreWarningButton')
button.click()
Wait(self.marionette, timeout=self.browser.timeout_page_load).until(
expected.element_present(By.ID, 'main-feature'))
self.assertEquals(self.marionette.get_url(), self.browser.get_final_url(unsafe_page))
# Clean up here since the permission gets set in this function
self.utils.remove_perms('https://www.itisatrap.org', 'safe-browsing')
# Check the not a forgery or attack button in the notification bar
def check_not_badware_button(self, button_property, report_page):
with self.marionette.using_context('chrome'):
# TODO: update to use safe browsing notification bar class when bug 1139544 lands
label = self.browser.get_property(button_property)
button = (self.marionette.find_element(By.ID, 'content')
.find_element('anon attribute', {'label': label}))
self.browser.tabbar.open_tab(lambda _: button.click())
Wait(self.marionette, timeout=self.browser.timeout_page_load).until(
lambda mn: report_page in mn.get_url())
with self.marionette.using_context('chrome'):
self.browser.tabbar.close_tab()
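# Click the notification bar's 'get me out of here' button and verify the
# browser returns to the default home page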
def check_get_me_out_of_here_button(self):
with self.marionette.using_context('chrome'):
# TODO: update to use safe browsing notification bar class when bug 1139544 lands
label = self.browser.get_property('safebrowsing.getMeOutOfHereButton.label')
button = (self.marionette.find_element(By.ID, 'content')
.find_element('anon attribute', {'label': label}))
button.click()
Wait(self.marionette, timeout=self.browser.timeout_page_load).until(
lambda mn: self.browser.default_homepage in mn.get_url())
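# Dismiss the notification bar via its close ('X') button and wait for the
# button to become stale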
def check_x_button(self):
with self.marionette.using_context('chrome'):
# TODO: update to use safe browsing notification bar class when bug 1139544 lands
button = (self.marionette.find_element(By.ID, 'content')
.find_element('anon attribute', {'value': 'blocked-badware-page'})
.find_element('anon attribute',
{'class': 'messageCloseButton close-icon tabbable'}))
button.click()
Wait(self.marionette, timeout=self.browser.timeout_page_load).until(
expected.element_stale(button))
| mpl-2.0 | 7,661,383,468,471,206,000 | 45.615942 | 96 | 0.609358 | false | 3.988221 | true | false | false |